Introduction
We will build a ZFS NAS on Ubuntu Server 24.04 with a 4-disk RAIDz2 pool. The OS boots from an NVMe drive, and the fifth SATA disk is kept out of the pool as a standalone ext4 volume for non-critical data.
Inventory
frank@freedom:~$ sudo blkid | sort
/dev/mapper/ubuntu--vg-ubuntu--lv: UUID="5442358b-315d-4421-9dd2-ae496eb7e7b5" BLOCK_SIZE="4096" TYPE="ext4"
/dev/nvme0n1p1: PARTUUID="02e93dbd-4336-46fc-aad5-da96c66acfdc"
/dev/nvme0n1p2: UUID="e8663ac1-418b-40b7-8d9b-4542ceb1de9e" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="ccf2f51b-92f0-4d53-8709-89b7d6f675a7"
/dev/nvme0n1p3: UUID="nssNYn-vKkp-KKpf-YMIk-erQm-AjMq-Y38fhi" TYPE="LVM2_member" PARTUUID="84a0b85d-6e54-4fa6-b5f8-14446cc528e3"
/dev/sda: UUID="LSI ^P" TYPE="ddf_raid_member"
/dev/sdb: UUID="LSI ^P" TYPE="ddf_raid_member"
/dev/sdc: UUID="LSI ^P" TYPE="ddf_raid_member"
/dev/sdd: UUID="LSI ^P" TYPE="ddf_raid_member"
/dev/sde: UUID="LSI ^P" TYPE="ddf_raid_member"
frank@freedom:~$ ls -l /dev/disk/by-id/
total 0
lrwxrwxrwx 1 root root 10 Dec 25 06:18 dm-name-ubuntu--vg-ubuntu--lv -> ../../dm-0
lrwxrwxrwx 1 root root 10 Dec 25 06:18 dm-uuid-LVM-5IgekFOo3RjG3sR7f6xXu2pLQIp3JbpjcXYCyYBEgH4296Y30a3b5fUC2oBDEik9 -> ../../dm-0
lrwxrwxrwx 1 root root 15 Dec 25 06:18 lvm-pv-uuid-nssNYn-vKkp-KKpf-YMIk-erQm-AjMq-Y38fhi -> ../../nvme0n1p3
lrwxrwxrwx 1 root root 11 Dec 25 06:18 md-uuid-0f3e7b02:e5c5ac7f:b20c5540:f6758be2 -> ../../md127
lrwxrwxrwx 1 root root 11 Dec 25 06:18 md-uuid-41568cc8:eaa1b9a4:c5c415e4:904c4f32 -> ../../md121
lrwxrwxrwx 1 root root 11 Dec 25 06:18 md-uuid-707206f9:baa694a2:5997ce1b:d0b67bf5 -> ../../md125
lrwxrwxrwx 1 root root 11 Dec 25 06:18 md-uuid-a4cb4b16:86452d25:730b45bd:46042351 -> ../../md123
lrwxrwxrwx 1 root root 13 Dec 25 06:18 nvme-nvme.10ec-503330305a434342323530393031303830-50617472696f74204d2e32205033303020313032344742-00000001 -> ../../nvme0n1
lrwxrwxrwx 1 root root 15 Dec 25 06:18 nvme-nvme.10ec-503330305a434342323530393031303830-50617472696f74204d2e32205033303020313032344742-00000001-part1 -> ../../nvme0n1p1
lrwxrwxrwx 1 root root 15 Dec 25 06:18 nvme-nvme.10ec-503330305a434342323530393031303830-50617472696f74204d2e32205033303020313032344742-00000001-part2 -> ../../nvme0n1p2
lrwxrwxrwx 1 root root 15 Dec 25 06:18 nvme-nvme.10ec-503330305a434342323530393031303830-50617472696f74204d2e32205033303020313032344742-00000001-part3 -> ../../nvme0n1p3
lrwxrwxrwx 1 root root 13 Dec 25 06:18 nvme-Patriot_M.2_P300_1024GB_P300ZCCB250901080 -> ../../nvme0n1
lrwxrwxrwx 1 root root 13 Dec 25 06:18 nvme-Patriot_M.2_P300_1024GB_P300ZCCB250901080_1 -> ../../nvme0n1
lrwxrwxrwx 1 root root 15 Dec 25 06:18 nvme-Patriot_M.2_P300_1024GB_P300ZCCB250901080_1-part1 -> ../../nvme0n1p1
lrwxrwxrwx 1 root root 15 Dec 25 06:18 nvme-Patriot_M.2_P300_1024GB_P300ZCCB250901080_1-part2 -> ../../nvme0n1p2
lrwxrwxrwx 1 root root 15 Dec 25 06:18 nvme-Patriot_M.2_P300_1024GB_P300ZCCB250901080_1-part3 -> ../../nvme0n1p3
lrwxrwxrwx 1 root root 15 Dec 25 06:18 nvme-Patriot_M.2_P300_1024GB_P300ZCCB250901080-part1 -> ../../nvme0n1p1
lrwxrwxrwx 1 root root 15 Dec 25 06:18 nvme-Patriot_M.2_P300_1024GB_P300ZCCB250901080-part2 -> ../../nvme0n1p2
lrwxrwxrwx 1 root root 15 Dec 25 06:18 nvme-Patriot_M.2_P300_1024GB_P300ZCCB250901080-part3 -> ../../nvme0n1p3
lrwxrwxrwx 1 root root 9 Dec 25 06:18 scsi-35000cca295e2aaab -> ../../sdd
lrwxrwxrwx 1 root root 9 Dec 25 06:18 scsi-35000cca295e92753 -> ../../sdb
lrwxrwxrwx 1 root root 9 Dec 25 06:18 scsi-35000cca2a1f79918 -> ../../sdc
lrwxrwxrwx 1 root root 9 Dec 25 06:18 scsi-35000cca2a1f7a690 -> ../../sda
lrwxrwxrwx 1 root root 9 Dec 25 06:18 scsi-35000cca2c1c0bbc8 -> ../../sde
lrwxrwxrwx 1 root root 9 Dec 25 06:18 scsi-SATA_WUH721816ALE6L4_2BJG83LP -> ../../sdd
lrwxrwxrwx 1 root root 9 Dec 25 06:18 scsi-SATA_WUH721816ALE6L4_2BJXJHDD -> ../../sdb
lrwxrwxrwx 1 root root 9 Dec 25 06:18 scsi-SATA_WUH721816ALE6L4_2CKY9JUT -> ../../sdc
lrwxrwxrwx 1 root root 9 Dec 25 06:18 scsi-SATA_WUH721816ALE6L4_2CKYE41T -> ../../sda
lrwxrwxrwx 1 root root 9 Dec 25 06:18 scsi-SATA_WUH721816ALE6L4_2PG1M0RT -> ../../sde
lrwxrwxrwx 1 root root 9 Dec 25 06:18 wwn-0x5000cca295e2aaab -> ../../sdd
lrwxrwxrwx 1 root root 9 Dec 25 06:18 wwn-0x5000cca295e92753 -> ../../sdb
lrwxrwxrwx 1 root root 9 Dec 25 06:18 wwn-0x5000cca2a1f79918 -> ../../sdc
lrwxrwxrwx 1 root root 9 Dec 25 06:18 wwn-0x5000cca2a1f7a690 -> ../../sda
lrwxrwxrwx 1 root root 9 Dec 25 06:18 wwn-0x5000cca2c1c0bbc8 -> ../../sde
Role allocation
sda - data - scsi-SATA_WUH721816ALE6L4_2CKYE41T
sdb - zfs - scsi-SATA_WUH721816ALE6L4_2BJXJHDD
sdc - zfs - scsi-SATA_WUH721816ALE6L4_2CKY9JUT
sdd - zfs - scsi-SATA_WUH721816ALE6L4_2BJG83LP
sde - zfs - scsi-SATA_WUH721816ALE6L4_2PG1M0RT
Remove existing mdadm RAID
frank@freedom:~$ cat /proc/mdstat
Personalities : [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md120 : inactive sdd[0]
15625355264 blocks super external:/md121/1
md121 : inactive sdd[0](S)
524288 blocks super external:ddf
md122 : inactive sdb[0]
15625355264 blocks super external:/md123/1
md123 : inactive sdb[0](S)
524288 blocks super external:ddf
md124 : inactive sde[0]
15625355264 blocks super external:/md125/1
md125 : inactive sde[0](S)
524288 blocks super external:ddf
md126 : inactive sda[1] sdc[0]
31250710528 blocks super external:/md127/1
md127 : inactive sda[1](S) sdc[0](S)
1048576 blocks super external:ddf
unused devices: <none>
frank@freedom:~$ sudo mdadm --stop /dev/md120
mdadm: stopped /dev/md120
frank@freedom:~$ sudo mdadm --stop /dev/md121
mdadm: stopped /dev/md121
frank@freedom:~$ sudo mdadm --stop /dev/md122
mdadm: stopped /dev/md122
frank@freedom:~$ sudo mdadm --stop /dev/md123
mdadm: stopped /dev/md123
frank@freedom:~$ sudo mdadm --stop /dev/md124
mdadm: stopped /dev/md124
frank@freedom:~$ sudo mdadm --stop /dev/md125
mdadm: stopped /dev/md125
frank@freedom:~$ sudo mdadm --stop /dev/md126
mdadm: stopped /dev/md126
frank@freedom:~$ sudo mdadm --stop /dev/md127
mdadm: stopped /dev/md127
Prevent mdadm from reassembling automatically
sudo mdadm --zero-superblock /dev/sda
sudo mdadm --zero-superblock /dev/sdb
sudo mdadm --zero-superblock /dev/sdc
sudo mdadm --zero-superblock /dev/sdd
sudo mdadm --zero-superblock /dev/sde
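Zeroing the superblocks removes the DDF metadata, so udev has nothing left to auto-assemble. If /etc/mdadm/mdadm.conf also lists the old arrays, comment those lines out and refresh the initramfs so the early-boot environment forgets them too (a sketch, assuming the default Debian/Ubuntu paths):
# comment out any ARRAY lines left over from the old setup
sudo sed -i 's/^ARRAY/#ARRAY/' /etc/mdadm/mdadm.conf
# rebuild the initramfs so it picks up the change
sudo update-initramfs -u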
Wipe disks
sudo wipefs -a /dev/disk/by-id/scsi-SATA_WUH721816ALE6L4_2CKYE41T
sudo wipefs -a /dev/disk/by-id/scsi-SATA_WUH721816ALE6L4_2BJXJHDD
sudo wipefs -a /dev/disk/by-id/scsi-SATA_WUH721816ALE6L4_2CKY9JUT
sudo wipefs -a /dev/disk/by-id/scsi-SATA_WUH721816ALE6L4_2BJG83LP
sudo wipefs -a /dev/disk/by-id/scsi-SATA_WUH721816ALE6L4_2PG1M0RT
Wipe the same devices again by kernel name (sanity check)
sudo wipefs -a /dev/sda
sudo wipefs -a /dev/sdb
sudo wipefs -a /dev/sdc
sudo wipefs -a /dev/sdd
sudo wipefs -a /dev/sde
Clear GPT/MBR
sudo sgdisk --zap-all /dev/sdb /dev/sdc /dev/sdd /dev/sde /dev/sda
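To double-check that nothing survived, run wipefs without flags; it only lists remaining signatures, so empty output means the disks are clean:
sudo wipefs /dev/sda /dev/sdb /dev/sdc /dev/sdd /dev/sde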
Create RAIDz2
sudo zpool create \
-o ashift=12 \
tank \
raidz2 \
/dev/disk/by-id/scsi-SATA_WUH721816ALE6L4_2BJXJHDD \
/dev/disk/by-id/scsi-SATA_WUH721816ALE6L4_2CKY9JUT \
/dev/disk/by-id/scsi-SATA_WUH721816ALE6L4_2BJG83LP \
/dev/disk/by-id/scsi-SATA_WUH721816ALE6L4_2PG1M0RT
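ashift=12 fixes the pool's minimum block size at 4 KiB, matching the physical sector size of these drives, and it cannot be changed after creation, so it's worth confirming it took effect:
zpool get ashift tank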
frank@freedom:~$ zpool status
pool: tank
state: ONLINE
config:
NAME STATE READ WRITE CKSUM
tank ONLINE 0 0 0
raidz2-0 ONLINE 0 0 0
scsi-SATA_WUH721816ALE6L4_2BJXJHDD ONLINE 0 0 0
scsi-SATA_WUH721816ALE6L4_2CKY9JUT ONLINE 0 0 0
scsi-SATA_WUH721816ALE6L4_2BJG83LP ONLINE 0 0 0
scsi-SATA_WUH721816ALE6L4_2PG1M0RT ONLINE 0 0 0
errors: No known data errors
Apply recommended zfs settings
sudo zfs set compression=lz4 tank
sudo zfs set atime=off tank
sudo zfs set xattr=sa tank
sudo zfs set acltype=posixacl tank
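These properties are set on the pool root and are inherited by every dataset created beneath it; confirm with:
zfs get compression,atime,xattr,acltype tank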
Create datasets
sudo zfs create tank/data
sudo zfs create tank/backups
frank@freedom:~$ zfs list
NAME USED AVAIL REFER MOUNTPOINT
tank 1.07M 28.1T 140K /tank
tank/backups 140K 28.1T 140K /tank/backups
tank/data 140K 28.1T 140K /tank/data
frank@freedom:~$ mount | grep tank
tank on /tank type zfs (rw,noatime,xattr,posixacl,casesensitive)
tank/data on /tank/data type zfs (rw,noatime,xattr,posixacl,casesensitive)
tank/backups on /tank/backups type zfs (rw,noatime,xattr,posixacl,casesensitive)
Set up sda as data disk
sudo mkfs.ext4 /dev/sda
sudo mkdir -p /mnt/data
sudo mount /dev/sda /mnt/data
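A quick sanity check that the filesystem was created and mounted:
lsblk -f /dev/sda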
Persist across reboot
sudo blkid /dev/sda
# add to /etc/fstab
UUID=c4aacb7a-2bbc-42b7-9377-e5601b665386 /mnt/data ext4 defaults,noatime 0 2
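Before relying on a reboot, validate the new entry; findmnt --verify lints fstab, and remounting everything catches typos immediately:
sudo findmnt --verify
sudo umount /mnt/data
sudo mount -a
df -h /mnt/data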
Verify disks
frank@freedom:~$ lsblk; zpool status; zfs list; df -h
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 14.6T 0 disk /mnt/data
sdb 8:16 0 14.6T 0 disk
├─sdb1 8:17 0 14.6T 0 part
└─sdb9 8:25 0 8M 0 part
sdc 8:32 0 14.6T 0 disk
├─sdc1 8:33 0 14.6T 0 part
└─sdc9 8:41 0 8M 0 part
sdd 8:48 0 14.6T 0 disk
├─sdd1 8:49 0 14.6T 0 part
└─sdd9 8:57 0 8M 0 part
sde 8:64 0 14.6T 0 disk
├─sde1 8:65 0 14.6T 0 part
└─sde9 8:73 0 8M 0 part
nvme0n1 259:0 0 953.9G 0 disk
├─nvme0n1p1 259:1 0 1M 0 part
├─nvme0n1p2 259:2 0 2G 0 part /boot
└─nvme0n1p3 259:3 0 951.9G 0 part
└─ubuntu--vg-ubuntu--lv 252:0 0 100G 0 lvm /
pool: tank
state: ONLINE
config:
NAME STATE READ WRITE CKSUM
tank ONLINE 0 0 0
raidz2-0 ONLINE 0 0 0
scsi-SATA_WUH721816ALE6L4_2BJXJHDD ONLINE 0 0 0
scsi-SATA_WUH721816ALE6L4_2CKY9JUT ONLINE 0 0 0
scsi-SATA_WUH721816ALE6L4_2BJG83LP ONLINE 0 0 0
scsi-SATA_WUH721816ALE6L4_2PG1M0RT ONLINE 0 0 0
errors: No known data errors
NAME USED AVAIL REFER MOUNTPOINT
tank 1.06M 28.1T 140K /tank
tank/backups 140K 28.1T 140K /tank/backups
tank/data 140K 28.1T 140K /tank/data
Filesystem Size Used Avail Use% Mounted on
tmpfs 4.7G 1.7M 4.7G 1% /run
/dev/mapper/ubuntu--vg-ubuntu--lv 98G 12G 82G 13% /
tmpfs 24G 0 24G 0% /dev/shm
tmpfs 5.0M 0 5.0M 0% /run/lock
/dev/nvme0n1p2 2.0G 197M 1.6G 11% /boot
tmpfs 4.7G 12K 4.7G 1% /run/user/1000
tank 29T 256K 29T 1% /tank
tank/data 29T 256K 29T 1% /tank/data
tank/backups 29T 256K 29T 1% /tank/backups
/dev/sda 15T 28K 14T 1% /mnt/data
Set up maintenance
Scrub and install SMART
sudo zpool scrub tank
sudo apt install smartmontools
Set up monthly scrub
sudo vi /etc/systemd/system/zfs-scrub@.service
[Unit]
Description=ZFS scrub on %i
Documentation=man:zpool(8)
Requires=zfs.target
After=zfs.target
[Service]
Type=oneshot
ExecStart=/sbin/zpool scrub %i
sudo vi /etc/systemd/system/zfs-scrub@.timer
[Unit]
Description=Monthly ZFS scrub on %i
[Timer]
OnCalendar=monthly
Persistent=true
[Install]
WantedBy=timers.target
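Both units are templates: the %i instance name carries the pool name, so enabling zfs-scrub@tank.timer ends up running zpool scrub tank once a month.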
sudo systemctl daemon-reexec
sudo systemctl daemon-reload
sudo systemctl enable --now zfs-scrub@tank.timer
systemctl list-timers | grep zfs-scrub
# Perform manual scrub
sudo zpool scrub tank
zpool status
Set up SMART monitoring
sudo vi /etc/smartd.conf
# only line needed: -a monitors all attributes, -o on and -S on enable automatic
# offline data collection and attribute autosave, -n standby skips disks that are
# spun down, and the -s regex schedules a short self-test daily at 02:00 and a
# long self-test every Saturday at 03:00
DEVICESCAN -a -o on -S on -n standby -s (S/../.././02|L/../../6/03)
sudo systemctl enable --now smartmontools.service
journalctl -u smartmontools.service --no-pager | tail -n 50
# trigger a one-shot smartd check
sudo smartd -q onecheck
SMART Scripts
$ ls *test*.sh
smart_test_long.sh  smart_test_progress.sh  smart_test_results.sh  smart_test_short.sh
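# smart_test_short.sh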
#!/bin/bash
for d in sda sdb sdc sdd sde; do
echo "Starting short test on /dev/$d"
sudo smartctl -t short /dev/$d
done
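# smart_test_long.sh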
#!/bin/bash
for d in sda sdb sdc sdd sde; do
echo "Starting long test on /dev/$d"
sudo smartctl -t long /dev/$d
done
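# smart_test_results.sh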
#!/bin/bash
for d in sdb sdc sdd sde; do
echo "==== /dev/$d ===="
sudo smartctl -H /dev/$d
done
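# smart_test_progress.sh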
#!/bin/bash
for d in sda sdb sdc sdd sde; do
echo "- Progress for /dev/$d -"
sudo smartctl -c /dev/$d | grep -A1 "Self-test execution status"
done
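Typical flow (the scripts need to be executable first): kick off the short tests, re-run the progress script until they complete, then check overall health:
chmod +x smart_test_*.sh
./smart_test_short.sh
./smart_test_progress.sh
./smart_test_results.sh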
Set up SMB
sudo apt update
sudo apt install samba
sudo adduser frank # skip if the system user already exists
sudo smbpasswd -a frank
sudo smbpasswd -e frank
sudo vi /etc/samba/smb.conf
...
# Apple Time Machine
server min protocol = SMB2
fruit:aapl = yes
fruit:model = MacSamba
fruit:time machine = yes
[tank-data]
path = /tank/data
browseable = yes
read only = no
valid users = @smbusers
force group = smbusers
create mask = 0664
directory mask = 2775
[backups]
path = /tank/backups
browseable = yes
read only = no
valid users = @smbusers
force group = smbusers
create mask = 0664
directory mask = 2775
[timemachine]
path = /tank/timemachine
browseable = yes
read only = no
valid users = @smbusers
force group = smbusers
create mask = 0664
directory mask = 2775
# Apple Time Machine flags
fruit:time machine = yes
fruit:time machine max size = 8T
# Recommended
vfs objects = catia fruit streams_xattr
inherit permissions = yes
valid users = frank
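Note that the [timemachine] share points at /tank/timemachine, which was never created above; a minimal sketch matching the ownership scheme of the other shares:
sudo zfs create tank/timemachine
sudo chown -R frank:smbusers /tank/timemachine
sudo chmod -R 2775 /tank/timemachine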
sudo systemctl restart smbd nmbd avahi-daemon
Permissions
sudo groupadd smbusers
getent group smbusers
sudo usermod -aG smbusers frank
# log out/log in
id frank
sudo chown -R frank:smbusers /tank/data
sudo chmod -R 2775 /tank/data
sudo chown -R frank:smbusers /tank/backups
sudo chmod -R 2775 /tank/backups
What this does:
- rwx for owner (frank)
- rwx for group (smbusers)
- r-x for others
- setgid bit (the leading 2) → new files inherit the smbusers group
Confirm
ls -ld /tank/data
ZFS-side permissions
sudo zfs set acltype=posixacl tank/data
sudo zfs set xattr=sa tank/data
zfs get acltype,xattr tank/data
Test write
touch /tank/data/testfile
ls -l /tank/data/testfile
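From another machine, the shares can be listed to confirm Samba is serving them (freedom is this host's name; use its IP instead if name resolution isn't set up):
smbclient -L //freedom -U frank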
TODO
- Set up disk alerts
- Set up backups
- Node exporter for Prometheus