- Ideally, follow the steps in "Build a custom virt kernel and ISO with ZFS", and use that install to avoid using unnecessary space and building an unnecessarily large kernel (RAM usage). This produces a system that installs in under 90MB.
- Boot from Alpine ISO. Login as root (no password). Run setup-alpine to set up network/package repositories; hit Ctrl+C when you get to the disk prompt.
- To set up the network with DHCP and the first mirror, run the following. Or manually use setup-interfaces -r and setup-apkrepos to set something more specific. Optionally, set a root password, and use setup-sshd to set up remote console access.
# Bring up networking: -r restarts networking, -a uses DHCP on all interfaces
setup-interfaces -ra
# -1 picks the first mirror, -c enables the community repository
setup-apkrepos -1c
# Set a root password ('9' is just an example -- use a real one)
echo 'root:9' | chpasswd
setup-sshd openssh
# Temporarily allow root ssh login for the install. NOTE: sshd_config option
# *arguments* are case-sensitive -- "Yes" is rejected; it must be lowercase.
echo "PermitRootLogin yes" >> /etc/ssh/sshd_config
service sshd restart
- Install packages for zfs
# zfs: userland tools + module package; sfdisk/e2fsprogs/syslinux for
# partitioning, the /boot filesystem, and the BIOS bootloader
apk add zfs sfdisk e2fsprogs syslinux
Confirm the disk you want to use; run sfdisk -l to see a list. /dev/sda is assumed for the rest of the steps — replace it as needed.
Then we will use sfdisk to create a boot partition for syslinux, create device nodes, and create the boot file system (the boot file system could be FAT also, but will be ext4 here).
# MBR ("dos") label: 100MB bootable /boot partition, the rest for ZFS
echo -e "/dev/sda1: start=1M,size=100M,bootable\n/dev/sda2: start=101M" | sfdisk --quiet --label dos /dev/sda
# (re)scan /sys and create device nodes for the new partitions
mdev -s
mkfs.ext4 /dev/sda1
modprobe zfs
# ashift=12: assume 4K physical sectors; xattr=sa + posixacl are the usual
# Linux settings; -R /mnt sets an altroot so the pool mounts under /mnt
# during installation only
zpool create -f -o ashift=12 -O acltype=posixacl -O canmount=off -O compression=lz4 -O dnodesize=auto -O xattr=sa -O mountpoint=/ -R /mnt rpool /dev/sda2
or with encryption:
# Same as above plus native ZFS encryption; you will be prompted for a
# passphrase at pool creation and at every import (i.e. every boot)
zpool create -f -o ashift=12 -O acltype=posixacl -O canmount=off -O compression=lz4 -O dnodesize=auto -O xattr=sa -O encryption=aes-256-gcm -O keylocation=prompt -O keyformat=passphrase -O mountpoint=/ -R /mnt rpool /dev/sda2
Check the pool, create the required datasets, mount root, mount /boot, enable the ZFS services, and install Alpine:
zpool status
# container dataset: never mounted itself
zfs create -o mountpoint=none -o canmount=off rpool/ROOT
# root filesystem; "legacy" means it is mounted via mount(8)/fstab rather
# than by the zfs automounter
zfs create -o mountpoint=legacy rpool/ROOT/alpine
mount -t zfs rpool/ROOT/alpine /mnt/
mkdir -p /mnt/boot/
mount -t ext4 /dev/sda1 /mnt/boot/
# import the pool and mount its datasets early in boot
rc-update add zfs-import sysinit
rc-update add zfs-mount sysinit
# install Alpine onto the mounted target
setup-disk /mnt
dd if=/usr/share/syslinux/mbr.bin of=/dev/sda # write mbr so we can boot
Reboot into the new system. To set everything up:
# Re-run network setup in the installed system (the ISO's config is gone)
setup-interfaces -ra
setup-apkrepos -1c
# bring interfaces up automatically at boot
rc-update add networking boot
You probably also should run setup-user and add a normal user, and maybe doas (replace user in adduser with the user you want to add to wheel):
# add the (already created) user to the wheel group; replace "user"
adduser user wheel
apk add doas
# allow wheel members to doas, remembering authentication for a while
echo "permit persist :wheel" >> /etc/doas.conf
Also edit /etc/ssh/sshd_config and disable root login by removing the PermitRootLogin line, or setting it to prohibit-password (the value is case-sensitive).
# Swap on a 4G zvol: -b 4K matches the x86 page size; compression, caching
# and dedup are disabled so swap behaves predictably under memory pressure
zfs create -V 4G -b 4K -o compression=off -o logbias=throughput -o sync=always -o primarycache=metadata -o secondarycache=none -o dedup=off rpool/swap
mkswap /dev/zd0
swapon /dev/zd0
free -h
# make it permanent
echo "/dev/zd0 none swap defaults 0 0" >> /etc/fstab
# add the swap service to the *boot* runlevel (omitting the runlevel would
# put it in "default"; Alpine activates swap during boot)
rc-update add swap boot
# eudev + init scripts, and the ZFS udev rules (zvol /dev symlinks etc.)
# (the original repeated "apk add zfs-udev" a second time; once is enough)
apk add eudev udev-init-scripts zfs-udev
# switch the device manager from mdev to udev
setup-devd udev
rc-update add udev sysinit
rc-update add udev-trigger sysinit
rc-update add udev-settle sysinit
rc-update add udev-postmount default
Reboot, or trigger udev:
# reload rules and replay device events so new rules apply without a reboot
udevadm control --reload-rules
udevadm trigger
TODO: confirm
# config scripts
# helper: remount the root filesystem read-write
cat > /usr/bin/mount-rw.sh <<'EOF'
#!/bin/sh
mount -o remount,rw /
EOF
# helper: remount the root filesystem read-only
cat > /usr/bin/mount-ro.sh <<'EOF'
#!/bin/sh
mount -o remount,ro /
EOF
# /usr/bin/update-var.sh
cat > /usr/bin/update-var.sh <<'EOF'
#!/bin/sh
# copy running /var to /cfg/var
mount -o remount,rw /
cp -Rp /var/ /cfg/
# cp -Rp /var/{db,cron,spool,mail,run,www} /cfg/var/
mount -o remount,ro /
EOF
chmod +x /usr/bin/mount-rw.sh /usr/bin/mount-ro.sh /usr/bin/update-var.sh
# actually set up
# persistent copies of the writable directories live under /cfg on ZFS
mkdir -p /cfg/
cp -Rp /var /cfg/
cp -Rp /dev /cfg/
# make rw resolv.conf
mv /etc/resolv.conf /cfg/var/resolv.conf
ln -s /var/resolv.conf /etc/resolv.conf
# size can be set to cap size if needed
echo "tmpfs /var tmpfs rw,mode=755,size=128m 0 0" >> /etc/fstab
# echo "tmpfs /tmp tmpfs rw,mode=1777 0 0" >> /etc/fstab
# NOTE: an empty "size=" here (as originally written) makes the mount fail
# with "Bad value for 'size'" -- either omit the option or give it a value
echo "tmpfs /run tmpfs rw,mode=755 0 0" >> /etc/fstab
# local.d script: repopulate the tmpfs /dev and /var from /cfg at boot
cat > /etc/local.d/00-rw_dirs.start <<'EOF'
#!/bin/sh
cp -a /cfg/dev/* /dev/
cp -a /cfg/var/* /var/
EOF
chmod +x /etc/local.d/00-rw_dirs.start
rc-update add local default
Edit /etc/fstab to change the first line where / is mounted to have "ro" instead of "rw".
Make the file system read-write by running mount-rw.sh before creating more zfs volumes - it can create them while read-only, but it won't be able to add the mount-point.
At this point, if you zfs create rpool/data, it will mount the zfs volume in root in '/data' and make it rw by default.
To minimize overhead in VMs, we don't need the full Alpine Extended kernel - better to build a modified Virt kernel containing ZFS.
- Install Alpine if you don't have a system
# abuild for package signing; xorriso/squashfs-tools for the ISO;
# grub+mtools for EFI boot support; syslinux for BIOS boot
apk add abuild alpine-conf syslinux xorriso squashfs-tools grub mtools git
abuild-keygen -a # generate signing key (answer the prompts)
cd ~
# mkimage lives in the aports scripts directory
git clone --depth=1 https://gitlab.alpinelinux.org/alpine/aports.git
cd aports/scripts
- Create a custom mkimage profile based on virt.
cd ~/aports/scripts
export PROFILENAME=virtzfs
# Write the mkimage profile. mkimage.sh discovers profiles by sourcing
# mkimg.*.sh and calling profile_<name>; the quoted 'EOF' delimiter keeps
# the heredoc body literal (no expansion while writing it).
cat > mkimg.$PROFILENAME.sh << 'EOF'
#!/bin/sh
profile_virtzfs() {
# Start from the stock virt profile to stay minimal
profile_virt
# Only build the virt kernel flavour
kernel_flavors="virt"
# We want ZFS kernel addon for this flavor
kernel_addons="zfs"
# Userland bits for ZFS
apks="$apks alpine-base zfs zfs-scripts zfs-utils-py zfs-openrc sfdisk e2fsprogs syslinux"
# Pull kernel + addon modules explicitly so mkimage
# can build modloop-virt with ZFS modules included.
local _k _a
for _k in $kernel_flavors; do
apks="$apks linux-$_k"
for _a in $kernel_addons; do
apks="$apks ${_a}-$_k"
done
done
# Optional: if you *only* care about VMs and want to be ruthless
# about size, you can omit linux-firmware completely.
# apks="$apks linux-firmware" # leave this out for max minimalism
}
EOF
chmod +x mkimg.$PROFILENAME.sh
Build the release
cd ~/aports/scripts
export PROFILENAME=virtzfs
# Build the ISO (one invocation -- the original listed the same command
# twice, once on one line and once wrapped, which would build it twice).
# --tag selects the release branch; both main and community repos are
# needed so zfs packages resolve.
sh mkimage.sh \
	--tag v3.22 \
	--arch x86_64 \
	--profile "$PROFILENAME" \
	--outdir ~/iso \
	--repository https://dl-cdn.alpinelinux.org/alpine/v3.22/main \
	--repository https://dl-cdn.alpinelinux.org/alpine/v3.22/community