Alpine, Arch Linux, Fedora, RHEL, NixOS Root on ZFS guides: add CI/CD tests

Remove unmaintained Arch Linux guides.

Signed-off-by: Maurice Zhou <yuchen@apvc.uk>
Author: Maurice Zhou
Date: 2023-04-05 12:46:27 +02:00
Committed by: George Melikov
Parent: a67d02b8ac
Commit: 4fb5fb694f
43 changed files with 3655 additions and 2519 deletions


@@ -0,0 +1,79 @@
name: "Test installation guides"
on:
push:
branches:
- master
paths:
- 'docs/Getting Started/NixOS/Root on ZFS.rst'
- 'docs/Getting Started/RHEL-based distro/Root on ZFS.rst'
- 'docs/Getting Started/Alpine Linux/Root on ZFS.rst'
- 'docs/Getting Started/Arch Linux/Root on ZFS.rst'
- 'docs/Getting Started/Fedora/Root on ZFS.rst'
- 'docs/Getting Started/zfs_root_maintenance.rst'
pull_request:
paths:
- 'docs/Getting Started/NixOS/Root on ZFS.rst'
- 'docs/Getting Started/RHEL-based distro/Root on ZFS.rst'
- 'docs/Getting Started/Alpine Linux/Root on ZFS.rst'
- 'docs/Getting Started/Arch Linux/Root on ZFS.rst'
- 'docs/Getting Started/Fedora/Root on ZFS.rst'
- 'docs/Getting Started/zfs_root_maintenance.rst'
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install shellcheck
run: |
sudo apt install --yes shellcheck
- name: Run shellcheck on test entry point
run: |
sh -n ./scripts/zfs_root_guide_test.sh
shellcheck --check-sourced --enable=all --shell=dash --severity=style --format=tty \
./scripts/zfs_root_guide_test.sh
- name: Install pylit
run: |
set -vexuf
sudo apt-get update -y
sudo apt-get install -y python3-pip
sudo pip install pylit
- name: Install ZFS and partitioning tools
run: |
set -vexuf
sudo add-apt-repository --yes universe
sudo apt install --yes zfsutils-linux
sudo apt install --yes qemu-utils
sudo modprobe zfs
sudo apt install --yes git jq parted
sudo apt install --yes whois curl
sudo apt install --yes arch-install-scripts
- name: Test Alpine Linux guide
run: |
sudo ./scripts/zfs_root_guide_test.sh alpine
- name: Test Root on ZFS maintenance guide
run: |
sudo ./scripts/zfs_root_guide_test.sh maintenance
- name: Test Arch Linux guide
run: |
sudo ./scripts/zfs_root_guide_test.sh archlinux
- name: Test Fedora guide
run: |
sudo ./scripts/zfs_root_guide_test.sh fedora
- name: Test RHEL guide
run: |
sudo ./scripts/zfs_root_guide_test.sh rhel
- uses: cachix/install-nix-action@v20
with:
nix_path: nixpkgs=channel:nixos-unstable
- name: Test NixOS guide
run: |
sudo PATH="${PATH}" NIX_PATH="${NIX_PATH}" ./scripts/zfs_root_guide_test.sh nixos
- uses: actions/upload-artifact@v3
with:
name: installation-scripts
path: |
*.sh


@@ -1,11 +0,0 @@
Alpine Linux Root on ZFS
========================
Start from "Preparation".
Contents
--------
.. toctree::
:maxdepth: 2
:glob:
Root on ZFS/*


@@ -0,0 +1,556 @@
.. highlight:: sh
Alpine Linux Root on ZFS
========================
.. ifconfig:: zfs_root_test
::
# For the CI/CD test run of this guide,
# enable verbose logging of the shell and fail immediately when
# a command fails.
set -vxeuf
distro=${1}
cp /etc/resolv.conf ./"rootfs-${distro}"/etc/resolv.conf
arch-chroot ./"rootfs-${distro}" sh <<-'ZFS_ROOT_GUIDE_TEST'
set -vxeuf
# install alpine setup scripts
apk update
apk add alpine-conf curl
**Customization**
Unless stated otherwise, it is not recommended to customize system
configuration before reboot.
Preparation
---------------------------
#. Disable Secure Boot. ZFS modules cannot be loaded if Secure Boot is enabled.
#. Download latest extended variant of `Alpine Linux
live image
<https://dl-cdn.alpinelinux.org/alpine/v3.17/releases/x86_64/alpine-extended-3.17.3-x86_64.iso>`__,
verify `checksum <https://dl-cdn.alpinelinux.org/alpine/v3.17/releases/x86_64/alpine-extended-3.17.3-x86_64.iso.asc>`__
and boot from it.
.. code-block:: sh
gpg --auto-key-retrieve --keyserver hkps://keyserver.ubuntu.com --verify alpine-extended-*.asc
dd if=input-file of=output-file bs=1M
.. ifconfig:: zfs_root_test
# check whether the download page exists
# alpine version must be in sync with ci/cd test chroot tarball
curl --head --fail https://dl-cdn.alpinelinux.org/alpine/v3.17/releases/x86_64/alpine-extended-3.17.3-x86_64.iso
curl --head --fail https://dl-cdn.alpinelinux.org/alpine/v3.17/releases/x86_64/alpine-extended-3.17.3-x86_64.iso.asc
#. Login as root user. There is no password.
#. Configure Internet
.. code-block:: sh
setup-interfaces -r
# You must use the "-r" option to start networking services properly
# example:
network interface: wlan0
WiFi name: <ssid>
ip address: dhcp
<enter done to finish network config>
manual netconfig: n
#. If you are using wireless network and it is not shown, see `Alpine
Linux wiki
<https://wiki.alpinelinux.org/wiki/Wi-Fi#wpa_supplicant>`__ for
further details. ``wpa_supplicant`` can be installed with ``apk
add wpa_supplicant`` without internet connection.
#. Configure SSH server
.. code-block:: sh
setup-sshd
# example:
ssh server: openssh
allow root: "prohibit-password" or "yes"
ssh key: "none" or "<public key>"
Configurations set here will be copied verbatim to the installed system.
#. Set root password or ``/root/.ssh/authorized_keys``.
Choose a strong root password, as it will be copied to the
installed system. However, ``authorized_keys`` is not copied.
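For example, a minimal sketch (the public key below is a placeholder,
not a real key):
.. code-block:: sh
passwd
# or, for key-based login:
mkdir -p /root/.ssh
echo 'ssh-ed25519 AAAA...placeholder user@host' >> /root/.ssh/authorized_keys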
#. Connect from another computer
.. code-block:: sh
ssh root@192.168.1.91
#. Configure NTP client for time synchronization
.. code-block:: sh
setup-ntp busybox
.. ifconfig:: zfs_root_test
# this step is unnecessary for chroot and returns 1 when executed
#. Set up apk-repo. A list of available mirrors is shown.
Press space bar to continue
.. code-block:: sh
setup-apkrepos
#. Throughout this guide, we use predictable disk names generated by
udev
.. code-block:: sh
apk update
apk add eudev
setup-devd udev
It can be removed after reboot with ``setup-devd mdev && apk del eudev``.
.. ifconfig:: zfs_root_test
# for some reason, udev is extremely slow in a chroot;
# it is not needed in a chroot anyway, so skip this step
#. Target disk
List available disks with
.. code-block:: sh
find /dev/disk/by-id/
If virtio is used as the disk bus, power off the VM and set serial numbers for the disks.
For QEMU, use ``-drive format=raw,file=disk2.img,serial=AaBb``.
For libvirt, edit domain XML. See `this page
<https://bugzilla.redhat.com/show_bug.cgi?id=1245013>`__ for examples.
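For example, a QEMU sketch showing only the relevant flags
(file names and serials are placeholders):
.. code-block:: sh
qemu-system-x86_64 \
-drive format=raw,file=disk1.img,serial=AaBb \
-drive format=raw,file=disk2.img,serial=CcDd
# the disks then show up under /dev/disk/by-id/, e.g. virtio-AaBb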
Declare disk array
.. code-block:: sh
DISK='/dev/disk/by-id/ata-FOO /dev/disk/by-id/nvme-BAR'
For single disk installation, use
.. code-block:: sh
DISK='/dev/disk/by-id/disk1'
.. ifconfig:: zfs_root_test
# for github test run, use chroot and loop devices
DISK="$(losetup -a| grep alpine | cut -f1 -d: | xargs -t -I '{}' printf '{} ')"
# for maintenance guide test
DISK="$(losetup -a| grep maintenance | cut -f1 -d: | xargs -t -I '{}' printf '{} ') ${DISK}"
#. Set a mount point
::
MNT=$(mktemp -d)
#. Set partition size:
Set the swap size in GB; set it to 1 if you don't want swap to
take up too much space.
.. code-block:: sh
SWAPSIZE=4
.. ifconfig:: zfs_root_test
# For the test run, use 1GB swap space to avoid hitting CI/CD
# quota
SWAPSIZE=1
Set how much space should be left at the end of the disk, minimum 1GB
::
RESERVE=1
#. Install ZFS support from live media::
apk add zfs
#. Install bootloader programs and partition tool
::
apk add grub-bios grub-efi parted e2fsprogs cryptsetup util-linux
System Installation
---------------------------
#. Partition the disks.
Note: you must clear all existing partition tables and data structures from the disks,
especially those with existing ZFS pools or mdraid and those that have been used as live media.
Those data structures may interfere with the boot process.
For flash-based storage, this can be done by uncommenting the blkdiscard command below:
::
partition_disk () {
local disk="${1}"
#blkdiscard -f "${disk}"
parted --script --align=optimal "${disk}" -- \
mklabel gpt \
mkpart EFI 2MiB 1GiB \
mkpart bpool 1GiB 5GiB \
mkpart rpool 5GiB -$((SWAPSIZE + RESERVE))GiB \
mkpart swap -$((SWAPSIZE + RESERVE))GiB -"${RESERVE}"GiB \
mkpart BIOS 1MiB 2MiB \
set 1 esp on \
set 5 bios_grub on \
set 5 legacy_boot on
partprobe "${disk}"
}
for i in ${DISK}; do
partition_disk "${i}"
done
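If the disks are not flash-based, existing signatures can instead be
cleared with ``wipefs`` from util-linux (a sketch; like ``blkdiscard``,
this destroys all data on the disks):
.. code-block:: sh
for i in ${DISK}; do
wipefs -a "${i}"  # clears filesystem, RAID and partition-table signatures
done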
.. ifconfig:: zfs_root_test
::
# When working with GitHub chroot runners, we are using loop
# devices as installation target. However, the alias support for
# loop device was just introduced in March 2023. See
# https://github.com/systemd/systemd/pull/26693
# For now, we will create the aliases manually as a workaround
looppart="1 2 3 4 5"
for i in ${DISK}; do
for j in ${looppart}; do
if test -e "${i}p${j}"; then
ln -s "${i}p${j}" "${i}-part${j}"
fi
done
done
#. Setup encrypted swap. This is useful if the available memory is
small::
for i in ${DISK}; do
cryptsetup open --type plain --key-file /dev/random "${i}"-part4 "${i##*/}"-part4
mkswap /dev/mapper/"${i##*/}"-part4
swapon /dev/mapper/"${i##*/}"-part4
done
#. Load ZFS kernel module
.. code-block:: sh
modprobe zfs
#. Create boot pool
::
# shellcheck disable=SC2046
zpool create -d \
-o feature@async_destroy=enabled \
-o feature@bookmarks=enabled \
-o feature@embedded_data=enabled \
-o feature@empty_bpobj=enabled \
-o feature@enabled_txg=enabled \
-o feature@extensible_dataset=enabled \
-o feature@filesystem_limits=enabled \
-o feature@hole_birth=enabled \
-o feature@large_blocks=enabled \
-o feature@lz4_compress=enabled \
-o feature@spacemap_histogram=enabled \
-o ashift=12 \
-o autotrim=on \
-O acltype=posixacl \
-O canmount=off \
-O compression=lz4 \
-O devices=off \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/boot \
-R "${MNT}" \
bpool \
mirror \
$(for i in ${DISK}; do
printf '%s ' "${i}-part2";
done)
If not using a multi-disk setup, remove ``mirror``.
You should not need to customize any of the options for the boot pool.
GRUB does not support all of the zpool features. See ``spa_feature_names``
in `grub-core/fs/zfs/zfs.c
<http://git.savannah.gnu.org/cgit/grub.git/tree/grub-core/fs/zfs/zfs.c#n276>`__.
This step creates a separate boot pool for ``/boot`` with the features
limited to only those that GRUB supports, allowing the root pool to use
any/all features.
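To verify that only GRUB-compatible features ended up enabled on the
boot pool, one possible check (a sketch):
.. code-block:: sh
zpool get all bpool | grep 'feature@'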
#. Create root pool
::
# shellcheck disable=SC2046
zpool create \
-o ashift=12 \
-o autotrim=on \
-R "${MNT}" \
-O acltype=posixacl \
-O canmount=off \
-O compression=zstd \
-O dnodesize=auto \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/ \
rpool \
mirror \
$(for i in ${DISK}; do
printf '%s ' "${i}-part3";
done)
If not using a multi-disk setup, remove ``mirror``.
#. Create root system container:
- Unencrypted
::
zfs create \
-o canmount=off \
-o mountpoint=none \
rpool/alpinelinux
- Encrypted:
Pick a strong password. Once compromised, changing the password will not keep your
data safe. See ``zfs-change-key(8)`` for more info
.. code-block:: sh
zfs create \
-o canmount=off \
-o mountpoint=none \
-o encryption=on \
-o keylocation=prompt \
-o keyformat=passphrase \
rpool/alpinelinux
You can automate this step (insecure) with: ``echo POOLPASS | zfs create ...``.
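A sketch of that non-interactive variant; ``POOLPASS`` is a
placeholder, and the passphrase will be visible in shell history:
.. code-block:: sh
echo 'POOLPASS' | zfs create \
-o canmount=off \
-o mountpoint=none \
-o encryption=on \
-o keylocation=prompt \
-o keyformat=passphrase \
rpool/alpinelinux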
Create system datasets and
manage their mountpoints with ``mountpoint=legacy``
::
zfs create -o canmount=noauto -o mountpoint=/ rpool/alpinelinux/root
zfs mount rpool/alpinelinux/root
zfs create -o mountpoint=legacy rpool/alpinelinux/home
mkdir "${MNT}"/home
mount -t zfs rpool/alpinelinux/home "${MNT}"/home
zfs create -o mountpoint=legacy rpool/alpinelinux/var
zfs create -o mountpoint=legacy rpool/alpinelinux/var/lib
zfs create -o mountpoint=legacy rpool/alpinelinux/var/log
zfs create -o mountpoint=none bpool/alpinelinux
zfs create -o mountpoint=legacy bpool/alpinelinux/root
mkdir "${MNT}"/boot
mount -t zfs bpool/alpinelinux/root "${MNT}"/boot
mkdir -p "${MNT}"/var/log
mkdir -p "${MNT}"/var/lib
mount -t zfs rpool/alpinelinux/var/lib "${MNT}"/var/lib
mount -t zfs rpool/alpinelinux/var/log "${MNT}"/var/log
#. Format and mount ESP
::
for i in ${DISK}; do
mkfs.vfat -n EFI "${i}"-part1
mkdir -p "${MNT}"/boot/efis/"${i##*/}"-part1
mount -t vfat -o iocharset=iso8859-1 "${i}"-part1 "${MNT}"/boot/efis/"${i##*/}"-part1
done
mkdir -p "${MNT}"/boot/efi
mount -t vfat -o iocharset=iso8859-1 "$(echo "${DISK}" | sed "s|^ *||" | cut -f1 -d' '|| true)"-part1 "${MNT}"/boot/efi
System Configuration
---------------------------
#. Workaround for GRUB to recognize predictable disk names::
export ZPOOL_VDEV_NAME_PATH=YES
#. Install system to disk
.. code-block:: sh
BOOTLOADER=grub setup-disk -k lts -v "${MNT}"
GRUB installation will fail at this point; GRUB will be reinstalled later.
The error message about the ZFS kernel module can be ignored.
.. ifconfig:: zfs_root_test
# lts kernel will pull in tons of firmware
BOOTLOADER=grub setup-disk -k virt -v "${MNT}"
#. Allow EFI system partition to fail at boot::
sed -i "s|vfat.*rw|vfat rw,nofail|" "${MNT}"/etc/fstab
#. Chroot
.. code-block:: sh
for i in /dev /proc /sys; do mkdir -p "${MNT}"/"${i}"; mount --rbind "${i}" "${MNT}"/"${i}"; done
chroot "${MNT}" /usr/bin/env DISK="${DISK}" sh
.. ifconfig:: zfs_root_test
::
for i in /dev /proc /sys; do mkdir -p "${MNT}"/"${i}"; mount --rbind "${i}" "${MNT}"/"${i}"; done
chroot "${MNT}" /usr/bin/env DISK="${DISK}" sh <<-'ZFS_ROOT_NESTED_CHROOT'
set -vxeuf
#. Apply GRUB workaround
::
echo 'export ZPOOL_VDEV_NAME_PATH=YES' >> /etc/profile.d/zpool_vdev_name_path.sh
# shellcheck disable=SC1091
. /etc/profile.d/zpool_vdev_name_path.sh
# GRUB fails to detect rpool name, hard code as "rpool"
sed -i "s|rpool=.*|rpool=rpool|" /etc/grub.d/10_linux
# BusyBox stat does not recognize zfs, replace fs detection with ZFS
sed -i 's|stat -f -c %T /|echo zfs|' /usr/sbin/grub-mkconfig
# grub-probe fails to identify fs mounted at /boot
BOOT_DEVICE=$(zpool status -P bpool | grep -- -part2 | head -n1 | sed "s|.*/dev*|/dev|" | sed "s|part2.*|part2|")
sed -i "s|GRUB_DEVICE_BOOT=.*|GRUB_DEVICE_BOOT=${BOOT_DEVICE}|" /usr/sbin/grub-mkconfig
The ``sed`` workaround for ``grub-mkconfig`` needs to be applied
for every GRUB update, as the update will overwrite the changes.
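After a GRUB update, one possible way to check whether the workaround
is still in place (a sketch):
.. code-block:: sh
grep -q 'rpool=rpool' /etc/grub.d/10_linux \
|| echo 're-apply the sed commands above'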
#. Install GRUB::
mkdir -p /boot/efi/alpine/grub-bootdir/i386-pc/
mkdir -p /boot/efi/alpine/grub-bootdir/x86_64-efi/
for i in ${DISK}; do
grub-install --target=i386-pc --boot-directory \
/boot/efi/alpine/grub-bootdir/i386-pc/ "${i}"
done
grub-install --target x86_64-efi --boot-directory \
/boot/efi/alpine/grub-bootdir/x86_64-efi/ --efi-directory \
/boot/efi --bootloader-id alpine --removable
if test -d /sys/firmware/efi/efivars/; then
apk add efibootmgr
grub-install --target x86_64-efi --boot-directory \
/boot/efi/alpine/grub-bootdir/x86_64-efi/ --efi-directory \
/boot/efi --bootloader-id alpine
fi
#. Generate GRUB menu::
mkdir -p /boot/grub
grub-mkconfig -o /boot/grub/grub.cfg
cp /boot/grub/grub.cfg \
/boot/efi/alpine/grub-bootdir/x86_64-efi/grub/grub.cfg
cp /boot/grub/grub.cfg \
/boot/efi/alpine/grub-bootdir/i386-pc/grub/grub.cfg
.. ifconfig:: zfs_root_test
::
find /boot/efis/ -name "grub.cfg" -print0 \
| xargs -t -0I '{}' grub-script-check -v '{}'
#. For both legacy and EFI booting: mirror ESP content::
espdir=$(mktemp -d)
find /boot/efi/ -maxdepth 1 -mindepth 1 -type d -print0 \
| xargs -t -0I '{}' cp -r '{}' "${espdir}"
find "${espdir}" -maxdepth 1 -mindepth 1 -type d -print0 \
| xargs -t -0I '{}' sh -vxc "find /boot/efis/ -maxdepth 1 -mindepth 1 -type d -print0 | xargs -t -0I '[]' cp -r '{}' '[]'"
.. ifconfig:: zfs_root_test
::
##################################################
#
#
# MAINTENANCE SCRIPT ENTRY POINT
# DO NOT TOUCH
#
#
#################################################
#. Exit chroot
.. code-block:: sh
exit
.. ifconfig:: zfs_root_test
# nested chroot ends here
ZFS_ROOT_NESTED_CHROOT
.. ifconfig:: zfs_root_test
::
# list contents of boot dir to confirm
# that the mirroring succeeded
find "${MNT}"/boot/efis/ -type d > list_of_efi_dirs
for i in ${DISK}; do
if ! grep "${i##*/}-part1/efi\|${i##*/}-part1/EFI" list_of_efi_dirs; then
echo "disk ${i} not found in efi system partition, installation error";
cat list_of_efi_dirs
exit 1
fi
done
#. Unmount filesystems and create initial system snapshot
You can later create a boot environment from this snapshot.
See `Root on ZFS maintenance page <../zfs_root_maintenance.html>`__.
::
umount -Rl "${MNT}"
zfs snapshot -r rpool@initial-installation
zfs snapshot -r bpool@initial-installation
zpool export -a
#. Reboot
.. code-block:: sh
reboot
.. ifconfig:: zfs_root_test
# chroot ends here
ZFS_ROOT_GUIDE_TEST


@@ -1,99 +0,0 @@
.. highlight:: sh
Preparation
======================
.. contents:: Table of Contents
:local:
#. Disable Secure Boot. ZFS modules cannot be loaded if
Secure Boot is enabled.
#. Download latest extended variant of `Alpine Linux
live image
<https://dl-cdn.alpinelinux.org/alpine/latest-stable/releases/x86_64/>`__
and boot from it.
#. Login as root user. There is no password.
#. Configure Internet::
setup-interfaces -r
# example:
network interface: wlan0
WiFi name: <ssid>
ip address: dhcp
<enter done to finish network config>
manual netconfig: n
#. Configure SSH server::
setup-sshd
# example:
ssh server: openssh
allow root: "prohibit-password" or "yes"
ssh key: "none" or "<public key>"
Configurations set here will be copied verbatim to the installed system.
#. Set root password or ``/root/.ssh/authorized_keys``.
#. Connect from another computer::
ssh root@192.168.1.19
#. Configure NTP client for time synchronization::
setup-ntp chrony
#. Pick a mirror from `Alpine Mirrors <https://mirrors.alpinelinux.org/>`__
and add it to package manager configuration::
tee -a /etc/apk/repositories <<EOF
https://dl-cdn.alpinelinux.org/alpine/latest-stable/community/
https://dl-cdn.alpinelinux.org/alpine/latest-stable/main/
EOF
#. Throughout this guide, we use predictable disk names generated by udev::
apk update
apk add eudev
setup-devd udev
It can be removed after reboot with ``setup-devd mdev && apk del eudev``.
#. Target disk
List available disks with::
find /dev/disk/by-id/
If using virtio as disk bus, use ``/dev/disk/by-path/``.
Declare disk array::
DISK='/dev/disk/by-id/ata-FOO /dev/disk/by-id/nvme-BAR'
For single disk installation, use::
DISK='/dev/disk/by-id/disk1'
#. Set partition size:
Set swap size, set to 1 if you don't want swap to
take up too much space::
INST_PARTSIZE_SWAP=4
Root pool size, use all remaining disk space if not set::
INST_PARTSIZE_RPOOL=
#. Install ZFS support and partition tool::
apk add zfs zfs-lts sgdisk e2fsprogs cryptsetup util-linux
modprobe zfs
Many errors about firmware will appear. They are
safe to ignore.
#. Install bootloader for both legacy boot and UEFI::
apk add grub-bios grub-efi


@@ -1,256 +0,0 @@
.. highlight:: sh
System Installation
======================
.. contents:: Table of Contents
:local:
#. Partition the disks::
for i in ${DISK}; do
# wipe flash-based storage device to improve
# performance.
# ALL DATA WILL BE LOST
# blkdiscard -f $i
sgdisk --zap-all $i
sgdisk -n1:1M:+1G -t1:EF00 $i
sgdisk -n2:0:+4G -t2:BE00 $i
sgdisk -n4:0:+${INST_PARTSIZE_SWAP}G -t4:8200 $i
if test -z $INST_PARTSIZE_RPOOL; then
sgdisk -n3:0:0 -t3:BF00 $i
else
sgdisk -n3:0:+${INST_PARTSIZE_RPOOL}G -t3:BF00 $i
fi
sgdisk -a1 -n5:24K:+1000K -t5:EF02 $i
sync && udevadm settle && sleep 3
cryptsetup open --type plain --key-file /dev/random $i-part4 ${i##*/}-part4
mkswap /dev/mapper/${i##*/}-part4
swapon /dev/mapper/${i##*/}-part4
done
#. Create boot pool::
zpool create -d \
-o feature@async_destroy=enabled \
-o feature@bookmarks=enabled \
-o feature@embedded_data=enabled \
-o feature@empty_bpobj=enabled \
-o feature@enabled_txg=enabled \
-o feature@extensible_dataset=enabled \
-o feature@filesystem_limits=enabled \
-o feature@hole_birth=enabled \
-o feature@large_blocks=enabled \
-o feature@lz4_compress=enabled \
-o feature@spacemap_histogram=enabled \
-o ashift=12 \
-o autotrim=on \
-O acltype=posixacl \
-O canmount=off \
-O compression=lz4 \
-O devices=off \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/boot \
-R /mnt \
bpool \
mirror \
$(for i in ${DISK}; do
printf "$i-part2 ";
done)
If not using a multi-disk setup, remove ``mirror``.
You should not need to customize any of the options for the boot pool.
GRUB does not support all of the zpool features. See ``spa_feature_names``
in `grub-core/fs/zfs/zfs.c
<http://git.savannah.gnu.org/cgit/grub.git/tree/grub-core/fs/zfs/zfs.c#n276>`__.
This step creates a separate boot pool for ``/boot`` with the features
limited to only those that GRUB supports, allowing the root pool to use
any/all features.
#. Create root pool::
zpool create \
-o ashift=12 \
-o autotrim=on \
-R /mnt \
-O acltype=posixacl \
-O canmount=off \
-O compression=zstd \
-O dnodesize=auto \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/ \
rpool \
mirror \
$(for i in ${DISK}; do
printf "$i-part3 ";
done)
If not using a multi-disk setup, remove ``mirror``.
#. Create root system container:
- Unencrypted::
zfs create \
-o canmount=off \
-o mountpoint=none \
rpool/alpinelinux
- Encrypted:
Pick a strong password. Once compromised, changing the password will not keep your
data safe. See ``zfs-change-key(8)`` for more info::
zfs create \
-o canmount=off \
-o mountpoint=none \
-o encryption=on \
-o keylocation=prompt \
-o keyformat=passphrase \
rpool/alpinelinux
You can automate this step (insecure) with: ``echo POOLPASS | zfs create ...``.
Create system datasets, let Alpine Linux declaratively
manage mountpoints with ``mountpoint=legacy``::
zfs create -o mountpoint=/ -o canmount=noauto rpool/alpinelinux/root
zfs mount rpool/alpinelinux/root
zfs create -o mountpoint=legacy rpool/alpinelinux/home
mkdir /mnt/home
mount -t zfs rpool/alpinelinux/home /mnt/home
zfs create -o mountpoint=legacy rpool/alpinelinux/var
zfs create -o mountpoint=legacy rpool/alpinelinux/var/lib
zfs create -o mountpoint=legacy rpool/alpinelinux/var/log
zfs create -o mountpoint=none bpool/alpinelinux
zfs create -o mountpoint=legacy bpool/alpinelinux/root
mkdir /mnt/boot
mount -t zfs bpool/alpinelinux/root /mnt/boot
mkdir -p /mnt/var/log
mkdir -p /mnt/var/lib
mount -t zfs rpool/alpinelinux/var/lib /mnt/var/lib
mount -t zfs rpool/alpinelinux/var/log /mnt/var/log
#. Format and mount ESP::
for i in ${DISK}; do
mkfs.vfat -n EFI ${i}-part1
mkdir -p /mnt/boot/efis/${i##*/}-part1
mount -t vfat ${i}-part1 /mnt/boot/efis/${i##*/}-part1
done
mkdir -p /mnt/boot/efi
mount -t vfat $(echo $DISK | cut -f1 -d' ')-part1 /mnt/boot/efi
#. By default, the ``setup-disk`` command does not support ZFS and will refuse to run;
add ZFS support::
sed -i 's|supported="ext|supported="zfs ext|g' /sbin/setup-disk
#. Workaround for GRUB to recognize predictable disk names::
export ZPOOL_VDEV_NAME_PATH=YES
#. Install system to disk::
BOOTLOADER=grub setup-disk -v /mnt
GRUB installation will fail at this point; GRUB will be reinstalled later.
#. Allow EFI system partition to fail at boot::
sed -i "s|vfat.*rw|vfat rw,nofail|" /mnt/etc/fstab
#. Chroot::
m='/dev /proc /sys'
for i in $m; do mount --rbind $i /mnt/$i; done
chroot /mnt /usr/bin/env DISK="$DISK" sh
#. Apply GRUB workaround::
echo 'export ZPOOL_VDEV_NAME_PATH=YES' >> /etc/profile.d/zpool_vdev_name_path.sh
source /etc/profile.d/zpool_vdev_name_path.sh
# GRUB fails to detect rpool name, hard code as "rpool"
sed -i "s|rpool=.*|rpool=rpool|" /etc/grub.d/10_linux
# BusyBox stat does not recognize zfs, replace fs detection with ZFS
sed -i 's|stat -f -c %T /|echo zfs|' /usr/sbin/grub-mkconfig
# grub-probe fails to identify fs mounted at /boot
BOOT_DEVICE=$(zpool status -P bpool | grep -- -part2 | head -n1 | sed "s|.*/dev*|/dev|" | sed "s|part2.*|part2|")
sed -i "s|GRUB_DEVICE_BOOT=.*|GRUB_DEVICE_BOOT=${BOOT_DEVICE}|" /usr/sbin/grub-mkconfig
The ``sed`` workaround for ``grub-mkconfig`` needs to be applied
for every GRUB update, as the update will overwrite the changes.
#. Install GRUB::
mkdir -p /boot/efi/alpine/grub-bootdir/i386-pc/
mkdir -p /boot/efi/alpine/grub-bootdir/x86_64-efi/
for i in ${DISK}; do
grub-install --target=i386-pc --boot-directory \
/boot/efi/alpine/grub-bootdir/i386-pc/ $i
done
grub-install --target x86_64-efi --boot-directory \
/boot/efi/alpine/grub-bootdir/x86_64-efi/ --efi-directory \
/boot/efi --bootloader-id alpine --removable
#. Generate GRUB menu::
grub-mkconfig -o /boot/efi/alpine/grub-bootdir/x86_64-efi/grub/grub.cfg
grub-mkconfig -o /boot/efi/alpine/grub-bootdir/i386-pc/grub/grub.cfg
mkdir -p /boot/grub
grub-mkconfig -o /boot/grub/grub.cfg
#. For both legacy and EFI booting: mirror ESP content::
ESP_MIRROR=$(mktemp -d)
cp -r /boot/efi/EFI $ESP_MIRROR
for i in /boot/efis/*; do
cp -r $ESP_MIRROR/EFI $i
done
rm -rf $ESP_MIRROR
#. Exit chroot::
exit
#. Unmount filesystems::
umount -Rl /mnt
zpool export -a
#. Reboot::
reboot
Post installation
~~~~~~~~~~~~~~~~~
#. Setup graphical desktop::
setup-desktop
#. Configure swap.
#. You can create a snapshot of the newly installed
system for later rollback,
see `this page <https://openzfs.github.io/openzfs-docs/Getting%20Started/Arch%20Linux/Root%20on%20ZFS/6-create-boot-environment.html>`__.


@@ -26,13 +26,8 @@ see below.
Root on ZFS
-----------
ZFS can be used as root file system for Alpine Linux.
An installation guide is available.
Start from "Preparation".
.. toctree::
:maxdepth: 1
:glob:
:maxdepth: 1
:glob:
Root on ZFS/*
*


@@ -1,32 +0,0 @@
.. highlight:: sh
archzfs repo
============
.. contents:: Table of Contents
:local:
ZFS packages are provided by the third-party
`archzfs repository <https://github.com/archzfs/archzfs>`__.
You can use it as follows.
#. Import keys of archzfs repository::
curl -L https://archzfs.com/archzfs.gpg | pacman-key -a -
pacman-key --lsign-key $(curl -L https://git.io/JsfVS)
curl -L https://git.io/Jsfw2 > /etc/pacman.d/mirrorlist-archzfs
#. Add archzfs repository::
tee -a /etc/pacman.conf <<- 'EOF'
#[archzfs-testing]
#Include = /etc/pacman.d/mirrorlist-archzfs
[archzfs]
Include = /etc/pacman.d/mirrorlist-archzfs
EOF
#. Update pacman database::
pacman -Sy


@@ -1,75 +0,0 @@
.. highlight:: sh
zfs-linux
============
.. contents:: Table of Contents
:local:
``zfs-linux*`` packages provide prebuilt modules.
Prebuilt modules are kernel-specific, i.e., a
module built for 5.11.1 is incompatible
with 5.11.2.
For this reason, ``zfs-linux*``
depends on a particular kernel version.
Example: if linux=5.11.2 is available, but
zfs-linux=5.11.2 is not available, you cannot
upgrade to linux=5.11.2 until zfs-linux=5.11.2
comes out.
``zfs-linux*`` is recommended for users who run stock kernels
from the Arch Linux repo and can accept kernel update delays.
You can also switch between the ``zfs-linux*`` and ``zfs-dkms``
packages later.
For other kernels or Arch-based distros, use the zfs-dkms package.
Available packages
~~~~~~~~~~~~~~~~~~~
* zfs-linux
* zfs-linux-lts
* zfs-linux-zen
* zfs-linux-hardened
Installation
~~~~~~~~~~~~
#. Check kernel variant::
INST_LINVAR=$(sed 's|.*linux|linux|' /proc/cmdline | sed 's|.img||g' | awk '{ print $1 }')
#. Check compatible kernel version::
INST_LINVER=$(pacman -Si zfs-${INST_LINVAR} | grep 'Depends On' | sed "s|.*${INST_LINVAR}=||" | awk '{ print $1 }')
#. Install kernel. Download from archive if kernel is not available::
if [ ${INST_LINVER} = \
$(pacman -Si ${INST_LINVAR} | grep Version | awk '{ print $3 }') ]; then
pacman -S --noconfirm --needed ${INST_LINVAR}
else
pacman -U --noconfirm --needed \
https://archive.archlinux.org/packages/l/${INST_LINVAR}/${INST_LINVAR}-${INST_LINVER}-x86_64.pkg.tar.zst
fi
#. Install zfs-linux::
pacman -Sy zfs-${INST_LINVAR}
#. Ignore kernel updates::
sed -i 's/#IgnorePkg/IgnorePkg/' /etc/pacman.conf
sed -i "/^IgnorePkg/ s/$/ ${INST_LINVAR} ${INST_LINVAR}-headers zfs-${INST_LINVAR} zfs-utils/" /etc/pacman.conf
#. Load kernel module::
modprobe zfs
Update kernel
~~~~~~~~~~~~~
To update the kernel, run::
INST_LINVAR=$(sed 's|.*linux|linux|' /proc/cmdline | sed 's|.img||g' | awk '{ print $1 }')
pacman -Sy --needed --noconfirm ${INST_LINVAR} ${INST_LINVAR}-headers zfs-${INST_LINVAR} zfs-utils


@@ -1,139 +0,0 @@
.. highlight:: sh
zfs-dkms
============
.. contents:: Table of Contents
:local:
The ``zfs-dkms`` package provides Dynamic Kernel Module Support.
It will automatically build ZFS kernel modules for compatible
kernels.
However, there are several disadvantages:
- slow to build
- little warning when DKMS build fails
``zfs-dkms`` is required for users who use custom kernels or
cannot accept delays for kernel updates. This package is also required for derivative
distros such as `Artix Linux <https://artixlinux.org>`__.
Installation
~~~~~~~~~~~~
#. Check kernel variant::
INST_LINVAR=$(sed 's|.*linux|linux|' /proc/cmdline | sed 's|.img||g' | awk '{ print $1 }')
If you are using live image, use the hard-coded value::
#INST_LINVAR=linux
#. Check kernel version::
INST_LINVER=$(pacman -Qi ${INST_LINVAR} | grep Version | awk '{ print $3 }')
#. Install kernel headers::
if [ "${INST_LINVER}" = \
"$(pacman -Si ${INST_LINVAR}-headers | grep Version | awk '{ print $3 }')" ]; then
pacman -S --noconfirm --needed ${INST_LINVAR}-headers
else
pacman -U --noconfirm --needed \
https://archive.archlinux.org/packages/l/${INST_LINVAR}-headers/${INST_LINVAR}-headers-${INST_LINVER}-x86_64.pkg.tar.zst
fi
#. Install zfs-dkms::
pacman -Sy --needed --noconfirm zfs-dkms glibc
If pacman output contains the following error message,
then the kernel needs a `downgrade <#zfs-dkms-compatible-kernel>`__,
or you can try ``zfs-dkms-git`` package::
(3/4) Install DKMS modules
==> dkms install --no-depmod -m zfs -v 2.0.4 -k 5.12.0-rc5-1-git-00030-gd19cc4bfbff1
configure: error:
*** None of the expected "capability" interfaces were detected.
*** This may be because your kernel version is newer than what is
*** supported, or you are using a patched custom kernel with
*** incompatible modifications.
***
*** ZFS Version: zfs-2.0.4-1
*** Compatible Kernels: 3.10 - 5.11
#. Ignore kernel package from updates::
sed -i 's/#IgnorePkg/IgnorePkg/' /etc/pacman.conf
sed -i "/^IgnorePkg/ s/$/ ${INST_LINVAR} ${INST_LINVAR}-headers/" /etc/pacman.conf
#. Load kernel module::
modprobe zfs
Update kernel
~~~~~~~~~~~~~
#. Check kernel variant::
INST_LINVAR=$(sed 's|.*linux|linux|' /proc/cmdline | sed 's|.img||g' | awk '{ print $1 }')
#. Install zfs-dkms::
pacman -Sy --needed $INST_LINVAR $INST_LINVAR-headers zfs-dkms glibc
If pacman output contains the following error message,
then the kernel needs a `downgrade <#zfs-dkms-compatible-kernel>`__,
or you can try ``zfs-dkms-git`` package::
(3/4) Install DKMS modules
==> dkms install --no-depmod -m zfs -v 2.0.4 -k 5.12.0-rc5-1-git-00030-gd19cc4bfbff1
configure: error:
*** None of the expected "capability" interfaces were detected.
*** This may be because your kernel version is newer than what is
*** supported, or you are using a patched custom kernel with
*** incompatible modifications.
***
*** ZFS Version: zfs-2.0.4-1
*** Compatible Kernels: 3.10 - 5.11
Install compatible kernel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If the installed kernel is not
compatible with ZFS, a kernel downgrade
is needed.
#. Choose kernel variant. Available variants are:
* linux
* linux-lts
* linux-zen
* linux-hardened
::
INST_LINVAR=linux
#. Install kernels available when the package was built. Check build date::
DKMS_DATE=$(pacman -Syi zfs-dkms \
| grep 'Build Date' \
| sed 's/.*: //' \
| LC_ALL=C xargs -i{} date -d {} -u +%Y/%m/%d)
#. Check kernel version::
INST_LINVER=$(curl https://archive.archlinux.org/repos/${DKMS_DATE}/core/os/x86_64/ \
| grep \"${INST_LINVAR}-'[0-9]' \
| grep -v sig \
| sed "s|.*$INST_LINVAR-||" \
| sed "s|-x86_64.*||")
#. Install compatible kernel and headers::
pacman -U \
https://archive.archlinux.org/packages/l/${INST_LINVAR}/${INST_LINVAR}-${INST_LINVER}-x86_64.pkg.tar.zst \
https://archive.archlinux.org/packages/l/${INST_LINVAR}-headers/${INST_LINVAR}-headers-${INST_LINVER}-x86_64.pkg.tar.zst
#. Continue from `installation <#installation>`__.


@@ -1,11 +0,0 @@
Arch Linux Root on ZFS
======================
Start from "Preparation".
Contents
--------
.. toctree::
:maxdepth: 2
:glob:
Root on ZFS/*


@@ -0,0 +1,666 @@
.. highlight:: sh
.. ifconfig:: zfs_root_test
::
# For the CI/CD test run of this guide,
# enable verbose logging of the shell and fail immediately when
# a command fails.
set -vxeuf
distro=${1}
cp /etc/resolv.conf ./"rootfs-${distro}"/etc/resolv.conf
arch-chroot ./"rootfs-${distro}" sh <<-'ZFS_ROOT_GUIDE_TEST'
set -vxeuf
# install alpine setup scripts
apk update
apk add alpine-conf curl
.. In this document, there are three types of code-block markups:
``::`` are commands intended for both the VM test and the users
``.. ifconfig:: zfs_root_test`` are commands intended only for the VM test
``.. code-block:: sh`` are commands intended only for users
Arch Linux Root on ZFS
=======================================
**Customization**
Unless stated otherwise, it is not recommended to customize system
configuration before reboot.
Preparation
---------------------------
#. Disable Secure Boot. ZFS modules cannot be loaded if Secure Boot is enabled.
#. Because the kernel of the latest live image might be incompatible with
ZFS, we will use Alpine Linux Extended, which ships with ZFS by
default.
Download latest extended variant of `Alpine Linux
live image
<https://dl-cdn.alpinelinux.org/alpine/v3.17/releases/x86_64/alpine-extended-3.17.3-x86_64.iso>`__,
verify `checksum <https://dl-cdn.alpinelinux.org/alpine/v3.17/releases/x86_64/alpine-extended-3.17.3-x86_64.iso.asc>`__
and boot from it.
.. code-block:: sh
gpg --auto-key-retrieve --keyserver hkps://keyserver.ubuntu.com --verify alpine-extended-*.asc
dd if=input-file of=output-file bs=1M
.. ifconfig:: zfs_root_test
# check whether the download page exists
# alpine version must be in sync with ci/cd test chroot tarball
#. Login as root user. There is no password.
#. Configure Internet
.. code-block:: sh
setup-interfaces -r
# You must use the "-r" option to start networking services properly
# example:
network interface: wlan0
WiFi name: <ssid>
ip address: dhcp
<enter done to finish network config>
manual netconfig: n
#. If you are using wireless network and it is not shown, see `Alpine
Linux wiki
<https://wiki.alpinelinux.org/wiki/Wi-Fi#wpa_supplicant>`__ for
further details. ``wpa_supplicant`` can be installed with ``apk
add wpa_supplicant`` without internet connection.
#. Configure SSH server
.. code-block:: sh
setup-sshd
# example:
ssh server: openssh
allow root: "prohibit-password" or "yes"
ssh key: "none" or "<public key>"
#. Set root password or ``/root/.ssh/authorized_keys``.
#. Connect from another computer
.. code-block:: sh
ssh root@192.168.1.91
#. Configure NTP client for time synchronization
.. code-block:: sh
setup-ntp busybox
.. ifconfig:: zfs_root_test
# this step is unnecessary for chroot and returns 1 when executed
#. Set up apk-repo. A list of available mirrors is shown.
Press space bar to continue
.. code-block:: sh
setup-apkrepos
#. Throughout this guide, we use predictable disk names generated by
udev
.. code-block:: sh
apk update
apk add eudev
setup-devd udev
.. ifconfig:: zfs_root_test
# for some reason, udev is extremely slow in a chroot;
# it is not needed in a chroot anyway, so skip this step
#. Target disk
List available disks with
.. code-block:: sh
find /dev/disk/by-id/
If virtio is used as the disk bus, power off the VM and set serial numbers for the disks.
For QEMU, use ``-drive format=raw,file=disk2.img,serial=AaBb``.
For libvirt, edit domain XML. See `this page
<https://bugzilla.redhat.com/show_bug.cgi?id=1245013>`__ for examples.
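For example, a libvirt sketch (``vm-name`` is a placeholder; the
``<serial>`` element goes inside the matching ``<disk>`` block of the
domain XML):
.. code-block:: sh
virsh edit vm-name
# inside the <disk> element, add for example:
#   <serial>AaBb</serial>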
Declare disk array
.. code-block:: sh
DISK='/dev/disk/by-id/ata-FOO /dev/disk/by-id/nvme-BAR'
For single disk installation, use
.. code-block:: sh
DISK='/dev/disk/by-id/disk1'
.. ifconfig:: zfs_root_test
# for github test run, use chroot and loop devices
DISK="$(losetup -a| grep archlinux | cut -f1 -d: | xargs -t -I '{}' printf '{} ')"
#. Set a mount point
::
MNT=$(mktemp -d)
#. Set partition size:
Set the swap size in GB; set it to 1 if you don't want swap to
take up too much space.
.. code-block:: sh
SWAPSIZE=4
.. ifconfig:: zfs_root_test
# For the test run, use 1GB swap space to avoid hitting CI/CD
# quota
SWAPSIZE=1
Set how much space should be left at the end of the disk, minimum 1GB
::
RESERVE=1
#. Install ZFS support from live media::
apk add zfs
#. Install partition tool
::
apk add parted e2fsprogs cryptsetup util-linux
System Installation
---------------------------
#. Partition the disks.
Note: you must clear all existing partition tables and data structures from the disks,
especially those with existing ZFS pools or mdraid and those that have been used as live media.
Those data structures may interfere with the boot process.
For flash-based storage, this can be done by uncommenting the blkdiscard command below:
::
partition_disk () {
local disk="${1}"
#blkdiscard -f "${disk}"
parted --script --align=optimal "${disk}" -- \
mklabel gpt \
mkpart EFI 2MiB 1GiB \
mkpart bpool 1GiB 5GiB \
mkpart rpool 5GiB -$((SWAPSIZE + RESERVE))GiB \
mkpart swap -$((SWAPSIZE + RESERVE))GiB -"${RESERVE}"GiB \
mkpart BIOS 1MiB 2MiB \
set 1 esp on \
set 5 bios_grub on \
set 5 legacy_boot on
partprobe "${disk}"
}
for i in ${DISK}; do
partition_disk "${i}"
done
.. ifconfig:: zfs_root_test
::
# When working with GitHub chroot runners, we are using loop
# devices as installation target. However, the alias support for
# loop device was just introduced in March 2023. See
# https://github.com/systemd/systemd/pull/26693
# For now, we will create the aliases manually as a workaround
looppart="1 2 3 4 5"
for i in ${DISK}; do
for j in ${looppart}; do
if test -e "${i}p${j}"; then
ln -s "${i}p${j}" "${i}-part${j}"
fi
done
done
#. Setup encrypted swap. This is useful if the available memory is
small::
for i in ${DISK}; do
cryptsetup open --type plain --key-file /dev/random "${i}"-part4 "${i##*/}"-part4
mkswap /dev/mapper/"${i##*/}"-part4
swapon /dev/mapper/"${i##*/}"-part4
done
#. Load ZFS kernel module
.. code-block:: sh
modprobe zfs
#. Create boot pool
::
# shellcheck disable=SC2046
zpool create -d \
-o feature@async_destroy=enabled \
-o feature@bookmarks=enabled \
-o feature@embedded_data=enabled \
-o feature@empty_bpobj=enabled \
-o feature@enabled_txg=enabled \
-o feature@extensible_dataset=enabled \
-o feature@filesystem_limits=enabled \
-o feature@hole_birth=enabled \
-o feature@large_blocks=enabled \
-o feature@lz4_compress=enabled \
-o feature@spacemap_histogram=enabled \
-o ashift=12 \
-o autotrim=on \
-O acltype=posixacl \
-O canmount=off \
-O compression=lz4 \
-O devices=off \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/boot \
-R "${MNT}" \
bpool \
mirror \
$(for i in ${DISK}; do
printf '%s ' "${i}-part2";
done)
If not using a multi-disk setup, remove ``mirror``.
You should not need to customize any of the options for the boot pool.
GRUB does not support all of the zpool features. See ``spa_feature_names``
in `grub-core/fs/zfs/zfs.c
<http://git.savannah.gnu.org/cgit/grub.git/tree/grub-core/fs/zfs/zfs.c#n276>`__.
This step creates a separate boot pool for ``/boot`` with the features
limited to only those that GRUB supports, allowing the root pool to use
any/all features.
#. Create root pool
::
# shellcheck disable=SC2046
zpool create \
-o ashift=12 \
-o autotrim=on \
-R "${MNT}" \
-O acltype=posixacl \
-O canmount=off \
-O compression=zstd \
-O dnodesize=auto \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/ \
rpool \
mirror \
$(for i in ${DISK}; do
printf '%s ' "${i}-part3";
done)
If not using a multi-disk setup, remove ``mirror``.
#. Create root system container:
- Unencrypted
::
zfs create \
-o canmount=off \
-o mountpoint=none \
rpool/archlinux
- Encrypted:
Pick a strong password. Once compromised, changing the password will not keep your
data safe. See ``zfs-change-key(8)`` for more info
.. code-block:: sh
zfs create \
-o canmount=off \
-o mountpoint=none \
-o encryption=on \
-o keylocation=prompt \
-o keyformat=passphrase \
rpool/archlinux
You can automate this step (insecure) with: ``echo POOLPASS | zfs create ...``.
Create system datasets and
manage their mountpoints with ``mountpoint=legacy``
::
zfs create -o canmount=noauto -o mountpoint=/ rpool/archlinux/root
zfs mount rpool/archlinux/root
zfs create -o mountpoint=legacy rpool/archlinux/home
mkdir "${MNT}"/home
mount -t zfs rpool/archlinux/home "${MNT}"/home
zfs create -o mountpoint=legacy rpool/archlinux/var
zfs create -o mountpoint=legacy rpool/archlinux/var/lib
zfs create -o mountpoint=legacy rpool/archlinux/var/log
zfs create -o mountpoint=none bpool/archlinux
zfs create -o mountpoint=legacy bpool/archlinux/root
mkdir "${MNT}"/boot
mount -t zfs bpool/archlinux/root "${MNT}"/boot
mkdir -p "${MNT}"/var/log
mkdir -p "${MNT}"/var/lib
mount -t zfs rpool/archlinux/var/lib "${MNT}"/var/lib
mount -t zfs rpool/archlinux/var/log "${MNT}"/var/log
#. Format and mount ESP
::
for i in ${DISK}; do
mkfs.vfat -n EFI "${i}"-part1
mkdir -p "${MNT}"/boot/efis/"${i##*/}"-part1
mount -t vfat -o iocharset=iso8859-1 "${i}"-part1 "${MNT}"/boot/efis/"${i##*/}"-part1
done
mkdir -p "${MNT}"/boot/efi
mount -t vfat -o iocharset=iso8859-1 "$(echo "${DISK}" | sed "s|^ *||" | cut -f1 -d' '|| true)"-part1 "${MNT}"/boot/efi
System Configuration
---------------------------
#. Download and extract minimal Arch Linux root filesystem::
apk add curl
curl --fail-early --fail -L \
https://america.archive.pkgbuild.com/iso/2023.04.01/archlinux-bootstrap-x86_64.tar.gz \
-o rootfs.tar.gz
curl --fail-early --fail -L \
https://america.archive.pkgbuild.com/iso/2023.04.01/archlinux-bootstrap-x86_64.tar.gz.sig \
-o rootfs.tar.gz.sig
apk add gnupg
gpg --auto-key-retrieve --keyserver hkps://keyserver.ubuntu.com --verify rootfs.tar.gz.sig
ln -s "${MNT}" "${MNT}"/root.x86_64
tar x -C "${MNT}" -af rootfs.tar.gz root.x86_64
#. Enable community repo
.. code-block:: sh
sed -i '/edge/d' /etc/apk/repositories
sed -i -E 's/#(.*)community/\1community/' /etc/apk/repositories
#. Generate fstab::
apk add arch-install-scripts
genfstab -t PARTUUID "${MNT}" \
| grep -v swap \
| sed "s|vfat.*rw|vfat rw,x-systemd.idle-timeout=1min,x-systemd.automount,noauto,nofail|" \
> "${MNT}"/etc/fstab
#. Chroot
.. code-block:: sh
cp /etc/resolv.conf "${MNT}"/etc/resolv.conf
for i in /dev /proc /sys; do mkdir -p "${MNT}"/"${i}"; mount --rbind "${i}" "${MNT}"/"${i}"; done
chroot "${MNT}" /usr/bin/env DISK="${DISK}" bash
.. ifconfig:: zfs_root_test
::
cp /etc/resolv.conf "${MNT}"/etc/resolv.conf
for i in /dev /proc /sys; do mkdir -p "${MNT}"/"${i}"; mount --rbind "${i}" "${MNT}"/"${i}"; done
chroot "${MNT}" /usr/bin/env DISK="${DISK}" bash <<-'ZFS_ROOT_NESTED_CHROOT'
set -vxeuf
#. Add archzfs repo to pacman config
::
pacman-key --init
pacman-key --refresh-keys
pacman-key --populate
curl --fail-early --fail -L https://archzfs.com/archzfs.gpg \
| pacman-key -a - --gpgdir /etc/pacman.d/gnupg
pacman-key \
--lsign-key \
--gpgdir /etc/pacman.d/gnupg \
DDF7DB817396A49B2A2723F7403BD972F75D9D76
tee -a /etc/pacman.d/mirrorlist-archzfs <<- 'EOF'
## See https://github.com/archzfs/archzfs/wiki
## France
#,Server = https://archzfs.com/$repo/$arch
## Germany
#,Server = https://mirror.sum7.eu/archlinux/archzfs/$repo/$arch
#,Server = https://mirror.biocrafting.net/archlinux/archzfs/$repo/$arch
## India
#,Server = https://mirror.in.themindsmaze.com/archzfs/$repo/$arch
## United States
#,Server = https://zxcvfdsa.com/archzfs/$repo/$arch
EOF
tee -a /etc/pacman.conf <<- 'EOF'
#[archzfs-testing]
#Include = /etc/pacman.d/mirrorlist-archzfs
#,[archzfs]
#,Include = /etc/pacman.d/mirrorlist-archzfs
EOF
# the '#,' prefix is a workaround for CI/CD tests;
# remove it from the live configuration:
sed -i 's|#,||' /etc/pacman.d/mirrorlist-archzfs
sed -i 's|#,||' /etc/pacman.conf
sed -i 's|^#||' /etc/pacman.d/mirrorlist
#. Install base packages::
pacman -Sy
pacman -S --noconfirm mg mandoc grub efibootmgr mkinitcpio
kernel_compatible_with_zfs="$(pacman -Si zfs-linux \
| grep 'Depends On' \
| sed "s|.*linux=||" \
| awk '{ print $1 }')"
pacman -U --noconfirm https://america.archive.pkgbuild.com/packages/l/linux/linux-"${kernel_compatible_with_zfs}"-x86_64.pkg.tar.zst
#. Install zfs packages::
pacman -S --noconfirm zfs-linux zfs-utils
#. Configure mkinitcpio::
sed -i 's|filesystems|zfs filesystems|' /etc/mkinitcpio.conf
mkinitcpio -P
#. For a physical machine, install firmware
.. code-block:: sh
pacman -S linux-firmware intel-ucode amd-ucode
#. Enable internet time synchronisation::
systemctl enable systemd-timesyncd
#. Generate host id::
zgenhostid -f -o /etc/hostid
#. Generate locales::
echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen
locale-gen
#. Set locale, keymap, timezone, hostname
::
rm -f /etc/localtime
systemd-firstboot \
--force \
--locale=en_US.UTF-8 \
--timezone=Etc/UTC \
--hostname=testhost \
--keymap=us
#. Set root password
::
printf 'root:yourpassword' | chpasswd
Bootloader
---------------------------
#. Apply GRUB workaround
::
echo 'export ZPOOL_VDEV_NAME_PATH=YES' >> /etc/profile.d/zpool_vdev_name_path.sh
# shellcheck disable=SC1091
. /etc/profile.d/zpool_vdev_name_path.sh
# GRUB fails to detect rpool name, hard code as "rpool"
sed -i "s|rpool=.*|rpool=rpool|" /etc/grub.d/10_linux
This workaround needs to be applied for every GRUB update, as the
update will overwrite the changes.
#. Install GRUB::
mkdir -p /boot/efi/archlinux/grub-bootdir/i386-pc/
mkdir -p /boot/efi/archlinux/grub-bootdir/x86_64-efi/
for i in ${DISK}; do
grub-install --target=i386-pc --boot-directory \
/boot/efi/archlinux/grub-bootdir/i386-pc/ "${i}"
done
grub-install --target x86_64-efi --boot-directory \
/boot/efi/archlinux/grub-bootdir/x86_64-efi/ --efi-directory \
/boot/efi --bootloader-id archlinux --removable
if test -d /sys/firmware/efi/efivars/; then
grub-install --target x86_64-efi --boot-directory \
/boot/efi/archlinux/grub-bootdir/x86_64-efi/ --efi-directory \
/boot/efi --bootloader-id archlinux
fi
#. Import both bpool and rpool at boot::
echo 'GRUB_CMDLINE_LINUX="zfs_import_dir=/dev/"' >> /etc/default/grub
#. Generate GRUB menu::
mkdir -p /boot/grub
grub-mkconfig -o /boot/grub/grub.cfg
cp /boot/grub/grub.cfg \
/boot/efi/archlinux/grub-bootdir/x86_64-efi/grub/grub.cfg
cp /boot/grub/grub.cfg \
/boot/efi/archlinux/grub-bootdir/i386-pc/grub/grub.cfg
.. ifconfig:: zfs_root_test
::
find /boot/efis/ -name "grub.cfg" -print0 \
| xargs -t -0I '{}' grub-script-check -v '{}'
#. For both legacy and EFI booting: mirror ESP content::
espdir=$(mktemp -d)
find /boot/efi/ -maxdepth 1 -mindepth 1 -type d -print0 \
| xargs -t -0I '{}' cp -r '{}' "${espdir}"
find "${espdir}" -maxdepth 1 -mindepth 1 -type d -print0 \
| xargs -t -0I '{}' sh -vxc "find /boot/efis/ -maxdepth 1 -mindepth 1 -type d -print0 | xargs -t -0I '[]' cp -r '{}' '[]'"
#. Exit chroot
.. code-block:: sh
exit
.. ifconfig:: zfs_root_test
# nested chroot ends here
ZFS_ROOT_NESTED_CHROOT
.. ifconfig:: zfs_root_test
::
# list contents of boot dir to confirm
# that the mirroring succeeded
find "${MNT}"/boot/efis/ -type d > list_of_efi_dirs
for i in ${DISK}; do
if ! grep "${i##*/}-part1/efi\|${i##*/}-part1/EFI" list_of_efi_dirs; then
echo "disk ${i} not found in efi system partition, installation error";
cat list_of_efi_dirs
exit 1
fi
done
#. Unmount filesystems and create initial system snapshot
You can later create a boot environment from this snapshot.
See `Root on ZFS maintenance page <../zfs_root_maintenance.html>`__.
::
umount -Rl "${MNT}"
zfs snapshot -r rpool@initial-installation
zfs snapshot -r bpool@initial-installation
#. Export all pools
.. code-block:: sh
zpool export -a
.. ifconfig:: zfs_root_test
# we are now inside a chroot, where the export will fail
# export pools when we are outside chroot
#. Reboot
.. code-block:: sh
reboot
.. ifconfig:: zfs_root_test
# chroot ends here
ZFS_ROOT_GUIDE_TEST


@@ -1,108 +0,0 @@
.. highlight:: sh
Preparation
======================
.. contents:: Table of Contents
:local:
#. Disable Secure Boot. ZFS modules cannot be loaded if Secure Boot is enabled.
#. Download an older `Arch Linux live image
<https://geo.mirror.pkgbuild.com/iso/>`__ and boot from it. The latest
image may lack a compatible kernel.
#. Connect to the Internet.
#. Set root password or ``/root/.ssh/authorized_keys``.
#. Start SSH server::
systemctl restart sshd
#. Connect from another computer::
ssh root@192.168.1.19
#. Use bash shell. Other shells are not tested::
bash
#. Target disk
List available disks with::
find /dev/disk/by-id/
If using virtio as disk bus, use ``/dev/disk/by-path/``.
Declare disk array::
DISK='/dev/disk/by-id/ata-FOO /dev/disk/by-id/nvme-BAR'
For single disk installation, use::
DISK='/dev/disk/by-id/disk1'
#. Set partition size:
Set swap size, set to 1 if you don't want swap to
take up too much space::
INST_PARTSIZE_SWAP=4
Root pool size, use all remaining disk space if not set::
INST_PARTSIZE_RPOOL=
#. Add ZFS repo::
curl -L https://archzfs.com/archzfs.gpg | pacman-key -a -
pacman-key --lsign-key $(curl -L https://git.io/JsfVS)
curl -L https://raw.githubusercontent.com/openzfs/openzfs-docs/master/docs/Getting%20Started/Arch%20Linux/archzfs-repo/mirrorlist-archzfs > /etc/pacman.d/mirrorlist-archzfs
tee -a /etc/pacman.conf <<- 'EOF'
#[archzfs-testing]
#Include = /etc/pacman.d/mirrorlist-archzfs
[archzfs]
Include = /etc/pacman.d/mirrorlist-archzfs
EOF
#. Check kernel version::
uname -r
#5.18.7-arch1-1
#. Find a ZFS package compatible with the kernel:
Search kernel version string (e.g. 5.18.7) in both pages:
* https://archzfs.com/archive_archzfs/
* https://archzfs.com/archzfs/x86_64/
Such as::
curl -L https://archzfs.com/archive_archzfs/ \
| grep zfs-linux-[0-9] \
| grep -v src.tar \
| grep --color "5.18.7"
# ...<a href="zfs-linux-2.1.5_5.18.7.arch1.1-1-x86_64.pkg.tar.zst">...
Result: https/.../archive_archzfs/zfs-linux-2.1.5_5.18.7.arch1.1-1-x86_64.pkg.tar.zst
#. Find compatible zfs-utils package:
Search ZFS version string (e.g. 2.1.5) in both pages above::
curl -L https://archzfs.com/archzfs/x86_64/ \
| grep -v src.tar \
| grep --color zfs-utils-2.1.5
# ...<a href="zfs-utils-2.1.5-1-x86_64.pkg.tar.zst">...
Result: https/.../archzfs/x86_64/zfs-utils-2.1.5-2-x86_64.pkg.tar.zst
#. Download both then install::
pacman -U link-to-zfs.zst link-to-utils.zst
#. Load kernel modules::
modprobe zfs


@@ -1,151 +0,0 @@
.. highlight:: sh
System Installation
======================
.. contents:: Table of Contents
:local:
#. Partition the disks::
for i in ${DISK}; do
# wipe flash-based storage device to improve
# performance.
# ALL DATA WILL BE LOST
# blkdiscard -f $i
sgdisk --zap-all $i
sgdisk -n1:1M:+1G -t1:EF00 $i
sgdisk -n2:0:+4G -t2:BE00 $i
sgdisk -n4:0:+${INST_PARTSIZE_SWAP}G -t4:8200 $i
if test -z $INST_PARTSIZE_RPOOL; then
sgdisk -n3:0:0 -t3:BF00 $i
else
sgdisk -n3:0:+${INST_PARTSIZE_RPOOL}G -t3:BF00 $i
fi
sgdisk -a1 -n5:24K:+1000K -t5:EF02 $i
sync && udevadm settle && sleep 3
cryptsetup open --type plain --key-file /dev/random $i-part4 ${i##*/}-part4
mkswap /dev/mapper/${i##*/}-part4
swapon /dev/mapper/${i##*/}-part4
done
#. Create boot pool::
zpool create \
-o compatibility=grub2 \
-o ashift=12 \
-o autotrim=on \
-O acltype=posixacl \
-O canmount=off \
-O compression=lz4 \
-O devices=off \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/boot \
-R /mnt \
bpool \
mirror \
$(for i in ${DISK}; do
printf "$i-part2 ";
done)
If not using a multi-disk setup, remove ``mirror``.
You should not need to customize any of the options for the boot pool.
GRUB does not support all of the zpool features. See ``spa_feature_names``
in `grub-core/fs/zfs/zfs.c
<http://git.savannah.gnu.org/cgit/grub.git/tree/grub-core/fs/zfs/zfs.c#n276>`__.
This step creates a separate boot pool for ``/boot`` with the features
limited to only those that GRUB supports, allowing the root pool to use
any/all features.
Features enabled with ``-o compatibility=grub2`` can be seen
`here <https://github.com/openzfs/zfs/blob/master/cmd/zpool/compatibility.d/grub2>`__.
#. Create root pool::
zpool create \
-o ashift=12 \
-o autotrim=on \
-R /mnt \
-O acltype=posixacl \
-O canmount=off \
-O compression=zstd \
-O dnodesize=auto \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/ \
rpool \
mirror \
$(for i in ${DISK}; do
printf "$i-part3 ";
done)
If not using a multi-disk setup, remove ``mirror``.
#. Create root system container:
- Unencrypted::
zfs create \
-o canmount=off \
-o mountpoint=none \
rpool/archlinux
- Encrypted:
Pick a strong password. Once compromised, changing the password will not keep your
data safe. See ``zfs-change-key(8)`` for more info::
zfs create \
-o canmount=off \
-o mountpoint=none \
-o encryption=on \
-o keylocation=prompt \
-o keyformat=passphrase \
rpool/archlinux
You can automate this step (insecure) with: ``echo POOLPASS | zfs create ...``.
Create system datasets, let Arch Linux declaratively
manage mountpoints with ``mountpoint=legacy``::
zfs create -o mountpoint=/ -o canmount=noauto rpool/archlinux/root
zfs mount rpool/archlinux/root
zfs create -o mountpoint=legacy rpool/archlinux/home
mkdir /mnt/home
mount -t zfs rpool/archlinux/home /mnt/home
zfs create -o mountpoint=legacy rpool/archlinux/var
zfs create -o mountpoint=legacy rpool/archlinux/var/lib
zfs create -o mountpoint=legacy rpool/archlinux/var/log
zfs create -o mountpoint=none bpool/archlinux
zfs create -o mountpoint=legacy bpool/archlinux/root
mkdir /mnt/boot
mount -t zfs bpool/archlinux/root /mnt/boot
mkdir -p /mnt/var/log
mkdir -p /mnt/var/lib
mount -t zfs rpool/archlinux/var/lib /mnt/var/lib
mount -t zfs rpool/archlinux/var/log /mnt/var/log
#. Format and mount ESP::
for i in ${DISK}; do
mkfs.vfat -n EFI ${i}-part1
mkdir -p /mnt/boot/efis/${i##*/}-part1
mount -t vfat ${i}-part1 /mnt/boot/efis/${i##*/}-part1
done
mkdir -p /mnt/boot/efi
mount -t vfat $(echo $DISK | cut -f1 -d' ')-part1 /mnt/boot/efi


@@ -1,80 +0,0 @@
.. highlight:: sh
System Configuration
======================
.. contents:: Table of Contents
:local:
#. Generate fstab::
mkdir -p /mnt/etc/
genfstab -t PARTUUID /mnt | grep -v swap > /mnt/etc/fstab
sed -i "s|vfat.*rw|vfat rw,x-systemd.idle-timeout=1min,x-systemd.automount,noauto,nofail|" /mnt/etc/fstab
#. Install packages::
pacstrap /mnt base mg mandoc grub efibootmgr mkinitcpio
CompatibleVer=$(pacman -Si zfs-linux \
| grep 'Depends On' \
| sed "s|.*linux=||" \
| awk '{ print $1 }')
pacstrap -U /mnt https://archive.archlinux.org/packages/l/linux/linux-${CompatibleVer}-x86_64.pkg.tar.zst
pacstrap /mnt zfs-linux zfs-utils
pacstrap /mnt linux-firmware intel-ucode amd-ucode
#. Configure mkinitcpio::
sed -i 's|filesystems|zfs filesystems|' /mnt/etc/mkinitcpio.conf
#. Enable internet time synchronisation::
hwclock --systohc
systemctl enable systemd-timesyncd --root=/mnt
#. Generate host id::
zgenhostid -f -o /mnt/etc/hostid
#. Add archzfs repo::
curl -L https://archzfs.com/archzfs.gpg | pacman-key -a - --gpgdir /mnt/etc/pacman.d/gnupg
pacman-key --lsign-key --gpgdir /mnt/etc/pacman.d/gnupg $(curl -L https://git.io/JsfVS)
curl -L https://git.io/Jsfw2 > /mnt/etc/pacman.d/mirrorlist-archzfs
tee -a /mnt/etc/pacman.conf <<- 'EOF'
#[archzfs-testing]
#Include = /etc/pacman.d/mirrorlist-archzfs
[archzfs]
Include = /etc/pacman.d/mirrorlist-archzfs
EOF
#. Chroot::
history -w /mnt/home/sys-install-pre-chroot.txt
arch-chroot /mnt /usr/bin/env DISK="$DISK" bash
#. Generate locales::
echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen
locale-gen
#. Generate initrd::
mkinitcpio -P
#. Import from by-id::
echo GRUB_CMDLINE_LINUX=\"zfs_import_dir=/dev/disk/by-id/\" >> /etc/default/grub
#. Set locale, keymap, timezone, hostname and root password::
rm -f /etc/localtime
systemd-firstboot --prompt --force


@@ -1,63 +0,0 @@
.. highlight:: sh
Bootloader
======================
.. contents:: Table of Contents
:local:
#. Apply GRUB workaround::
echo 'export ZPOOL_VDEV_NAME_PATH=YES' >> /etc/profile.d/zpool_vdev_name_path.sh
source /etc/profile.d/zpool_vdev_name_path.sh
# GRUB fails to detect rpool name, hard code as "rpool"
sed -i "s|rpool=.*|rpool=rpool|" /etc/grub.d/10_linux
This workaround needs to be applied for every GRUB update, as the
update will overwrite the changes.
#. Install GRUB::
mkdir -p /boot/efi/arch/grub-bootdir/i386-pc/
mkdir -p /boot/efi/arch/grub-bootdir/x86_64-efi/
for i in ${DISK}; do
grub-install --target=i386-pc --boot-directory \
/boot/efi/arch/grub-bootdir/i386-pc/ $i
done
grub-install --target x86_64-efi --boot-directory \
/boot/efi/arch/grub-bootdir/x86_64-efi/ --efi-directory \
/boot/efi --bootloader-id arch --removable
grub-mkconfig -o /boot/efi/arch/grub-bootdir/x86_64-efi/grub/grub.cfg
grub-mkconfig -o /boot/efi/arch/grub-bootdir/i386-pc/grub/grub.cfg
mkdir -p /boot/grub
grub-mkconfig -o /boot/grub/grub.cfg
#. For both legacy and EFI booting: mirror ESP content::
ESP_MIRROR=$(mktemp -d)
cp -r /boot/efi/EFI $ESP_MIRROR
for i in /boot/efis/*; do
cp -r $ESP_MIRROR/EFI $i
done
rm -rf $ESP_MIRROR
Finish Installation
~~~~~~~~~~~~~~~~~~~~
#. Exit chroot::
exit
#. Export pools::
umount -Rl /mnt
zpool export -a
#. Reboot::
reboot
#. You can create a snapshot of the newly installed
system for later rollback; see
`this page <https://openzfs.github.io/openzfs-docs/Getting%20Started/Arch%20Linux/Root%20on%20ZFS/6-create-boot-environment.html>`__.

View File

@@ -1,172 +0,0 @@
.. highlight:: sh
Create a Boot Environment
==========================
This page is tested for Alpine, Arch Linux, Fedora and
RHEL guides. Not useful for NixOS since system rollback
is already managed by Nix.
With Root on ZFS, it is possible to take a snapshot of
an existing root file system: a read-only copy of that
file system at a point in time. A new, full-fledged file
system, called a clone, can then be created from this
snapshot. A bootable clone of the current system is
called a "Boot Environment".
This could be useful if you are performing a major
system upgrade and wish to have the option to go back to
a previous state if the upgrade fails.
#. Identify which dataset is currently mounted as root
``/`` and boot ``/boot``::
findmnt /
# output
TARGET SOURCE FSTYPE OPTIONS
/ rpool/archlinux/root zfs rw,relatime,xattr,posixacl
findmnt /boot
# output
TARGET SOURCE FSTYPE OPTIONS
/boot bpool/archlinux/root zfs rw,relatime,xattr,posixacl
#. Identify your distribution in the dataset (file system) path::
DISTRO=archlinux # or `fedora', `alma', `alpinelinux'
#. Choose a name for the new boot environment::
BE_NAME=backup
#. Take snapshots of the ``/`` and ``/boot`` datasets::
zfs snapshot rpool/$DISTRO/root@$BE_NAME
zfs snapshot bpool/$DISTRO/root@$BE_NAME
#. Create clones from read-only snapshots::
zfs clone -o canmount=noauto \
-o mountpoint=/ \
rpool/$DISTRO/root@$BE_NAME \
rpool/$DISTRO/$BE_NAME
zfs clone -o canmount=noauto \
-o mountpoint=legacy \
bpool/$DISTRO/root@$BE_NAME \
bpool/$DISTRO/$BE_NAME
#. Mount clone and update file system table (fstab) ::
mount -t zfs -o zfsutil rpool/$DISTRO/$BE_NAME /mnt
mount -t zfs bpool/$DISTRO/$BE_NAME /mnt/boot
sed -i "s|rpool/$DISTRO/root|rpool/$DISTRO/$BE_NAME|g" /mnt/etc/fstab
sed -i "s|bpool/$DISTRO/root|bpool/$DISTRO/$BE_NAME|g" /mnt/etc/fstab
If a legacy mountpoint is used, omit ``-o zfsutil``
from the mount command.
#. Create GRUB menu for new clone::
m='/dev /proc /sys'
for i in $m; do mount --rbind $i /mnt/$i; done
chroot /mnt /usr/bin/env sh <<EOF
if which grub-mkconfig; then
grub-mkconfig -o /boot/grub.cfg
else
grub2-mkconfig -o /boot/grub.cfg
fi
EOF
GRUB menu contains information on kernel version and initramfs.
#. Unmount clone::
umount -Rl /mnt
#. Add new boot environment as GRUB menu entry::
tee -a new_entry <<EOF
menuentry 'ZFS Clone of ${DISTRO}: ${BE_NAME}' { configfile (hd0,gpt2)/${DISTRO}/${BE_NAME}@/grub.cfg }
EOF
find /boot/efis/ -name "grub.cfg" \
| while read i; do
if grep -q "${DISTRO}" $i; then
cat $i new_entry > grub.cfg
cp grub.cfg $i
fi
done
rm new_entry
#. After reboot, select boot environment entry from GRUB
menu to boot from the clone. Press ESC inside
submenu to return to the previous menu.
#. The steps above can also be used to create a new clone
from an existing snapshot.
#. To set a boot environment as default, replace
existing ``grub.cfg`` inside EFI system partition
with the one from the boot environment::
mount -t zfs bpool/$DISTRO/$BE_NAME /mnt
# backup existing grub.cfg inside EFI
# then replace it with menu from clone
mkdir -p /mnt/grub_menu_backup
menu_counter=1
find /boot/efis/ -name "grub.cfg" \
| while read i; do
if grep -q "${DISTRO}" $i; then
cp $i /mnt/grub_menu_backup/grub_${menu_counter}.cfg
echo $i > /mnt/grub_menu_backup/grub_${menu_counter}_path.txt
cp /mnt/grub.cfg $i
menu_counter=$(($menu_counter + 1))
fi
done
umount -Rl /mnt
#. To delete the boot environment, check with
``findmnt`` that the boot environment is not
currently used::
findmnt /
findmnt /boot
Set variables::
DISTRO=archlinux
Then check the origin snapshot::
zfs get origin rpool/archlinux/backup
# rpool/archlinux/root@backup
zfs get origin bpool/archlinux/backup
# bpool/archlinux/root@backup
Set the boot environment and its origin snapshot to remove::
RM_BE=backup
RM_SNAPSHOT=root@backup
Finally, destroy the clone (boot environment) and its
origin snapshot::
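# destroy the clone first: a snapshot cannot be
# destroyed while a clone still depends on it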
zfs destroy rpool/${DISTRO}/${RM_BE}
zfs destroy rpool/${DISTRO}/${RM_SNAPSHOT}
zfs destroy bpool/${DISTRO}/${RM_BE}
zfs destroy bpool/${DISTRO}/${RM_SNAPSHOT}
Remove GRUB entry::
find /boot/efis/ -name "grub.cfg" \
| while read i; do
if grep -q "${DISTRO}/${RM_BE}@/grub.cfg" $i; then
head -n -1 $i > grub.cfg
cp grub.cfg $i
fi
done

View File

@@ -1 +0,0 @@
DDF7DB817396A49B2A2723F7403BD972F75D9D76

View File

@@ -1,12 +0,0 @@
## France
Server = https://archzfs.com/$repo/$arch
## Germany
Server = https://mirror.sum7.eu/archlinux/archzfs/$repo/$arch
Server = https://mirror.biocrafting.net/archlinux/archzfs/$repo/$arch
## India
Server = https://mirror.in.themindsmaze.com/archzfs/$repo/$arch
## United States
Server = https://zxcvfdsa.com/archzfs/$repo/$arch

View File

@@ -31,34 +31,18 @@ ZFS support is provided by third-party `archzfs repo <https://github.com/archzfs
Installation
------------
Note: this is for installing ZFS on an existing Arch
Linux installation. To use ZFS as the root file system,
see below.
#. `Add archzfs repo to pacman <0-archzfs-repo.html>`__.
#. Install `zfs-linux* <1-zfs-linux.html>`__
or `zfs-dkms <2-zfs-dkms.html>`__ depending on your needs.
See the respective pages for details.
Live image
----------
The kernel package shipped with the latest live image might
not be compatible with ZFS; check the kernel version by
following the instructions `here <3-live.html>`__.
See `Archlinux Wiki <https://wiki.archlinux.org/title/ZFS>`__.
Root on ZFS
-----------
ZFS can be used as the root file system for Arch Linux.
An installation guide is available.
Start from "Preparation".
.. toctree::
:maxdepth: 1
:glob:
Root on ZFS/*
*
Contribute
----------
@@ -70,8 +54,8 @@ Contribute
pip3 install -r docs/requirements.txt
# Add ~/.local/bin to your $PATH, e.g. by adding this to ~/.bashrc:
[ -d $HOME/.local/bin ] && export PATH=$HOME/.local/bin:$PATH
# Add ~/.local/bin to your "${PATH}", e.g. by adding this to ~/.bashrc:
[ -d "${HOME}"/.local/bin ] && export PATH="${HOME}"/.local/bin:"${PATH}"
#. Make your changes.

View File

@@ -1,11 +0,0 @@
Fedora Root on ZFS
======================
Start from "Preparation".
Contents
--------
.. toctree::
:maxdepth: 2
:glob:
Root on ZFS/*

View File

@@ -0,0 +1,729 @@
.. highlight:: sh
.. ifconfig:: zfs_root_test
::
# For the CI/CD test run of this guide,
# Enable verbose logging of bash shell and fail immediately when
# a command fails.
set -vxeuf
distro=${1}
cp /etc/resolv.conf ./"rootfs-${distro}"/etc/resolv.conf
arch-chroot ./"rootfs-${distro}" sh <<-'ZFS_ROOT_GUIDE_TEST'
set -vxeuf
# install alpine setup scripts
apk update
apk add alpine-conf curl
.. In this document, there are three types of code-block markups:
``::`` are commands intended for both the vm test and the users
``.. ifconfig:: zfs_root_test`` are commands intended only for vm test
``.. code-block:: sh`` are commands intended only for users
Fedora Root on ZFS
=======================================
**Customization**
Unless stated otherwise, it is not recommended to customize system
configuration before reboot.
Preparation
---------------------------
#. Disable Secure Boot. ZFS modules can not be loaded if Secure Boot is enabled.
#. Because the kernel of latest Live CD might be incompatible with
ZFS, we will use Alpine Linux Extended, which ships with ZFS by
default.
Download latest extended variant of `Alpine Linux
live image
<https://dl-cdn.alpinelinux.org/alpine/v3.17/releases/x86_64/alpine-extended-3.17.3-x86_64.iso>`__,
verify `checksum <https://dl-cdn.alpinelinux.org/alpine/v3.17/releases/x86_64/alpine-extended-3.17.3-x86_64.iso.asc>`__
and boot from it.
.. code-block:: sh
gpg --auto-key-retrieve --keyserver hkps://keyserver.ubuntu.com --verify alpine-extended-*.asc
dd if=input-file of=output-file bs=1M
.. ifconfig:: zfs_root_test
# check whether the download page exists
# alpine version must be in sync with ci/cd test chroot tarball
#. Log in as the root user. There is no password.
#. Configure Internet
.. code-block:: sh
setup-interfaces -r
# You must use "-r" option to start networking services properly
# example:
network interface: wlan0
WiFi name: <ssid>
ip address: dhcp
<enter done to finish network config>
manual netconfig: n
#. If you are using a wireless network and it is not shown, see `Alpine
Linux wiki
<https://wiki.alpinelinux.org/wiki/Wi-Fi#wpa_supplicant>`__ for
further details. ``wpa_supplicant`` can be installed with ``apk
add wpa_supplicant`` without internet connection.
#. Configure SSH server
.. code-block:: sh
setup-sshd
# example:
ssh server: openssh
allow root: "prohibit-password" or "yes"
ssh key: "none" or "<public key>"
#. Set root password or ``/root/.ssh/authorized_keys``.
#. Connect from another computer
.. code-block:: sh
ssh root@192.168.1.91
#. Configure NTP client for time synchronization
.. code-block:: sh
setup-ntp busybox
.. ifconfig:: zfs_root_test
# this step is unnecessary for chroot and returns 1 when executed
#. Set up apk-repo. A list of available mirrors is shown.
Press space bar to continue
.. code-block:: sh
setup-apkrepos
#. Throughout this guide, we use predictable disk names generated by
udev
.. code-block:: sh
apk update
apk add eudev
setup-devd udev
.. ifconfig:: zfs_root_test
# for some reason, udev is extremely slow in chroot
# it is not needed for chroot anyway. so, skip this step
#. Target disk
List available disks with
.. code-block:: sh
find /dev/disk/by-id/
If virtio is used as the disk bus, power off the VM and set serial numbers for the disks.
For QEMU, use ``-drive format=raw,file=disk2.img,serial=AaBb``.
For libvirt, edit domain XML. See `this page
<https://bugzilla.redhat.com/show_bug.cgi?id=1245013>`__ for examples.
Declare disk array
.. code-block:: sh
DISK='/dev/disk/by-id/ata-FOO /dev/disk/by-id/nvme-BAR'
For single disk installation, use
.. code-block:: sh
DISK='/dev/disk/by-id/disk1'
.. ifconfig:: zfs_root_test
# for github test run, use chroot and loop devices
DISK="$(losetup -a| grep fedora | cut -f1 -d: | xargs -t -I '{}' printf '{} ')"
#. Set a mount point
::
MNT=$(mktemp -d)
#. Set partition size:
Set the swap size in GB; set it to 1 if you don't want swap to
take up too much space
.. code-block:: sh
SWAPSIZE=4
.. ifconfig:: zfs_root_test
# For the test run, use 1GB swap space to avoid hitting CI/CD
# quota
SWAPSIZE=1
Set how much space should be left at the end of the disk, minimum 1GB
::
RESERVE=1
#. Install ZFS support from live media::
apk add zfs
#. Install partition tool
::
apk add parted e2fsprogs cryptsetup util-linux
System Installation
---------------------------
#. Partition the disks.
Note: you must clear all existing partition tables and data structures from the disks,
especially those with existing ZFS pools or mdraid and those that have been used as live media.
Those data structures may interfere with the boot process.
For flash-based storage, this can be done by uncommenting the blkdiscard command below:
::
partition_disk () {
local disk="${1}"
#blkdiscard -f "${disk}"
parted --script --align=optimal "${disk}" -- \
mklabel gpt \
mkpart EFI 2MiB 1GiB \
mkpart bpool 1GiB 5GiB \
mkpart rpool 5GiB -$((SWAPSIZE + RESERVE))GiB \
mkpart swap -$((SWAPSIZE + RESERVE))GiB -"${RESERVE}"GiB \
mkpart BIOS 1MiB 2MiB \
set 1 esp on \
set 5 bios_grub on \
set 5 legacy_boot on
partprobe "${disk}"
}
for i in ${DISK}; do
partition_disk "${i}"
done
.. ifconfig:: zfs_root_test
::
# When working with GitHub chroot runners, we are using loop
# devices as installation target. However, the alias support for
# loop device was just introduced in March 2023. See
# https://github.com/systemd/systemd/pull/26693
# For now, we will create the aliases manually as a workaround
looppart="1 2 3 4 5"
for i in ${DISK}; do
for j in ${looppart}; do
if test -e "${i}p${j}"; then
ln -s "${i}p${j}" "${i}-part${j}"
fi
done
done
#. Set up encrypted swap. This is useful if the available memory is
small. Because the key is read from ``/dev/random``, the swap
contents do not survive a reboot and cannot be used for hibernation::
for i in ${DISK}; do
cryptsetup open --type plain --key-file /dev/random "${i}"-part4 "${i##*/}"-part4
mkswap /dev/mapper/"${i##*/}"-part4
swapon /dev/mapper/"${i##*/}"-part4
done
#. Load ZFS kernel module
.. code-block:: sh
modprobe zfs
#. Create boot pool
::
# shellcheck disable=SC2046
zpool create -d \
-o feature@async_destroy=enabled \
-o feature@bookmarks=enabled \
-o feature@embedded_data=enabled \
-o feature@empty_bpobj=enabled \
-o feature@enabled_txg=enabled \
-o feature@extensible_dataset=enabled \
-o feature@filesystem_limits=enabled \
-o feature@hole_birth=enabled \
-o feature@large_blocks=enabled \
-o feature@lz4_compress=enabled \
-o feature@spacemap_histogram=enabled \
-o ashift=12 \
-o autotrim=on \
-O acltype=posixacl \
-O canmount=off \
-O compression=lz4 \
-O devices=off \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/boot \
-R "${MNT}" \
bpool \
mirror \
$(for i in ${DISK}; do
printf '%s ' "${i}-part2";
done)
If not using a multi-disk setup, remove ``mirror``.
You should not need to customize any of the options for the boot pool.
GRUB does not support all of the zpool features. See ``spa_feature_names``
in `grub-core/fs/zfs/zfs.c
<http://git.savannah.gnu.org/cgit/grub.git/tree/grub-core/fs/zfs/zfs.c#n276>`__.
This step creates a separate boot pool for ``/boot`` with the features
limited to only those that GRUB supports, allowing the root pool to use
any/all features.
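After creating the pool, you can double-check which features are
enabled; only the GRUB-compatible features listed above should show
up as enabled or active. A quick check, assuming the pool name used
in this guide
.. code-block:: sh
zpool get all bpool | grep 'feature@' | grep -v disabled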
#. Create root pool
::
# shellcheck disable=SC2046
zpool create \
-o ashift=12 \
-o autotrim=on \
-R "${MNT}" \
-O acltype=posixacl \
-O canmount=off \
-O compression=zstd \
-O dnodesize=auto \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/ \
rpool \
mirror \
$(for i in ${DISK}; do
printf '%s ' "${i}-part3";
done)
If not using a multi-disk setup, remove ``mirror``.
#. Create root system container:
- Unencrypted
::
zfs create \
-o canmount=off \
-o mountpoint=none \
rpool/fedora
- Encrypted:
Pick a strong password. Once compromised, changing the password will not keep your
data safe. See ``zfs-change-key(8)`` for more info
.. code-block:: sh
zfs create \
-o canmount=off \
-o mountpoint=none \
-o encryption=on \
-o keylocation=prompt \
-o keyformat=passphrase \
rpool/fedora
You can automate this step (insecure) with: ``echo POOLPASS | zfs create ...``.
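The passphrase can be changed later with ``zfs change-key``. Note
that this only re-wraps the master key; existing data is not
re-encrypted. A sketch, assuming the container created above
.. code-block:: sh
zfs change-key -o keylocation=prompt -o keyformat=passphrase rpool/fedora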
Create system datasets,
manage mountpoints with ``mountpoint=legacy``
::
zfs create -o canmount=noauto -o mountpoint=/ rpool/fedora/root
zfs mount rpool/fedora/root
zfs create -o mountpoint=legacy rpool/fedora/home
mkdir "${MNT}"/home
mount -t zfs rpool/fedora/home "${MNT}"/home
zfs create -o mountpoint=legacy rpool/fedora/var
zfs create -o mountpoint=legacy rpool/fedora/var/lib
zfs create -o mountpoint=legacy rpool/fedora/var/log
zfs create -o mountpoint=none bpool/fedora
zfs create -o mountpoint=legacy bpool/fedora/root
mkdir "${MNT}"/boot
mount -t zfs bpool/fedora/root "${MNT}"/boot
mkdir -p "${MNT}"/var/log
mkdir -p "${MNT}"/var/lib
mount -t zfs rpool/fedora/var/lib "${MNT}"/var/lib
mount -t zfs rpool/fedora/var/log "${MNT}"/var/log
#. Format and mount ESP
::
for i in ${DISK}; do
mkfs.vfat -n EFI "${i}"-part1
mkdir -p "${MNT}"/boot/efis/"${i##*/}"-part1
mount -t vfat -o iocharset=iso8859-1 "${i}"-part1 "${MNT}"/boot/efis/"${i##*/}"-part1
done
mkdir -p "${MNT}"/boot/efi
mount -t vfat -o iocharset=iso8859-1 "$(echo "${DISK}" | sed "s|^ *||" | cut -f1 -d' '|| true)"-part1 "${MNT}"/boot/efi
System Configuration
---------------------------
#. Download and extract minimal Fedora root filesystem::
apk add curl
curl --fail-early --fail -L \
https://dl.fedoraproject.org/pub/fedora/linux/releases/38/Container/x86_64/images/Fedora-Container-Base-38-1.6.x86_64.tar.xz \
-o rootfs.tar.gz
curl --fail-early --fail -L \
https://dl.fedoraproject.org/pub/fedora/linux/releases/38/Container/x86_64/images/Fedora-Container-38-1.6-x86_64-CHECKSUM \
-o checksum
# BusyBox sha256sum treats all lines in the checksum file
# as checksums and requires two spaces "  "
# between filename and checksum
grep 'Container-Base' checksum \
| grep '^SHA256' \
| sed -E 's|.*= ([a-z0-9]*)$|\1  rootfs.tar.gz|' > ./sha256checksum
sha256sum -c ./sha256checksum
rootfs_tar=$(tar t -af rootfs.tar.gz | grep layer.tar)
rootfs_tar_dir=$(dirname "${rootfs_tar}")
tar x -af rootfs.tar.gz "${rootfs_tar}"
ln -s "${MNT}" "${MNT}"/"${rootfs_tar_dir}"
tar x -C "${MNT}" -af "${rootfs_tar}"
unlink "${MNT}"/"${rootfs_tar_dir}"
#. Enable community repo
.. code-block:: sh
sed -i '/edge/d' /etc/apk/repositories
sed -i -E 's/#(.*)community/\1community/' /etc/apk/repositories
#. Generate fstab::
apk add arch-install-scripts
genfstab -t PARTUUID "${MNT}" \
| grep -v swap \
| sed "s|vfat.*rw|vfat rw,x-systemd.idle-timeout=1min,x-systemd.automount,noauto,nofail|" \
> "${MNT}"/etc/fstab
#. Chroot
.. code-block:: sh
cp /etc/resolv.conf "${MNT}"/etc/resolv.conf
for i in /dev /proc /sys; do mkdir -p "${MNT}"/"${i}"; mount --rbind "${i}" "${MNT}"/"${i}"; done
chroot "${MNT}" /usr/bin/env DISK="${DISK}" bash
.. ifconfig:: zfs_root_test
cp /etc/resolv.conf "${MNT}"/etc/resolv.conf
for i in /dev /proc /sys; do mkdir -p "${MNT}"/"${i}"; mount --rbind "${i}" "${MNT}"/"${i}"; done
chroot "${MNT}" /usr/bin/env DISK="${DISK}" bash <<-'ZFS_ROOT_NESTED_CHROOT'
set -vxeuf
#. Unset all shell aliases, which can interfere with installation::
unalias -a
#. Install base packages
.. code-block:: sh
dnf -y install @core grub2-efi-x64 \
grub2-pc grub2-pc-modules grub2-efi-x64-modules shim-x64 \
efibootmgr kernel kernel-devel
.. ifconfig:: zfs_root_test
# no firmware for test
dnf -y install --setopt=install_weak_deps=False @core grub2-efi-x64 \
grub2-pc grub2-pc-modules grub2-efi-x64-modules shim-x64 \
efibootmgr
# kernel-core
#. Install ZFS packages
.. code-block:: sh
dnf -y install \
https://zfsonlinux.org/fedora/zfs-release-2-2$(rpm --eval "%{dist}").noarch.rpm
dnf -y install zfs zfs-dracut
.. ifconfig:: zfs_root_test
# this step will build zfs modules and fail
# no need to test building in chroot
#. Check whether ZFS modules are successfully built
.. code-block:: sh
tail -n10 /var/lib/dkms/zfs/**/build/make.log
# ERROR: modpost: GPL-incompatible module zfs.ko uses GPL-only symbol 'bio_start_io_acct'
# ERROR: modpost: GPL-incompatible module zfs.ko uses GPL-only symbol 'bio_end_io_acct_remapped'
# make[4]: [scripts/Makefile.modpost:138: /var/lib/dkms/zfs/2.1.9/build/module/Module.symvers] Error 1
# make[3]: [Makefile:1977: modpost] Error 2
# make[3]: Leaving directory '/usr/src/kernels/6.2.9-100.fc36.x86_64'
# make[2]: [Makefile:55: modules-Linux] Error 2
# make[2]: Leaving directory '/var/lib/dkms/zfs/2.1.9/build/module'
# make[1]: [Makefile:933: all-recursive] Error 1
# make[1]: Leaving directory '/var/lib/dkms/zfs/2.1.9/build'
# make: [Makefile:794: all] Error 2
If the build failed, you need to install a Long Term Support
kernel and its headers, then rebuild the ZFS module
.. code-block:: sh
# this is a third-party repo!
# you have been warned.
#
# select a kernel from
# https://copr.fedorainfracloud.org/coprs/kwizart/
dnf copr enable -y kwizart/kernel-longterm-VERSION
dnf install -y kernel-longterm kernel-longterm-devel
dnf remove -y kernel-core
ZFS modules will be built as part of the kernel installation.
Check the build log again with the ``tail`` command.
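You can also confirm that the module was registered and built with
dkms
.. code-block:: sh
dkms status zfs
# example output:
# zfs/2.1.9, 6.1.11-100.fc37.x86_64, x86_64: installed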
#. Add zfs modules to dracut
.. code-block:: sh
echo 'add_dracutmodules+=" zfs "' >> /etc/dracut.conf.d/zfs.conf
echo 'force_drivers+=" zfs "' >> /etc/dracut.conf.d/zfs.conf
.. ifconfig:: zfs_root_test
# skip this in chroot, because we did not build zfs module
#. Add other drivers to dracut::
if grep mpt3sas /proc/modules; then
echo 'force_drivers+=" mpt3sas "' >> /etc/dracut.conf.d/zfs.conf
fi
if grep virtio_blk /proc/modules; then
echo 'filesystems+=" virtio_blk "' >> /etc/dracut.conf.d/fs.conf
fi
#. Build initrd
::
find -D exec /lib/modules -maxdepth 1 \
-mindepth 1 -type d \
-exec sh -vxc \
'if test -e "$1"/modules.dep;
then kernel=$(basename "$1");
dracut --verbose --force --kver "${kernel}";
fi' sh {} \;
#. For SELinux, relabel filesystem on reboot::
fixfiles -F onboot
#. Enable internet time synchronisation::
systemctl enable systemd-timesyncd
#. Generate host id
.. code-block:: sh
zgenhostid -f -o /etc/hostid
.. ifconfig:: zfs_root_test
# because zfs is not installed, skip this step
#. Install locale package, example for English locale::
dnf install -y glibc-minimal-langpack glibc-langpack-en
#. Set locale, keymap, timezone, hostname
::
rm -f /etc/localtime
systemd-firstboot \
--force \
--locale=en_US.UTF-8 \
--timezone=Etc/UTC \
--hostname=testhost \
--keymap=us
#. Set root passwd
::
printf 'root:yourpassword' | chpasswd
Bootloader
---------------------------
#. Apply GRUB workaround
::
echo 'export ZPOOL_VDEV_NAME_PATH=YES' >> /etc/profile.d/zpool_vdev_name_path.sh
# shellcheck disable=SC1091
. /etc/profile.d/zpool_vdev_name_path.sh
# GRUB fails to detect rpool name, hard code as "rpool"
sed -i "s|rpool=.*|rpool=rpool|" /etc/grub.d/10_linux
This workaround needs to be applied for every GRUB update, as the
update will overwrite the changes.
#. Fedora and RHEL use the Boot Loader Specification (BLS) module
for GRUB, which does not support ZFS. Disable it::
echo 'GRUB_ENABLE_BLSCFG=false' >> /etc/default/grub
This means that you need to regenerate the GRUB menu and mirror it
to the other ESPs after every kernel update (see the following
steps); otherwise, the computer will still boot the old kernel on reboot.
#. Install GRUB::
mkdir -p /boot/efi/fedora/grub-bootdir/i386-pc/
for i in ${DISK}; do
grub2-install --target=i386-pc --boot-directory \
/boot/efi/fedora/grub-bootdir/i386-pc/ "${i}"
done
dnf reinstall -y grub2-efi-x64 shim-x64
cp -r /usr/lib/grub/x86_64-efi/ /boot/efi/EFI/fedora/
#. Generate GRUB menu
.. code-block:: sh
mkdir -p /boot/grub2
grub2-mkconfig -o /boot/grub2/grub.cfg
cp /boot/grub2/grub.cfg \
/boot/efi/efi/fedora/grub.cfg
cp /boot/grub2/grub.cfg \
/boot/efi/fedora/grub-bootdir/i386-pc/grub2/grub.cfg
.. ifconfig:: zfs_root_test
# GRUB menu can not be generated in test due to missing zfs programs
#. For both legacy and EFI booting: mirror ESP content::
espdir=$(mktemp -d)
find /boot/efi/ -maxdepth 1 -mindepth 1 -type d -print0 \
| xargs -t -0I '{}' cp -r '{}' "${espdir}"
find "${espdir}" -maxdepth 1 -mindepth 1 -type d -print0 \
| xargs -t -0I '{}' sh -vxc "find /boot/efis/ -maxdepth 1 -mindepth 1 -type d -print0 | xargs -t -0I '[]' cp -r '{}' '[]'"
#. Exit chroot
.. code-block:: sh
exit
.. ifconfig:: zfs_root_test
# nested chroot ends here
ZFS_ROOT_NESTED_CHROOT
.. ifconfig:: zfs_root_test
::
# list contents of boot dir to confirm
# that the mirroring succeeded
find "${MNT}"/boot/efis/ -type d > list_of_efi_dirs
for i in ${DISK}; do
if ! grep "${i##*/}-part1/efi\|${i##*/}-part1/EFI" list_of_efi_dirs; then
echo "disk ${i} not found in efi system partition, installation error";
cat list_of_efi_dirs
exit 1
fi
done
#. Unmount filesystems and create initial system snapshot
You can later create a boot environment from this snapshot.
See `Root on ZFS maintenance page <../zfs_root_maintenance.html>`__.
::
umount -Rl "${MNT}"
zfs snapshot -r rpool@initial-installation
zfs snapshot -r bpool@initial-installation
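You can verify that the snapshots were taken recursively
.. code-block:: sh
zfs list -t snapshot -o name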
#. Export all pools
.. code-block:: sh
zpool export -a
.. ifconfig:: zfs_root_test
# we are now inside a chroot, where the export will fail
# export pools when we are outside chroot
#. Reboot
.. code-block:: sh
reboot
#. For BIOS-legacy boot users only: the GRUB bootloader installed
might be unusable. In this case, see Bootloader Recovery section
in `Root on ZFS maintenance page <../zfs_root_maintenance.html>`__.
This issue is not related to the Alpine Linux chroot, as Arch Linux
installed with this method does not exhibit it.
The UEFI bootloader is not affected.
.. ifconfig:: zfs_root_test
# chroot ends here
ZFS_ROOT_GUIDE_TEST
Post installation
---------------------------
#. Install package groups
.. code-block:: sh
dnf group list --hidden -v # query package groups
dnf group install gnome-desktop
#. Add a new user and configure swap; a sketch for adding a user follows.
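A minimal sketch for creating an administrative user; the user name
is a placeholder
.. code-block:: sh
useradd -m -G wheel alice
passwd alice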

View File

@@ -1,71 +0,0 @@
.. highlight:: sh
Preparation
======================
.. contents:: Table of Contents
:local:
#. Disable Secure Boot. ZFS modules can not be loaded if
Secure Boot is enabled.
#. Download live Fedora media, such as this `LXQt spin
<https://spins.fedoraproject.org/lxqt/download/index.html>`__.
The installed system is the same regardless of live
media used.
#. Connect to the Internet.
#. Set root password or ``/root/.ssh/authorized_keys``.
#. Start SSH server::
echo PermitRootLogin yes >> /etc/ssh/sshd_config
systemctl restart sshd
#. Connect from another computer::
ssh root@192.168.1.19
#. Target disk
List available disks with::
find /dev/disk/by-id/
If using virtio as disk bus, use ``/dev/disk/by-path/``.
Declare disk array::
DISK='/dev/disk/by-id/ata-FOO /dev/disk/by-id/nvme-BAR'
For single disk installation, use::
DISK='/dev/disk/by-id/disk1'
#. Set partition size:
Set swap size, set to 1 if you don't want swap to
take up too much space::
INST_PARTSIZE_SWAP=4
Root pool size, use all remaining disk space if not set::
INST_PARTSIZE_RPOOL=
#. Temporarily set SELinux to permissive in live environment::
setenforce 0
SELinux will be enabled on the installed system.
#. Add ZFS repo and install ZFS inside live system::
dnf install -y https://zfsonlinux.org/fedora/zfs-release-2-2$(rpm --eval "%{dist}").noarch.rpm
rpm -e --nodeps zfs-fuse || true
source /etc/os-release
export VERSION_ID
dnf install -y https://dl.fedoraproject.org/pub/fedora/linux/releases/${VERSION_ID}/Everything/x86_64/os/Packages/k/kernel-devel-$(uname -r).rpm
dnf install -y zfs
modprobe zfs
#. Install partition tool and arch-install-scripts::
dnf install -y gdisk dosfstools arch-install-scripts

View File

@@ -1,151 +0,0 @@
.. highlight:: sh
System Installation
======================
.. contents:: Table of Contents
:local:
#. Partition the disks::
for i in ${DISK}; do
# wipe flash-based storage device to improve
# performance.
# ALL DATA WILL BE LOST
# blkdiscard -f $i
sgdisk --zap-all $i
sgdisk -n1:1M:+1G -t1:EF00 $i
sgdisk -n2:0:+4G -t2:BE00 $i
sgdisk -n4:0:+${INST_PARTSIZE_SWAP}G -t4:8200 $i
if test -z $INST_PARTSIZE_RPOOL; then
sgdisk -n3:0:0 -t3:BF00 $i
else
sgdisk -n3:0:+${INST_PARTSIZE_RPOOL}G -t3:BF00 $i
fi
sgdisk -a1 -n5:24K:+1000K -t5:EF02 $i
sync && udevadm settle && sleep 3
cryptsetup open --type plain --key-file /dev/random $i-part4 ${i##*/}-part4
mkswap /dev/mapper/${i##*/}-part4
swapon /dev/mapper/${i##*/}-part4
done
#. Create boot pool::
zpool create \
-o compatibility=grub2 \
-o ashift=12 \
-o autotrim=on \
-O acltype=posixacl \
-O canmount=off \
-O compression=lz4 \
-O devices=off \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/boot \
-R /mnt \
bpool \
mirror \
$(for i in ${DISK}; do
printf "$i-part2 ";
done)
If not using a multi-disk setup, remove ``mirror``.
You should not need to customize any of the options for the boot pool.
GRUB does not support all of the zpool features. See ``spa_feature_names``
in `grub-core/fs/zfs/zfs.c
<http://git.savannah.gnu.org/cgit/grub.git/tree/grub-core/fs/zfs/zfs.c#n276>`__.
This step creates a separate boot pool for ``/boot`` with the features
limited to only those that GRUB supports, allowing the root pool to use
any/all features.
Features enabled with ``-o compatibility=grub2`` can be seen
`here <https://github.com/openzfs/zfs/blob/master/cmd/zpool/compatibility.d/grub2>`__.
#. Create root pool::
zpool create \
-o ashift=12 \
-o autotrim=on \
-R /mnt \
-O acltype=posixacl \
-O canmount=off \
-O compression=zstd \
-O dnodesize=auto \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/ \
rpool \
mirror \
$(for i in ${DISK}; do
printf "$i-part3 ";
done)
If not using a multi-disk setup, remove ``mirror``.
#. Create root system container:
- Unencrypted::
zfs create \
-o canmount=off \
-o mountpoint=none \
rpool/fedora
- Encrypted:
Pick a strong password. Once compromised, changing the password will not keep your
data safe. See ``zfs-change-key(8)`` for more info::
zfs create \
-o canmount=off \
-o mountpoint=none \
-o encryption=on \
-o keylocation=prompt \
-o keyformat=passphrase \
rpool/fedora
You can automate this step (insecure) with: ``echo POOLPASS | zfs create ...``.
Create system datasets, let Fedora declaratively
manage mountpoints with ``mountpoint=legacy``::
zfs create -o mountpoint=/ -o canmount=noauto rpool/fedora/root
zfs mount rpool/fedora/root
zfs create -o mountpoint=legacy rpool/fedora/home
mkdir /mnt/home
mount -t zfs rpool/fedora/home /mnt/home
zfs create -o mountpoint=legacy rpool/fedora/var
zfs create -o mountpoint=legacy rpool/fedora/var/lib
zfs create -o mountpoint=legacy rpool/fedora/var/log
zfs create -o mountpoint=none bpool/fedora
zfs create -o mountpoint=legacy bpool/fedora/root
mkdir /mnt/boot
mount -t zfs bpool/fedora/root /mnt/boot
mkdir -p /mnt/var/log
mkdir -p /mnt/var/lib
mount -t zfs rpool/fedora/var/lib /mnt/var/lib
mount -t zfs rpool/fedora/var/log /mnt/var/log
#. Format and mount ESP::
for i in ${DISK}; do
mkfs.vfat -n EFI ${i}-part1
mkdir -p /mnt/boot/efis/${i##*/}-part1
mount -t vfat ${i}-part1 /mnt/boot/efis/${i##*/}-part1
done
mkdir -p /mnt/boot/efi
mount -t vfat $(echo $DISK | cut -f1 -d' ')-part1 /mnt/boot/efi

View File

@@ -1,88 +0,0 @@
.. highlight:: sh
System Configuration
======================
.. contents:: Table of Contents
:local:
#. Generate fstab::
mkdir -p /mnt/etc/
genfstab -t PARTUUID /mnt | grep -v swap > /mnt/etc/fstab
sed -i "s|vfat.*rw|vfat rw,x-systemd.idle-timeout=1min,x-systemd.automount,noauto,nofail|" /mnt/etc/fstab
#. Install basic system packages::
dnf --installroot=/mnt \
--releasever=$VERSION_ID -y install \
@core grub2-efi-x64 \
grub2-pc-modules grub2-efi-x64-modules \
shim-x64 efibootmgr \
kernel kernel-devel
dnf --installroot=/mnt \
--releasever=$VERSION_ID -y install \
https://zfsonlinux.org/fedora/zfs-release-2-2$(rpm --eval "%{dist}").noarch.rpm
dnf --installroot=/mnt --releasever=$VERSION_ID \
-y install zfs zfs-dracut
#. Configure dracut::
echo 'add_dracutmodules+=" zfs "' >> /mnt/etc/dracut.conf.d/zfs.conf
echo 'forced_drivers+=" zfs "' >> /mnt/etc/dracut.conf.d/zfs.conf
if grep mpt3sas /proc/modules; then
echo 'forced_drivers+=" mpt3sas "' >> /mnt/etc/dracut.conf.d/zfs.conf
fi
if grep virtio_blk /proc/modules; then
echo 'filesystems+=" virtio_blk "' >> /mnt/etc/dracut.conf.d/fs.conf
fi
#. Generate host id::
zgenhostid -f -o /mnt/etc/hostid
#. Install locale package, example for English locale::
dnf --installroot=/mnt install -y glibc-minimal-langpack glibc-langpack-en
#. By default SSH server is enabled, allowing root login by password,
disable SSH server::
systemctl disable sshd --root=/mnt
systemctl enable firewalld --root=/mnt
#. Chroot::
history -w /mnt/home/sys-install-pre-chroot.txt
arch-chroot /mnt /usr/bin/env DISK="$DISK" bash --login
#. For SELinux, relabel filesystem on reboot::
fixfiles -F onboot
#. Build ZFS modules::
for directory in /lib/modules/*; do
kernel_version=$(basename $directory)
dkms autoinstall -k $kernel_version
done
#. Generate initrd::
for directory in /lib/modules/*; do
kernel_version=$(basename $directory)
dracut --force --kver $kernel_version
done
#. Set locale, keymap, timezone, hostname and root password::
rm -f /etc/localtime
systemd-firstboot --prompt --root-password=PASSWORD --force
#. Set root password, the password set earlier does not work due to SELinux::
passwd

View File

@@ -1,81 +0,0 @@
.. highlight:: sh
Bootloader
======================
.. contents:: Table of Contents
:local:
#. Apply GRUB workaround::
echo 'export ZPOOL_VDEV_NAME_PATH=YES' >> /etc/profile.d/zpool_vdev_name_path.sh
source /etc/profile.d/zpool_vdev_name_path.sh
# GRUB fails to detect rpool name, hard code as "rpool"
sed -i "s|rpool=.*|rpool=rpool|" /etc/grub.d/10_linux
This ``sed`` workaround needs to be applied for every
GRUB update, as the update will overwrite the
changes.
#. Install GRUB::
echo 'GRUB_ENABLE_BLSCFG=false' >> /etc/default/grub
mkdir -p /boot/efi/fedora/grub-bootdir/i386-pc/
mkdir -p /boot/efi/fedora/grub-bootdir/x86_64-efi/
for i in ${DISK}; do
grub2-install --target=i386-pc --boot-directory \
/boot/efi/fedora/grub-bootdir/i386-pc/ $i
done
cp -r /usr/lib/grub/x86_64-efi/ /boot/efi/EFI/fedora/
grub2-mkconfig -o /boot/efi/EFI/fedora/grub.cfg
grub2-mkconfig -o /boot/efi/fedora/grub-bootdir/i386-pc/grub2/grub.cfg
mkdir -p /boot/grub2
grub2-mkconfig -o /boot/grub2/grub.cfg
#. For both legacy and EFI booting: mirror ESP content::
unalias -a
ESP_MIRROR=$(mktemp -d)
cp -r /boot/efi/EFI $ESP_MIRROR
for i in /boot/efis/*; do
cp -r $ESP_MIRROR/EFI $i
done
rm -rf $ESP_MIRROR
#. Note: you need to regenerate GRUB menu after kernel
updates, otherwise computer will still boot old
kernel on reboot::
grub2-mkconfig -o /boot/efi/EFI/fedora/grub.cfg
grub2-mkconfig -o /boot/efi/fedora/grub-bootdir/i386-pc/grub2/grub.cfg
grub2-mkconfig -o /boot/grub2/grub.cfg
Finish Installation
~~~~~~~~~~~~~~~~~~~~
#. Exit chroot::
exit
#. Export pools::
umount -Rl /mnt
zpool export -a
#. Reboot::
reboot
Post installation
~~~~~~~~~~~~~~~~
#. Install package groups::
dnf group list --hidden -v # query package groups
dnf group install gnome-desktop
#. Add a new user and configure swap.
#. You can create a snapshot of the newly installed
system for later rollback; see
`this page <https://openzfs.github.io/openzfs-docs/Getting%20Started/Arch%20Linux/Root%20on%20ZFS/6-create-boot-environment.html>`__.

View File

@@ -57,16 +57,6 @@ see below.
modprobe zfs
It might be necessary to rebuild the ZFS module::
for directory in /lib/modules/*; do
kernel_version=$(basename $directory)
dkms autoinstall -k $kernel_version
done
If, for some reason, the ZFS kernel module is not successfully built,
you can also run the above command to debug the problem.
#. By default ZFS kernel modules are loaded upon detecting a pool.
To always load the modules at boot::
@@ -87,13 +77,8 @@ These packages
Root on ZFS
-----------
ZFS can be used as the root file system for Fedora.
An installation guide is available.
Start from "Preparation".
.. toctree::
:maxdepth: 1
:glob:
Root on ZFS/*
*

View File

@@ -1,11 +1,503 @@
.. highlight:: sh
.. ifconfig:: zfs_root_test
# For the CI/CD test run of this guide,
# Enable verbose logging of bash shell and fail immediately when
# a command fails.
set -vxeuf
.. In this document, there are three types of code-block markups:
``::`` are commands intended for both the vm test and the users
``.. ifconfig:: zfs_root_test`` are commands intended only for vm test
``.. code-block:: sh`` are commands intended only for users
NixOS Root on ZFS
=======================================
Start from "Preparation".
**Note for arm64**:
Contents
--------
.. toctree::
:maxdepth: 2
:glob:
Currently there is a bug with the grub installation script. See `here
<https://github.com/NixOS/nixpkgs/issues/222491>`__ for details.
Root on ZFS/*
**Note for Immutable Root**:
Immutable root can be enabled or disabled by setting
``zfs-root.boot.immutable`` option inside per-host configuration.
**Customization**
Unless stated otherwise, it is not recommended to customize system
configuration before reboot.
Preparation
---------------------------
#. Disable Secure Boot. ZFS modules can not be loaded if Secure Boot is enabled.
#. Download `NixOS Live Image
<https://nixos.org/download.html#nixos-iso>`__ and boot from it.
.. code-block:: sh
sha256sum -c ./nixos-*.sha256
dd if=input-file of=output-file bs=1M
#. Connect to the Internet.
#. Set root password or ``/root/.ssh/authorized_keys``.
#. Start SSH server
.. code-block:: sh
systemctl restart sshd
#. Connect from another computer
.. code-block:: sh
ssh root@192.168.1.91
#. Target disk
List available disks with
.. code-block:: sh
find /dev/disk/by-id/
If virtio is used as the disk bus, power off the VM and set serial numbers for the disks.
For QEMU, use ``-drive format=raw,file=disk2.img,serial=AaBb``.
For libvirt, edit domain XML. See `this page
<https://bugzilla.redhat.com/show_bug.cgi?id=1245013>`__ for examples.
Declare disk array
.. code-block:: sh
DISK='/dev/disk/by-id/ata-FOO /dev/disk/by-id/nvme-BAR'
For single disk installation, use
.. code-block:: sh
DISK='/dev/disk/by-id/disk1'
.. ifconfig:: zfs_root_test
::
# for github test run, use chroot and loop devices
DISK="$(losetup --all| grep nixos | cut -f1 -d: | xargs -t -I '{}' printf '{} ')"
# if there is no loopdev, then we are using qemu virtualized test
# run, use sata disks instead
if test -z "${DISK}"; then
DISK=$(find /dev/disk/by-id -type l | grep -v DVD-ROM | grep -v -- -part | xargs -t -I '{}' printf '{} ')
fi
#. Set a mount point
::
MNT=$(mktemp -d)
#. Set partition size:
Set the swap size in GB; set it to 1 if you don't want swap to
take up too much space
.. code-block:: sh
SWAPSIZE=4
.. ifconfig:: zfs_root_test
# For the test run, use 1GB swap space to avoid hitting CI/CD
# quota
SWAPSIZE=1
Set how much space should be left at the end of the disk, minimum 1GB
::
RESERVE=1
#. Enable Nix Flakes functionality
::
mkdir -p ~/.config/nix
echo "experimental-features = nix-command flakes" >> ~/.config/nix/nix.conf
#. Install programs needed for system installation
::
if ! command -v git; then nix-env -f '<nixpkgs>' -iA git; fi
if ! command -v jq; then nix-env -f '<nixpkgs>' -iA jq; fi
if ! command -v partprobe; then nix-env -f '<nixpkgs>' -iA parted; fi
.. ifconfig:: zfs_root_test
::
# install missing packages in chroot
if (echo "${DISK}" | grep "/dev/loop"); then
nix-env -f '<nixpkgs>' -iA nixos-install-tools
fi
System Installation
---------------------------
#. Partition the disks.
Note: you must clear all existing partition tables and data structures from the disks,
especially those with existing ZFS pools or mdraid and those that have been used as live media.
Those data structures may interfere with the boot process.
For flash-based storage, this can be done by uncommenting the blkdiscard command below:
::
partition_disk () {
local disk="${1}"
#blkdiscard -f "${disk}"
parted --script --align=optimal "${disk}" -- \
mklabel gpt \
mkpart EFI 2MiB 1GiB \
mkpart bpool 1GiB 5GiB \
mkpart rpool 5GiB -$((SWAPSIZE + RESERVE))GiB \
mkpart swap -$((SWAPSIZE + RESERVE))GiB -"${RESERVE}"GiB \
mkpart BIOS 1MiB 2MiB \
set 1 esp on \
set 5 bios_grub on \
set 5 legacy_boot on
partprobe "${disk}"
udevadm settle
}
for i in ${DISK}; do
partition_disk "${i}"
done
.. ifconfig:: zfs_root_test
::
# When working with GitHub chroot runners, we are using loop
# devices as installation target. However, the alias support for
# loop device was just introduced in March 2023. See
# https://github.com/systemd/systemd/pull/26693
# For now, we will create the aliases manually as a workaround
looppart="1 2 3 4 5"
for i in ${DISK}; do
for j in ${looppart}; do
if test -e "${i}p${j}"; then
ln -s "${i}p${j}" "${i}-part${j}"
fi
done
done
#. Set up encrypted swap. This is useful if the available memory is
small. Because the key is read from ``/dev/random``, the swap
contents do not survive a reboot and cannot be used for hibernation::
for i in ${DISK}; do
cryptsetup open --type plain --key-file /dev/random "${i}"-part4 "${i##*/}"-part4
mkswap /dev/mapper/"${i##*/}"-part4
swapon /dev/mapper/"${i##*/}"-part4
done
#. Create boot pool
::
# shellcheck disable=SC2046
zpool create \
-o compatibility=grub2 \
-o ashift=12 \
-o autotrim=on \
-O acltype=posixacl \
-O canmount=off \
-O compression=lz4 \
-O devices=off \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/boot \
-R "${MNT}" \
bpool \
mirror \
$(for i in ${DISK}; do
printf '%s ' "${i}-part2";
done)
If not using a multi-disk setup, remove ``mirror``.
You should not need to customize any of the options for the boot pool.
GRUB does not support all of the zpool features. See ``spa_feature_names``
in `grub-core/fs/zfs/zfs.c
<http://git.savannah.gnu.org/cgit/grub.git/tree/grub-core/fs/zfs/zfs.c#n276>`__.
This step creates a separate boot pool for ``/boot`` with the features
limited to only those that GRUB supports, allowing the root pool to use
any/all features.
Features enabled with ``-o compatibility=grub2`` can be seen
`here <https://github.com/openzfs/zfs/blob/master/cmd/zpool/compatibility.d/grub2>`__.
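You can also query the compatibility property directly on the
created pool
.. code-block:: sh
zpool get compatibility bpool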
#. Create root pool
::
# shellcheck disable=SC2046
zpool create \
-o ashift=12 \
-o autotrim=on \
-R "${MNT}" \
-O acltype=posixacl \
-O canmount=off \
-O compression=zstd \
-O dnodesize=auto \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/ \
rpool \
mirror \
$(for i in ${DISK}; do
printf '%s ' "${i}-part3";
done)
If not using a multi-disk setup, remove ``mirror``.
#. Create root system container:
- Unencrypted
::
zfs create \
-o canmount=off \
-o mountpoint=none \
rpool/nixos
- Encrypted:
Pick a strong password. Once compromised, changing the password will not keep your
data safe. See ``zfs-change-key(8)`` for more info
.. code-block:: sh
zfs create \
-o canmount=off \
-o mountpoint=none \
-o encryption=on \
-o keylocation=prompt \
-o keyformat=passphrase \
rpool/nixos
You can automate this step (insecure) with: ``echo POOLPASS | zfs create ...``.
Create system datasets,
manage mountpoints with ``mountpoint=legacy``
::
zfs create -o mountpoint=legacy rpool/nixos/root
mount -t zfs rpool/nixos/root "${MNT}"/
zfs create -o mountpoint=legacy rpool/nixos/home
mkdir "${MNT}"/home
mount -t zfs rpool/nixos/home "${MNT}"/home
zfs create -o mountpoint=legacy rpool/nixos/var
zfs create -o mountpoint=legacy rpool/nixos/var/lib
zfs create -o mountpoint=legacy rpool/nixos/var/log
zfs create -o mountpoint=none bpool/nixos
zfs create -o mountpoint=legacy bpool/nixos/root
mkdir "${MNT}"/boot
mount -t zfs bpool/nixos/root "${MNT}"/boot
mkdir -p "${MNT}"/var/log
mkdir -p "${MNT}"/var/lib
mount -t zfs rpool/nixos/var/lib "${MNT}"/var/lib
mount -t zfs rpool/nixos/var/log "${MNT}"/var/log
zfs create -o mountpoint=legacy rpool/nixos/empty
zfs snapshot rpool/nixos/empty@start
#. Format and mount ESP
::
for i in ${DISK}; do
mkfs.vfat -n EFI "${i}"-part1
mkdir -p "${MNT}"/boot/efis/"${i##*/}"-part1
mount -t vfat -o iocharset=iso8859-1 "${i}"-part1 "${MNT}"/boot/efis/"${i##*/}"-part1
done
System Configuration
---------------------------
#. Clone template flake configuration
.. code-block:: sh
mkdir -p "${MNT}"/etc
git clone --depth 1 --branch openzfs-guide \
https://github.com/ne9z/dotfiles-flake.git "${MNT}"/etc/nixos
.. ifconfig:: zfs_root_test
::
# Use vm branch of the template config for test run
mkdir -p "${MNT}"/etc
git clone --depth 1 --branch openzfs-guide-testvm \
https://github.com/ne9z/dotfiles-flake.git "${MNT}"/etc/nixos
# for debugging: show template revision
git -C "${MNT}"/etc/nixos log -n1
#. From now on, the complete configuration of the system will be
tracked by git; set a user name and email address to continue
::
rm -rf "${MNT}"/etc/nixos/.git
git -C "${MNT}"/etc/nixos/ init -b main
git -C "${MNT}"/etc/nixos/ add "${MNT}"/etc/nixos/
git -C "${MNT}"/etc/nixos config user.email "you@example.com"
git -C "${MNT}"/etc/nixos config user.name "Alice Q. Nixer"
git -C "${MNT}"/etc/nixos commit -asm 'initial commit'
#. Customize configuration to your hardware
::
for i in ${DISK}; do
sed -i \
"s|/dev/disk/by-id/|${i%/*}/|" \
"${MNT}"/etc/nixos/hosts/exampleHost/default.nix
break
done
diskNames=""
for i in ${DISK}; do
diskNames="${diskNames} \"${i##*/}\""
done
sed -i "s|\"bootDevices_placeholder\"|${diskNames}|g" \
"${MNT}"/etc/nixos/hosts/exampleHost/default.nix
sed -i "s|\"abcd1234\"|\"$(head -c4 /dev/urandom | od -A none -t x4| sed 's| ||g' || true)\"|g" \
"${MNT}"/etc/nixos/hosts/exampleHost/default.nix
sed -i "s|\"x86_64-linux\"|\"$(uname -m || true)-linux\"|g" \
"${MNT}"/etc/nixos/flake.nix
cp "$(command -v nixos-generate-config || true)" ./nixos-generate-config
chmod a+rw ./nixos-generate-config
# shellcheck disable=SC2016
echo 'print STDOUT $initrdAvailableKernelModules' >> ./nixos-generate-config
kernelModules="$(./nixos-generate-config --show-hardware-config --no-filesystems | tail -n1 || true)"
sed -i "s|\"kernelModules_placeholder\"|${kernelModules}|g" \
"${MNT}"/etc/nixos/hosts/exampleHost/default.nix
.. ifconfig:: zfs_root_test
::
# show generated config
cat "${MNT}"/etc/nixos/hosts/exampleHost/default.nix
#. Set root password
.. code-block:: sh
rootPwd=$(mkpasswd -m SHA-512)
.. ifconfig:: zfs_root_test
::
# Use "test" for root password in test run
rootPwd=$(echo yourpassword | mkpasswd -m SHA-512 -)
Declare password in configuration
::
sed -i \
"s|rootHash_placeholder|${rootPwd}|" \
"${MNT}"/etc/nixos/configuration.nix
#. You can enable NetworkManager for wireless networks and GNOME
desktop environment in ``configuration.nix``.
#. Commit changes to local repo
::
git -C "${MNT}"/etc/nixos commit -asm 'initial installation'
#. Update flake lock file to track latest system version
::
nix flake update --commit-lock-file \
"git+file://${MNT}/etc/nixos"
#. Install system and apply configuration
.. code-block:: sh
nixos-install \
--root "${MNT}" \
--no-root-passwd \
--flake "git+file://${MNT}/etc/nixos#exampleHost"
.. ifconfig:: zfs_root_test
::
if (echo "${DISK}" | grep "/dev/loop"); then
# nixos-install command might fail in a chroot environment
# due to
# https://github.com/NixOS/nixpkgs/issues/220211
# it should be sufficient to test if the configuration builds
nix build "git+file://${MNT}/etc/nixos/#nixosConfigurations.exampleHost.config.system.build.toplevel"
nixos-install \
--root "${MNT}" \
--no-root-passwd \
--flake "git+file://${MNT}/etc/nixos#exampleHost" || true
else
# but with qemu test installation must be fully working
nixos-install \
--root "${MNT}" \
--no-root-passwd \
--flake "git+file://${MNT}/etc/nixos#exampleHost"
fi
.. ifconfig:: zfs_root_test
::
# list contents of boot dir to confirm
# that the mirroring succeeded
find "${MNT}"/boot/efis/ -type d
#. Unmount filesystems
::
umount -Rl "${MNT}"
zpool export -a
#. Reboot
.. code-block:: sh
reboot
.. ifconfig:: zfs_root_test
::
# For qemu test run, power off instead.
# Test run is successful if the vm powers off
if ! (echo "${DISK}" | grep "/dev/loop"); then
poweroff
fi
#. For instructions on maintenance tasks, see `Root on ZFS maintenance
page <../zfs_root_maintenance.html>`__.

View File

@@ -1,60 +0,0 @@
.. highlight:: sh
Preparation
======================
.. contents:: Table of Contents
:local:
**Note for arm64**
Currently there is a bug with the grub installation script. See `here
<https://github.com/NixOS/nixpkgs/issues/222491>`__ for details.
**Note for Immutable Root**
Immutable root can be enabled or disabled by setting
the ``zfs-root.boot.immutable`` option inside the per-host configuration.
#. Disable Secure Boot. ZFS modules can not be loaded if Secure Boot is enabled.
#. Download `NixOS Live Image
<https://nixos.org/download.html#download-nixos>`__ and boot from it.
#. Connect to the Internet.
#. Set root password or ``/root/.ssh/authorized_keys``.
#. Start SSH server::
systemctl restart sshd
#. Connect from another computer::
ssh root@192.168.1.91
#. Target disk
List available disks with::
find /dev/disk/by-id/
If using virtio as disk bus, use ``/dev/disk/by-path/``.
Declare disk array::
DISK='/dev/disk/by-id/ata-FOO /dev/disk/by-id/nvme-BAR'
For single disk installation, use::
DISK='/dev/disk/by-id/disk1'
#. Set partition size:
Set swap size, set to 1 if you don't want swap to
take up too much space::
INST_PARTSIZE_SWAP=4
It is recommended to set this value higher if your computer has
less than 8 GB of memory; otherwise, ZFS might fail to build.
Root pool size, use all remaining disk space if not set::
INST_PARTSIZE_RPOOL=

View File

@@ -1,150 +0,0 @@
.. highlight:: sh
System Installation
======================
.. contents:: Table of Contents
:local:
#. Partition the disks::
for i in ${DISK}; do
# wipe flash-based storage device to improve
# performance.
# ALL DATA WILL BE LOST
# blkdiscard -f $i
sgdisk --zap-all $i
sgdisk -n1:1M:+1G -t1:EF00 $i
sgdisk -n2:0:+4G -t2:BE00 $i
sgdisk -n4:0:+${INST_PARTSIZE_SWAP}G -t4:8200 $i
if test -z $INST_PARTSIZE_RPOOL; then
sgdisk -n3:0:0 -t3:BF00 $i
else
sgdisk -n3:0:+${INST_PARTSIZE_RPOOL}G -t3:BF00 $i
fi
sgdisk -a1 -n5:24K:+1000K -t5:EF02 $i
sync && udevadm settle && sleep 3
cryptsetup open --type plain --key-file /dev/random $i-part4 ${i##*/}-part4
mkswap /dev/mapper/${i##*/}-part4
swapon /dev/mapper/${i##*/}-part4
done
#. Create boot pool::
zpool create \
-o compatibility=grub2 \
-o ashift=12 \
-o autotrim=on \
-O acltype=posixacl \
-O canmount=off \
-O compression=lz4 \
-O devices=off \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/boot \
-R /mnt \
bpool \
mirror \
$(for i in ${DISK}; do
printf "$i-part2 ";
done)
If not using a multi-disk setup, remove ``mirror``.
You should not need to customize any of the options for the boot pool.
GRUB does not support all of the zpool features. See ``spa_feature_names``
in `grub-core/fs/zfs/zfs.c
<http://git.savannah.gnu.org/cgit/grub.git/tree/grub-core/fs/zfs/zfs.c#n276>`__.
This step creates a separate boot pool for ``/boot`` with the features
limited to only those that GRUB supports, allowing the root pool to use
any/all features.
Features enabled with ``-o compatibility=grub2`` can be seen
`here <https://github.com/openzfs/zfs/blob/master/cmd/zpool/compatibility.d/grub2>`__.
#. Create root pool::
zpool create \
-o ashift=12 \
-o autotrim=on \
-R /mnt \
-O acltype=posixacl \
-O canmount=off \
-O compression=zstd \
-O dnodesize=auto \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/ \
rpool \
mirror \
$(for i in ${DISK}; do
printf "$i-part3 ";
done)
If not using a multi-disk setup, remove ``mirror``.
#. Create root system container:
- Unencrypted::
zfs create \
-o canmount=off \
-o mountpoint=none \
rpool/nixos
- Encrypted:
Pick a strong password. Once compromised, changing the password will not keep your
data safe. See ``zfs-change-key(8)`` for more info::
zfs create \
-o canmount=off \
-o mountpoint=none \
-o encryption=on \
-o keylocation=prompt \
-o keyformat=passphrase \
rpool/nixos
You can automate this step (insecure) with: ``echo POOLPASS | zfs create ...``.
Create system datasets, let NixOS declaratively
manage mountpoints with ``mountpoint=legacy``::
zfs create -o mountpoint=legacy rpool/nixos/root
mount -t zfs rpool/nixos/root /mnt/
zfs create -o mountpoint=legacy rpool/nixos/home
mkdir /mnt/home
mount -t zfs rpool/nixos/home /mnt/home
zfs create -o mountpoint=legacy rpool/nixos/var
zfs create -o mountpoint=legacy rpool/nixos/var/lib
zfs create -o mountpoint=legacy rpool/nixos/var/log
zfs create -o mountpoint=none bpool/nixos
zfs create -o mountpoint=legacy bpool/nixos/root
mkdir /mnt/boot
mount -t zfs bpool/nixos/root /mnt/boot
mkdir -p /mnt/var/log
mkdir -p /mnt/var/lib
mount -t zfs rpool/nixos/var/lib /mnt/var/lib
mount -t zfs rpool/nixos/var/log /mnt/var/log
zfs create -o mountpoint=legacy rpool/nixos/empty
zfs snapshot rpool/nixos/empty@start
#. Format and mount ESP::
for i in ${DISK}; do
mkfs.vfat -n EFI ${i}-part1
mkdir -p /mnt/boot/efis/${i##*/}-part1
mount -t vfat ${i}-part1 /mnt/boot/efis/${i##*/}-part1
done

View File

@@ -1,189 +0,0 @@
.. highlight:: sh
System Configuration
======================
.. contents:: Table of Contents
:local:
#. Enter ephemeral nix-shell with git support::
mkdir -p /mnt/etc/
echo DISK=\"$DISK\" > ~/disk
nix-shell -p git
#. Clone template flake configuration::
source ~/disk
git clone https://github.com/ne9z/dotfiles-flake.git /mnt/etc/nixos
git -C /mnt/etc/nixos checkout openzfs-guide
#. Customize configuration to your hardware::
for i in $DISK; do
sed -i \
"s|/dev/disk/by-id/|${i%/*}/|" \
/mnt/etc/nixos/hosts/exampleHost/default.nix
break
done
diskNames=""
for i in $DISK; do
diskNames="$diskNames \"${i##*/}\""
done
sed -i "s|\"bootDevices_placeholder\"|$diskNames|g" \
/mnt/etc/nixos/hosts/exampleHost/default.nix
sed -i "s|\"abcd1234\"|\"$(head -c4 /dev/urandom | od -A none -t x4| sed 's| ||g')\"|g" \
/mnt/etc/nixos/hosts/exampleHost/default.nix
sed -i "s|\"x86_64-linux\"|\"$(uname -m)-linux\"|g" \
/mnt/etc/nixos/flake.nix
#. Set root password::
rootPwd=$(mkpasswd -m SHA-512 -s)
Declare password in configuration::
sed -i \
"s|rootHash_placeholder|${rootPwd}|" \
/mnt/etc/nixos/configuration.nix
#. You can enable NetworkManager for wireless networks and GNOME
desktop environment in ``configuration.nix``.
#. From now on, the complete configuration of the system will be
tracked by git; set a user name and email address to continue::
git -C /mnt/etc/nixos config user.email "you@example.com"
git -C /mnt/etc/nixos config user.name "Alice Q. Nixer"
#. Commit changes to local repo::
git -C /mnt/etc/nixos commit -asm 'initial installation'
#. Update flake lock file to track latest system version::
nix \
--extra-experimental-features 'nix-command flakes' \
flake update --commit-lock-file \
"git+file:///mnt/etc/nixos"
#. Install system and apply configuration::
nixos-install --no-root-passwd --flake "git+file:///mnt/etc/nixos#exampleHost"
#. Exit ephemeral nix shell with git::
exit
#. Unmount filesystems::
umount -Rl /mnt
zpool export -a
#. Reboot::
reboot
Replace a failed disk
=====================
When a disk fails in a mirrored setup, the disk can be
replaced with the following procedure.
#. Shutdown the computer.
#. Replace the failed disk with another disk. The
replacement should be at least the same size or
larger than the failed disk.
#. Boot the computer. When a disk fails, the system will boot, albeit
several minutes slower than normal. This is because the initrd and
systemd are designed to import a pool in a degraded state only after
a 90-second timeout. The swap partition on that disk will also fail.
#. Launch an ephemeral nix shell with gptfdisk::
nix-shell -p gptfdisk
#. Identify the bad disk and a working old disk::
ZPOOL_VDEV_NAME_PATH=1 zpool status
pool: bpool
status: DEGRADED
action: Replace the device using 'zpool replace'.
...
config: bpool
mirror-0
2387489723748 UNAVAIL 0 0 0 was /dev/disk/by-id/ata-BAD-part2
/dev/disk/by-id/ata-OLD-part2 ONLINE 0 0 0
#. Store the bad disk and a working old disk in a variable, omit the partition number ``-partN``::
BAD=/dev/disk/by-id/ata-BAD
OLD=/dev/disk/by-id/ata-OLD
#. Identify the new disk::
find /dev/disk/by-id/
/dev/disk/by-id/ata-OLD-part1
/dev/disk/by-id/ata-OLD-part2
...
/dev/disk/by-id/ata-OLD-part5
/dev/disk/by-id/ata-NEW <-- new disk w/o partition table
#. Store the new disk in a variable::
NEW=/dev/disk/by-id/ata-NEW
#. Replicate partition table on the new disk::
sgdisk -Z $NEW
sgdisk --backup=backup $OLD
sgdisk --load-backup=backup $NEW
sgdisk --randomize-guids $NEW
#. If the new disk is larger than the old disk, expand root pool partition size::
sgdisk --delete=3 $NEW
# expand to all remaining disk space
sgdisk -n3:0:0 -t3:BF00 $NEW
Note that this space will only become available once all disks in the mirrored pool are
replaced with larger disks.
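Once every disk in the mirror has been replaced and resilvered, the
new capacity can be claimed by expanding each device::
zpool online -e rpool ${NEW}-part3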
#. Format and mount EFI system partition::
mkfs.vfat -n EFI ${NEW}-part1
mkdir -p /boot/efis/${NEW##*/}-part1
mount -t vfat ${NEW}-part1 /boot/efis/${NEW##*/}-part1
#. Replace failed disk in pool::
zpool offline bpool ${BAD}-part2
zpool offline rpool ${BAD}-part3
zpool replace bpool ${BAD}-part2 ${NEW}-part2
zpool replace rpool ${BAD}-part3 ${NEW}-part3
zpool online bpool ${NEW}-part2
zpool online rpool ${NEW}-part3
Let the new disk resilver. Check status with ``zpool status``.
#. Update NixOS system configuration and commit changes to git repo::
sed -i "s|${BAD##*/}|${NEW##*/}|" /etc/nixos/hosts/exampleHost/default.nix
git -C /etc/nixos commit
#. Apply the updated NixOS system configuration, reinstall bootloader, then reboot::
nixos-rebuild boot --install-bootloader
reboot

View File

@@ -45,8 +45,10 @@ to modprobe until you make these changes and reboot.
tee -a /etc/nixos/zfs.nix <<EOF
{ config, pkgs, ... }:
{
boot.supportedFilesystems = [ "zfs" ];
networking.hostId = "$(head -c4 /dev/urandom | od -A none -t x4 | sed 's| ||g')";
boot.zfs.forceImportRoot = false;
}
EOF
@@ -56,40 +58,8 @@ to modprobe until you make these changes and reboot.
Root on ZFS
-----------
ZFS can be used as the root file system for NixOS.
An installation guide is available.
Start from "Preparation".
.. toctree::
:maxdepth: 1
:glob:
Root on ZFS/*
Contribute
----------
#. Fork and clone `this repo <https://github.com/openzfs/openzfs-docs>`__.
#. Launch an ephemeral nix-shell with the following packages::
nix-shell -p python39 python39Packages.pip gnumake \
python39Packages.setuptools
#. Create python virtual environment and install packages::
cd openzfs-docs
python -m venv .venv
source .venv/bin/activate
pip install -r docs/requirements.txt
#. Make your changes.
#. Test::
make html
sensible-browser _build/html/index.html
#. ``git commit --signoff`` to a branch, ``git push``, and create a pull
request. Mention @ne9z.
*

View File

@@ -1,11 +0,0 @@
RHEL Root on ZFS
=======================================
Start from "Preparation".
Contents
--------
.. toctree::
:maxdepth: 2
:glob:
RHEL-based distro Root on ZFS/*

View File

@@ -1,74 +0,0 @@
.. highlight:: sh
Preparation
======================
.. contents:: Table of Contents
:local:
#. Disable Secure Boot. ZFS modules can not be loaded if Secure Boot is enabled.
#. Download a variant of `AlmaLinux Minimal Live ISO
<https://repo.almalinux.org/almalinux/9.1/live/x86_64/>`__
and boot from it.
#. Connect to the Internet.
#. Set root password or ``/root/.ssh/authorized_keys``.
#. Start SSH server::
echo PermitRootLogin yes >> /etc/ssh/sshd_config
systemctl restart sshd
#. Connect from another computer::
ssh root@192.168.1.19
#. Target disk
List available disks with::
find /dev/disk/by-id/
If using virtio as disk bus, use ``/dev/disk/by-path/``.
Declare disk array::
DISK='/dev/disk/by-id/ata-FOO /dev/disk/by-id/nvme-BAR'
For single disk installation, use::
DISK='/dev/disk/by-id/disk1'
#. Set partition size:
Set swap size; set it to 1 if you don't want swap to
take up too much space::
INST_PARTSIZE_SWAP=4
Root pool size, use all remaining disk space if not set::
INST_PARTSIZE_RPOOL=
#. Temporarily set SELinux to permissive in live environment::
setenforce 0
SELinux will be enabled on the installed system.
#. Add ZFS repo and install ZFS inside live system::
dnf install -y https://zfsonlinux.org/epel/zfs-release-2-2$(rpm --eval "%{dist}").noarch.rpm
rpm -e --nodeps zfs-fuse || true
source /etc/os-release
export VERSION_ID
dnf config-manager --disable zfs
dnf config-manager --enable zfs-kmod
dnf install -y zfs
modprobe zfs
#. Install partition tool and arch-install-scripts::
dnf install -y epel-release
dnf install -y gdisk dosfstools cryptsetup
dnf download arch-install-scripts
rpm -i --nodeps arch-install-scripts*.rpm
dnf remove -y epel-release

View File

@@ -1,151 +0,0 @@
.. highlight:: sh
System Installation
======================
.. contents:: Table of Contents
:local:
#. Partition the disks::
for i in ${DISK}; do
# wipe flash-based storage device to improve
# performance.
# ALL DATA WILL BE LOST
# blkdiscard -f $i
sgdisk --zap-all $i
sgdisk -n1:1M:+1G -t1:EF00 $i
sgdisk -n2:0:+4G -t2:BE00 $i
sgdisk -n4:0:+${INST_PARTSIZE_SWAP}G -t4:8200 $i
if test -z $INST_PARTSIZE_RPOOL; then
sgdisk -n3:0:0 -t3:BF00 $i
else
sgdisk -n3:0:+${INST_PARTSIZE_RPOOL}G -t3:BF00 $i
fi
sgdisk -a1 -n5:24K:+1000K -t5:EF02 $i
sync && udevadm settle && sleep 3
cryptsetup open --type plain --key-file /dev/random $i-part4 ${i##*/}-part4
mkswap /dev/mapper/${i##*/}-part4
swapon /dev/mapper/${i##*/}-part4
done
#. Create boot pool::
zpool create \
-o compatibility=grub2 \
-o ashift=12 \
-o autotrim=on \
-O acltype=posixacl \
-O canmount=off \
-O compression=lz4 \
-O devices=off \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/boot \
-R /mnt \
bpool \
mirror \
$(for i in ${DISK}; do
printf "$i-part2 ";
done)
If not using a multi-disk setup, remove ``mirror``.
You should not need to customize any of the options for the boot pool.
GRUB does not support all of the zpool features. See ``spa_feature_names``
in `grub-core/fs/zfs/zfs.c
<http://git.savannah.gnu.org/cgit/grub.git/tree/grub-core/fs/zfs/zfs.c#n276>`__.
This step creates a separate boot pool for ``/boot`` with the features
limited to only those that GRUB supports, allowing the root pool to use
any/all features.
Features enabled with ``-o compatibility=grub2`` can be seen
`here <https://github.com/openzfs/zfs/blob/master/cmd/zpool/compatibility.d/grub2>`__.
#. Create root pool::
zpool create \
-o ashift=12 \
-o autotrim=on \
-R /mnt \
-O acltype=posixacl \
-O canmount=off \
-O compression=zstd \
-O dnodesize=auto \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/ \
rpool \
mirror \
$(for i in ${DISK}; do
printf "$i-part3 ";
done)
If not using a multi-disk setup, remove ``mirror``.
#. Create root system container:
- Unencrypted::
zfs create \
-o canmount=off \
-o mountpoint=none \
rpool/alma
- Encrypted:
Pick a strong password. Once it is compromised, changing the password will
not keep your data safe. See ``zfs-change-key(8)`` for more info::
zfs create \
-o canmount=off \
-o mountpoint=none \
-o encryption=on \
-o keylocation=prompt \
-o keyformat=passphrase \
rpool/alma
You can automate this step (insecure) with: ``echo POOLPASS | zfs create ...``.
Create system datasets, let Alma declaratively
manage mountpoints with ``mountpoint=legacy``::
zfs create -o mountpoint=/ -o canmount=noauto rpool/alma/root
zfs mount rpool/alma/root
zfs create -o mountpoint=legacy rpool/alma/home
mkdir /mnt/home
mount -t zfs rpool/alma/home /mnt/home
zfs create -o mountpoint=legacy rpool/alma/var
zfs create -o mountpoint=legacy rpool/alma/var/lib
zfs create -o mountpoint=legacy rpool/alma/var/log
zfs create -o mountpoint=none bpool/alma
zfs create -o mountpoint=legacy bpool/alma/root
mkdir /mnt/boot
mount -t zfs bpool/alma/root /mnt/boot
mkdir -p /mnt/var/log
mkdir -p /mnt/var/lib
mount -t zfs rpool/alma/var/lib /mnt/var/lib
mount -t zfs rpool/alma/var/log /mnt/var/log
#. Format and mount ESP::
for i in ${DISK}; do
mkfs.vfat -n EFI ${i}-part1
mkdir -p /mnt/boot/efis/${i##*/}-part1
mount -t vfat ${i}-part1 /mnt/boot/efis/${i##*/}-part1
done
mkdir -p /mnt/boot/efi
mount -t vfat $(echo $DISK | cut -f1 -d' ')-part1 /mnt/boot/efi

View File

@@ -1,80 +0,0 @@
.. highlight:: sh
System Configuration
======================
.. contents:: Table of Contents
:local:
#. Generate fstab::
mkdir -p /mnt/etc/
genfstab -t PARTUUID /mnt | grep -v swap > /mnt/etc/fstab
sed -i "s|vfat.*rw|vfat rw,x-systemd.idle-timeout=1min,x-systemd.automount,noauto,nofail|" /mnt/etc/fstab
#. Install basic system packages::
dnf --installroot=/mnt \
--releasever=$VERSION_ID -y install \
@core grub2-efi-x64 \
grub2-pc-modules grub2-efi-x64-modules \
shim-x64 efibootmgr \
kernel-$(uname -r)
dnf --installroot=/mnt \
--releasever=$VERSION_ID -y install \
https://zfsonlinux.org/epel/zfs-release-2-2$(rpm --eval "%{dist}").noarch.rpm
dnf config-manager --installroot=/mnt --disable zfs
dnf config-manager --installroot=/mnt --enable zfs-kmod
dnf --installroot=/mnt --releasever=$VERSION_ID \
-y install zfs zfs-dracut
#. Configure dracut::
echo 'add_dracutmodules+=" zfs "' >> /mnt/etc/dracut.conf.d/zfs.conf
echo 'forced_drivers+=" zfs "' >> /mnt/etc/dracut.conf.d/zfs.conf
if grep mpt3sas /proc/modules; then
echo 'forced_drivers+=" mpt3sas "' >> /mnt/etc/dracut.conf.d/zfs.conf
fi
if grep virtio_blk /proc/modules; then
echo 'filesystems+=" virtio_blk "' >> /mnt/etc/dracut.conf.d/fs.conf
fi
#. Generate host id::
zgenhostid -f -o /mnt/etc/hostid
#. Install locale package, example for English locale::
dnf --installroot=/mnt install -y glibc-minimal-langpack glibc-langpack-en
#. By default the SSH server is enabled, allowing root login by password.
Disable the SSH server and enable the firewall::
systemctl disable sshd --root=/mnt
systemctl enable firewalld --root=/mnt
#. Chroot::
history -w /mnt/home/sys-install-pre-chroot.txt
arch-chroot /mnt /usr/bin/env DISK="$DISK" bash --login
#. For SELinux, relabel filesystem on reboot::
fixfiles -F onboot
#. Generate initrd::
for directory in /lib/modules/*; do
kernel_version=$(basename $directory)
dracut --force --kver $kernel_version
done
#. Set locale, keymap, timezone, hostname and root password::
rm -f /etc/localtime
systemd-firstboot --prompt --root-password=PASSWORD --force
#. Set root password, the password set earlier does not work due to SELinux::
passwd

View File

@@ -1,81 +0,0 @@
.. highlight:: sh
Bootloader
======================
.. contents:: Table of Contents
:local:
#. Apply GRUB workaround::
echo 'export ZPOOL_VDEV_NAME_PATH=YES' >> /etc/profile.d/zpool_vdev_name_path.sh
source /etc/profile.d/zpool_vdev_name_path.sh
# GRUB fails to detect rpool name, hard code as "rpool"
sed -i "s|rpool=.*|rpool=rpool|" /etc/grub.d/10_linux
This ``sed`` workaround needs to be applied for every
GRUB update, as the update will overwrite the
changes.
#. Install GRUB::
echo 'GRUB_ENABLE_BLSCFG=false' >> /etc/default/grub
mkdir -p /boot/efi/almalinux/grub-bootdir/i386-pc/
mkdir -p /boot/efi/almalinux/grub-bootdir/x86_64-efi/
for i in ${DISK}; do
grub2-install --target=i386-pc --boot-directory \
/boot/efi/almalinux/grub-bootdir/i386-pc/ $i
done
cp -r /usr/lib/grub/x86_64-efi/ /boot/efi/EFI/almalinux/
grub2-mkconfig -o /boot/efi/EFI/almalinux/grub.cfg
grub2-mkconfig -o /boot/efi/almalinux/grub-bootdir/i386-pc/grub2/grub.cfg
mkdir -p /boot/grub2
grub2-mkconfig -o /boot/grub2/grub.cfg
#. For both legacy and EFI booting: mirror ESP content::
unalias -a
ESP_MIRROR=$(mktemp -d)
cp -r /boot/efi/EFI $ESP_MIRROR
for i in /boot/efis/*; do
cp -r $ESP_MIRROR/EFI $i
done
rm -rf $ESP_MIRROR
#. Note: you need to regenerate the GRUB menu after kernel
updates; otherwise the computer will still boot the old
kernel on reboot::
grub2-mkconfig -o /boot/efi/EFI/almalinux/grub.cfg
grub2-mkconfig -o /boot/efi/almalinux/grub-bootdir/i386-pc/grub2/grub.cfg
grub2-mkconfig -o /boot/grub2/grub.cfg
Finish Installation
~~~~~~~~~~~~~~~~~~~~
#. Exit chroot::
exit
#. Export pools::
umount -Rl /mnt
zpool export -a
#. Reboot::
reboot
Post installation
~~~~~~~~~~~~~~~~~
#. Install package groups::
dnf group list --hidden -v # query package groups
dnf group install gnome-desktop
#. Add new user, configure swap.
#. You can create a snapshot of the newly installed
system for later rollback,
see `this page <https://openzfs.github.io/openzfs-docs/Getting%20Started/Arch%20Linux/Root%20on%20ZFS/6-create-boot-environment.html>`__.

View File

@@ -0,0 +1,662 @@
.. highlight:: sh
.. ifconfig:: zfs_root_test
# For the CI/CD test run of this guide,
# Enable verbose logging of bash shell and fail immediately when
# a command fails.
set -vxeuf
distro=${1}
cp /etc/resolv.conf ./"rootfs-${distro}"/etc/resolv.conf
arch-chroot ./"rootfs-${distro}" sh <<-'ZFS_ROOT_GUIDE_TEST'
set -vxeuf
# install alpine setup scripts
apk update
apk add alpine-conf curl
.. In this document, there are three types of code-block markups:
``::`` are commands intended for both the vm test and the users
``.. ifconfig:: zfs_root_test`` are commands intended only for vm test
``.. code-block:: sh`` are commands intended only for users
Rocky Linux Root on ZFS
=======================================
**Customization**
Unless stated otherwise, it is not recommended to customize system
configuration before reboot.
Preparation
---------------------------
#. Disable Secure Boot. ZFS modules can not be loaded if Secure Boot is enabled.
#. Because the kernel of the latest Live CD might be incompatible with
ZFS, we will use Alpine Linux Extended, which ships with ZFS by
default.
Download latest extended variant of `Alpine Linux
live image
<https://dl-cdn.alpinelinux.org/alpine/v3.17/releases/x86_64/alpine-extended-3.17.3-x86_64.iso>`__,
verify `checksum <https://dl-cdn.alpinelinux.org/alpine/v3.17/releases/x86_64/alpine-extended-3.17.3-x86_64.iso.asc>`__
and boot from it.
.. code-block:: sh
gpg --auto-key-retrieve --keyserver hkps://keyserver.ubuntu.com --verify alpine-extended-*.asc
dd if=input-file of=output-file bs=1M
.. ifconfig:: zfs_root_test
# check whether the download page exists
# alpine version must be in sync with ci/cd test chroot tarball
#. Login as root user. There is no password.
#. Configure Internet
.. code-block:: sh
setup-interfaces -r
# You must use "-r" option to start networking services properly
# example:
network interface: wlan0
WiFi name: <ssid>
ip address: dhcp
<enter done to finish network config>
manual netconfig: n
#. If you are using wireless network and it is not shown, see `Alpine
Linux wiki
<https://wiki.alpinelinux.org/wiki/Wi-Fi#wpa_supplicant>`__ for
further details. ``wpa_supplicant`` can be installed with ``apk
add wpa_supplicant`` without internet connection.
#. Configure SSH server
.. code-block:: sh
setup-sshd
# example:
ssh server: openssh
allow root: "prohibit-password" or "yes"
ssh key: "none" or "<public key>"
#. Set root password or ``/root/.ssh/authorized_keys``.
#. Connect from another computer
.. code-block:: sh
ssh root@192.168.1.91
#. Configure NTP client for time synchronization
.. code-block:: sh
setup-ntp busybox
.. ifconfig:: zfs_root_test
# this step is unnecessary for chroot and returns 1 when executed
#. Set up apk-repo. A list of available mirrors is shown.
Press space bar to continue
.. code-block:: sh
setup-apkrepos
#. Throughout this guide, we use predictable disk names generated by
udev
.. code-block:: sh
apk update
apk add eudev
setup-devd udev
.. ifconfig:: zfs_root_test
# for some reason, udev is extremely slow in chroot
# it is not needed for chroot anyway. so, skip this step
#. Target disk
List available disks with
.. code-block:: sh
find /dev/disk/by-id/
If virtio is used as disk bus, power off the VM and set serial numbers for the disks.
For QEMU, use ``-drive format=raw,file=disk2.img,serial=AaBb``.
For libvirt, edit domain XML. See `this page
<https://bugzilla.redhat.com/show_bug.cgi?id=1245013>`__ for examples.
Declare disk array
.. code-block:: sh
DISK='/dev/disk/by-id/ata-FOO /dev/disk/by-id/nvme-BAR'
For single disk installation, use
.. code-block:: sh
DISK='/dev/disk/by-id/disk1'
.. ifconfig:: zfs_root_test
# for github test run, use chroot and loop devices
DISK="$(losetup -a| grep rhel | cut -f1 -d: | xargs -t -I '{}' printf '{} ')"
#. Set a mount point
::
MNT=$(mktemp -d)
#. Set partition size:
Set swap size in GB; set it to 1 if you don't want swap to
take up too much space
.. code-block:: sh
SWAPSIZE=4
.. ifconfig:: zfs_root_test
# For the test run, use 1GB swap space to avoid hitting CI/CD
# quota
SWAPSIZE=1
Set how much space should be left at the end of the disk, minimum 1GB
::
RESERVE=1
#. Install ZFS support from live media::
apk add zfs
#. Install partition tool
::
apk add parted e2fsprogs cryptsetup util-linux
System Installation
---------------------------
#. Partition the disks.
Note: you must clear all existing partition tables and data structures from the disks,
especially those with existing ZFS pools or mdraid and those that have been used as live media.
Those data structures may interfere with the boot process.
For flash-based storage, this can be done by uncommenting the blkdiscard command below:
::
partition_disk () {
local disk="${1}"
#blkdiscard -f "${disk}"
parted --script --align=optimal "${disk}" -- \
mklabel gpt \
mkpart EFI 2MiB 1GiB \
mkpart bpool 1GiB 5GiB \
mkpart rpool 5GiB -$((SWAPSIZE + RESERVE))GiB \
mkpart swap -$((SWAPSIZE + RESERVE))GiB -"${RESERVE}"GiB \
mkpart BIOS 1MiB 2MiB \
set 1 esp on \
set 5 bios_grub on \
set 5 legacy_boot on
partprobe "${disk}"
}
for i in ${DISK}; do
partition_disk "${i}"
done
.. ifconfig:: zfs_root_test
::
# When working with GitHub chroot runners, we are using loop
# devices as installation target. However, the alias support for
# loop device was just introduced in March 2023. See
# https://github.com/systemd/systemd/pull/26693
# For now, we will create the aliases manually as a workaround
looppart="1 2 3 4 5"
for i in ${DISK}; do
for j in ${looppart}; do
if test -e "${i}p${j}"; then
ln -s "${i}p${j}" "${i}-part${j}"
fi
done
done
#. Setup encrypted swap. This is useful if the available memory is
small::
for i in ${DISK}; do
cryptsetup open --type plain --key-file /dev/random "${i}"-part4 "${i##*/}"-part4
mkswap /dev/mapper/"${i##*/}"-part4
swapon /dev/mapper/"${i##*/}"-part4
done
#. Load ZFS kernel module
.. code-block:: sh
modprobe zfs
#. Create boot pool
::
# shellcheck disable=SC2046
zpool create -d \
-o feature@async_destroy=enabled \
-o feature@bookmarks=enabled \
-o feature@embedded_data=enabled \
-o feature@empty_bpobj=enabled \
-o feature@enabled_txg=enabled \
-o feature@extensible_dataset=enabled \
-o feature@filesystem_limits=enabled \
-o feature@hole_birth=enabled \
-o feature@large_blocks=enabled \
-o feature@lz4_compress=enabled \
-o feature@spacemap_histogram=enabled \
-o ashift=12 \
-o autotrim=on \
-O acltype=posixacl \
-O canmount=off \
-O compression=lz4 \
-O devices=off \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/boot \
-R "${MNT}" \
bpool \
mirror \
$(for i in ${DISK}; do
printf '%s ' "${i}-part2";
done)
If not using a multi-disk setup, remove ``mirror``.
You should not need to customize any of the options for the boot pool.
GRUB does not support all of the zpool features. See ``spa_feature_names``
in `grub-core/fs/zfs/zfs.c
<http://git.savannah.gnu.org/cgit/grub.git/tree/grub-core/fs/zfs/zfs.c#n276>`__.
This step creates a separate boot pool for ``/boot`` with the features
limited to only those that GRUB supports, allowing the root pool to use
any/all features.
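You can verify which features were actually enabled on the boot pool;
a sketch
.. code-block:: sh
zpool get all bpool | grep feature@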
#. Create root pool
::
# shellcheck disable=SC2046
zpool create \
-o ashift=12 \
-o autotrim=on \
-R "${MNT}" \
-O acltype=posixacl \
-O canmount=off \
-O compression=zstd \
-O dnodesize=auto \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/ \
rpool \
mirror \
$(for i in ${DISK}; do
printf '%s ' "${i}-part3";
done)
If not using a multi-disk setup, remove ``mirror``.
#. Create root system container:
- Unencrypted
::
zfs create \
-o canmount=off \
-o mountpoint=none \
rpool/rhel
- Encrypted:
Pick a strong password. Once it is compromised, changing the password will
not keep your data safe. See ``zfs-change-key(8)`` for more info
.. code-block:: sh
zfs create \
-o canmount=off \
-o mountpoint=none \
-o encryption=on \
-o keylocation=prompt \
-o keyformat=passphrase \
rpool/rhel
You can automate this step (insecure) with: ``echo POOLPASS | zfs create ...``.
Create system datasets,
manage mountpoints with ``mountpoint=legacy``
::
zfs create -o canmount=noauto -o mountpoint=/ rpool/rhel/root
zfs mount rpool/rhel/root
zfs create -o mountpoint=legacy rpool/rhel/home
mkdir "${MNT}"/home
mount -t zfs rpool/rhel/home "${MNT}"/home
zfs create -o mountpoint=legacy rpool/rhel/var
zfs create -o mountpoint=legacy rpool/rhel/var/lib
zfs create -o mountpoint=legacy rpool/rhel/var/log
zfs create -o mountpoint=none bpool/rhel
zfs create -o mountpoint=legacy bpool/rhel/root
mkdir "${MNT}"/boot
mount -t zfs bpool/rhel/root "${MNT}"/boot
mkdir -p "${MNT}"/var/log
mkdir -p "${MNT}"/var/lib
mount -t zfs rpool/rhel/var/lib "${MNT}"/var/lib
mount -t zfs rpool/rhel/var/log "${MNT}"/var/log
#. Format and mount ESP
::
for i in ${DISK}; do
mkfs.vfat -n EFI "${i}"-part1
mkdir -p "${MNT}"/boot/efis/"${i##*/}"-part1
mount -t vfat -o iocharset=iso8859-1 "${i}"-part1 "${MNT}"/boot/efis/"${i##*/}"-part1
done
mkdir -p "${MNT}"/boot/efi
mount -t vfat -o iocharset=iso8859-1 "$(echo "${DISK}" | sed "s|^ *||" | cut -f1 -d' '|| true)"-part1 "${MNT}"/boot/efi
System Configuration
---------------------------
#. Download and extract the minimal Rocky Linux root filesystem::
apk add curl
curl --fail-early --fail -L \
https://dl.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-Container-Base-9.1-20230215.0.x86_64.tar.xz \
-o rootfs.tar.gz
curl --fail-early --fail -L \
https://dl.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-Container-Base-9.1-20230215.0.x86_64.tar.xz.CHECKSUM \
-o checksum
# BusyBox sha256sum treats all lines in the checksum file
# as checksums and requires two spaces " "
# between filename and checksum
grep 'Container-Base' checksum \
| grep '^SHA256' \
| sed -E 's|.*= ([a-z0-9]*)$|\1 rootfs.tar.gz|' > ./sha256checksum
sha256sum -c ./sha256checksum
tar x -C "${MNT}" -af rootfs.tar.gz
#. Enable community repo
.. code-block:: sh
sed -i '/edge/d' /etc/apk/repositories
sed -i -E 's/#(.*)community/\1community/' /etc/apk/repositories
#. Generate fstab::
apk add arch-install-scripts
genfstab -t PARTUUID "${MNT}" \
| grep -v swap \
| sed "s|vfat.*rw|vfat rw,x-systemd.idle-timeout=1min,x-systemd.automount,noauto,nofail|" \
> "${MNT}"/etc/fstab
#. Chroot
.. code-block:: sh
cp /etc/resolv.conf "${MNT}"/etc/resolv.conf
for i in /dev /proc /sys; do mkdir -p "${MNT}"/"${i}"; mount --rbind "${i}" "${MNT}"/"${i}"; done
chroot "${MNT}" /usr/bin/env DISK="${DISK}" bash
.. ifconfig:: zfs_root_test
cp /etc/resolv.conf "${MNT}"/etc/resolv.conf
for i in /dev /proc /sys; do mkdir -p "${MNT}"/"${i}"; mount --rbind "${i}" "${MNT}"/"${i}"; done
chroot "${MNT}" /usr/bin/env DISK="${DISK}" bash <<-'ZFS_ROOT_NESTED_CHROOT'
set -vxeuf
#. Unset all shell aliases, which can interfere with installation::
unalias -a
#. Install base packages
.. code-block:: sh
dnf -y install --allowerasing @core grub2-efi-x64 \
grub2-pc grub2-pc-modules grub2-efi-x64-modules shim-x64 \
efibootmgr kernel-core
.. ifconfig:: zfs_root_test
# skip installing firmware in test
dnf -y install --allowerasing --setopt=install_weak_deps=False \
@core grub2-efi-x64 \
grub2-pc grub2-pc-modules grub2-efi-x64-modules shim-x64 \
efibootmgr kernel-core
#. Install ZFS packages::
dnf install -y https://zfsonlinux.org/epel/zfs-release-2-2"$(rpm --eval "%{dist}"|| true)".noarch.rpm
dnf config-manager --disable zfs
dnf config-manager --enable zfs-kmod
dnf install -y zfs zfs-dracut
#. Add zfs modules to dracut::
echo 'add_dracutmodules+=" zfs "' >> /etc/dracut.conf.d/zfs.conf
echo 'force_drivers+=" zfs "' >> /etc/dracut.conf.d/zfs.conf
#. Add other drivers to dracut::
if grep mpt3sas /proc/modules; then
echo 'force_drivers+=" mpt3sas "' >> /etc/dracut.conf.d/zfs.conf
fi
if grep virtio_blk /proc/modules; then
echo 'filesystems+=" virtio_blk "' >> /etc/dracut.conf.d/fs.conf
fi
#. Build initrd::
find -D exec /lib/modules -maxdepth 1 \
-mindepth 1 -type d \
-exec sh -vxc \
'if test -e "$1"/modules.dep;
then kernel=$(basename "$1");
dracut --verbose --force --kver "${kernel}";
fi' sh {} \;
#. For SELinux, relabel filesystem on reboot::
fixfiles -F onboot
#. Generate host id::
zgenhostid -f -o /etc/hostid
#. Install locale package, example for English locale::
dnf install -y glibc-minimal-langpack glibc-langpack-en
#. Set locale, keymap, timezone, hostname
::
rm -f /etc/localtime
systemd-firstboot \
--force \
--locale=en_US.UTF-8 \
--timezone=Etc/UTC \
--hostname=testhost \
--keymap=us
#. Set root passwd
::
printf 'root:yourpassword' | chpasswd
Bootloader
---------------------------
#. Apply GRUB workaround
::
echo 'export ZPOOL_VDEV_NAME_PATH=YES' >> /etc/profile.d/zpool_vdev_name_path.sh
# shellcheck disable=SC1091
. /etc/profile.d/zpool_vdev_name_path.sh
# GRUB fails to detect rpool name, hard code as "rpool"
sed -i "s|rpool=.*|rpool=rpool|" /etc/grub.d/10_linux
This workaround needs to be applied for every GRUB update, as the
update will overwrite the changes.
#. RHEL uses the Boot Loader Specification (BLS) module for GRUB,
which does not support ZFS. Disable it::
echo 'GRUB_ENABLE_BLSCFG=false' >> /etc/default/grub
This means that you need to regenerate the GRUB menu and mirror it
after every kernel update; otherwise the computer will still boot the
old kernel on reboot.
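For example, after a kernel update, regenerate the menus using the paths
set up in the steps below, then mirror the ESP content again; a sketch
.. code-block:: sh
grub2-mkconfig -o /boot/grub2/grub.cfg
cp /boot/grub2/grub.cfg /boot/efi/EFI/rocky/grub.cfg
cp /boot/grub2/grub.cfg /boot/efi/rocky/grub-bootdir/i386-pc/grub2/grub.cfg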
#. Install GRUB::
mkdir -p /boot/efi/rocky/grub-bootdir/i386-pc/
for i in ${DISK}; do
grub2-install --target=i386-pc --boot-directory \
/boot/efi/rocky/grub-bootdir/i386-pc/ "${i}"
done
dnf reinstall -y grub2-efi-x64 shim-x64
cp -r /usr/lib/grub/x86_64-efi/ /boot/efi/EFI/rocky/
#. Generate GRUB menu::
mkdir -p /boot/grub2
grub2-mkconfig -o /boot/grub2/grub.cfg
cp /boot/grub2/grub.cfg \
/boot/efi/EFI/rocky/grub.cfg
cp /boot/grub2/grub.cfg \
/boot/efi/rocky/grub-bootdir/i386-pc/grub2/grub.cfg
.. ifconfig:: zfs_root_test
::
find /boot/efis/ -name "grub.cfg" -print0 \
| xargs -t -0I '{}' grub2-script-check -v '{}'
#. For both legacy and EFI booting: mirror ESP content::
espdir=$(mktemp -d)
find /boot/efi/ -maxdepth 1 -mindepth 1 -type d -print0 \
| xargs -t -0I '{}' cp -r '{}' "${espdir}"
find "${espdir}" -maxdepth 1 -mindepth 1 -type d -print0 \
| xargs -t -0I '{}' sh -vxc "find /boot/efis/ -maxdepth 1 -mindepth 1 -type d -print0 | xargs -t -0I '[]' cp -r '{}' '[]'"
#. Exit chroot
.. code-block:: sh
exit
.. ifconfig:: zfs_root_test
# nested chroot ends here
ZFS_ROOT_NESTED_CHROOT
.. ifconfig:: zfs_root_test
::
# list contents of boot dir to confirm
# that the mirroring succeeded
find "${MNT}"/boot/efis/ -type d > list_of_efi_dirs
for i in ${DISK}; do
if ! grep "${i##*/}-part1/efi\|${i##*/}-part1/EFI" list_of_efi_dirs; then
echo "disk ${i} not found in efi system partition, installation error";
cat list_of_efi_dirs
exit 1
fi
done
#. Unmount filesystems and create initial system snapshot
You can later create a boot environment from this snapshot.
See `Root on ZFS maintenance page <../zfs_root_maintenance.html>`__.
::
umount -Rl "${MNT}"
zfs snapshot -r rpool@initial-installation
zfs snapshot -r bpool@initial-installation
#. Export all pools
.. code-block:: sh
zpool export -a
.. ifconfig:: zfs_root_test
# we are now inside a chroot, where the export will fail
# export pools when we are outside chroot
#. Reboot
.. code-block:: sh
reboot
#. For BIOS-legacy boot users only: the GRUB bootloader installed
might be unusable. In this case, see Bootloader Recovery section
in `Root on ZFS maintenance page <../zfs_root_maintenance.html>`__.
This issue is not caused by the Alpine Linux chroot, as Arch Linux
installed with the same method does not have this issue.
The UEFI bootloader is not affected by this issue.
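You can check which firmware mode the current boot used, since UEFI
systems expose ``/sys/firmware/efi``; a sketch
.. code-block:: sh
if test -d /sys/firmware/efi; then
echo 'booted in UEFI mode'
else
echo 'booted in BIOS legacy mode'
fi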
.. ifconfig:: zfs_root_test
# chroot ends here
ZFS_ROOT_GUIDE_TEST
Post installation
---------------------------
#. Install package groups
.. code-block:: sh
dnf group list --hidden -v # query package groups
dnf group install gnome-desktop
#. Add a new user and configure swap.
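For example, an administrator account can be created as follows; a
sketch, with ``yourname`` as a placeholder. Membership of the ``wheel``
group typically grants ``sudo`` access
.. code-block:: sh
useradd -m -G wheel yourname
passwd yourname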

View File

@@ -71,16 +71,6 @@ And for EL8 and newer, separately run::
dnf install -y kernel-devel
dnf install -y zfs
It might be necessary to rebuild the ZFS modules::
for directory in /lib/modules/*; do
kernel_version=$(basename $directory)
dkms autoinstall -k $kernel_version
done
If for some reason, the ZFS kernel modules are not successfully built,
you can also run the above command to debug the problem.
.. note::
When switching from DKMS to kABI-tracking kmods first uninstall the
existing DKMS packages. This should remove the kernel modules for all
@@ -126,8 +116,8 @@ time you can create such configuration in ``/etc/modules-load.d``::
Previous minor EL releases
--------------------------
The current release package uses `$releasever` rather than specify a particular
minor release as previous release packages did. Typically `$releasever` will
The current release package uses `"${releasever}"` rather than specify a particular
minor release as previous release packages did. Typically `"${releasever}"` will
resolve to just the major version (e.g. `8`), and the resulting repository URL
will be aliased to the current minor version (e.g. `8.7`), but you can specify
`--releasever` to use previous repositories. ::
@@ -175,15 +165,13 @@ And for EL8 and newer::
Use *zfs-testing* for DKMS packages and *zfs-testing-kmod*
for kABI-tracking kmod packages.
RHEL-based distro Root on ZFS
-------------------------------
Start from "Preparation".
Root on ZFS
-----------
.. toctree::
:maxdepth: 1
:glob:
RHEL-based distro Root on ZFS/*
*
.. _kABI-tracking kmod: https://elrepoproject.blogspot.com/2016/02/kabi-tracking-kmod-packages.html
.. _DKMS: https://en.wikipedia.org/wiki/Dynamic_Kernel_Module_Support

View File

@@ -21,3 +21,4 @@ documentation <https://pthree.org/2012/04/17/install-zfs-on-debian-gnulinux/>`__
openSUSE/index
RHEL-based distro/index
Ubuntu/index
zfs_root_maintenance

View File

@@ -0,0 +1,310 @@
.. highlight:: sh
Root on ZFS maintenance
========================
Boot Environment
----------------
This section is compatible with Alpine, Arch, Fedora and RHEL guides.
Not necessary for NixOS. Incompatible with Ubuntu and Debian guides.
Note: boot environments as described below are intended only for
system recovery purposes, that is, you boot into the alternate boot
environment once to perform system recovery on the default datasets:
.. code-block:: sh
rpool/distro/root
bpool/distro/root
then reboot to those datasets once you have successfully recovered the
system.
Switching the default boot environment complicates bootloader recovery
and other maintenance operations and is thus currently not supported.
#. If you want to use the ``@initial-installation`` snapshot created
during installation, set ``my_boot_env=initial-installation`` and
skip Steps 3 and 4.
#. Identify which dataset is currently mounted as root
``/`` and which as boot ``/boot``
::
set -x
boot_dataset=$(df -P /boot | tail -n1 | cut -f1 -d' ' || true )
root_dataset=$(df -P / | tail -n1 | cut -f1 -d' ' || true )
#. Choose a name for the new boot environment
::
my_boot_env=backup
#. Take snapshots of the ``/`` and ``/boot`` datasets
::
zfs snapshot "${boot_dataset}"@"${my_boot_env}"
zfs snapshot "${root_dataset}"@"${my_boot_env}"
#. Create clones from read-only snapshots
::
new_root_dataset="${root_dataset%/*}"/"${my_boot_env}"
new_boot_dataset="${boot_dataset%/*}"/"${my_boot_env}"
zfs clone -o canmount=noauto \
-o mountpoint=/ \
"${root_dataset}"@"${my_boot_env}" \
"${new_root_dataset}"
zfs clone -o canmount=noauto \
-o mountpoint=legacy \
"${boot_dataset}"@"${my_boot_env}" \
"${new_boot_dataset}"
#. Mount clone and update file system table (fstab)
::
MNT=$(mktemp -d)
mount -t zfs -o zfsutil "${new_root_dataset}" "${MNT}"
mount -t zfs "${new_boot_dataset}" "${MNT}"/boot
sed -i s,"${root_dataset}","${new_root_dataset}",g "${MNT}"/etc/fstab
sed -i s,"${boot_dataset}","${new_boot_dataset}",g "${MNT}"/etc/fstab
if test -f "${MNT}"/boot/grub/grub.cfg; then
is_grub2=n
sed -i s,"${boot_dataset#bpool/}","${new_boot_dataset#bpool/}",g "${MNT}"/boot/grub/grub.cfg
elif test -f "${MNT}"/boot/grub2/grub.cfg; then
is_grub2=y
sed -i s,"${boot_dataset#bpool/}","${new_boot_dataset#bpool/}",g "${MNT}"/boot/grub2/grub.cfg
else
echo "ERROR: no grub menu found!"
exit 1
fi
Do not proceed if no grub menu was found!
#. Unmount clone
::
umount -Rl "${MNT}"
#. Add new boot environment as GRUB menu entry
::
echo "# ${new_boot_dataset}" > new_boot_env_entry_"${new_boot_dataset##*/}"
printf '\n%s' "menuentry 'Boot environment ${new_boot_dataset#bpool/} from ${boot_dataset#bpool/}' " \
>> new_boot_env_entry_"${new_boot_dataset##*/}"
if [ "${is_grub2}" = y ]; then
# shellcheck disable=SC2016
printf '{ search --set=drive1 --label bpool; configfile ($drive1)/%s@/grub2/grub.cfg; }' \
"${new_boot_dataset#bpool/}" >> new_boot_env_entry_"${new_boot_dataset##*/}"
else
# shellcheck disable=SC2016
printf '{ search --set=drive1 --label bpool; configfile ($drive1)/%s@/grub/grub.cfg; }' \
"${new_boot_dataset#bpool/}" >> new_boot_env_entry_"${new_boot_dataset##*/}"
fi
find /boot/efis/ -name "grub.cfg" -print0 \
| xargs -t -0I '{}' sh -vxc "tail -n1 new_boot_env_entry_${new_boot_dataset##*/} >> '{}'"
.. ifconfig:: zfs_root_test
::
find /boot/efis/ -name "grub.cfg" -print0 \
| xargs -t -0I '{}' grub-script-check -v '{}'
#. Do not delete ``new_boot_env_entry_"${new_boot_dataset##*/}"`` file. It
is needed when you want to remove the new boot environment from
GRUB menu later.
#. After reboot, select boot environment entry from GRUB
menu to boot from the clone. Press ESC inside
submenu to return to the previous menu.
#. Steps above can also be used to create a new clone
from an existing snapshot.
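To pick a clone source, list the available snapshots first; a sketch
.. code-block:: sh
zfs list -r -t snapshot -o name,creation bpool rpool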
#. To delete the boot environment, first store its name in a
variable::
my_boot_env=backup
#. Ensure that the boot environment is not
currently used
::
set -x
boot_dataset=$(df -P /boot | tail -n1 | cut -f1 -d' ' || true )
new_boot_dataset="${boot_dataset%/*}"/"${my_boot_env}"
rm_boot_dataset=$(head -n1 new_boot_env_entry_"${new_boot_dataset##*/}" | sed 's|^# *||' || true )
if [ "${boot_dataset}" = "${rm_boot_dataset}" ]; then
echo "ERROR: the dataset you want to delete is the current root! abort!"
exit 1
fi
#. Then check the origin snapshot
::
rm_root_dataset=rpool/"${rm_boot_dataset#bpool/}"
rm_boot_dataset_origin=$(zfs get -H origin "${rm_boot_dataset}"|cut -f3 || true )
rm_root_dataset_origin=$(zfs get -H origin "${rm_root_dataset}"|cut -f3 || true )
#. Finally, destroy clone (boot environment) and its
origin snapshot
::
zfs destroy "${rm_root_dataset}"
zfs destroy "${rm_root_dataset_origin}"
zfs destroy "${rm_boot_dataset}"
zfs destroy "${rm_boot_dataset_origin}"
#. Remove GRUB entry
::
new_entry_escaped=$(tail -n1 new_boot_env_entry_"${new_boot_dataset##*/}" | sed -e 's/[\/&]/\\&/g' || true )
find /boot/efis/ -name "grub.cfg" -print0 | xargs -t -0I '{}' sed -i "/${new_entry_escaped}/d" '{}'
.. ifconfig:: zfs_root_test
::
find /boot/efis/ -name "grub.cfg" -print0 \
| xargs -t -0I '{}' grub-script-check -v '{}'
Disk replacement
----------------
When a disk fails in a mirrored setup, the disk can be replaced with
the following procedure.
#. Shutdown the computer.
#. Replace the failed disk with another disk. The replacement should
be at least the same size or larger than the failed disk.
#. Boot the computer.
When a disk fails, the system will still boot, albeit several minutes
slower than normal.
For NixOS, this is because the initrd and systemd are designed to
import a pool in a degraded state only after a 90-second timeout.
The swap partition on that disk will also fail.
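After booting, the degraded state can be confirmed; a sketch
.. code-block:: sh
ZPOOL_VDEV_NAME_PATH=1 zpool status -x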
#. Install GNU ``parted`` with your distribution package manager.
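For example; a sketch, assuming the package is named ``parted`` on each
distribution
.. code-block:: sh
# Alpine Linux
apk add parted
# Arch Linux
pacman -S parted
# Fedora and RHEL-compatible distributions
dnf install -y parted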
#. Identify the bad disk and a working old disk
.. code-block:: sh
ZPOOL_VDEV_NAME_PATH=1 zpool status
pool: bpool
state: DEGRADED
action: Replace the device using 'zpool replace'.
...
config: bpool
mirror-0
2387489723748 UNAVAIL 0 0 0 was /dev/disk/by-id/ata-BAD-part2
/dev/disk/by-id/ata-disk_known_good-part2 ONLINE 0 0 0
#. Store the bad disk and a working old disk in a variable, omit the partition number ``-partN``
.. code-block:: sh
disk_to_replace=/dev/disk/by-id/ata-disk_to_replace
disk_known_good=/dev/disk/by-id/ata-disk_known_good
#. Identify the new disk
.. code-block:: sh
find /dev/disk/by-id/
/dev/disk/by-id/ata-disk_known_good-part1
/dev/disk/by-id/ata-disk_known_good-part2
...
/dev/disk/by-id/ata-disk_known_good-part5
/dev/disk/by-id/ata-disk_new <-- new disk w/o partition table
#. Store the new disk in a variable
.. code-block:: sh
disk_new=/dev/disk/by-id/ata-disk_new
#. Create a partition table on ``"${disk_new}"``; refer to the respective
installation pages for details.
#. Format and mount the EFI system partition; refer to the respective
installation pages for details. A combined sketch for both steps follows.
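A combined sketch for both steps, mirroring the partition layout and ESP
handling used by the installation guides in this repository; ``SWAPSIZE``
and ``RESERVE`` are assumptions and must match the values used during
installation
.. code-block:: sh
# requires parted and dosfstools
SWAPSIZE=4
RESERVE=1
parted --script --align=optimal "${disk_new}" -- \
mklabel gpt \
mkpart EFI 2MiB 1GiB \
mkpart bpool 1GiB 5GiB \
mkpart rpool 5GiB -$((SWAPSIZE + RESERVE))GiB \
mkpart swap -$((SWAPSIZE + RESERVE))GiB -"${RESERVE}"GiB \
mkpart BIOS 1MiB 2MiB \
set 1 esp on \
set 5 bios_grub on \
set 5 legacy_boot on
partprobe "${disk_new}"
mkfs.vfat -n EFI "${disk_new}"-part1
mkdir -p /boot/efis/"${disk_new##*/}"-part1
mount -t vfat -o iocharset=iso8859-1 "${disk_new}"-part1 /boot/efis/"${disk_new##*/}"-part1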
#. Replace failed disk in ZFS pool
.. code-block:: sh
zpool offline bpool "${disk_to_replace}"-part2
zpool offline rpool "${disk_to_replace}"-part3
zpool replace bpool "${disk_to_replace}"-part2 "${disk_new}"-part2
zpool replace rpool "${disk_to_replace}"-part3 "${disk_new}"-part3
zpool online bpool "${disk_new}"-part2
zpool online rpool "${disk_new}"-part3
Let the new disk resilver. Check status with ``zpool status``.
#. Reinstall and mirror bootloader, refer to respective installation
pages for details.
If you are using NixOS, see below.
#. For NixOS, replace bad disk with new disk inside per-host
configuration file.
.. code-block:: sh
sed -i "s|"${disk_to_replace##*/}"|"${disk_new##*/}"|" /etc/nixos/hosts/exampleHost/default.nix
#. Commit and apply the changed configuration, reinstall bootloader, then reboot
.. code-block:: sh
git -C /etc/nixos commit -asm "replace "${disk_to_replace##*/}" with "${disk_new##*/}"."
nixos-rebuild boot --install-bootloader
reboot
Bootloader Recovery
-------------------
This section is compatible with Alpine, Arch, Fedora, RHEL and NixOS
root on ZFS guides.
Sometimes the GRUB bootloader might be accidentally overwritten,
rendering the system inaccessible. However, as long as the disk
partitions where the boot pool and root pool reside remain untouched, the
system can still be booted easily.
#. Download GRUB rescue image from `this repo
<https://github.com/ne9z/grub-rescue-flake/releases>`__.
You can also build the image yourself if you are familiar with Nix
package manager.
#. Extract either x86_64-efi or i386-pc image from the archive.
#. Write the image to a disk.
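For example, with ``dd``; the image file name and target device below
are placeholders for your actual values
.. code-block:: sh
# ALL DATA ON THE TARGET DISK WILL BE LOST
dd if=grub-rescue-x86_64-efi.img of=/dev/disk/by-id/usb-FOO bs=1M
sync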
#. Boot the computer from the GRUB rescue disk. Select your distro in
GRUB menu.
#. Reinstall bootloader. See respective installation pages for details.

View File

@@ -22,7 +22,7 @@ import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = u'OpenZFS'
copyright = u'2021, OpenZFS'
copyright = u'2023, OpenZFS'
author = u'OpenZFS'
# The short X.Y version
@@ -44,6 +44,7 @@ extensions = [
'sphinx.ext.todo',
'sphinx.ext.githubpages',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
"sphinx_issues",
"sphinx_rtd_theme",
"notfound.extension"
@@ -69,7 +70,7 @@ master_doc = 'index'
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@@ -80,6 +81,14 @@ exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
pygments_style = None
# https://www.sphinx-doc.org/en/master/usage/extensions/ifconfig.html
# hide commands for tests in user documentation
def setup(app):
app.add_config_value('zfs_root_test', default=True, rebuild='env')
zfs_root_test = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env python3
#
# Copyright 2023 Maurice Zhou <yuchen@apvc.uk>
#
# Released without warranty under the terms of the
# Apache License 2.0.
import pylit
pylit.defaults.code_block_markers['shell'] = '::'
pylit.defaults.text_extensions = [".rst"]
pylit.main()

scripts/zfs_root_guide_test.sh Executable file
View File

@@ -0,0 +1,105 @@
#!/usr/bin/env bash
# working directory: root of repo
set -vxuef
distro="${1}"
# clean up previous tests
find /dev/mapper/ -name '*-part4' -print0 \
| xargs -t -0I'{}' sh -vxc "swapoff '{}' && cryptsetup close '{}'"
find . -mindepth 1 -maxdepth 1 -type d -name 'rootfs-*' \
| while read -r dir; do
grep "$(pwd || true)/${dir##./}" /proc/mounts \
| cut -f2 -d' ' | sort | tac \
| xargs -t -I '{}' sh -vxc "if test -d '{}'; then umount -Rl '{}'; fi"
done
find /dev -mindepth 1 -maxdepth 1 -type l -name 'loop*' -exec rm {} +
zpool export -a
losetup --detach-all
# download alpine linux chroot
# it is easier to install rhel with Alpine Linux live media
# which has native zfs support
if ! test -f rootfs.tar.gz; then
curl --fail-early --fail -Lo rootfs.tar.gz https://dl-cdn.alpinelinux.org/alpine/v3.17/releases/x86_64/alpine-minirootfs-3.17.3-x86_64.tar.gz
curl --fail-early --fail -Lo rootfs.tar.gz.sig https://dl-cdn.alpinelinux.org/alpine/v3.17/releases/x86_64/alpine-minirootfs-3.17.3-x86_64.tar.gz.asc
gpg --auto-key-retrieve --keyserver hkps://keyserver.ubuntu.com --verify rootfs.tar.gz.sig
fi
mkdir rootfs-"${distro}"
tar --auto-compress --extract --file rootfs.tar.gz --directory ./rootfs-"${distro}"
# Create empty disk image
qemu-img create -f raw "${distro}"_disk1.img 16G
qemu-img create -f raw "${distro}"_disk2.img 16G
losetup --partscan "$(losetup -f || true)" "${distro}"_disk1.img
losetup --partscan "$(losetup -f || true)" "${distro}"_disk2.img
run_test () {
local path="${1}"
local distro="${2}"
sed 's|.. ifconfig:: zfs_root_test|::|g' \
"${path}" > "${distro}".rst
sed -i '/highlight:: sh/d' "${distro}".rst
# Generate installation script from documentation
python scripts/zfs_root_gen_bash.py "${distro}".rst "${distro}".sh
# Postprocess script for bash
sed -i 's|^ *::||g' "${distro}".sh
# ensure heredocs work
sed -i 's|^ *ZFS_ROOT_GUIDE_TEST|ZFS_ROOT_GUIDE_TEST|g' "${distro}".sh
sed -i 's|^ *ZFS_ROOT_NESTED_CHROOT|ZFS_ROOT_NESTED_CHROOT|g' "${distro}".sh
sed -i 's|^ *EOF|EOF|g' "${distro}".sh
# check whether the generated script has syntax errors
sh -n "${distro}".sh
## !shellcheck does not handle nested chroots
# create another file with <<EOF construct removed
sed 's|<<.*||g' "${distro}".sh > "${distro}"-shellcheck.sh
shellcheck \
--check-sourced \
--enable=all \
--shell=dash \
--severity=style \
--format=tty \
"${distro}"-shellcheck.sh
# Make the installation script executable and run
chmod a+x "${distro}".sh
./"${distro}".sh "${distro}"
}
case "${distro}" in
("nixos")
run_test 'docs/Getting Started/NixOS/Root on ZFS.rst' "${distro}"
;;
("rhel")
run_test 'docs/Getting Started/RHEL-based distro/Root on ZFS.rst' "${distro}"
;;
("alpine")
run_test 'docs/Getting Started/Alpine Linux/Root on ZFS.rst' "${distro}"
;;
("archlinux")
run_test 'docs/Getting Started/Arch Linux/Root on ZFS.rst' "${distro}"
;;
("fedora")
run_test 'docs/Getting Started/Fedora/Root on ZFS.rst' "${distro}"
;;
("maintenance")
grep -B1000 'MAINTENANCE SCRIPT ENTRY POINT' 'docs/Getting Started/Alpine Linux/Root on ZFS.rst' > test_maintenance.rst
cat 'docs/Getting Started/zfs_root_maintenance.rst' >> test_maintenance.rst
grep -A1000 'MAINTENANCE SCRIPT ENTRY POINT' 'docs/Getting Started/Alpine Linux/Root on ZFS.rst' >> test_maintenance.rst
run_test './test_maintenance.rst' "${distro}"
;;
(*)
echo "no distro specified"
exit 1
;;
esac