Skip to content

Instantly share code, notes, and snippets.

@zinntikumugai
Last active January 28, 2026 12:40
Show Gist options
  • Select an option

  • Save zinntikumugai/d6d88359a6005e54a89aa66b7e6385db to your computer and use it in GitHub Desktop.

Select an option

Save zinntikumugai/d6d88359a6005e54a89aa66b7e6385db to your computer and use it in GitHub Desktop.
#!/usr/bin/env bash
set -euo pipefail
# Print the CLI help text (usage, notes, and a worked example) to stdout.
usage() {
  cat <<'HELP'
Usage:
sudo ./truenas_bareos_ro_clone.sh \
--truenas-host <host> \
--truenas-user <ssh_user> \
--src-dataset <pool/dataset> \
--dst-dataset <pool/dataset> \
--dst-mountpoint </data1/...> \
--export-path </mnt/data1/...> \
--allowed-host <ip_or_hostname> \
--local-mount <local_mount_dir> \
--idmap-mode <maproot|mapall> \
[--nfsvers <4.1|4.2|3>] \
[--mapall-user <user>] [--mapall-group <group>] \
[--comment <string>] \
[--snap-name <fixed_snap_name>] \
[--cleanup-prefix <prefix>] [--keep <N>] \
[--restart-nfs]
Notes:
- --dst-mountpoint は /mnt を付けず /data1/... を推奨(/mnt/mnt 化回避)
- 固定スナップショットは ${SRC}@${snap-name} を destroy -> recreate
- cleanup は SRC 側の snapshot を prefix で集め、最新 N 個だけ残す
Example:
sudo ./truenas_bareos_ro_clone.sh \
--truenas-host nas.server.z1n.in \
--truenas-user root \
--src-dataset data1/samba1 \
--dst-dataset data1/bareos_ro/samba1 \
--dst-mountpoint /data1/bareos_ro/samba1 \
--export-path /mnt/data1/bareos_ro/samba1 \
--allowed-host 192.168.10.50 \
--idmap-mode maproot \
--mapall-user nas-user \
--mapall-group nas-user \
--local-mount /mnt/bareos-src/samba1 \
--nfsvers 4.1 \
--snap-name bareos-last \
--cleanup-prefix bareos- --keep 3 \
--restart-nfs
HELP
}
# ---------------------------------------------------------------------------
# Defaults for all CLI options (overridden by the argument parser below).
# ---------------------------------------------------------------------------
TRUENAS_HOST=""
TRUENAS_USER=""
SRC_DATASET=""
DST_DATASET=""
DST_MOUNTPOINT=""
EXPORT_PATH=""
ALLOWED_HOST=""
LOCAL_MOUNT=""
NFSVERS="4.1"
MAPALL_USER=""
MAPALL_GROUP=""
COMMENT=""
RESTART_NFS="false"
IDMAP_MODE="maproot" # maproot|mapall
SNAP_NAME="bareos-last" # fixed snapshot name (@bareos-last)
CLEANUP_PREFIX="" # e.g. "bareos-" (when an older workflow kept timestamped snapshots)
KEEP="0" # generations to keep during prefix cleanup (0 = do not clean up)
# ---------------------------------------------------------------------------
# CLI parsing.
# Value-taking options use "${2?...}" (no colon) so that a trailing option
# with no value aborts with a clear per-flag message instead of set -u's bare
# "2: unbound variable". An explicitly empty value (e.g. --comment "") is
# still accepted, matching the previous behavior.
# ---------------------------------------------------------------------------
while [[ $# -gt 0 ]]; do
  case "$1" in
    --truenas-host) TRUENAS_HOST="${2?--truenas-host requires a value}"; shift 2;;
    --truenas-user) TRUENAS_USER="${2?--truenas-user requires a value}"; shift 2;;
    --src-dataset) SRC_DATASET="${2?--src-dataset requires a value}"; shift 2;;
    --dst-dataset) DST_DATASET="${2?--dst-dataset requires a value}"; shift 2;;
    --dst-mountpoint) DST_MOUNTPOINT="${2?--dst-mountpoint requires a value}"; shift 2;;
    --export-path) EXPORT_PATH="${2?--export-path requires a value}"; shift 2;;
    --allowed-host) ALLOWED_HOST="${2?--allowed-host requires a value}"; shift 2;;
    --local-mount) LOCAL_MOUNT="${2?--local-mount requires a value}"; shift 2;;
    --nfsvers) NFSVERS="${2?--nfsvers requires a value}"; shift 2;;
    --mapall-user) MAPALL_USER="${2?--mapall-user requires a value}"; shift 2;;
    --mapall-group) MAPALL_GROUP="${2?--mapall-group requires a value}"; shift 2;;
    --comment) COMMENT="${2?--comment requires a value}"; shift 2;;
    --snap-name) SNAP_NAME="${2?--snap-name requires a value}"; shift 2;;
    --cleanup-prefix) CLEANUP_PREFIX="${2?--cleanup-prefix requires a value}"; shift 2;;
    --keep) KEEP="${2?--keep requires a value}"; shift 2;;
    --restart-nfs) RESTART_NFS="true"; shift 1;;
    --idmap-mode) IDMAP_MODE="${2?--idmap-mode requires a value}"; shift 2;;
    -h|--help) usage; exit 0;;
    *) echo "Unknown arg: $1" >&2; usage; exit 2;;
  esac
done
# Every variable listed here must be non-empty; when one is missing, derive
# the flag name from the variable name (FOO_BAR -> --foo-bar) for the error.
required_vars=(TRUENAS_HOST TRUENAS_USER SRC_DATASET DST_DATASET DST_MOUNTPOINT EXPORT_PATH ALLOWED_HOST LOCAL_MOUNT)
for var in "${required_vars[@]}"; do
  if [[ -n "${!var}" ]]; then
    continue
  fi
  flag="${var,,}"
  flag="${flag//_/-}"
  echo "Missing required: --${flag}" >&2
  usage
  exit 2
done
# Validate --idmap-mode, fill in the default share comment, and derive the
# ssh target plus the fully-qualified fixed snapshot name.
if [[ "${IDMAP_MODE}" != "maproot" && "${IDMAP_MODE}" != "mapall" ]]; then
  echo "ERROR: --idmap-mode must be maproot or mapall" >&2
  exit 2
fi
COMMENT="${COMMENT:-bareos-ro-clone:${DST_DATASET}}"
SSH_TARGET="${TRUENAS_USER}@${TRUENAS_HOST}"
FIXED_SNAP_SPEC="${SRC_DATASET}@${SNAP_NAME}"
# [1/5] Ensure the local mount directory exists and detach any stale mount.
echo "[1/5] local: unmount if mounted: ${LOCAL_MOUNT}"
mkdir -p "${LOCAL_MOUNT}"
if command -v mountpoint >/dev/null 2>&1; then
  if mountpoint -q "${LOCAL_MOUNT}"; then
    # Force + lazy: the export backing this mount may already be gone.
    umount -fl "${LOCAL_MOUNT}" || true
  fi
fi
echo "[2/5] remote: recreate fixed snapshot: ${FIXED_SNAP_SPEC}"
echo " (order: destroy DST clone -> destroy old fixed snapshot -> create fixed snapshot)"
# The heredoc delimiter is intentionally UNQUOTED: local variables expand here
# before the script is shipped to the NAS; backslash-escaped expansions are
# evaluated on the remote side.
ssh -o BatchMode=yes -o StrictHostKeyChecking=accept-new "${SSH_TARGET}" bash -s <<REMOTE
set -euo pipefail
SRC="${SRC_DATASET}"
DST="${DST_DATASET}"
DST_MP="${DST_MOUNTPOINT}"
FIXED_SNAP="${FIXED_SNAP_SPEC}"
SNAP_NAME="${SNAP_NAME}"
# Used by the busy-unmount workaround: disable the NFS share bound to this
# path, restart nfs, then retry the unmount.
EXPORT_PATH="${EXPORT_PATH}"
MIDCLT="/usr/bin/midclt"

# Disable the NFS share whose path equals the first argument, then restart the
# nfs service so the server releases its references to the dataset.
disable_nfs_share_by_path() {
  local path="\$1"
  # Identify the share by path (multiple matches are possible; first wins).
  local id
  id=\$(sudo -n "\${MIDCLT}" call sharing.nfs.query | python3 - "\${path}" <<'PY'
import json, sys
path = sys.argv[1]
for share in json.load(sys.stdin):
    if share.get("path") == path:
        print(share.get("id"))
        sys.exit(0)
sys.exit(1)
PY
  ) || return 1
  echo "INFO: disabling NFS share id=\${id} path=\${path}" >&2
  sudo -n "\${MIDCLT}" call sharing.nfs.update "\${id}" '{"enabled": false}' >/dev/null
  echo "INFO: restarting nfs service (to release server-side refs)" >&2
  sudo -n "\${MIDCLT}" call service.restart nfs >/dev/null || true
}

# Unmount a dataset, treating "already unmounted" as success and working
# around "dataset is busy" by disabling the NFS share and retrying once.
unmount_with_busy_fix() {
  local dataset="\$1"
  local mp="\$2"
  local err
  # Single attempt with stderr captured. (Previously a silent first attempt
  # was followed by a second one just to capture the error; if that retry
  # happened to succeed, err was empty, matched no pattern, and the function
  # wrongly exited with a "non-busy" failure.)
  if err=\$(sudo zfs unmount -f "\${dataset}" 2>&1); then
    return 0
  fi
  echo "err=\${err}" >&2
  # "Already unmounted" counts as success (keeps the step idempotent).
  if echo "\${err}" | grep -qiE 'not currently mounted|not mounted'; then
    echo "INFO: dataset is already unmounted: \${dataset}" >&2
    return 0
  fi
  # Extra recovery only when the failure looks like a busy dataset.
  if echo "\${err}" | grep -qiE 'dataset is busy|pool or dataset is busy|busy'; then
    echo "WARN: zfs unmount failed (busy). err=\${err}" >&2
    # Disable the NFS share, restart nfs, then retry the unmount.
    if disable_nfs_share_by_path "\${EXPORT_PATH}"; then
      if sudo zfs unmount -f "\${dataset}" >/dev/null 2>&1; then
        echo "INFO: unmount succeeded after disabling NFS share" >&2
        return 0
      fi
    else
      echo "INFO: no NFS share found for path=\${EXPORT_PATH} (or failed to disable)" >&2
    fi
    # Still failing: emit diagnostics and abort.
    echo "ERROR: unmount still failing after busy fix attempt." >&2
    echo "---- findmnt ----" >&2
    findmnt -T "\${mp}" -R >&2 || true
    echo "---- fuser ----" >&2
    fuser -vm "\${mp}" >&2 || true
    echo "---- mount grep ----" >&2
    mount | grep -F "\${mp}" >&2 || true
    exit 1
  fi
  # Anything else: report the reason and abort.
  echo "ERROR: zfs unmount failed (non-busy). err=\${err}" >&2
  exit 1
}

# 1) Destroy the existing DST clone first (critical: detach the dependent
#    clone before touching the snapshot it hangs off).
if sudo zfs list -H -o name "\${DST}" >/dev/null 2>&1; then
  unmount_with_busy_fix "\${DST}" "\${DST_MP}"
  sudo zfs destroy -r "\${DST}"
fi
# 2) Destroy the old fixed snapshot, if any, so it can be recreated.
if sudo zfs list -H -t snapshot -o name "\${FIXED_SNAP}" >/dev/null 2>&1; then
  sudo zfs destroy -r "\${FIXED_SNAP}"
fi
# 3) Take the fresh fixed snapshot.
sudo zfs snapshot -r "\${FIXED_SNAP}"
# 4) Clone it read-only (a mountpoint without the /mnt prefix is recommended).
sudo zfs clone -o mountpoint="\${DST_MP}" -o canmount=on "\${FIXED_SNAP}" "\${DST}"
sudo zfs set readonly=on "\${DST}"
sudo zfs mount "\${DST}" >/dev/null 2>&1 || true
REMOTE
echo "[3/5] remote: optional cleanup old snapshots on SRC (prefix=${CLEANUP_PREFIX}, keep=${KEEP})"
if [[ -n "${CLEANUP_PREFIX}" && "${KEEP}" -gt 0 ]]; then
  ssh -o BatchMode=yes -o StrictHostKeyChecking=accept-new "${SSH_TARGET}" bash -s <<REMOTE
set -euo pipefail
SRC="${SRC_DATASET}"
PREFIX="${CLEANUP_PREFIX}"
KEEP=${KEEP}
# Target snapshots like data1/samba1@bareos-202601...: order by creation time,
# keep the newest KEEP, destroy the rest.
# NOTE: the awk field references below are backslash-escaped because this
# heredoc has an UNQUOTED delimiter; without the escape the *local* shell
# substituted its own script name into the awk program before it was ever
# shipped to the NAS, so the filter never matched anything.
mapfile -t snaps < <(sudo zfs list -H -t snapshot -o name -s creation | awk -v s="\${SRC}@" -v p="\${PREFIX}" '
  index(\$0, s) == 1 {
    split(\$0, a, "@")
    if (index(a[2], p) == 1) print \$0
  }'
)
count=\${#snaps[@]}
if (( count <= KEEP )); then
  exit 0
fi
to_delete=\$(( count - KEEP ))
for ((i=0; i<to_delete; i++)); do
  # Destroying a snapshot that still backs a clone fails; log and continue.
  if ! sudo zfs destroy -r "\${snaps[i]}"; then
    echo "WARN: failed to destroy snapshot (maybe cloned): \${snaps[i]}" >&2
  fi
done
REMOTE
fi
echo "[4/5] remote: ensure NFS share exists (midclt) path=${EXPORT_PATH} client=${ALLOWED_HOST} ro=true idmap=${IDMAP_MODE}"
ssh -T -o BatchMode=yes -o StrictHostKeyChecking=accept-new "${SSH_TARGET}" bash -s <<REMOTE
set -euo pipefail
export EXPORT_PATH="${EXPORT_PATH}"
export ALLOWED_HOST="${ALLOWED_HOST}"
export COMMENT="${COMMENT}"
export MAPALL_USER="${MAPALL_USER}"
export MAPALL_GROUP="${MAPALL_GROUP}"
export IDMAP_MODE="${IDMAP_MODE}"
export RESTART_NFS="${RESTART_NFS}"
python3 - <<'PY'
import ipaddress
import json
import os
import subprocess
import sys

MIDCLT = "/usr/bin/midclt"

def run(cmd):
    """Run a command, capturing stdout/stderr as text."""
    return subprocess.run(cmd, text=True, capture_output=True)

def midclt_call(*args):
    return run(["sudo", "-n", MIDCLT, "call", *args])

def call_json(*args):
    """midclt call that must succeed and print parseable JSON."""
    p = midclt_call(*args)
    if p.returncode != 0:
        raise RuntimeError(f"midclt {' '.join(args)} failed rc={p.returncode}\nSTDERR:\n{p.stderr}\nSTDOUT:\n{p.stdout}")
    out = (p.stdout or "").strip()
    if not out:
        raise RuntimeError(f"midclt {' '.join(args)} returned empty stdout\nSTDERR:\n{p.stderr}")
    return json.loads(out)

export_path = os.environ["EXPORT_PATH"]
allowed_host = os.environ["ALLOWED_HOST"]
comment = os.environ.get("COMMENT", "")
idmap_mode = os.environ.get("IDMAP_MODE", "maproot").strip()
mapall_user = os.environ.get("MAPALL_USER", "").strip()
mapall_group = os.environ.get("MAPALL_GROUP", "").strip()
restart_nfs = os.environ.get("RESTART_NFS", "false").lower() == "true"

# TrueNAS keeps CIDR entries in "networks" and names/addresses in "hosts".
# Unconditionally appending /32 (the previous behavior) produced invalid
# entries for hostnames (allowed by --allowed-host) and for IPv6 addresses,
# so classify the client first.
try:
    client_ip = ipaddress.ip_address(allowed_host)
    cidr_bits = 32 if client_ip.version == 4 else 128
    networks = [f"{allowed_host}/{cidr_bits}"]
    hosts = []
except ValueError:
    networks = []
    hosts = [allowed_host]

payload = {
    "path": export_path,
    "comment": comment,
    "enabled": True,
    "ro": True,
    "networks": networks,
    "hosts": hosts,
}
# Key point: maproot keeps id mapping O(1) even with huge file counts.
if idmap_mode == "maproot":
    payload["maproot_user"] = "root"
    payload["maproot_group"] = "root"
    # Clear mapall so the two mapping modes never mix.
    payload["mapall_user"] = ""
    payload["mapall_group"] = ""
elif idmap_mode == "mapall":
    # Legacy-compatible mode. If "Permission denied" shows up, the dataset
    # ACLs need separate attention.
    if mapall_user:
        payload["mapall_user"] = mapall_user
    if mapall_group:
        payload["mapall_group"] = mapall_group
    payload["maproot_user"] = ""
    payload["maproot_group"] = ""
else:
    raise RuntimeError("IDMAP_MODE must be maproot or mapall")

payload_json = json.dumps(payload, separators=(",", ":"))
shares = call_json("sharing.nfs.query")
matches = [s for s in shares if s.get("path") == export_path]
if matches:
    sid = matches[0]["id"]
    print(f"INFO: share exists for path; updating id={sid}", file=sys.stderr)
    p = midclt_call("sharing.nfs.update", str(sid), payload_json)
    if p.returncode != 0:
        raise RuntimeError(f"update failed rc={p.returncode}\nSTDERR:\n{p.stderr}\nSTDOUT:\n{p.stdout}")
else:
    print("INFO: share not found for path; creating...", file=sys.stderr)
    p = midclt_call("sharing.nfs.create", payload_json)
    if p.returncode != 0:
        # Conflicts such as overlapping shares can happen; re-query and fall
        # back to an update.
        print(f"WARN: create failed; retry query->update\n{p.stderr}\n{p.stdout}", file=sys.stderr)
        shares2 = call_json("sharing.nfs.query")
        matches2 = [s for s in shares2 if s.get("path") == export_path]
        if not matches2:
            raise RuntimeError("create failed and could not locate share by path")
        sid2 = matches2[0]["id"]
        p2 = midclt_call("sharing.nfs.update", str(sid2), payload_json)
        if p2.returncode != 0:
            raise RuntimeError(f"fallback update failed rc={p2.returncode}\nSTDERR:\n{p2.stderr}\nSTDOUT:\n{p2.stdout}")
if restart_nfs:
    p = midclt_call("service.restart", "nfs")
    if p.returncode != 0:
        print(f"WARN: restart nfs failed\n{p.stderr}\n{p.stdout}", file=sys.stderr)
PY
REMOTE
# [5/5] Mount the freshly exported clone read-only and print a summary.
echo "[5/5] local: mount NFS (ro) ${TRUENAS_HOST}:${EXPORT_PATH} -> ${LOCAL_MOUNT}"
mount -t nfs -o "ro,nfsvers=${NFSVERS}" "${TRUENAS_HOST}:${EXPORT_PATH}" "${LOCAL_MOUNT}"
printf '%s\n' \
  "OK:" \
  " fixed snapshot: ${FIXED_SNAP_SPEC}" \
  " clone dataset: ${DST_DATASET} (RO) from ${FIXED_SNAP_SPEC}" \
  " export path: ${EXPORT_PATH} (hosts=${ALLOWED_HOST})" \
  " mounted: ${LOCAL_MOUNT}"
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment