diff --git a/Proxmox/reduce-ssd-writes.sh b/Proxmox/reduce-ssd-writes.sh
index 98186c2..dac447b 100755
--- a/Proxmox/reduce-ssd-writes.sh
+++ b/Proxmox/reduce-ssd-writes.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/usr/bin/env bash
 
 # This has not been tested, use at your own risk.
 
@@ -12,14 +12,14 @@ echo "Enabling a variety of settings to reduce writes on SSDs. This may cause da
 sleep 10
 
-echo “options zfs zfs_txg_timeout=30” > /etc/modprobe.d/zfs.conf # Set txg_timeout to 30 seconds. This introduces a higher risk of data loss.
+echo 'options zfs zfs_txg_timeout=30' > /etc/modprobe.d/zfs.conf # Set txg_timeout to 30 seconds. This introduces a higher risk of data loss.
 zfs set atime=off rpool # Turn off atime.
 zfs set logbias=throughput rpool # Change logbias to throughput.
 zfs set compression=lz4 rpool # Set compression to lz4 instead of the older LZJB.
 zfs set recordsize=16K rpool # This works better with lots of little writes, and seems to be recommended for VMs on SSDs unless your workload is many large writes.
 # ZFS seems to recommend smaller recordsizes for VMs if you're running on SSDs in general depending on the workload:
-# https://openzfs.github.io/openzfs-docs/Performance%20and%20Tuning/Workload%20Tuning.html#virtual-machines
+# https://openzfs.github.io/openzfs-docs/Performance%20and%20Tuning/Workload%20Tuning.html#virtual-machines
 # https://openzfs.github.io/openzfs-docs/Performance%20and%20Tuning/Workload%20Tuning.html#zvol-volblocksize
 # 16K: https://serverfault.com/a/1120640
@@ -28,4 +28,3 @@ zpool get all | grep ashift
 
 echo "And trim should be enabled:"
 systemctl status fstrim.timer
-
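
A minimal sketch for sanity-checking the result after running the patched script (assuming the same pool name, rpool, that the script uses; adjust if yours differs):

#!/usr/bin/env bash
# Sketch only: confirm the values set by reduce-ssd-writes.sh took effect.
zfs get -o property,value atime,logbias,compression,recordsize rpool
cat /etc/modprobe.d/zfs.conf       # should contain zfs_txg_timeout=30
systemctl is-enabled fstrim.timer  # trim timer should report "enabled"

Note that the modprobe.d option only applies once the zfs module is reloaded, typically after a reboot, whereas the zfs set changes take effect immediately.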