zpool status -stvD
  pool: backup_pool
 state: ONLINE
  scan: none requested
config:

    NAME                                                          STATE     READ WRITE CKSUM  SLOW
    backup_pool                                                   ONLINE       0     0     0     -
      mirror-0                                                    ONLINE       0     0     0     -
        ata-WDC_WD60EZAZ-00SF3B0_WD-WX22D51FH5N3-part1            ONLINE       0     0     0     0  (untrimmed)
        ata-WDC_WD60EZAZ-00SF3B0_WD-WX42D611KJ3K-part1            ONLINE       0     0     0     0  (untrimmed)
    logs
      mirror-1                                                    ONLINE       0     0     0     -
        nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937831T-part4 ONLINE       0     0     0     0  (untrimmed)
        nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937938H-part4 ONLINE       0     0     0     0  (untrimmed)
    cache
      nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937831T-part3   ONLINE       0     0     0     0  (untrimmed)
      nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937938H-part3   ONLINE       0     0     0     0  (untrimmed)

errors: No known data errors

  pool: data_pool
 state: ONLINE
  scan: scrub repaired 0B in 0 days 00:40:26 with 0 errors on Sun Dec 12 01:04:27 2021
config:

    NAME                                                          STATE     READ WRITE CKSUM  SLOW
    data_pool                                                     ONLINE       0     0     0     -
      mirror-0                                                    ONLINE       0     0     0     -
        ata-WDC_WD1005FBYZ-01YCBB2_WD-WMC6M0D0NFEE-part1          ONLINE       0     0     0     0  (trim unsupported)
        ata-WDC_WD1005FBYZ-01YCBB2_WD-WMC6N0L2TXDR-part1          ONLINE       0     0     0     0  (trim unsupported)
    logs
      mirror-1                                                    ONLINE       0     0     0     -
        nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937831T-part2 ONLINE       0     0     0     0  (untrimmed)
        nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937938H-part2 ONLINE       0     0     0     0  (untrimmed)
    cache
      nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937831T-part1   ONLINE       0     0     0     0  (untrimmed)
      nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937938H-part1   ONLINE       0     0     0     0  (untrimmed)

errors: No known data errors
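The (untrimmed) notes come from the -t flag and mean a TRIM has never been run against that vdev; (trim unsupported) marks the spinning disks in data_pool, which cannot accept the command at all. A minimal sketch of how the SSD partitions could be trimmed, assuming this OpenZFS 0.8.3 install (zpool trim and the autotrim pool property are available from 0.8 onwards):

# one-off manual TRIM of a pool's TRIM-capable vdevs
zpool trim backup_pool

# or let the pool issue TRIMs automatically as space is freed
zpool set autotrim=on backup_pool

# progress and the per-device TRIM state show up in the same column as above
zpool status -t backup_pool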
arc_summary
arc_summary -s arc
------------------------------------------------------------------------
ZFS Subsystem Report                            Wed Dec 15 09:48:44 2021
Linux 5.4.0-91-generic                                 0.8.3-1ubuntu12.13
Machine: server (x86_64)                               0.8.3-1ubuntu12.13

ARC status:                                                      HEALTHY
        Memory throttle count:                                         0

ARC size (current):                                    99.0 %    9.2 GiB
        Target size (adaptive):                       100.0 %    9.3 GiB
        Min size (hard limit):                         20.0 %    1.9 GiB
        Max size (high water):                            5:1    9.3 GiB
        Most Frequently Used (MFU) cache size:          3.9 %  346.4 MiB
        Most Recently Used (MRU) cache size:           96.1 %    8.4 GiB
        Metadata cache size (hard limit):              75.0 %    7.0 GiB
        Metadata cache size (current):                  9.8 %  702.1 MiB
        Dnode cache size (hard limit):                 10.0 %  715.3 MiB
        Dnode cache size (current):                    17.4 %  124.6 MiB

ARC hash breakdown:
        Elements max:                                               1.5M
        Elements current:                              92.1 %       1.4M
        Collisions:                                                 3.1M
        Chain max:                                                      6
        Chains:                                                   182.2k

ARC misc:
        Deleted:                                                   10.6M
        Mutex misses:                                                253
        Eviction skips:                                            32.2k
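The ARC is sitting right at its 9.3 GiB adaptive target here, which is normal behaviour (it gives memory back under pressure). If it ever had to be capped, the usual knob is the zfs_arc_max module parameter; a rough sketch, with 8 GiB as an arbitrary example value:

# current limit in bytes (0 means the built-in default, roughly half of RAM)
cat /sys/module/zfs/parameters/zfs_arc_max

# runtime change; the ARC shrinks toward the new target over time
echo 8589934592 | sudo tee /sys/module/zfs/parameters/zfs_arc_max

# persist across reboots
echo "options zfs zfs_arc_max=8589934592" | sudo tee -a /etc/modprobe.d/zfs.conf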
arc_summary -s l2arc
------------------------------------------------------------------------
ZFS Subsystem Report                            Wed Dec 15 09:48:57 2021
Linux 5.4.0-91-generic                                 0.8.3-1ubuntu12.13
Machine: server (x86_64)                               0.8.3-1ubuntu12.13

L2ARC status:                                                    HEALTHY
        Low memory aborts:                                             0
        Free on write:                                              2.9k
        R/W clashes:                                                   0
        Bad checksums:                                                 0
        I/O errors:                                                    0

L2ARC size (adaptive):                                         154.0 GiB
        Compressed:                                    79.2 %  121.9 GiB
        Header size:                                    0.1 %  109.0 MiB

L2ARC breakdown:                                                    3.6M
        Hit ratio:                                      4.2 %     151.2k
        Miss ratio:                                    95.8 %       3.4M
        Feeds:                                                     46.8k

L2ARC writes:
        Writes sent:                                    100 %   23.2 KiB

L2ARC evicts:
        Lock retries:                                                 11
        Upon reading:                                                  0
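Everything arc_summary prints here comes from the arcstats kstat, so the raw L2ARC counters can also be read directly if arc_summary is not installed; for example:

# raw L2ARC counters, same source arc_summary reads
grep -E '^l2_' /proc/spl/kstat/zfs/arcstats

# just the two counters behind the 4.2 % hit ratio shown above
grep -E '^l2_(hits|misses) ' /proc/spl/kstat/zfs/arcstats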
arc_summary -s archits
------------------------------------------------------------------------
ZFS Subsystem Report                            Wed Dec 15 09:49:07 2021
Linux 5.4.0-91-generic                                 0.8.3-1ubuntu12.13
Machine: server (x86_64)                               0.8.3-1ubuntu12.13

ARC total accesses (hits + misses):                                47.4M
        Cache hit ratio:                               92.5 %      43.8M
        Cache miss ratio:                               7.5 %       3.6M
        Actual hit ratio (MFU + MRU hits):             92.3 %      43.7M
        Data demand efficiency:                        77.5 %       3.9M
        Data prefetch efficiency:                       1.9 %       2.4M

Cache hits by cache type:
        Most frequently used (MFU):                    87.7 %      38.4M
        Most recently used (MRU):                      12.0 %       5.3M
        Most frequently used (MFU) ghost:               0.3 %     134.7k
        Most recently used (MRU) ghost:                 0.2 %      78.4k

Cache hits by data type:
        Demand data:                                    6.9 %       3.0M
        Demand prefetch data:                           0.1 %      45.3k
        Demand metadata:                               92.6 %      40.6M
        Demand prefetch metadata:                       0.4 %     167.1k

Cache misses by data type:
        Demand data:                                   24.8 %     882.0k
        Demand prefetch data:                          66.5 %       2.4M
        Demand metadata:                                6.5 %     231.8k
        Demand prefetch metadata:                       2.2 %      79.3k
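The headline ratios are simple derivations from the raw counters (43.8M hits out of 47.4M total accesses gives the 92.5 % shown above), so the same figure can be recomputed straight from the kstat; a small sketch:

# overall ARC hit ratio from the raw counters: hits / (hits + misses)
awk '/^hits / {h=$3} /^misses / {m=$3} END {printf "%.1f %%\n", 100*h/(h+m)}' \
    /proc/spl/kstat/zfs/arcstats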
zpool iostat -vly 1
                                                                capacity     operations     bandwidth    total_wait     disk_wait    syncq_wait    asyncq_wait   scrub   trim
pool                                                           alloc   free   read  write   read  write   read  write   read  write   read  write   read  write   wait   wait
-------------------------------------------------------------  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----
backup_pool                                                     893G  4.58T      0      0      0      0      -      -      -      -      -      -      -      -      -      -
  mirror                                                        893G  4.58T      0      0      0      0      -      -      -      -      -      -      -      -      -      -
    ata-WDC_WD60EZAZ-00SF3B0_WD-WX22D51FH5N3-part1                 -      -      0      0      0      0      -      -      -      -      -      -      -      -      -      -
    ata-WDC_WD60EZAZ-00SF3B0_WD-WX42D611KJ3K-part1                 -      -      0      0      0      0      -      -      -      -      -      -      -      -      -      -
logs                                                               -      -      -      -      -      -      -      -      -      -      -      -      -      -      -      -
  mirror                                                        896K  9.50G      0      0      0      0      -      -      -      -      -      -      -      -      -      -
    nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937831T-part4      -      -      0      0      0      0      -      -      -      -      -      -      -      -      -      -
    nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937938H-part4      -      -      0      0      0      0      -      -      -      -      -      -      -      -      -      -
cache                                                              -      -      -      -      -      -      -      -      -      -      -      -      -      -      -      -
  nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937831T-part3    18.7G  1.31G      0      0      0      0      -      -      -      -      -      -      -      -      -      -
  nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937938H-part3    18.6G  1.37G      0      0      0      0      -      -      -      -      -      -      -      -      -      -
-------------------------------------------------------------  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----
data_pool                                                       329G   599G      0      0      0      0      -      -      -      -      -      -      -      -      -      -
  mirror                                                        329G   599G      0      0      0      0      -      -      -      -      -      -      -      -      -      -
    ata-WDC_WD1005FBYZ-01YCBB2_WD-WMC6M0D0NFEE-part1               -      -      0      0      0      0      -      -      -      -      -      -      -      -      -      -
    ata-WDC_WD1005FBYZ-01YCBB2_WD-WMC6N0L2TXDR-part1               -      -      0      0      0      0      -      -      -      -      -      -      -      -      -      -
logs                                                               -      -      -      -      -      -      -      -      -      -      -      -      -      -      -      -
  mirror                                                        196K  19.5G      0      0      0      0      -      -      -      -      -      -      -      -      -      -
    nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937831T-part2      -      -      0      0      0      0      -      -      -      -      -      -      -      -      -      -
    nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937938H-part2      -      -      0      0      0      0      -      -      -      -      -      -      -      -      -      -
cache                                                              -      -      -      -      -      -      -      -      -      -      -      -      -      -      -      -
  nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937831T-part1    41.8G  58.2G      0      0      0      0      -      -      -      -      -      -      -      -      -      -
  nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNX0R937938H-part1    42.9G  57.1G      0      0      0      0      -      -      -      -      -      -      -      -      -      -
-------------------------------------------------------------  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----  -----
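For reference on the flags: -v breaks the numbers down per vdev, -l adds the latency columns (total_wait through trim wait), -y discards the first report (which would otherwise be an average since boot), and the trailing 1 samples every second; both pools happened to be idle during this particular one-second sample. A couple of hedged variations on the same command:

# same latency view, limited to one pool: 1-second samples, 10 reports
zpool iostat -vly data_pool 1 10

# request-size histograms instead of average latencies
zpool iostat -r data_pool 1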