Zabbix auto-discovery: monitoring hard disk read/write (disk I/O)

杜楚
2023-12-01

Straight to the configuration:

1. Configuration file

cat userparameter_harddisk.conf

# hard disk discovery
UserParameter=custom.vfs.discovery.diskname,/opt/app/zabbix-agent/scripts/check_harddisk.sh diskname_discovery
# per-disk stats from /proc/diskstats
# See https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
# reads completed successfully
UserParameter=custom.vfs.dev.read.ops[*],egrep $1 /proc/diskstats | head -1 | awk '{print $$4}'
# sectors read
UserParameter=custom.vfs.dev.read.sectors[*],egrep $1 /proc/diskstats | head -1 | awk '{print $$6}'
# time spent reading (ms)
UserParameter=custom.vfs.dev.read.ms[*],egrep $1 /proc/diskstats | head -1 | awk '{print $$7}'
# writes completed
UserParameter=custom.vfs.dev.write.ops[*],egrep $1 /proc/diskstats | head -1 | awk '{print $$8}'
# sectors written
UserParameter=custom.vfs.dev.write.sectors[*],egrep $1 /proc/diskstats | head -1 | awk '{print $$10}'
# time spent writing (ms)
UserParameter=custom.vfs.dev.write.ms[*],egrep $1 /proc/diskstats | head -1 | awk '{print $$11}'
# I/Os currently in progress
UserParameter=custom.vfs.dev.io.active[*],egrep $1 /proc/diskstats | head -1 | awk '{print $$12}'
# time spent doing I/Os (ms)
UserParameter=custom.vfs.dev.io.ms[*],egrep $1 /proc/diskstats | head -1 | awk '{print $$13}'
# iostat %util (average of the last 10 samples in /tmp/.iostat.log)
UserParameter=iostat.util[*],grep "$1" /tmp/.iostat.log | tail -10 | awk '{sum+=$NF}END{print sum/NR}'
# iostat await (average of the last 10 samples; await is column 10 of `iostat -x -m` output in older sysstat versions - check your own iostat columns)
UserParameter=iostat.await[*],grep "$1" /tmp/.iostat.log | tail -10 | awk '{sum+=$$10}END{print sum/NR}'
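
A quick way to smoke-test these keys on the host itself is zabbix_agentd's test mode. This is only a sanity-check sketch: the device name sda is just an example, and you may need -c to point at the zabbix_agentd.conf that includes this file.

zabbix_agentd -t custom.vfs.discovery.diskname
zabbix_agentd -t "custom.vfs.dev.read.ops[sda]"
zabbix_agentd -t "custom.vfs.dev.write.ops[sda]"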


cat /opt/app/zabbix-agent/scripts/check_harddisk.sh

#!/bin/bash
# function: hard disk discovery for Zabbix low-level discovery
# License: GPL
# mail: admin@itnihao.com
# version: 1.0  date: 2013-02-04

diskname_discovery () {
    # Only 3-letter device names (sda, vdb, ...) are matched; adjust the
    # pattern if you also need devices such as nvme0n1 or xvda.
    HardDisk=($(grep '\b[a-z][a-z][a-z]\b' /proc/diskstats | awk '{print $3}'))
    [ "${HardDisk[0]}" == "" ] && exit
    printf '{\n'
    printf '\t"data":[\n'
    for ((i = 0; i < ${#HardDisk[@]}; ++i)); do
        num=$((${#HardDisk[@]} - 1))
        if [ "$i" != "${num}" ]; then
            printf "\t\t{ \n"
            printf "\t\t\t\"{#DISKNAME}\":\"${HardDisk[$i]}\"},\n"
        else
            printf "\t\t{ \n"
            printf "\t\t\t\"{#DISKNAME}\":\"${HardDisk[$num]}\"}]}\n"
        fi
    done
}

case "$1" in
    diskname_discovery)
        diskname_discovery
        ;;
    *)
        echo "Usage: $0 {diskname_discovery}"
        ;;
esac
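
For reference, on a host with two matching disks (say sda and sdb) running the script by hand should print low-level-discovery JSON along these lines; the device names are only an example, and the script must be executable by the agent user (chmod +x).

/opt/app/zabbix-agent/scripts/check_harddisk.sh diskname_discovery
{
	"data":[
		{
			"{#DISKNAME}":"sda"},
		{
			"{#DISKNAME}":"sdb"}]}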


2. crontab entries

#Ansible: zabbix disk
*/3 * * * * /usr/bin/iostat -x -m 2 20 >> /tmp/.iostat.log
#Ansible: zabbix disk log delete
59 23 * * * echo > /tmp/.iostat.log
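
For reference, each cron run appends roughly 40 seconds of extended iostat samples (20 reports at a 2-second interval) to /tmp/.iostat.log, and the second job truncates the log just before midnight. Once the log has data and the agent has been restarted (step 4), the averaged keys can be spot-checked; this is only a sketch, sda is again an example device, and zabbix_get must be installed wherever you run it.

tail -20 /tmp/.iostat.log
zabbix_get -s 127.0.0.1 -k "iostat.util[sda]"
zabbix_get -s 127.0.0.1 -k "iostat.await[sda]"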


3. Import the template

cat templates.xml

<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>3.4</version>
<date>2018-07-05T05:30:35Z</date>
<groups>
<group>
<name>Template For Base</name>
</group>
<group>
<name>Templates</name>
</group>
</groups>
<templates>
<template>
<template>Template Linux Disk IO</template>
<name>Template Linux Disk IO</name>
<description/>
<groups>
<group>
<name>Template For Base</name>
</group>
<group>
<name>Templates</name>
</group>
</groups>
<applications>
<application>
<name>Linux I/O Stats</name>
</application>
</applications>
<items/>
<discovery_rules>
<discovery_rule>
<name>Linux Disk device discovery</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>custom.vfs.discovery.diskname</key>
<delay>3600</delay>
<status>0</status>
<allowed_hosts/>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<filter>
<evaltype>0</evaltype>
<formula/>
<conditions>
<condition>
<macro>{#DISKNAME}</macro>
<value/>
<operator>8</operator>
<formulaid>A</formulaid>
</condition>
</conditions>
</filter>
<lifetime>30d</lifetime>
<description>Discovery of disk devices on Linux</description>
<item_prototypes>
<item_prototype>
<name>Disk:{#DISKNAME}:I/O's currently in progress</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>custom.vfs.dev.io.active[{#DISKNAME}]</key>
<delay>120</delay>
<history>90d</history>
<trends>365d</trends>
<status>0</status>
<value_type>3</value_type>
<allowed_hosts/>
<units>iops</units>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Linux I/O Stats</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<application_prototypes/>
<master_item_prototype/>
</item_prototype>
<item_prototype>
<name>Disk:{#DISKNAME}:time spent doing I/O</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>custom.vfs.dev.io.ms[{#DISKNAME}]</key>
<delay>120</delay>
<history>90d</history>
<trends>365d</trends>
<status>0</status>
<value_type>3</value_type>
<allowed_hosts/>
<units>ms</units>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Linux I/O Stats</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing>
<step>
<type>10</type>
<params/>
</step>
</preprocessing>
<jmx_endpoint/>
<application_prototypes/>
<master_item_prototype/>
</item_prototype>
<item_prototype>
<name>Disk:{#DISKNAME}:completed reads per second</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>custom.vfs.dev.read.ops[{#DISKNAME}]</key>
<delay>120</delay>
<history>90d</history>
<trends>365d</trends>
<status>0</status>
<value_type>3</value_type>
<allowed_hosts/>
<units>Reads/sec</units>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Linux I/O Stats</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing>
<step>
<type>10</type>
<params/>
</step>
</preprocessing>
<jmx_endpoint/>
<application_prototypes/>
<master_item_prototype/>
</item_prototype>
<item_prototype>
<name>Disk:{#DISKNAME}:completed writes per second</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>custom.vfs.dev.write.ops[{#DISKNAME}]</key>
<delay>120</delay>
<history>90d</history>
<trends>365d</trends>
<status>0</status>
<value_type>3</value_type>
<allowed_hosts/>
<units>Writes/sec</units>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Linux I/O Stats</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing>
<step>
<type>10</type>
<params/>
</step>
</preprocessing>
<jmx_endpoint/>
<application_prototypes/>
<master_item_prototype/>
</item_prototype>
<item_prototype>
<name>Disk:{#DISKNAME}:iostat await</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>iostat.await[{#DISKNAME}]</key>
<delay>300</delay>
<history>90d</history>
<trends>365d</trends>
<status>0</status>
<value_type>0</value_type>
<allowed_hosts/>
<units/>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Linux I/O Stats</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<application_prototypes/>
<master_item_prototype/>
</item_prototype>
<item_prototype>
<name>Disk:{#DISKNAME}:iostat %util</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>iostat.util[{#DISKNAME}]</key>
<delay>300</delay>
<history>90d</history>
<trends>365d</trends>
<status>0</status>
<value_type>0</value_type>
<allowed_hosts/>
<units/>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Linux I/O Stats</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<application_prototypes/>
<master_item_prototype/>
</item_prototype>
</item_prototypes>
<trigger_prototypes>
<trigger_prototype>
<expression>{Template Linux Disk IO:iostat.util[{#DISKNAME}].min(#3)}&gt;85</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>{HOST.NAME} {#DISKNAME} iostat %util &gt; 85</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>3</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies/>
<tags/>
</trigger_prototype>
<trigger_prototype>
<expression>{Template Linux Disk IO:custom.vfs.dev.read.ops[{#DISKNAME}].min(#3)}&gt;10000</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>{HOST.NAME} {#DISKNAME} read ops is too high (&gt;10000)</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>3</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies/>
<tags/>
</trigger_prototype>
<trigger_prototype>
<expression>{Template Linux Disk IO:custom.vfs.dev.write.ops[{#DISKNAME}].min(#3)}&gt;10000</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>{HOST.NAME} {#DISKNAME} write ops is too high (&gt;10000)</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>3</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies/>
<tags/>
</trigger_prototype>
</trigger_prototypes>
<graph_prototypes>
<graph_prototype>
<name>Disk:{#DISKNAME}: I/O's currently in progress</name>
<width>900</width>
<height>200</height>
<yaxismin>0.0000</yaxismin>
<yaxismax>100.0000</yaxismax>
<show_work_period>1</show_work_period>
<show_triggers>1</show_triggers>
<type>0</type>
<show_legend>1</show_legend>
<show_3d>0</show_3d>
<percent_left>0.0000</percent_left>
<percent_right>0.0000</percent_right>
<ymin_type_1>0</ymin_type_1>
<ymax_type_1>0</ymax_type_1>
<ymin_item_1>0</ymin_item_1>
<ymax_item_1>0</ymax_item_1>
<graph_items>
<graph_item>
<sortorder>0</sortorder>
<drawtype>0</drawtype>
<color>00C800</color>
<yaxisside>0</yaxisside>
<calc_fnc>2</calc_fnc>
<type>0</type>
<item>
<host>Template Linux Disk IO</host>
<key>custom.vfs.dev.io.active[{#DISKNAME}]</key>
</item>
</graph_item>
</graph_items>
</graph_prototype>
<graph_prototype>
<name>Disk:{#DISKNAME}: IOPS reads and writes status</name>
<width>900</width>
<height>200</height>
<yaxismin>0.0000</yaxismin>
<yaxismax>100.0000</yaxismax>
<show_work_period>1</show_work_period>
<show_triggers>1</show_triggers>
<type>0</type>
<show_legend>1</show_legend>
<show_3d>0</show_3d>
<percent_left>0.0000</percent_left>
<percent_right>0.0000</percent_right>
<ymin_type_1>0</ymin_type_1>
<ymax_type_1>0</ymax_type_1>
<ymin_item_1>0</ymin_item_1>
<ymax_item_1>0</ymax_item_1>
<graph_items>
<graph_item>
<sortorder>0</sortorder>
<drawtype>5</drawtype>
<color>C80000</color>
<yaxisside>0</yaxisside>
<calc_fnc>2</calc_fnc>
<type>0</type>
<item>
<host>Template Linux Disk IO</host>
<key>custom.vfs.dev.read.ops[{#DISKNAME}]</key>
</item>
</graph_item>
<graph_item>
<sortorder>1</sortorder>
<drawtype>0</drawtype>
<color>0000C8</color>
<yaxisside>0</yaxisside>
<calc_fnc>2</calc_fnc>
<type>0</type>
<item>
<host>Template Linux Disk IO</host>
<key>custom.vfs.dev.write.ops[{#DISKNAME}]</key>
</item>
</graph_item>
</graph_items>
</graph_prototype>
<graph_prototype>
<name>Disk:{#DISKNAME}: iostat stats</name>
<width>900</width>
<height>200</height>
<yaxismin>0.0000</yaxismin>
<yaxismax>100.0000</yaxismax>
<show_work_period>1</show_work_period>
<show_triggers>1</show_triggers>
<type>0</type>
<show_legend>1</show_legend>
<show_3d>0</show_3d>
<percent_left>0.0000</percent_left>
<percent_right>0.0000</percent_right>
<ymin_type_1>0</ymin_type_1>
<ymax_type_1>0</ymax_type_1>
<ymin_item_1>0</ymin_item_1>
<ymax_item_1>0</ymax_item_1>
<graph_items>
<graph_item>
<sortorder>0</sortorder>
<drawtype>0</drawtype>
<color>C80000</color>
<yaxisside>0</yaxisside>
<calc_fnc>2</calc_fnc>
<type>0</type>
<item>
<host>Template Linux Disk IO</host>
<key>iostat.util[{#DISKNAME}]</key>
</item>
</graph_item>
<graph_item>
<sortorder>1</sortorder>
<drawtype>0</drawtype>
<color>00C800</color>
<yaxisside>0</yaxisside>
<calc_fnc>2</calc_fnc>
<type>0</type>
<item>
<host>Template Linux Disk IO</host>
<key>iostat.await[{#DISKNAME}]</key>
</item>
</graph_item>
</graph_items>
</graph_prototype>
</graph_prototypes>
<host_prototypes/>
<jmx_endpoint/>
</discovery_rule>
</discovery_rules>
<httptests/>
<macros/>
<templates/>
<screens/>
</template>
</templates>
</zabbix_export>


4. Restart zabbix-agent
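
On a systemd host this is typically one of the commands below; the exact service name (or an init script under /opt/app/zabbix-agent) depends on how the agent was installed.

systemctl restart zabbix-agent
# or, with a SysV-style init script:
service zabbix-agent restart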


Reposted from: https://www.cnblogs.com/Qing-840/p/9267821.html