diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000000..c31a6a82c2 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,139 @@ +--- +version: 2.1 + +orbs: + prometheus: prometheus/prometheus@0.11.0 + +executors: + # Whenever the Go version is updated here, .promu.yml and .promu-cgo.yml + # should also be updated. + golang: + docker: + - image: circleci/golang:1.17 + +jobs: + test: + executor: golang + + steps: + - prometheus/setup_environment + - run: go mod download + - run: make + - prometheus/store_artifact: + file: node_exporter + + codespell: + docker: + - image: circleci/python + + steps: + - checkout + - run: sudo pip install codespell + - run: codespell --skip=".git,./vendor,ttar,go.mod,go.sum,*pem,./collector/fixtures" -L uint,packages\',uptodate + + test_mixins: + executor: golang + steps: + - checkout + - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest + - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest + - run: go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest + - run: make promtool + - run: make -C docs/node-mixin clean + - run: make -C docs/node-mixin jb_install + - run: make -C docs/node-mixin + - run: git diff --exit-code + + build: + machine: + image: ubuntu-2004:202101-01 + + parallelism: 3 + + steps: + - prometheus/setup_environment + - run: docker run --privileged linuxkit/binfmt:v0.8 + - run: promu crossbuild -v --parallelism $CIRCLE_NODE_TOTAL --parallelism-thread $CIRCLE_NODE_INDEX + - run: promu --config .promu-cgo.yml crossbuild -v --parallelism $CIRCLE_NODE_TOTAL --parallelism-thread $CIRCLE_NODE_INDEX + - persist_to_workspace: + root: . 
+ paths: + - .build + - store_artifacts: + path: .build + destination: /build + + test_docker: + machine: + image: ubuntu-2004:202101-01 + + environment: + DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.16-base + REPO_PATH: github.com/prometheus/node_exporter + + steps: + - prometheus/setup_environment + - attach_workspace: + at: . + - run: + command: | + if [ -n "$CIRCLE_TAG" ]; then + make docker DOCKER_IMAGE_TAG=$CIRCLE_TAG + else + make docker + fi + - run: docker images + - run: docker run --rm -t -v "$(pwd):/app" "${DOCKER_TEST_IMAGE_NAME}" -i "${REPO_PATH}" -T + - run: + command: | + if [ -n "$CIRCLE_TAG" ]; then + make test-docker DOCKER_IMAGE_TAG=$CIRCLE_TAG + else + make test-docker + fi + +workflows: + version: 2 + node_exporter: + jobs: + - test: + filters: + tags: + only: /.*/ + - build: + filters: + tags: + only: /.*/ + - codespell: + filters: + tags: + only: /.*/ + - test_docker: + requires: + - test + - build + filters: + tags: + only: /.*/ + - test_mixins: + filters: + tags: + only: /.*/ + - prometheus/publish_master: + context: org-context + requires: + - test + - build + filters: + branches: + only: master + - prometheus/publish_release: + context: org-context + requires: + - test + - build + filters: + tags: + only: /^v.*/ + branches: + ignore: /.*/ diff --git a/.dockerignore b/.dockerignore index 1694cb06ee..cfeaf5f65c 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,3 +2,7 @@ .tarballs/ !.build/linux-amd64 +!.build/linux-armv7 +!.build/linux-arm64 +!.build/linux-ppc64le +!.build/linux-s390x diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 0000000000..f96c76a659 --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,31 @@ +name: golangci-lint +on: + push: + paths: + - "go.sum" + - "go.mod" + - "**.go" + - "scripts/errcheck_excludes.txt" + - ".github/workflows/golangci-lint.yml" + - ".golangci.yml" + pull_request: + paths: + - "go.sum" + - "go.mod" + - 
"**.go" + - "scripts/errcheck_excludes.txt" + - ".github/workflows/golangci-lint.yml" + - ".golangci.yml" + +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Lint + uses: golangci/golangci-lint-action@v2 + with: + version: v1.42.0 diff --git a/.gitignore b/.gitignore index 6f86022719..8fb3d18214 100644 --- a/.gitignore +++ b/.gitignore @@ -34,3 +34,5 @@ dependencies-stamp # Test files extracted from ttar collector/fixtures/sys/ + +/vendor diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000000..0412d93b82 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,22 @@ +linters: + enable: + - revive + disable: + # Disable soon to deprecated[1] linters that lead to false + # positives when build tags disable certain files[2] + # 1: https://github.com/golangci/golangci-lint/issues/1841 + # 2: https://github.com/prometheus/node_exporter/issues/1545 + - deadcode + - unused + - structcheck + - varcheck + +issues: + exclude-rules: + - path: _test.go + linters: + - errcheck + +linters-settings: + errcheck: + exclude: scripts/errcheck_excludes.txt diff --git a/.promu-cgo.yml b/.promu-cgo.yml new file mode 100644 index 0000000000..7179a566d7 --- /dev/null +++ b/.promu-cgo.yml @@ -0,0 +1,27 @@ +go: + # Whenever the Go version is updated here, .circle/config.yml and + # .promu.yml should also be updated. 
+ version: 1.17 + cgo: true +repository: + path: github.com/prometheus/node_exporter +build: + binaries: + - name: node_exporter + flags: -a -tags 'netgo osusergo static_build' + ldflags: | + -X github.com/prometheus/common/version.Version={{.Version}} + -X github.com/prometheus/common/version.Revision={{.Revision}} + -X github.com/prometheus/common/version.Branch={{.Branch}} + -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} + -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} +tarball: + files: + - LICENSE + - NOTICE +crossbuild: + platforms: + - darwin/amd64 + - darwin/arm64 + - netbsd/amd64 + - netbsd/386 diff --git a/.promu.yml b/.promu.yml index f3e72453a4..373000dd74 100644 --- a/.promu.yml +++ b/.promu.yml @@ -1,31 +1,24 @@ go: - cgo: true + # Whenever the Go version is updated here, .circle/config.yml and + # .promu-cgo.yml should also be updated. + version: 1.17 repository: path: github.com/prometheus/node_exporter build: binaries: - name: node_exporter - flags: -a -tags 'netgo static_build' + flags: -a -tags 'netgo osusergo static_build' ldflags: | - -X {{repoPath}}/vendor/github.com/prometheus/common/version.Version={{.Version}} - -X {{repoPath}}/vendor/github.com/prometheus/common/version.Revision={{.Revision}} - -X {{repoPath}}/vendor/github.com/prometheus/common/version.Branch={{.Branch}} - -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildUser={{user}}@{{host}} - -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} + -X github.com/prometheus/common/version.Version={{.Version}} + -X github.com/prometheus/common/version.Revision={{.Revision}} + -X github.com/prometheus/common/version.Branch={{.Branch}} + -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} + -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} tarball: files: - LICENSE - NOTICE crossbuild: platforms: - - linux/amd64 - - linux/386 - - 
darwin/amd64 - - darwin/386 - - netbsd/amd64 - - netbsd/386 - - linux/arm - - linux/arm64 - # Temporarily deactivated as this does not currently build with promu. - #- linux/mips64 - #- linux/mips64le + - linux + - openbsd/amd64 diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 70ab1420f5..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go -go: -- 1.8.x -- 1.9.x -- master - -go_import_path: github.com/prometheus/node_exporter - -script: -- make diff --git a/.yamllint b/.yamllint new file mode 100644 index 0000000000..281c946463 --- /dev/null +++ b/.yamllint @@ -0,0 +1,27 @@ +--- +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + commas: disable + comments: disable + comments-indentation: disable + document-start: disable + indentation: + spaces: consistent + key-duplicates: + ignore: | + config/testdata/section_key_dup.bad.yml + line-length: disable + truthy: + ignore: | + .github/workflows/codeql-analysis.yml + .github/workflows/funcbench.yml + .github/workflows/fuzzing.yml + .github/workflows/prombench.yml + .github/workflows/golangci-lint.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index db655a72d3..675057ec08 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,359 @@ -## v0.15.0 / 2017-10-06 +## master / unreleased + +* [CHANGE] +* [FEATURE] +* [ENHANCEMENT] +* [BUGFIX] + +## 1.3.0 / 2021-10-20 + +NOTE: In order to support globs in the textfile collector path, filenames exposed by + `node_textfile_mtime_seconds` now contain the full path name. 
+ +* [CHANGE] Add path label to rapl collector #2146 +* [CHANGE] Exclude filesystems under /run/credentials #2157 +* [CHANGE] Add TCPTimeouts to netstat default filter #2189 +* [FEATURE] Add lnstat collector for metrics from /proc/net/stat/ #1771 +* [FEATURE] Add darwin powersupply collector #1777 +* [FEATURE] Add support for monitoring GPUs on Linux #1998 +* [FEATURE] Add Darwin thermal collector #2032 +* [FEATURE] Add os release collector #2094 +* [FEATURE] Add netdev.address-info collector #2105 +* [FEATURE] Add clocksource metrics to time collector #2197 +* [ENHANCEMENT] Support glob textfile collector directories #1985 +* [ENHANCEMENT] ethtool: Expose node_ethtool_info metric #2080 +* [ENHANCEMENT] Use include/exclude flags for ethtool filtering #2165 +* [ENHANCEMENT] Add flag to disable guest CPU metrics #2123 +* [ENHANCEMENT] Add DMI collector #2131 +* [ENHANCEMENT] Add threads metrics to processes collector #2164 +* [ENHANCMMENT] Reduce timer GC delays in the Linux filesystem collector #2169 +* [ENHANCMMENT] Add TCPTimeouts to netstat default filter #2189 +* [ENHANCMMENT] Use SysctlTimeval for boottime collector on BSD #2208 +* [BUGFIX] ethtool: Sanitize metric names #2093 +* [BUGFIX] Fix ethtool collector for multiple interfaces #2126 +* [BUGFIX] Fix possible panic on macOS #2133 +* [BUGFIX] Collect flag_info and bug_info only for one core #2156 +* [BUGFIX] Prevent duplicate ethtool metric names #2187 + +## 1.2.2 / 2021-08-06 + +* [BUGFIX] Fix processes collector long int parsing #2112 + +## 1.2.1 / 2021-07-23 + +* [BUGFIX] Fix zoneinfo parsing prometheus/procfs#386 +* [BUGFIX] Fix nvme collector log noise #2091 +* [BUGFIX] Fix rapl collector log noise #2092 + +## 1.2.0 / 2021-07-15 + +NOTE: Ignoring invalid network speed will be the default in 2.x +NOTE: Filesystem collector flags have been renamed. 
`--collector.filesystem.ignored-mount-points` is now `--collector.filesystem.mount-points-exclude` and `--collector.filesystem.ignored-fs-types` is now `--collector.filesystem.fs-types-exclude`. The old flags will be removed in 2.x. + +* [CHANGE] Rename filesystem collector flags to match other collectors #2012 +* [CHANGE] Make node_exporter print usage to STDOUT #2039 +* [FEATURE] Add conntrack statistics metrics #1155 +* [FEATURE] Add ethtool stats collector #1832 +* [FEATURE] Add flag to ignore network speed if it is unknown #1989 +* [FEATURE] Add tapestats collector for Linux #2044 +* [FEATURE] Add nvme collector #2062 +* [ENHANCEMENT] Add ErrorLog plumbing to promhttp #1887 +* [ENHANCEMENT] Add more Infiniband counters #2019 +* [ENHANCEMENT] netclass: retrieve interface names and filter before parsing #2033 +* [ENHANCEMENT] Add time zone offset metric #2060 +* [BUGFIX] Handle errors from disabled PSI subsystem #1983 +* [BUGFIX] Fix panic when using backwards compatible flags #2000 +* [BUGFIX] Fix wrong value for OpenBSD memory buffer cache #2015 +* [BUGFIX] Only initiate collectors once #2048 +* [BUGFIX] Handle small backwards jumps in CPU idle #2067 + +## 1.1.2 / 2021-03-05 + +* [BUGFIX] Handle errors from disabled PSI subsystem #1983 +* [BUGFIX] Sanitize strings from /sys/class/power_supply #1984 +* [BUGFIX] Silence missing netclass errors #1986 + +## 1.1.1 / 2021-02-12 + +* [BUGFIX] Fix ineffassign issue #1957 +* [BUGFIX] Fix some noisy log lines #1962 + +## 1.1.0 / 2021-02-05 + +NOTE: We have improved some of the flag naming conventions (PR #1743). The old names are + deprecated and will be removed in 2.0. They will continue to work for backwards + compatibility. + +* [CHANGE] Improve filter flag names #1743 +* [CHANGE] Add btrfs and powersupplyclass to list of exporters enabled by default #1897 +* [FEATURE] Add fibre channel collector #1786 +* [FEATURE] Expose cpu bugs and flags as info metrics. 
#1788 +* [FEATURE] Add network_route collector #1811 +* [FEATURE] Add zoneinfo collector #1922 +* [ENHANCEMENT] Add more InfiniBand counters #1694 +* [ENHANCEMENT] Add flag to aggr ipvs metrics to avoid high cardinality metrics #1709 +* [ENHANCEMENT] Adding backlog/current queue length to qdisc collector #1732 +* [ENHANCEMENT] Include TCP OutRsts in netstat metrics #1733 +* [ENHANCEMENT] Add pool size to entropy collector #1753 +* [ENHANCEMENT] Remove CGO dependencies for OpenBSD amd64 #1774 +* [ENHANCEMENT] bcache: add writeback_rate_debug stats #1658 +* [ENHANCEMENT] Add check state for mdadm arrays via node_md_state metric #1810 +* [ENHANCEMENT] Expose XFS inode statistics #1870 +* [ENHANCEMENT] Expose zfs zpool state #1878 +* [ENHANCEMENT] Added an ability to pass collector.supervisord.url via SUPERVISORD_URL environment variable #1947 +* [BUGFIX] filesystem_freebsd: Fix label values #1728 +* [BUGFIX] Fix various procfs parsing errors #1735 +* [BUGFIX] Handle no data from powersupplyclass #1747 +* [BUGFIX] udp_queues_linux.go: change upd to udp in two error strings #1769 +* [BUGFIX] Fix node_scrape_collector_success behaviour #1816 +* [BUGFIX] Fix NodeRAIDDegraded to not use a string rule expressions #1827 +* [BUGFIX] Fix node_md_disks state label from fail to failed #1862 +* [BUGFIX] Handle EPERM for syscall in timex collector #1938 +* [BUGFIX] bcache: fix typo in a metric name #1943 +* [BUGFIX] Fix XFS read/write stats (https://github.com/prometheus/procfs/pull/343) + +## 1.0.1 / 2020-06-15 + +* [BUGFIX] filesystem_freebsd: Fix label values #1728 +* [BUGFIX] Update prometheus/procfs to fix log noise #1735 +* [BUGFIX] Fix build tags for collectors #1745 +* [BUGFIX] Handle no data from powersupplyclass #1747, #1749 + +## 1.0.0 / 2020-05-25 + +### **Breaking changes** + +* The netdev collector CLI argument `--collector.netdev.ignored-devices` was renamed to `--collector.netdev.device-blacklist` in order to conform with the systemd collector. 
#1279 +* The label named `state` on `node_systemd_service_restart_total` metrics was changed to `name` to better describe the metric. #1393 +* Refactoring of the mdadm collector changes several metrics + - `node_md_disks_active` is removed + - `node_md_disks` now has a `state` label for "failed", "spare", "active" disks. + - `node_md_is_active` is replaced by `node_md_state` with a state set of "active", "inactive", "recovering", "resync". +* Additional label `mountaddr` added to NFS device metrics to distinguish mounts from the same URL, but different IP addresses. #1417 +* Metrics node_cpu_scaling_frequency_min_hrts and node_cpu_scaling_frequency_max_hrts of the cpufreq collector were renamed to node_cpu_scaling_frequency_min_hertz and node_cpu_scaling_frequency_max_hertz. #1510 +* Collectors that are enabled, but are unable to find data to collect, now return 0 for `node_scrape_collector_success`. + +### Changes + +* [CHANGE] Add `--collector.netdev.device-whitelist`. #1279 +* [CHANGE] Ignore iso9600 filesystem on Linux #1355 +* [CHANGE] Refactor mdadm collector #1403 +* [CHANGE] Add `mountaddr` label to NFS metrics. #1417 +* [CHANGE] Don't count empty collectors as success. 
#1613 +* [FEATURE] New flag to disable default collectors #1276 +* [FEATURE] Add experimental TLS support #1277, #1687, #1695 +* [FEATURE] Add collector for Power Supply Class #1280 +* [FEATURE] Add new schedstat collector #1389 +* [FEATURE] Add FreeBSD zfs support #1394 +* [FEATURE] Add uname support for Darwin and OpenBSD #1433 +* [FEATURE] Add new metric node_cpu_info #1489 +* [FEATURE] Add new thermal_zone collector #1425 +* [FEATURE] Add new cooling_device metrics to thermal zone collector #1445 +* [FEATURE] Add swap usage on darwin #1508 +* [FEATURE] Add Btrfs collector #1512 +* [FEATURE] Add RAPL collector #1523 +* [FEATURE] Add new softnet collector #1576 +* [FEATURE] Add new udp_queues collector #1503 +* [FEATURE] Add basic authentication #1673 +* [ENHANCEMENT] Log pid when there is a problem reading the process stats #1341 +* [ENHANCEMENT] Collect InfiniBand port state and physical state #1357 +* [ENHANCEMENT] Include additional XFS runtime statistics. #1423 +* [ENHANCEMENT] Report non-fatal collection errors in the exporter metric. #1439 +* [ENHANCEMENT] Expose IPVS firewall mark as a label #1455 +* [ENHANCEMENT] Add check for systemd version before attempting to query certain metrics. #1413 +* [ENHANCEMENT] Add a flag to adjust mount timeout #1486 +* [ENHANCEMENT] Add new counters for flush requests in Linux 5.5 #1548 +* [ENHANCEMENT] Add metrics and tests for UDP receive and send buffer errors #1534 +* [ENHANCEMENT] The sockstat collector now exposes IPv6 statistics in addition to the existing IPv4 support. #1552 +* [ENHANCEMENT] Add infiniband info metric #1563 +* [ENHANCEMENT] Add unix socket support for supervisord collector #1592 +* [ENHANCEMENT] Implement loadavg on all BSDs without cgo #1584 +* [ENHANCEMENT] Add model_name and stepping to node_cpu_info metric #1617 +* [ENHANCEMENT] Add `--collector.perf.cpus` to allow setting the CPU list for perf stats. #1561 +* [ENHANCEMENT] Add metrics for IO errors and retires on Darwin. 
#1636 +* [ENHANCEMENT] Add perf tracepoint collection flag #1664 +* [ENHANCEMENT] ZFS: read contents of objset file #1632 +* [ENHANCEMENT] Linux CPU: Cache CPU metrics to make them monotonically increasing #1711 +* [BUGFIX] Read /proc/net files with a single read syscall #1380 +* [BUGFIX] Renamed label `state` to `name` on `node_systemd_service_restart_total`. #1393 +* [BUGFIX] Fix netdev nil reference on Darwin #1414 +* [BUGFIX] Strip path.rootfs from mountpoint labels #1421 +* [BUGFIX] Fix seconds reported by schedstat #1426 +* [BUGFIX] Fix empty string in path.rootfs #1464 +* [BUGFIX] Fix typo in cpufreq metric names #1510 +* [BUGFIX] Read /proc/stat in one syscall #1538 +* [BUGFIX] Fix OpenBSD cache memory information #1542 +* [BUGFIX] Refactor textfile collector to avoid looping defer #1549 +* [BUGFIX] Fix network speed math #1580 +* [BUGFIX] collector/systemd: use regexp to extract systemd version #1647 +* [BUGFIX] Fix initialization in perf collector when using multiple CPUs #1665 +* [BUGFIX] Fix accidentally empty lines in meminfo_linux #1671 + +## 0.18.1 / 2019-06-04 + +### Changes +* [BUGFIX] Fix incorrect sysctl call in BSD meminfo collector, resulting in broken swap metrics on FreeBSD #1345 +* [BUGFIX] Fix rollover bug in mountstats collector #1364 + +## 0.18.0 / 2019-05-09 + +### **Breaking changes** + +* Renamed `interface` label to `device` in netclass collector for consistency with + other network metrics #1224 +* The cpufreq metrics now separate the `cpufreq` and `scaling` data based on what the driver provides. #1248 +* The labels for the network_up metric have changed, see issue #1236 +* Bonding collector now uses `mii_status` instead of `operstatus` #1124 +* Several systemd metrics have been turned off by default to improve performance #1254 + These include unit_tasks_current, unit_tasks_max, service_restart_total, and unit_start_time_seconds +* The systemd collector blacklist now includes automount, device, mount, and slice units by default. 
#1255 + +### Changes + +* [CHANGE] Bonding state uses mii_status #1124 +* [CHANGE] Add a limit to the number of in-flight requests #1166 +* [CHANGE] Renamed `interface` label to `device` in netclass collector #1224 +* [CHANGE] Add separate cpufreq and scaling metrics #1248 +* [CHANGE] Several systemd metrics have been turned off by default to improve performance #1254 +* [CHANGE] Expand systemd collector blacklist #1255 +* [CHANGE] Split cpufreq metrics into a separate collector #1253 +* [FEATURE] Add a flag to disable exporter metrics #1148 +* [FEATURE] Add kstat-based Solaris metrics for boottime, cpu and zfs collectors #1197 +* [FEATURE] Add uname collector for FreeBSD #1239 +* [FEATURE] Add diskstats collector for OpenBSD #1250 +* [FEATURE] Add pressure collector exposing pressure stall information for Linux #1174 +* [FEATURE] Add perf exporter for Linux #1274 +* [ENHANCEMENT] Add Infiniband counters #1120 +* [ENHANCEMENT] Add TCPSynRetrans to netstat default filter #1143 +* [ENHANCEMENT] Move network_up labels into new metric network_info #1236 +* [ENHANCEMENT] Use 64-bit counters for Darwin netstat +* [BUGFIX] Add fallback for missing /proc/1/mounts #1172 +* [BUGFIX] Fix node_textfile_mtime_seconds to work properly on symlinks #1326 + +## 0.17.0 / 2018-11-30 + +Build note: Linux builds can now be built without CGO. + +### **Breaking changes** + +supvervisord collector reports `start_time_seconds` rather than `uptime` #952 + +The wifi collector is disabled by default due to suspected caching issues and goroutine leaks. +* https://github.com/prometheus/node_exporter/issues/870 +* https://github.com/prometheus/node_exporter/issues/1008 + +Darwin meminfo metrics have been renamed to match Prometheus conventions. 
#1060 + +### Changes + +* [CHANGE] Use /proc/mounts instead of statfs(2) for ro state #1002 +* [CHANGE] Exclude only subdirectories of /var/lib/docker #1003 +* [CHANGE] Filter out non-installed units when collecting all systemd units #1011 +* [CHANGE] `service_restart_total` and `socket_refused_connections_total` will not be reported if you're running an older version of systemd +* [CHANGE] collector/timex: remove cgo dependency #1079 +* [CHANGE] filesystem: Ignore Docker netns mounts #1047 +* [CHANGE] Ignore additional virtual filesystems #1104 +* [FEATURE] Add netclass collector #851 +* [FEATURE] Add processes collector #950 +* [FEATURE] Collect start time for systemd units #952 +* [FEATURE] Add socket unit stats to systemd collector #968 +* [FEATURE] Collect NRestarts property for systemd service units #992 +* [FEATURE] Collect NRefused property for systemd socket units (available as of systemd v239) #995 +* [FEATURE] Allow removal of rootfs prefix for run in docker #1058 +* [ENHANCEMENT] Support for octal characters in mountpoints #954 +* [ENHANCEMENT] Update wifi stats to support multiple stations #980 +* [ENHANCEMENT] Add transmit/receive bytes total for wifi stations #1150 +* [ENHANCEMENT] Handle stuck NFS mounts #997 +* [ENHANCEMENT] infiniband: Handle iWARP RDMA modules N/A #974 +* [ENHANCEMENT] Update diskstats for linux kernel 4.19 #1109 +* [ENHANCEMENT] Collect TasksCurrent, TasksMax per systemd unit #1098 + +* [BUGFIX] Fix FreeBSD CPU temp #965 +* [BUGFIX] Fix goroutine leak in supervisord collector #978 +* [BUGFIX] Fix mdadm collector issues #985 +* [BUGFIX] Fix ntp collector thread safety #1014 +* [BUGFIX] Systemd units will not be ignored if you're running older versions of systemd #1039 +* [BUGFIX] Handle vanishing PIDs #1043 +* [BUGFIX] Correctly cast Darwin memory info #1060 +* [BUGFIX] Filter systemd units in Go for compatibility with older versions #1083 +* [BUGFIX] Update cpu collector for OpenBSD 6.4 #1094 +* [BUGFIX] Fix typo on HELP of 
`read_time_seconds_total` #1057 +* [BUGFIX] collector/diskstats: don't fail if there are extra stats #1125 +* [BUGFIX] collector/hwmon\_linux: handle temperature sensor file #1123 +* [BUGFIX] collector/filesystem: add bounds check #1133 +* [BUGFIX] Fix dragonfly's CPU counting frequency #1140 +* [BUGFIX] Add fallback for missing /proc/1/mounts #1172 + +## 0.16.0 / 2018-05-15 + +**Breaking changes** + +This release contains major breaking changes to metric names. Many metrics have new names, labels, and label values in order to conform to current naming conventions. +* Linux node_cpu metrics now break out `guest` values into separate metrics. See Issue #737 +* Many counter metrics have been renamed to include `_total`. +* Many metrics have been renamed/modified to include base units, for example `node_cpu` is now `node_cpu_seconds_total`. + +In order to help with the transition we have an [upgrade guide](docs/V0_16_UPGRADE_GUIDE.md). + +Other breaking changes: +* The megacli collector has been removed, is now replaced by the storcli.py textfile helper. +* The gmond collector has been removed. +* The textfile collector will now treat timestamps as errors. + +* [CHANGE] Split out guest cpu metrics on Linux. #744 +* [CHANGE] Exclude Linux proc from filesystem type regexp #774 +* [CHANGE] Ignore more virtual filesystems #775 +* [CHANGE] Remove obsolete megacli collector. #798 +* [CHANGE] Ignore /var/lib/docker by default. #814 +* [CHANGE] Cleanup NFS metrics #834 +* [CHANGE] Only report core throttles per core, not per cpu #836 +* [CHANGE] Treat custom textfile metric timestamps as errors #769 +* [CHANGE] Use lowercase cpu label name in interrupts #849 +* [CHANGE] Enable bonding collector by default. #872 +* [CHANGE] Greatly reduce the metrics vmstat returns by default. 
#874 +* [CHANGE] Greatly trim what netstat collector exposes by default #876 +* [CHANGE] Drop `exec_` prefix and move `node_boot_time_seconds` from `exec` to new `boottime` collector and enable for Darwin/Dragonfly/FreeBSD/NetBSD/OpenBSD. #839, #901 +* [CHANGE] Remove depreated gmond collector #852 +* [CHANGE] align Darwin disk stat names with Linux #930 +* [FEATURE] Add `collect[]` parameter #699 +* [FEATURE] Add text collector conversion for ipmitool output. #746 +* [FEATURE] Add openbsd meminfo #724 +* [FEATURE] Add systemd summary metrics #765 +* [FEATURE] Add OpenBSD CPU collector #805 +* [FEATURE] Add NFS Server metrics collector. #803 +* [FEATURE] add sample directory size exporter #789 +* [ENHANCEMENT] added Wear_Leveling_Count attribute to smartmon.sh script #707 +* [ENHANCEMENT] Simplify Utsname string conversion #716 +* [ENHANCEMENT] apt.sh: handle multiple origins in apt-get output #757 +* [ENHANCEMENT] Export systemd timers last trigger seconds. #807 +* [ENHANCEMENT] updates for zfsonlinux 0.7.5 #779 +* [BUGFIX] Fix smartmon.sh textfile script #700 +* [BUGFIX] netdev: Change valueType to CounterValue #749 +* [BUGFIX] textfile: fix duplicate metrics error #738 +* [BUGFIX] Fix panic by updating github.com/ema/qdisc dependency #778 +* [BUGFIX] Use uint64 in the ZFS collector #714 +* [BUGFIX] multiply page size after float64 coercion to avoid signed integer overflow #780 +* [BUGFIX] smartmon: Escape double quotes in device model family #772 +* [BUGFIX] Fix log level regression in #533 #815 +* [BUGFIX] Correct the ClocksPerSec scaling factor on Darwin #846 +* [BUGFIX] Count core throttles per core and per package #871 +* [BUGFIX] Fix netdev collector for linux #890 #910 +* [BUGFIX] Fix memory corruption when number of filesystems > 16 on FreeBSD #900 +* [BUGFIX] Fix parsing of interface aliases in netdev linux #904 + +## 0.15.2 / 2017-12-06 + +* [BUGFIX] cpu: Support processor-less (memory-only) NUMA nodes #734 + +## 0.15.1 / 2017-11-07 + +* [BUGFIX] xfs: 
expose correct fields, fix metric names #708 +* [BUGFIX] Correct buffer_bytes > INT_MAX on BSD/amd64. #712 +* [BUGFIX] netstat: return nothing when /proc/net/snmp6 not found #718 +* [BUGFIX] Fix off by one in Linux interrupts collector #721 +* [BUGFIX] Add and use sysReadFile in hwmon collector #728 + +## 0.15.0 / 2017-10-06 **Breaking changes** @@ -40,7 +395,7 @@ Windows support is now removed, the [wmi_exporter](https://github.com/martinlind * [BUGFIX] Ignore wifi collector permission errors #646 * [BUGFIX] Always try to return smartmon_device_info metric #663 -## v0.14.0 / 2017-03-21 +## 0.14.0 / 2017-03-21 NOTE: We are deprecating several collectors in this release. * `gmond` - Out of scope. @@ -82,7 +437,7 @@ Breaking changes: * [BUGFIX] Allow graceful failure in hwmon collector #427 * [BUGFIX] Fix the reporting of active+total disk metrics for inactive raids. #522 -## v0.13.0 / 2016-11-26 +## 0.13.0 / 2016-11-26 NOTE: We have disabled builds of linux/ppc64 and linux/ppc64le due to build bugs. diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9a1aff4127 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +## Prometheus Community Code of Conduct + +Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ada80dd655..8780b12adf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,6 +17,13 @@ Prometheus uses GitHub to manage reviews of pull requests. Practices for Production Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). +* Sign your work to certify that your changes were created by yourself or you + have the right to submit it under our license. 
Read + https://developercertificate.org/ for all details and append your sign-off to + every commit message like this: + + Signed-off-by: Random J Developer + ## Collector Implementation Guidelines diff --git a/Dockerfile b/Dockerfile index 98ce8c6a13..51ab464280 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,26 @@ +# Run tests +FROM quay.io/prometheus/golang-builder:1.17-main +MAINTAINER The Prometheus Authors + +COPY . /go/src/github.com/prometheus/node_exporter +WORKDIR /go/src/github.com/prometheus/node_exporter +RUN make promu +RUN make + +# Run Build +FROM quay.io/prometheus/golang-builder:1.17-main +MAINTAINER The Prometheus Authors + +COPY . /go/src/github.com/prometheus/node_exporter +WORKDIR /go/src/github.com/prometheus/node_exporter +RUN make promu +RUN make build + +# Make docker image FROM quay.io/prometheus/busybox:glibc MAINTAINER The Prometheus Authors -COPY node_exporter /bin/node_exporter +COPY --from=1 /go/src/github.com/prometheus/node_exporter/node_exporter /bin/node_exporter EXPOSE 9100 USER nobody diff --git a/Dockerfile.ppc64le b/Dockerfile.ppc64le deleted file mode 100644 index 75c1aaf5cb..0000000000 --- a/Dockerfile.ppc64le +++ /dev/null @@ -1,7 +0,0 @@ -FROM ppc64le/busybox:glibc - -COPY node_exporter /bin/node_exporter - -EXPOSE 9100 -USER nobody -ENTRYPOINT [ "/bin/node_exporter" ] diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 0000000000..673564cd38 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,13 @@ +#!groovy + +pipeline { + agent { label 'dockerd' } + + stages { + stage('Build') { + steps { + dockerBuildTagPush() + } + } + } +} diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 7088f96e4a..ffe90361c3 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -1,2 +1,2 @@ -* Ben Kochie -* Johannes 'fish' Ziemke +* Ben Kochie @SuperQ +* Johannes 'fish' Ziemke @discordianfish diff --git a/Makefile b/Makefile index 4f5ebe0c50..e72dea5dff 100644 --- a/Makefile +++ b/Makefile @@ -11,44 +11,68 @@ # See the License for the 
specific language governing permissions and # limitations under the License. -GO ?= GO15VENDOREXPERIMENT=1 go -GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -GOARCH := $(shell $(GO) env GOARCH) -GOHOSTARCH := $(shell $(GO) env GOHOSTARCH) +# Ensure that 'all' is the default target otherwise it will be the first target from Makefile.common. +all:: -PROMU ?= $(GOPATH)/bin/promu -STATICCHECK ?= $(GOPATH)/bin/staticcheck -pkgs = $(shell $(GO) list ./... | grep -v /vendor/) +# Needs to be defined before including Makefile.common to auto-generate targets +DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x + +include Makefile.common + +PROMTOOL_VERSION ?= 2.30.0 +PROMTOOL_URL ?= https://github.com/prometheus/prometheus/releases/download/v$(PROMTOOL_VERSION)/prometheus-$(PROMTOOL_VERSION).$(GO_BUILD_PLATFORM).tar.gz +PROMTOOL ?= $(FIRST_GOPATH)/bin/promtool -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_NAME ?= node-exporter -DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) MACH ?= $(shell uname -m) -DOCKERFILE ?= Dockerfile -ifeq ($(GOHOSTARCH),amd64) - # Only supported on amd64 - test-flags := -race -endif +STATICCHECK_IGNORE = -ifeq ($(OS),Windows_NT) - OS_detected := Windows +ifeq ($(GOHOSTOS), linux) + test-e2e := test-e2e else - OS_detected := $(shell uname -s) + test-e2e := skip-test-e2e endif -ifeq ($(OS_detected), Linux) - test-e2e := test-e2e +# Use CGO for non-Linux builds. 
+ifeq ($(GOOS), linux) + PROMU_CONF ?= .promu.yml else - test-e2e := skip-test-e2e + ifndef GOOS + ifeq ($(GOHOSTOS), linux) + PROMU_CONF ?= .promu.yml + else + PROMU_CONF ?= .promu-cgo.yml + endif + else + # Do not use CGO for openbsd/amd64 builds + ifeq ($(GOOS), openbsd) + ifeq ($(GOARCH), amd64) + PROMU_CONF ?= .promu.yml + else + PROMU_CONF ?= .promu-cgo.yml + endif + else + PROMU_CONF ?= .promu-cgo.yml + endif + endif +endif + +PROMU := $(FIRST_GOPATH)/bin/promu --config $(PROMU_CONF) + +e2e-out = collector/fixtures/e2e-output.txt +ifeq ($(MACH), ppc64le) + e2e-out = collector/fixtures/e2e-64k-page-output.txt +endif +ifeq ($(MACH), aarch64) + e2e-out = collector/fixtures/e2e-64k-page-output.txt endif # 64bit -> 32bit mapping for cross-checking. At least for amd64/386, the 64bit CPU can execute 32bit code but not the other way around, so we don't support cross-testing upwards. cross-test = skip-test-32bit define goarch_pair - ifeq ($$(OS_detected),Linux) - ifeq ($$(GOARCH),$1) + ifeq ($$(GOHOSTOS),linux) + ifeq ($$(GOHOSTARCH),$1) GOARCH_CROSS = $2 cross-test = test-32bit endif @@ -57,80 +81,62 @@ endef # By default, "cross" test with ourselves to cover unknown pairings. $(eval $(call goarch_pair,amd64,386)) -$(eval $(call goarch_pair,arm64,arm)) $(eval $(call goarch_pair,mips64,mips)) $(eval $(call goarch_pair,mips64el,mipsel)) -all: format vet staticcheck build test $(cross-test) $(test-e2e) - -style: - @echo ">> checking code style" - @! gofmt -d $(shell find . 
-path ./vendor -prune -o -name '*.go' -print) | grep '^' +all:: vet checkmetrics checkrules common-all $(cross-test) $(test-e2e) +.PHONY: test test: collector/fixtures/sys/.unpacked @echo ">> running tests" $(GO) test -short $(test-flags) $(pkgs) +.PHONY: test-32bit test-32bit: collector/fixtures/sys/.unpacked @echo ">> running tests in 32-bit mode" @env GOARCH=$(GOARCH_CROSS) $(GO) test $(pkgs) +.PHONY: skip-test-32bit skip-test-32bit: - @echo ">> SKIP running tests in 32-bit mode: not supported on $(OS_detected)/$(GOARCH)" + @echo ">> SKIP running tests in 32-bit mode: not supported on $(GOHOSTOS)/$(GOHOSTARCH)" -collector/fixtures/sys/.unpacked: collector/fixtures/sys.ttar - ./ttar -C collector/fixtures -x -f collector/fixtures/sys.ttar +%/.unpacked: %.ttar + @echo ">> extracting fixtures" + if [ -d $(dir $@) ] ; then rm -rf $(dir $@) ; fi + ./ttar -C $(dir $*) -x -f $*.ttar touch $@ +update_fixtures: + rm -vf collector/fixtures/sys/.unpacked + ./ttar -C collector/fixtures -c -f collector/fixtures/sys.ttar sys + +.PHONY: test-e2e test-e2e: build collector/fixtures/sys/.unpacked @echo ">> running end-to-end tests" ./end-to-end-test.sh +.PHONY: skip-test-e2e skip-test-e2e: - @echo ">> SKIP running end-to-end tests on $(OS_detected)" - -format: - @echo ">> formatting code" - @$(GO) fmt $(pkgs) + @echo ">> SKIP running end-to-end tests on $(GOHOSTOS)" -vet: - @echo ">> vetting code" - @$(GO) vet $(pkgs) +.PHONY: checkmetrics +checkmetrics: $(PROMTOOL) + @echo ">> checking metrics for correctness" + ./checkmetrics.sh $(PROMTOOL) $(e2e-out) -staticcheck: $(STATICCHECK) - @echo ">> running staticcheck" - @$(STATICCHECK) $(pkgs) - -build: $(PROMU) - @echo ">> building binaries" - @$(PROMU) build --prefix $(PREFIX) - -tarball: $(PROMU) - @echo ">> building release tarball" - @$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) - -docker: -ifeq ($(MACH), ppc64le) - $(eval DOCKERFILE=Dockerfile.ppc64le) -endif - @echo ">> building docker image from $(DOCKERFILE)" - @docker 
build --file $(DOCKERFILE) -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . +.PHONY: checkrules +checkrules: $(PROMTOOL) + @echo ">> checking rules for correctness" + find . -name "*rules*.yml" | xargs -I {} $(PROMTOOL) check rules {} +.PHONY: test-docker test-docker: @echo ">> testing docker image" - ./test_image.sh "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" 9100 - -$(GOPATH)/bin/promu promu: - @GOOS= GOARCH= $(GO) get -u github.com/prometheus/promu - -$(GOPATH)/bin/staticcheck: - @GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck - + ./test_image.sh "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-amd64:$(DOCKER_IMAGE_TAG)" 9100 -.PHONY: all style format build test test-e2e vet tarball docker promu staticcheck +.PHONY: promtool +promtool: $(PROMTOOL) -# Declaring the binaries at their default locations as PHONY targets is a hack -# to ensure the latest version is downloaded on every make execution. -# If this is not desired, copy/symlink these binaries to a different path and -# set the respective environment variables. -.PHONY: $(GOPATH)/bin/promu $(GOPATH)/bin/staticcheck +$(PROMTOOL): + mkdir -p $(FIRST_GOPATH)/bin + curl -fsS -L $(PROMTOOL_URL) | tar -xvzf - -C $(FIRST_GOPATH)/bin --strip 1 "prometheus-$(PROMTOOL_VERSION).$(GO_BUILD_PLATFORM)/promtool" diff --git a/Makefile.common b/Makefile.common new file mode 100644 index 0000000000..ed7d1826e4 --- /dev/null +++ b/Makefile.common @@ -0,0 +1,317 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# A common Makefile that includes rules to be reused in different prometheus projects. +# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! + +# Example usage : +# Create the main Makefile in the root project directory. +# include Makefile.common +# customTarget: +# @echo ">> Running customTarget" +# + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +GOOPTS ?= +GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) +GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) + +GO_VERSION ?= $(shell $(GO) version) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') + +GOVENDOR := +GO111MODULE := +ifeq (, $(PRE_GO_111)) + ifneq (,$(wildcard go.mod)) + # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). + GO111MODULE := on + + ifneq (,$(wildcard vendor)) + # Always use the local vendor/ directory to satisfy the dependencies. + GOOPTS := $(GOOPTS) -mod=vendor + endif + endif +else + ifneq (,$(wildcard go.mod)) + ifneq (,$(wildcard vendor)) +$(warning This repository requires Go >= 1.11 because of Go modules) +$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') + endif + else + # This repository isn't using Go modules (yet). + GOVENDOR := $(FIRST_GOPATH)/bin/govendor + endif +endif +PROMU := $(FIRST_GOPATH)/bin/promu +pkgs = ./... 
+ +ifeq (arm, $(GOHOSTARCH)) + GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) +else + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) +endif + +GOTEST := $(GO) test +GOTEST_DIR := +ifneq ($(CIRCLE_JOB),) +ifneq ($(shell which gotestsum),) + GOTEST_DIR := test-results + GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- +endif +endif + +PROMU_VERSION ?= 0.12.0 +PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz + +GOLANGCI_LINT := +GOLANGCI_LINT_OPTS ?= +GOLANGCI_LINT_VERSION ?= v1.42.0 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +# windows isn't included here because of the path separator being different. +ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) + # If we're in CI and there is an Actions file, that means the linter + # is being run in Actions, so we don't need to run it here. + ifeq (,$(CIRCLE_JOB)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif + endif +endif + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) +DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +DOCKERFILE_PATH ?= ./Dockerfile +DOCKERBUILD_CONTEXT ?= ./ +DOCKER_REPO ?= prom + +DOCKER_ARCHS ?= amd64 + +BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) +PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) +TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) + +ifeq ($(GOHOSTARCH),amd64) + ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) + # Only supported on amd64 + test-flags := -race + endif +endif + +# This rule is used to forward a target like "build" to "common-build". 
This +# allows a new "build" target to be defined in a Makefile which includes this +# one and override "common-build" without override warnings. +%: common-% ; + +.PHONY: common-all +common-all: precheck style check_license lint yamllint unused build test + +.PHONY: common-style +common-style: + @echo ">> checking code style" + @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ + if [ -n "$${fmtRes}" ]; then \ + echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ + echo "Please ensure you are using $$($(GO) version) for formatting code."; \ + exit 1; \ + fi + +.PHONY: common-check_license +common-check_license: + @echo ">> checking license header" + @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ + awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: common-deps +common-deps: + @echo ">> getting dependencies" +ifdef GO111MODULE + GO111MODULE=$(GO111MODULE) $(GO) mod download +else + $(GO) get $(GOOPTS) -t ./... 
+endif + +.PHONY: update-go-deps +update-go-deps: + @echo ">> updating Go dependencies" + @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ + $(GO) get -d $$m; \ + done + GO111MODULE=$(GO111MODULE) $(GO) mod tidy +ifneq (,$(wildcard vendor)) + GO111MODULE=$(GO111MODULE) $(GO) mod vendor +endif + +.PHONY: common-test-short +common-test-short: $(GOTEST_DIR) + @echo ">> running short tests" + GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) + +.PHONY: common-test +common-test: $(GOTEST_DIR) + @echo ">> running all tests" + GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + +$(GOTEST_DIR): + @mkdir -p $@ + +.PHONY: common-format +common-format: + @echo ">> formatting code" + GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) + +.PHONY: common-vet +common-vet: + @echo ">> vetting code" + GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) + +.PHONY: common-lint +common-lint: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint" +ifdef GO111MODULE +# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. +# Otherwise staticcheck might fail randomly for some reason not yet explained. + GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null + GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) +else + $(GOLANGCI_LINT) run $(pkgs) +endif +endif + +.PHONY: common-yamllint +common-yamllint: + @echo ">> running yamllint on all YAML files in the repository" +ifeq (, $(shell which yamllint)) + @echo "yamllint not installed so skipping" +else + yamllint . +endif + +# For backward-compatibility. +.PHONY: common-staticcheck +common-staticcheck: lint + +.PHONY: common-unused +common-unused: $(GOVENDOR) +ifdef GOVENDOR + @echo ">> running check for unused packages" + @$(GOVENDOR) list +unused | grep . 
&& exit 1 || echo 'No unused packages' +else +ifdef GO111MODULE + @echo ">> running check for unused/missing packages in go.mod" + GO111MODULE=$(GO111MODULE) $(GO) mod tidy +ifeq (,$(wildcard vendor)) + @git diff --exit-code -- go.sum go.mod +else + @echo ">> running check for unused packages in vendor/" + GO111MODULE=$(GO111MODULE) $(GO) mod vendor + @git diff --exit-code -- go.sum go.mod vendor/ +endif +endif +endif + +.PHONY: common-build +common-build: promu + @echo ">> building binaries" + GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + +.PHONY: common-tarball +common-tarball: promu + @echo ">> building release tarball" + $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) + +.PHONY: common-docker $(BUILD_DOCKER_ARCHS) +common-docker: $(BUILD_DOCKER_ARCHS) +$(BUILD_DOCKER_ARCHS): common-docker-%: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + -f $(DOCKERFILE_PATH) \ + --build-arg ARCH="$*" \ + --build-arg OS="linux" \ + $(DOCKERBUILD_CONTEXT) + +.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) +common-docker-publish: $(PUBLISH_DOCKER_ARCHS) +$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + +DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) +.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) +common-docker-tag-latest: $(TAG_DOCKER_ARCHS) +$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" + +.PHONY: common-docker-manifest +common-docker-manifest: + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach 
ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" + +.PHONY: promu +promu: $(PROMU) + +$(PROMU): + $(eval PROMU_TMP := $(shell mktemp -d)) + curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) + mkdir -p $(FIRST_GOPATH)/bin + cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu + rm -r $(PROMU_TMP) + +.PHONY: proto +proto: + @echo ">> generating code from proto files" + @./scripts/genproto.sh + +ifdef GOLANGCI_LINT +$(GOLANGCI_LINT): + mkdir -p $(FIRST_GOPATH)/bin + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ + | sed -e '/install -d/d' \ + | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) +endif + +ifdef GOVENDOR +.PHONY: $(GOVENDOR) +$(GOVENDOR): + GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor +endif + +.PHONY: precheck +precheck:: + +define PRECHECK_COMMAND_template = +precheck:: $(1)_precheck + +PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) +.PHONY: $(1)_precheck +$(1)_precheck: + @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ + echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ + exit 1; \ + fi +endef diff --git a/README.md b/README.md index f5ced0e1b9..5389f8a141 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Node exporter [![Build Status](https://travis-ci.org/prometheus/node_exporter.svg)][travis] +# Node exporter [![CircleCI](https://circleci.com/gh/prometheus/node_exporter/tree/master.svg?style=shield)][circleci] [![Buildkite status](https://badge.buildkite.com/94a0c1fb00b1f46883219c256efe9ce01d63b6505f3a942f9b.svg)](https://buildkite.com/prometheus/node-exporter) @@ -9,7 +9,70 @@ Prometheus exporter for hardware and OS metrics exposed by \*NIX kernels, written in Go with pluggable metric collectors. 
-The [WMI exporter](https://github.com/martinlindhe/wmi_exporter) is recommended for Windows users. +The [Windows exporter](https://github.com/prometheus-community/windows_exporter) is recommended for Windows users. +To expose NVIDIA GPU metrics, [prometheus-dcgm +](https://github.com/NVIDIA/dcgm-exporter) +can be used. + +## Installation and Usage + +If you are new to Prometheus and `node_exporter` there is a [simple step-by-step guide](https://prometheus.io/docs/guides/node-exporter/). + +The `node_exporter` listens on HTTP port 9100 by default. See the `--help` output for more options. + +### Ansible + +For automated installs with [Ansible](https://www.ansible.com/), there is the [Cloud Alchemy role](https://github.com/cloudalchemy/ansible-node-exporter). + +### RHEL/CentOS/Fedora + +There is a [community-supplied COPR repository](https://copr.fedorainfracloud.org/coprs/ibotty/prometheus-exporters/) which closely follows upstream releases. + +### Docker + +The `node_exporter` is designed to monitor the host system. It's not recommended +to deploy it as a Docker container because it requires access to the host system. + +For situations where Docker deployment is needed, some extra flags must be used to allow +the `node_exporter` access to the host namespaces. + +Be aware that any non-root mount points you want to monitor will need to be bind-mounted +into the container. + +If you start container for host monitoring, specify `path.rootfs` argument. +This argument must match path in bind-mount of host root. The node\_exporter will use +`path.rootfs` as prefix to access host filesystem. + +```bash +docker run -d \ + --net="host" \ + --pid="host" \ + -v "/:/host:ro,rslave" \ + quay.io/prometheus/node-exporter:latest \ + --path.rootfs=/host +``` + +For Docker compose, similar flag changes are needed. 
+ +```yaml +--- +version: '3.8' + +services: + node_exporter: + image: quay.io/prometheus/node-exporter:latest + container_name: node_exporter + command: + - '--path.rootfs=/host' + network_mode: host + pid: host + restart: unless-stopped + volumes: + - '/:/host:ro,rslave' +``` + +On some systems, the `timex` collector requires an additional Docker flag, +`--cap-add=SYS_TIME`, in order to access the required syscalls. ## Collectors @@ -18,6 +81,7 @@ below list all existing collectors and the supported systems. Collectors are enabled by providing a `--collector.` flag. Collectors that are enabled by default can be disabled by providing a `--no-collector.` flag. +To enable only some specific collector(s), use `--collector.disable-defaults --collector. ...`. ### Enabled by default @@ -25,12 +89,18 @@ Name | Description | OS ---------|-------------|---- arp | Exposes ARP statistics from `/proc/net/arp`. | Linux bcache | Exposes bcache statistics from `/sys/fs/bcache/`. | Linux +bonding | Exposes the number of configured and active slaves of Linux bonding interfaces. | Linux +btrfs | Exposes btrfs statistics | Linux +boottime | Exposes system boot time derived from the `kern.boottime` sysctl. | Darwin, Dragonfly, FreeBSD, NetBSD, OpenBSD, Solaris conntrack | Shows conntrack statistics (does nothing if no `/proc/sys/net/netfilter/` present). | Linux -cpu | Exposes CPU statistics | Darwin, Dragonfly, FreeBSD, Linux -diskstats | Exposes disk I/O statistics. | Darwin, Linux +cpu | Exposes CPU statistics | Darwin, Dragonfly, FreeBSD, Linux, Solaris, OpenBSD +cpufreq | Exposes CPU frequency statistics | Linux, Solaris +diskstats | Exposes disk I/O statistics. | Darwin, Linux, OpenBSD +dmi | Expose Desktop Management Interface (DMI) info from `/sys/class/dmi/id/` | Linux edac | Exposes error detection and correction statistics. | Linux entropy | Exposes available entropy. | Linux exec | Exposes execution statistics. 
| Dragonfly, FreeBSD +fibrechannel | Exposes fibre channel information and statistics from `/sys/class/fc_host/`. | Linux filefd | Exposes file descriptor statistics from `/proc/sys/fs/file-nr`. | Linux filesystem | Exposes filesystem statistics, such as disk space used. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD hwmon | Expose hardware monitoring and sensor data from `/sys/class/hwmon/`. | Linux @@ -38,62 +108,125 @@ infiniband | Exposes network statistics specific to InfiniBand and Intel OmniPat ipvs | Exposes IPVS status from `/proc/net/ip_vs` and stats from `/proc/net/ip_vs_stats`. | Linux loadavg | Exposes load average. | Darwin, Dragonfly, FreeBSD, Linux, NetBSD, OpenBSD, Solaris mdadm | Exposes statistics about devices in `/proc/mdstat` (does nothing if no `/proc/mdstat` present). | Linux -meminfo | Exposes memory statistics. | Darwin, Dragonfly, FreeBSD, Linux +meminfo | Exposes memory statistics. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD +netclass | Exposes network interface info from `/sys/class/net/` | Linux netdev | Exposes network interface statistics such as bytes transferred. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD netstat | Exposes network statistics from `/proc/net/netstat`. This is the same information as `netstat -s`. | Linux +nfs | Exposes NFS client statistics from `/proc/net/rpc/nfs`. This is the same information as `nfsstat -c`. | Linux +nfsd | Exposes NFS kernel server statistics from `/proc/net/rpc/nfsd`. This is the same information as `nfsstat -s`. | Linux +nvme | Exposes NVMe info from `/sys/class/nvme/` | Linux +os | Expose OS release info from `/etc/os-release` or `/usr/lib/os-release` | _any_ +powersupplyclass | Exposes Power Supply statistics from `/sys/class/power_supply` | Linux +pressure | Exposes pressure stall statistics from `/proc/pressure/`. | Linux (kernel 4.20+ and/or [CONFIG\_PSI](https://www.kernel.org/doc/html/latest/accounting/psi.html)) +rapl | Exposes various statistics from `/sys/class/powercap`. 
| Linux +schedstat | Exposes task scheduler statistics from `/proc/schedstat`. | Linux sockstat | Exposes various statistics from `/proc/net/sockstat`. | Linux +softnet | Exposes statistics from `/proc/net/softnet_stat`. | Linux stat | Exposes various statistics from `/proc/stat`. This includes boot time, forks and interrupts. | Linux +tapestats | Exposes statistics from `/sys/class/scsi_tape`. | Linux textfile | Exposes statistics read from local disk. The `--collector.textfile.directory` flag must be set. | _any_ +thermal | Exposes thermal statistics like `pmset -g therm`. | Darwin +thermal\_zone | Exposes thermal zone & cooling device statistics from `/sys/class/thermal`. | Linux time | Exposes the current system time. | _any_ timex | Exposes selected adjtimex(2) system call stats. | Linux -uname | Exposes system information as provided by the uname system call. | Linux +udp_queues | Exposes UDP total lengths of the rx_queue and tx_queue from `/proc/net/udp` and `/proc/net/udp6`. | Linux +uname | Exposes system information as provided by the uname system call. | Darwin, FreeBSD, Linux, OpenBSD vmstat | Exposes statistics from `/proc/vmstat`. | Linux -wifi | Exposes WiFi device and station statistics. | Linux xfs | Exposes XFS runtime statistics. | Linux (kernel 4.4+) -zfs | Exposes [ZFS](http://open-zfs.org/) performance statistics. | [Linux](http://zfsonlinux.org/) +zfs | Exposes [ZFS](http://open-zfs.org/) performance statistics. | [Linux](http://zfsonlinux.org/), Solaris ### Disabled by default +`node_exporter` also implements a number of collectors that are disabled by default. Reasons for this vary by +collector, and may include: +* High cardinality +* Prolonged runtime that exceeds the Prometheus `scrape_interval` or `scrape_timeout` +* Significant resource demands on the host + +You can enable additional collectors as desired by adding them to your +init system's or service supervisor's startup configuration for +`node_exporter` but caution is advised. 
Enable at most one at a time, +testing first on a non-production system, then by hand on a single +production node. When enabling additional collectors, you should +carefully monitor the change by observing the ` +scrape_duration_seconds` metric to ensure that collection completes +and does not time out. In addition, monitor the +`scrape_samples_post_metric_relabeling` metric to see the changes in +cardinality. + +The `perf` collector may not work out of the box on some Linux systems due to kernel +configuration and security settings. To allow access, set the following `sysctl` +parameter: + +``` +sysctl -w kernel.perf_event_paranoid=X +``` + +- 2 allow only user-space measurements (default since Linux 4.6). +- 1 allow both kernel and user measurements (default before Linux 4.6). +- 0 allow access to CPU-specific data but not raw tracepoint samples. +- -1 no restrictions. + +Depending on the configured value different metrics will be available, for most +cases `0` will provide the most complete set. For more information see [`man 2 +perf_event_open`](http://man7.org/linux/man-pages/man2/perf_event_open.2.html). + +By default, the `perf` collector will only collect metrics of the CPUs that +`node_exporter` is running on (ie +[`runtime.NumCPU`](https://golang.org/pkg/runtime/#NumCPU). If this is +insufficient (e.g. if you run `node_exporter` with its CPU affinity set to +specific CPUs), you can specify a list of alternate CPUs by using the +`--collector.perf.cpus` flag. For example, to collect metrics on CPUs 2-6, you +would specify: `--collector.perf --collector.perf.cpus=2-6`. The CPU +configuration is zero indexed and can also take a stride value; e.g. +`--collector.perf --collector.perf.cpus=1-10:5` would collect on CPUs +1, 5, and 10. + +The `perf` collector is also able to collect +[tracepoint](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html) +counts when using the `--collector.perf.tracepoint` flag. 
Tracepoints can be +found using [`perf list`](http://man7.org/linux/man-pages/man1/perf.1.html) or +from debugfs. And example usage of this would be +`--collector.perf.tracepoint="sched:sched_process_exec"`. + + Name | Description | OS ---------|-------------|---- -bonding | Exposes the number of configured and active slaves of Linux bonding interfaces. | Linux buddyinfo | Exposes statistics of memory fragments as reported by /proc/buddyinfo. | Linux devstat | Exposes device statistics | Dragonfly, FreeBSD drbd | Exposes Distributed Replicated Block Device statistics (to version 8.4) | Linux +ethtool | Exposes network interface information and network driver statistics equivalent to `ethtool`, `ethtool -S`, and `ethtool -i`. | Linux interrupts | Exposes detailed interrupts statistics. | Linux, OpenBSD ksmd | Exposes kernel and system statistics from `/sys/kernel/mm/ksm`. | Linux +lnstat | Exposes stats from `/proc/net/stat/`. | Linux logind | Exposes session counts from [logind](http://www.freedesktop.org/wiki/Software/systemd/logind/). | Linux meminfo\_numa | Exposes memory statistics from `/proc/meminfo_numa`. | Linux mountstats | Exposes filesystem statistics from `/proc/self/mountstats`. Exposes detailed NFS client statistics. | Linux -nfs | Exposes NFS client statistics from `/proc/net/rpc/nfs`. This is the same information as `nfsstat -c`. | Linux +network_route | Exposes the routing table as metrics | Linux ntp | Exposes local NTP daemon health to check [time](./docs/TIME.md) | _any_ +perf | Exposes perf based metrics (Warning: Metrics are dependent on kernel configuration and settings). | Linux +processes | Exposes aggregate process statistics from `/proc`. | Linux qdisc | Exposes [queuing discipline](https://en.wikipedia.org/wiki/Network_scheduler#Linux_kernel) statistics | Linux runit | Exposes service status from [runit](http://smarden.org/runit/). | _any_ supervisord | Exposes service status from [supervisord](http://supervisord.org/). 
| _any_ systemd | Exposes service and system status from [systemd](http://www.freedesktop.org/wiki/Software/systemd/). | Linux tcpstat | Exposes TCP connection status information from `/proc/net/tcp` and `/proc/net/tcp6`. (Warning: the current version has potential performance issues in high load situations.) | Linux +wifi | Exposes WiFi device and station statistics. | Linux +zoneinfo | Exposes NUMA memory zone metrics. | Linux -### Deprecated - -*These collectors will be (re)moved in the future.* - -Name | Description | OS ----------|-------------|---- -gmond | Exposes statistics from Ganglia. | _any_ -megacli | Exposes RAID statistics from MegaCLI. | Linux ### Textfile Collector -The textfile collector is similar to the [Pushgateway](https://github.com/prometheus/pushgateway), +The `textfile` collector is similar to the [Pushgateway](https://github.com/prometheus/pushgateway), in that it allows exporting of statistics from batch jobs. It can also be used to export static metrics, such as what role a machine has. The Pushgateway -should be used for service-level metrics. The textfile module is for metrics +should be used for service-level metrics. The `textfile` module is for metrics that are tied to a machine. -To use it, set the `--collector.textfile.directory` flag on the Node exporter. The +To use it, set the `--collector.textfile.directory` flag on the `node_exporter` commandline. The collector will parse all files in that directory matching the glob `*.prom` using the [text -format](http://prometheus.io/docs/instrumenting/exposition_formats/). +format](http://prometheus.io/docs/instrumenting/exposition_formats/). **Note:** Timestamps are not supported. 
To atomically push completion time for a cron job: ``` @@ -107,10 +240,32 @@ echo 'role{role="application_server"} 1' > /path/to/directory/role.prom.$$ mv /path/to/directory/role.prom.$$ /path/to/directory/role.prom ``` -## Building and running +### Filtering enabled collectors + +The `node_exporter` will expose all metrics from enabled collectors by default. This is the recommended way to collect metrics to avoid errors when comparing metrics of different families. + +For advanced use the `node_exporter` can be passed an optional list of collectors to filter metrics. The `collect[]` parameter may be used multiple times. In Prometheus configuration you can use this syntax under the [scrape config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#). + +``` + params: + collect[]: + - foo + - bar +``` + +This can be useful for having different Prometheus servers collect specific metrics from nodes. + +## Development building and running + +Prerequisites: - go get github.com/prometheus/node_exporter - cd ${GOPATH-$HOME/go}/src/github.com/prometheus/node_exporter +* [Go compiler](https://golang.org/dl/) +* RHEL/CentOS: `glibc-static` package. + +Building: + + git clone https://github.com/prometheus/node_exporter.git + cd node_exporter make ./node_exporter @@ -122,33 +277,17 @@ To see all available configuration flags: make test +## TLS endpoint -## Using Docker -The node\_exporter is designed to monitor the host system. It's not recommended -to deploy it as Docker container because it requires access to the host system. 
-If you need to run it on Docker, you can deploy this exporter using the -[node-exporter Docker -image](https://quay.io/repository/prometheus/node-exporter) with the following -options and bind-mounts: +** EXPERIMENTAL ** -```bash -docker run -d -p 9100:9100 \ - -v "/proc:/host/proc:ro" \ - -v "/sys:/host/sys:ro" \ - -v "/:/rootfs:ro" \ - --net="host" \ - quay.io/prometheus/node-exporter \ - --path.procfs /host/proc \ - --path.sysfs /host/sys \ - --collector.filesystem.ignored-mount-points "^/(sys|proc|dev|host|etc)($|/)" -``` - -Be aware though that the mountpoint label in various metrics will now have -`/rootfs` as prefix. +The exporter supports TLS via a new web configuration file. -## Using a third-party repository for RHEL/CentOS/Fedora +```console +./node_exporter --web.config=web-config.yml +``` -There is a [community-supplied COPR repository](https://copr.fedorainfracloud.org/coprs/ibotty/prometheus-exporters/). It closely follows upstream releases. +See the [exporter-toolkit https package](https://github.com/prometheus/exporter-toolkit/blob/v0.1.0/https/README.md) for more details. 
[travis]: https://travis-ci.org/prometheus/node_exporter [hub]: https://hub.docker.com/r/prom/node-exporter/ diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..67741f015a --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,6 @@ +# Reporting a security issue + +The Prometheus security policy, including how to report vulnerabilities, can be +found here: + +https://prometheus.io/docs/operating/security/ diff --git a/VERSION b/VERSION index a551051694..f0bb29e763 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.15.0 +1.3.0 diff --git a/checkmetrics.sh b/checkmetrics.sh new file mode 100755 index 0000000000..c3104ca152 --- /dev/null +++ b/checkmetrics.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +if [[ ( -z "$1" ) || ( -z "$2" ) ]]; then + echo "usage: ./checkmetrics.sh /usr/bin/promtool e2e-output.txt" + exit 1 +fi + +# Ignore known issues in auto-generated and network specific collectors. +lint=$($1 check metrics < "$2" 2>&1 | grep -v -E "^node_(entropy|memory|netstat|wifi_station)_") + +if [[ -n $lint ]]; then + echo -e "Some Prometheus metrics do not follow best practices:\n" + echo "$lint" + + exit 1 +fi diff --git a/circle.yml b/circle.yml deleted file mode 100644 index 3207176b47..0000000000 --- a/circle.yml +++ /dev/null @@ -1,63 +0,0 @@ -machine: - environment: - DOCKER_IMAGE_NAME: prom/node-exporter - QUAY_IMAGE_NAME: quay.io/prometheus/node-exporter - DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.9-base - REPO_PATH: github.com/prometheus/node_exporter - pre: - - sudo curl -L -o /usr/bin/docker 'https://s3-external-1.amazonaws.com/circle-downloads/docker-1.9.1-circleci' - - sudo chmod 0755 /usr/bin/docker - - sudo curl -L 'https://github.com/aktau/github-release/releases/download/v0.6.2/linux-amd64-github-release.tar.bz2' | tar xvjf - --strip-components 3 -C $HOME/bin - services: - - docker - -dependencies: - pre: - - make promu - - docker info - override: - - promu crossbuild - - ln -s .build/linux-amd64/node_exporter 
node_exporter - - | - if [ -n "$CIRCLE_TAG" ]; then - make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG - make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG - else - make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME - make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME - fi - post: - - mkdir $CIRCLE_ARTIFACTS/binaries/ && cp -a .build/* $CIRCLE_ARTIFACTS/binaries/ - - docker images - -test: - override: - - docker run --rm -t -v "$(pwd):/app" "${DOCKER_TEST_IMAGE_NAME}" -i "${REPO_PATH}" -T - - make test-docker DOCKER_IMAGE_TAG=$CIRCLE_TAG - -deployment: - hub_branch: - branch: master - owner: prometheus - commands: - - docker login -e $DOCKER_EMAIL -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - - docker login -e $QUAY_EMAIL -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io - - docker push $DOCKER_IMAGE_NAME - - docker push $QUAY_IMAGE_NAME - hub_tag: - tag: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/ - owner: prometheus - commands: - - promu crossbuild tarballs - - promu checksum .tarballs - - promu release .tarballs - - mkdir $CIRCLE_ARTIFACTS/releases/ && cp -a .tarballs/* $CIRCLE_ARTIFACTS/releases/ - - docker login -e $DOCKER_EMAIL -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - - docker login -e $QUAY_EMAIL -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io - - | - if [[ "$CIRCLE_TAG" =~ ^v[0-9]+(\.[0-9]+){2}$ ]]; then - docker tag "$DOCKER_IMAGE_NAME:$CIRCLE_TAG" "$DOCKER_IMAGE_NAME:latest" - docker tag "$QUAY_IMAGE_NAME:$CIRCLE_TAG" "$QUAY_IMAGE_NAME:latest" - fi - - docker push $DOCKER_IMAGE_NAME - - docker push $QUAY_IMAGE_NAME diff --git a/collector/arp_linux.go b/collector/arp_linux.go index 49c437e80a..99b8acfab5 100644 --- a/collector/arp_linux.go +++ b/collector/arp_linux.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !noarp // +build !noarp package collector @@ -22,11 +23,13 @@ import ( "os" "strings" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) type arpCollector struct { entries *prometheus.Desc + logger log.Logger } func init() { @@ -34,13 +37,14 @@ func init() { } // NewARPCollector returns a new Collector exposing ARP stats. -func NewARPCollector() (Collector, error) { +func NewARPCollector(logger log.Logger) (Collector, error) { return &arpCollector{ entries: prometheus.NewDesc( prometheus.BuildFQName(namespace, "arp", "entries"), "ARP entries by device", []string{"device"}, nil, ), + logger: logger, }, nil } @@ -81,7 +85,7 @@ func parseARPEntries(data io.Reader) (map[string]uint32, error) { } if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("failed to parse ARP info: %s", err) + return nil, fmt.Errorf("failed to parse ARP info: %w", err) } return entries, nil @@ -90,7 +94,7 @@ func parseARPEntries(data io.Reader) (map[string]uint32, error) { func (c *arpCollector) Update(ch chan<- prometheus.Metric) error { entries, err := getARPEntries() if err != nil { - return fmt.Errorf("could not get ARP entries: %s", err) + return fmt.Errorf("could not get ARP entries: %w", err) } for device, entryCount := range entries { diff --git a/collector/bcache_linux.go b/collector/bcache_linux.go index 33d48df8ae..63a22d033a 100644 --- a/collector/bcache_linux.go +++ b/collector/bcache_linux.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nobcache // +build !nobcache package collector @@ -18,10 +19,14 @@ package collector import ( "fmt" - // https://godoc.org/github.com/prometheus/client_golang/prometheus + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/bcache" - "github.com/prometheus/procfs/sysfs" + "gopkg.in/alecthomas/kingpin.v2" +) + +var ( + priorityStats = kingpin.Flag("collector.bcache.priorityStats", "Expose expensive priority stats.").Bool() ) func init() { @@ -30,28 +35,36 @@ func init() { // A bcacheCollector is a Collector which gathers metrics from Linux bcache. type bcacheCollector struct { - fs sysfs.FS + fs bcache.FS + logger log.Logger } // NewBcacheCollector returns a newly allocated bcacheCollector. // It exposes a number of Linux bcache statistics. -func NewBcacheCollector() (Collector, error) { - fs, err := sysfs.NewFS(*sysPath) +func NewBcacheCollector(logger log.Logger) (Collector, error) { + fs, err := bcache.NewFS(*sysPath) if err != nil { - return nil, fmt.Errorf("failed to open sysfs: %v", err) + return nil, fmt.Errorf("failed to open sysfs: %w", err) } return &bcacheCollector{ - fs: fs, + fs: fs, + logger: logger, }, nil } // Update reads and exposes bcache stats. // It implements the Collector interface. 
func (c *bcacheCollector) Update(ch chan<- prometheus.Metric) error { - stats, err := c.fs.BcacheStats() + var stats []*bcache.Stats + var err error + if *priorityStats { + stats, err = c.fs.Stats() + } else { + stats, err = c.fs.StatsWithoutPriority() + } if err != nil { - return fmt.Errorf("failed to retrieve bcache stats: %v", err) + return fmt.Errorf("failed to retrieve bcache stats: %w", err) } for _, s := range stats { @@ -162,7 +175,7 @@ func (c *bcacheCollector) updateBcacheStats(ch chan<- prometheus.Metric, s *bcac }, { name: "cache_available_percent", - desc: "Percentage of cache device without dirty data, useable for writeback (may contain clean cached data).", + desc: "Percentage of cache device without dirty data, usable for writeback (may contain clean cached data).", value: float64(s.Bcache.CacheAvailablePercent), metricType: prometheus.GaugeValue, }, @@ -204,7 +217,7 @@ func (c *bcacheCollector) updateBcacheStats(ch chan<- prometheus.Metric, s *bcac metricType: prometheus.GaugeValue, }, { - name: "cache_read_races", + name: "cache_read_races_total", desc: "Counts instances where while data was being read from the cache, the bucket was reused and invalidated - i.e. 
where the pointer was stale after the read completed.", value: float64(s.Bcache.Internal.CacheReadRaces), metricType: prometheus.CounterValue, @@ -222,6 +235,46 @@ func (c *bcacheCollector) updateBcacheStats(ch chan<- prometheus.Metric, s *bcac extraLabel: []string{"backing_device"}, extraLabelValue: bdev.Name, }, + { + name: "dirty_target_bytes", + desc: "Current dirty data target threshold for this backing device in bytes.", + value: float64(bdev.WritebackRateDebug.Target), + metricType: prometheus.GaugeValue, + extraLabel: []string{"backing_device"}, + extraLabelValue: bdev.Name, + }, + { + name: "writeback_rate", + desc: "Current writeback rate for this backing device in bytes.", + value: float64(bdev.WritebackRateDebug.Rate), + metricType: prometheus.GaugeValue, + extraLabel: []string{"backing_device"}, + extraLabelValue: bdev.Name, + }, + { + name: "writeback_rate_proportional_term", + desc: "Current result of proportional controller, part of writeback rate", + value: float64(bdev.WritebackRateDebug.Proportional), + metricType: prometheus.GaugeValue, + extraLabel: []string{"backing_device"}, + extraLabelValue: bdev.Name, + }, + { + name: "writeback_rate_integral_term", + desc: "Current result of integral controller, part of writeback rate", + value: float64(bdev.WritebackRateDebug.Integral), + metricType: prometheus.GaugeValue, + extraLabel: []string{"backing_device"}, + extraLabelValue: bdev.Name, + }, + { + name: "writeback_change", + desc: "Last writeback rate change step for this backing device.", + value: float64(bdev.WritebackRateDebug.Change), + metricType: prometheus.GaugeValue, + extraLabel: []string{"backing_device"}, + extraLabelValue: bdev.Name, + }, } allMetrics = append(allMetrics, metrics...) 
@@ -258,23 +311,28 @@ func (c *bcacheCollector) updateBcacheStats(ch chan<- prometheus.Metric, s *bcac extraLabel: []string{"cache_device"}, extraLabelValue: cache.Name, }, + } + if *priorityStats { // metrics in /sys/fs/bcache///priority_stats - { - name: "priority_stats_unused_percent", - desc: "The percentage of the cache that doesn't contain any data.", - value: float64(cache.Priority.UnusedPercent), - metricType: prometheus.GaugeValue, - extraLabel: []string{"cache_device"}, - extraLabelValue: cache.Name, - }, - { - name: "priority_stats_metadata_percent", - desc: "Bcache's metadata overhead.", - value: float64(cache.Priority.MetadataPercent), - metricType: prometheus.GaugeValue, - extraLabel: []string{"cache_device"}, - extraLabelValue: cache.Name, - }, + priorityStatsMetrics := []bcacheMetric{ + { + name: "priority_stats_unused_percent", + desc: "The percentage of the cache that doesn't contain any data.", + value: float64(cache.Priority.UnusedPercent), + metricType: prometheus.GaugeValue, + extraLabel: []string{"cache_device"}, + extraLabelValue: cache.Name, + }, + { + name: "priority_stats_metadata_percent", + desc: "Bcache's metadata overhead.", + value: float64(cache.Priority.MetadataPercent), + metricType: prometheus.GaugeValue, + extraLabel: []string{"cache_device"}, + extraLabelValue: cache.Name, + }, + } + metrics = append(metrics, priorityStatsMetrics...) } allMetrics = append(allMetrics, metrics...) } diff --git a/collector/bonding_linux.go b/collector/bonding_linux.go index 2d25974ca7..4c62207c9a 100644 --- a/collector/bonding_linux.go +++ b/collector/bonding_linux.go @@ -11,32 +11,36 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nobonding // +build !nobonding package collector import ( + "errors" "fmt" "io/ioutil" "os" - "path" + "path/filepath" "strings" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" ) type bondingCollector struct { slaves, active typedDesc + logger log.Logger } func init() { - registerCollector("bonding", defaultDisabled, NewBondingCollector) + registerCollector("bonding", defaultEnabled, NewBondingCollector) } // NewBondingCollector returns a newly allocated bondingCollector. // It exposes the number of configured and active slave of linux bonding interfaces. -func NewBondingCollector() (Collector, error) { +func NewBondingCollector(logger log.Logger) (Collector, error) { return &bondingCollector{ slaves: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "bonding", "slaves"), @@ -48,6 +52,7 @@ func NewBondingCollector() (Collector, error) { "Number of active slaves per bonding interface.", []string{"master"}, nil, ), prometheus.GaugeValue}, + logger: logger, }, nil } @@ -56,9 +61,9 @@ func (c *bondingCollector) Update(ch chan<- prometheus.Metric) error { statusfile := sysFilePath("class/net") bondingStats, err := readBondingStats(statusfile) if err != nil { - if os.IsNotExist(err) { - log.Debugf("Not collecting bonding, file does not exist: %s", statusfile) - return nil + if errors.Is(err, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "Not collecting bonding, file does not exist", "file", statusfile) + return ErrNoData } return err } @@ -71,21 +76,21 @@ func (c *bondingCollector) Update(ch chan<- prometheus.Metric) error { func readBondingStats(root string) (status map[string][2]int, err error) { status = map[string][2]int{} - masters, err := ioutil.ReadFile(path.Join(root, "bonding_masters")) + masters, err := ioutil.ReadFile(filepath.Join(root, "bonding_masters")) if err != nil { return nil, err } for _, master := range 
strings.Fields(string(masters)) { - slaves, err := ioutil.ReadFile(path.Join(root, master, "bonding", "slaves")) + slaves, err := ioutil.ReadFile(filepath.Join(root, master, "bonding", "slaves")) if err != nil { return nil, err } sstat := [2]int{0, 0} for _, slave := range strings.Fields(string(slaves)) { - state, err := ioutil.ReadFile(path.Join(root, master, fmt.Sprintf("lower_%s", slave), "operstate")) - if os.IsNotExist(err) { + state, err := ioutil.ReadFile(filepath.Join(root, master, fmt.Sprintf("lower_%s", slave), "bonding_slave", "mii_status")) + if errors.Is(err, os.ErrNotExist) { // some older? kernels use slave_ prefix - state, err = ioutil.ReadFile(path.Join(root, master, fmt.Sprintf("slave_%s", slave), "operstate")) + state, err = ioutil.ReadFile(filepath.Join(root, master, fmt.Sprintf("slave_%s", slave), "bonding_slave", "mii_status")) } if err != nil { return nil, err diff --git a/collector/boot_time_bsd.go b/collector/boot_time_bsd.go new file mode 100644 index 0000000000..8a9c17b31a --- /dev/null +++ b/collector/boot_time_bsd.go @@ -0,0 +1,60 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build (freebsd || dragonfly || openbsd || netbsd || darwin) && !noboottime +// +build freebsd dragonfly openbsd netbsd darwin +// +build !noboottime + +package collector + +import ( + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" +) + +type bootTimeCollector struct { + logger log.Logger +} + +func init() { + registerCollector("boottime", defaultEnabled, newBootTimeCollector) +} + +// newBootTimeCollector returns a new Collector exposing system boot time on BSD systems. +func newBootTimeCollector(logger log.Logger) (Collector, error) { + return &bootTimeCollector{ + logger: logger, + }, nil +} + +// Update pushes boot time onto ch +func (c *bootTimeCollector) Update(ch chan<- prometheus.Metric) error { + tv, err := unix.SysctlTimeval("kern.boottime") + if err != nil { + return err + } + + // This conversion maintains the usec precision. Using the time + // package did not. + v := float64(tv.Sec) + (float64(tv.Usec) / float64(1000*1000)) + + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "boot_time_seconds"), + "Unix time of last boot, including microseconds.", + nil, nil, + ), prometheus.GaugeValue, v) + + return nil +} diff --git a/collector/boot_time_solaris.go b/collector/boot_time_solaris.go new file mode 100644 index 0000000000..3d55e7835f --- /dev/null +++ b/collector/boot_time_solaris.go @@ -0,0 +1,69 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build solaris && !noboottime +// +build solaris,!noboottime + +package collector + +import ( + "github.com/go-kit/log" + "github.com/illumos/go-kstat" + "github.com/prometheus/client_golang/prometheus" +) + +type bootTimeCollector struct { + boottime typedDesc + logger log.Logger +} + +func init() { + registerCollector("boottime", defaultEnabled, newBootTimeCollector) +} + +func newBootTimeCollector(logger log.Logger) (Collector, error) { + return &bootTimeCollector{ + boottime: typedDesc{ + prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "boot_time_seconds"), + "Unix time of last boot, including microseconds.", + nil, nil, + ), prometheus.GaugeValue}, + logger: logger, + }, nil +} + +// newBootTimeCollector returns a new Collector exposing system boot time on Solaris systems. +// Update pushes boot time onto ch +func (c *bootTimeCollector) Update(ch chan<- prometheus.Metric) error { + tok, err := kstat.Open() + if err != nil { + return err + } + + defer tok.Close() + + ks, err := tok.Lookup("unix", 0, "system_misc") + if err != nil { + return err + } + + v, err := ks.GetNamed("boot_time") + if err != nil { + return err + } + + ch <- c.boottime.mustNewConstMetric(float64(v.UintVal)) + + return nil +} diff --git a/collector/btrfs_linux.go b/collector/btrfs_linux.go new file mode 100644 index 0000000000..01971c7c1a --- /dev/null +++ b/collector/btrfs_linux.go @@ -0,0 +1,190 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nobtrfs +// +build !nobtrfs + +package collector + +import ( + "fmt" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/btrfs" +) + +// A btrfsCollector is a Collector which gathers metrics from Btrfs filesystems. +type btrfsCollector struct { + fs btrfs.FS + logger log.Logger +} + +func init() { + registerCollector("btrfs", defaultEnabled, NewBtrfsCollector) +} + +// NewBtrfsCollector returns a new Collector exposing Btrfs statistics. +func NewBtrfsCollector(logger log.Logger) (Collector, error) { + fs, err := btrfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + + return &btrfsCollector{ + fs: fs, + logger: logger, + }, nil +} + +// Update retrieves and exports Btrfs statistics. +// It implements Collector. +func (c *btrfsCollector) Update(ch chan<- prometheus.Metric) error { + stats, err := c.fs.Stats() + if err != nil { + return fmt.Errorf("failed to retrieve Btrfs stats: %w", err) + } + + for _, s := range stats { + c.updateBtrfsStats(ch, s) + } + + return nil +} + +// btrfsMetric represents a single Btrfs metric that is converted into a Prometheus Metric. +type btrfsMetric struct { + name string + desc string + value float64 + extraLabel []string + extraLabelValue []string +} + +// updateBtrfsStats collects statistics for one bcache ID. +func (c *btrfsCollector) updateBtrfsStats(ch chan<- prometheus.Metric, s *btrfs.Stats) { + const subsystem = "btrfs" + + // Basic information about the filesystem. 
+ devLabels := []string{"uuid"} + + // Retrieve the metrics. + metrics := c.getMetrics(s) + + // Convert all gathered metrics to Prometheus Metrics and add to channel. + for _, m := range metrics { + labels := append(devLabels, m.extraLabel...) + + desc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, m.name), + m.desc, + labels, + nil, + ) + + labelValues := []string{s.UUID} + if len(m.extraLabelValue) > 0 { + labelValues = append(labelValues, m.extraLabelValue...) + } + + ch <- prometheus.MustNewConstMetric( + desc, + prometheus.GaugeValue, + m.value, + labelValues..., + ) + } +} + +// getMetrics returns metrics for the given Btrfs statistics. +func (c *btrfsCollector) getMetrics(s *btrfs.Stats) []btrfsMetric { + metrics := []btrfsMetric{ + { + name: "info", + desc: "Filesystem information", + value: 1, + extraLabel: []string{"label"}, + extraLabelValue: []string{s.Label}, + }, + { + name: "global_rsv_size_bytes", + desc: "Size of global reserve.", + value: float64(s.Allocation.GlobalRsvSize), + }, + } + + // Information about devices. + for n, dev := range s.Devices { + metrics = append(metrics, btrfsMetric{ + name: "device_size_bytes", + desc: "Size of a device that is part of the filesystem.", + value: float64(dev.Size), + extraLabel: []string{"device"}, + extraLabelValue: []string{n}, + }) + } + + // Information about data, metadata and system data. + metrics = append(metrics, c.getAllocationStats("data", s.Allocation.Data)...) + metrics = append(metrics, c.getAllocationStats("metadata", s.Allocation.Metadata)...) + metrics = append(metrics, c.getAllocationStats("system", s.Allocation.System)...) + + return metrics +} + +// getAllocationStats returns allocation metrics for the given Btrfs Allocation statistics. 
+func (c *btrfsCollector) getAllocationStats(a string, s *btrfs.AllocationStats) []btrfsMetric { + metrics := []btrfsMetric{ + { + name: "reserved_bytes", + desc: "Amount of space reserved for a data type", + value: float64(s.ReservedBytes), + extraLabel: []string{"block_group_type"}, + extraLabelValue: []string{a}, + }, + } + + // Add all layout statistics. + for layout, stats := range s.Layouts { + metrics = append(metrics, c.getLayoutStats(a, layout, stats)...) + } + + return metrics +} + +// getLayoutStats returns metrics for a data layout. +func (c *btrfsCollector) getLayoutStats(a, l string, s *btrfs.LayoutUsage) []btrfsMetric { + return []btrfsMetric{ + { + name: "used_bytes", + desc: "Amount of used space by a layout/data type", + value: float64(s.UsedBytes), + extraLabel: []string{"block_group_type", "mode"}, + extraLabelValue: []string{a, l}, + }, + { + name: "size_bytes", + desc: "Amount of space allocated for a layout/data type", + value: float64(s.TotalBytes), + extraLabel: []string{"block_group_type", "mode"}, + extraLabelValue: []string{a, l}, + }, + { + name: "allocation_ratio", + desc: "Data allocation ratio for a layout/data type", + value: s.Ratio, + extraLabel: []string{"block_group_type", "mode"}, + extraLabelValue: []string{a, l}, + }, + } +} diff --git a/collector/btrfs_linux_test.go b/collector/btrfs_linux_test.go new file mode 100644 index 0000000000..7ce19aafab --- /dev/null +++ b/collector/btrfs_linux_test.go @@ -0,0 +1,119 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nobtrfs +// +build !nobtrfs + +package collector + +import ( + "strings" + "testing" + + "github.com/prometheus/procfs/btrfs" +) + +var expectedBtrfsMetrics = [][]btrfsMetric{ + { + {name: "info", value: 1, extraLabel: []string{"label"}, extraLabelValue: []string{"fixture"}}, + {name: "global_rsv_size_bytes", value: 1.6777216e+07}, + {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop25"}}, + {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop26"}}, + {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"data"}}, + {name: "used_bytes", value: 8.08189952e+08, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid0"}}, + {name: "size_bytes", value: 2.147483648e+09, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid0"}}, + {name: "allocation_ratio", value: 1, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid0"}}, + {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"metadata"}}, + {name: "used_bytes", value: 933888, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"metadata", "raid1"}}, + {name: "size_bytes", value: 1.073741824e+09, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"metadata", "raid1"}}, + {name: "allocation_ratio", value: 2, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"metadata", "raid1"}}, + {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"system"}}, + {name: "used_bytes", value: 16384, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: 
[]string{"system", "raid1"}}, + {name: "size_bytes", value: 8.388608e+06, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid1"}}, + {name: "allocation_ratio", value: 2, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid1"}}, + }, + { + {name: "info", value: 1, extraLabel: []string{"label"}, extraLabelValue: []string{""}}, + {name: "global_rsv_size_bytes", value: 1.6777216e+07}, + {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop22"}}, + {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop23"}}, + {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop24"}}, + {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop25"}}, + {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"data"}}, + {name: "used_bytes", value: 0, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid5"}}, + {name: "size_bytes", value: 6.44087808e+08, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid5"}}, + {name: "allocation_ratio", value: 1.3333333333333333, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid5"}}, + {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"metadata"}}, + {name: "used_bytes", value: 114688, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"metadata", "raid6"}}, + {name: "size_bytes", value: 4.29391872e+08, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"metadata", "raid6"}}, + {name: "allocation_ratio", value: 2, extraLabel: []string{"block_group_type", "mode"}, 
extraLabelValue: []string{"metadata", "raid6"}}, + {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"system"}}, + {name: "used_bytes", value: 16384, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid6"}}, + {name: "size_bytes", value: 1.6777216e+07, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid6"}}, + {name: "allocation_ratio", value: 2, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid6"}}, + }, +} + +func checkMetric(exp, got *btrfsMetric) bool { + if exp.name != got.name || + exp.value != got.value || + len(exp.extraLabel) != len(got.extraLabel) || + len(exp.extraLabelValue) != len(got.extraLabelValue) { + return false + } + + for i := range exp.extraLabel { + if exp.extraLabel[i] != got.extraLabel[i] { + return false + } + + // Devices (loopXX) can appear in random order, so just check the first 4 characters. 
+ if strings.HasPrefix(got.extraLabelValue[i], "loop") && + exp.extraLabelValue[i][:4] == got.extraLabelValue[i][:4] { + continue + } + + if exp.extraLabelValue[i] != got.extraLabelValue[i] { + return false + } + } + + return true +} + +func TestBtrfs(t *testing.T) { + fs, _ := btrfs.NewFS("fixtures/sys") + collector := &btrfsCollector{fs: fs} + + stats, err := collector.fs.Stats() + if err != nil { + t.Fatalf("Failed to retrieve Btrfs stats: %v", err) + } + if len(stats) != len(expectedBtrfsMetrics) { + t.Fatalf("Unexpected number of Btrfs stats: expected %v, got %v", len(expectedBtrfsMetrics), len(stats)) + } + + for i, s := range stats { + metrics := collector.getMetrics(s) + if len(metrics) != len(expectedBtrfsMetrics[i]) { + t.Fatalf("Unexpected number of Btrfs metrics: expected %v, got %v", len(expectedBtrfsMetrics[i]), len(metrics)) + } + + for j, m := range metrics { + exp := expectedBtrfsMetrics[i][j] + if !checkMetric(&exp, &m) { + t.Errorf("Incorrect btrfs metric: expected %#v, got: %#v", exp, m) + } + } + } +} diff --git a/collector/buddyinfo.go b/collector/buddyinfo.go index ca99b98343..c3cc5e046d 100644 --- a/collector/buddyinfo.go +++ b/collector/buddyinfo.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nobuddyinfo -// +build !netbsd +//go:build !nobuddyinfo && !netbsd +// +build !nobuddyinfo,!netbsd package collector @@ -20,8 +20,9 @@ import ( "fmt" "strconv" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" "github.com/prometheus/procfs" ) @@ -30,7 +31,9 @@ const ( ) type buddyinfoCollector struct { - desc *prometheus.Desc + fs procfs.FS + desc *prometheus.Desc + logger log.Logger } func init() { @@ -38,29 +41,28 @@ func init() { } // NewBuddyinfoCollector returns a new Collector exposing buddyinfo stats. 
-func NewBuddyinfoCollector() (Collector, error) { +func NewBuddyinfoCollector(logger log.Logger) (Collector, error) { desc := prometheus.NewDesc( - prometheus.BuildFQName(namespace, buddyInfoSubsystem, "count"), + prometheus.BuildFQName(namespace, buddyInfoSubsystem, "blocks"), "Count of free blocks according to size.", []string{"node", "zone", "size"}, nil, ) - return &buddyinfoCollector{desc}, nil + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + return &buddyinfoCollector{fs, desc, logger}, nil } // Update calls (*buddyinfoCollector).getBuddyInfo to get the platform specific // buddyinfo metrics. func (c *buddyinfoCollector) Update(ch chan<- prometheus.Metric) error { - fs, err := procfs.NewFS(*procPath) - if err != nil { - return fmt.Errorf("failed to open procfs: %v", err) - } - - buddyInfo, err := fs.NewBuddyInfo() + buddyInfo, err := c.fs.BuddyInfo() if err != nil { - return fmt.Errorf("couldn't get buddyinfo: %s", err) + return fmt.Errorf("couldn't get buddyinfo: %w", err) } - log.Debugf("Set node_buddy: %#v", buddyInfo) + level.Debug(c.logger).Log("msg", "Set node_buddy", "buddyInfo", buddyInfo) for _, entry := range buddyInfo { for size, value := range entry.Sizes { ch <- prometheus.MustNewConstMetric( diff --git a/collector/collector.go b/collector/collector.go index 1176b6f3cd..54f0ff6d58 100644 --- a/collector/collector.go +++ b/collector/collector.go @@ -15,13 +15,15 @@ package collector import ( + "errors" "fmt" "sync" "time" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" - "gopkg.in/alecthomas/kingpin.v2" + kingpin "gopkg.in/alecthomas/kingpin.v2" ) // Namespace defines the common namespace to be used by all metrics. 
@@ -42,21 +44,20 @@ var ( ) ) -func warnDeprecated(collector string) { - log.Warnf("The %s collector is deprecated and will be removed in the future!", collector) -} - const ( defaultEnabled = true defaultDisabled = false ) var ( - factories = make(map[string]func() (Collector, error)) - collectorState = make(map[string]*bool) + factories = make(map[string]func(logger log.Logger) (Collector, error)) + initiatedCollectorsMtx = sync.Mutex{} + initiatedCollectors = make(map[string]Collector) + collectorState = make(map[string]*bool) + forcedCollectors = map[string]bool{} // collectors which have been explicitly enabled or disabled ) -func registerCollector(collector string, isDefaultEnabled bool, factory func() (Collector, error)) { +func registerCollector(collector string, isDefaultEnabled bool, factory func(logger log.Logger) (Collector, error)) { var helpDefaultState string if isDefaultEnabled { helpDefaultState = "enabled" @@ -68,62 +69,108 @@ func registerCollector(collector string, isDefaultEnabled bool, factory func() ( flagHelp := fmt.Sprintf("Enable the %s collector (default: %s).", collector, helpDefaultState) defaultValue := fmt.Sprintf("%v", isDefaultEnabled) - flag := kingpin.Flag(flagName, flagHelp).Default(defaultValue).Bool() + flag := kingpin.Flag(flagName, flagHelp).Default(defaultValue).Action(collectorFlagAction(collector)).Bool() collectorState[collector] = flag factories[collector] = factory } // NodeCollector implements the prometheus.Collector interface. -type nodeCollector struct { +type NodeCollector struct { Collectors map[string]Collector + logger log.Logger +} + +// DisableDefaultCollectors sets the collector state to false for all collectors which +// have not been explicitly enabled on the command line. 
+func DisableDefaultCollectors() { + for c := range collectorState { + if _, ok := forcedCollectors[c]; !ok { + *collectorState[c] = false + } + } +} + +// collectorFlagAction generates a new action function for the given collector +// to track whether it has been explicitly enabled or disabled from the command line. +// A new action function is needed for each collector flag because the ParseContext +// does not contain information about which flag called the action. +// See: https://github.com/alecthomas/kingpin/issues/294 +func collectorFlagAction(collector string) func(ctx *kingpin.ParseContext) error { + return func(ctx *kingpin.ParseContext) error { + forcedCollectors[collector] = true + return nil + } } -// NewNodeCollector creates a new NodeCollector -func NewNodeCollector() (*nodeCollector, error) { +// NewNodeCollector creates a new NodeCollector. +func NewNodeCollector(logger log.Logger, filters ...string) (*NodeCollector, error) { + f := make(map[string]bool) + for _, filter := range filters { + enabled, exist := collectorState[filter] + if !exist { + return nil, fmt.Errorf("missing collector: %s", filter) + } + if !*enabled { + return nil, fmt.Errorf("disabled collector: %s", filter) + } + f[filter] = true + } collectors := make(map[string]Collector) + initiatedCollectorsMtx.Lock() + defer initiatedCollectorsMtx.Unlock() for key, enabled := range collectorState { - if *enabled { - collector, err := factories[key]() + if !*enabled || (len(f) > 0 && !f[key]) { + continue + } + if collector, ok := initiatedCollectors[key]; ok { + collectors[key] = collector + } else { + collector, err := factories[key](log.With(logger, "collector", key)) if err != nil { return nil, err } collectors[key] = collector + initiatedCollectors[key] = collector } } - return &nodeCollector{Collectors: collectors}, nil + return &NodeCollector{Collectors: collectors, logger: logger}, nil } // Describe implements the prometheus.Collector interface. 
-func (n nodeCollector) Describe(ch chan<- *prometheus.Desc) { +func (n NodeCollector) Describe(ch chan<- *prometheus.Desc) { ch <- scrapeDurationDesc ch <- scrapeSuccessDesc } // Collect implements the prometheus.Collector interface. -func (n nodeCollector) Collect(ch chan<- prometheus.Metric) { +func (n NodeCollector) Collect(ch chan<- prometheus.Metric) { wg := sync.WaitGroup{} wg.Add(len(n.Collectors)) for name, c := range n.Collectors { go func(name string, c Collector) { - execute(name, c, ch) + execute(name, c, ch, n.logger) wg.Done() }(name, c) } wg.Wait() } -func execute(name string, c Collector, ch chan<- prometheus.Metric) { +func execute(name string, c Collector, ch chan<- prometheus.Metric, logger log.Logger) { begin := time.Now() err := c.Update(ch) duration := time.Since(begin) var success float64 if err != nil { - log.Errorf("ERROR: %s collector failed after %fs: %s", name, duration.Seconds(), err) + if IsNoDataError(err) { + level.Debug(logger).Log("msg", "collector returned no data", "name", name, "duration_seconds", duration.Seconds(), "err", err) + } else { + level.Error(logger).Log("msg", "collector failed", "name", name, "duration_seconds", duration.Seconds(), "err", err) + } success = 0 } else { - log.Debugf("OK: %s collector succeeded after %fs.", name, duration.Seconds()) + level.Debug(logger).Log("msg", "collector succeeded", "name", name, "duration_seconds", duration.Seconds()) success = 1 } ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name) @@ -144,3 +191,10 @@ type typedDesc struct { func (d *typedDesc) mustNewConstMetric(value float64, labels ...string) prometheus.Metric { return prometheus.MustNewConstMetric(d.desc, d.valueType, value, labels...) } + +// ErrNoData indicates the collector found no data to collect, but had no other error. 
+var ErrNoData = errors.New("collector returned no data") + +func IsNoDataError(err error) bool { + return err == ErrNoData +} diff --git a/collector/conntrack_linux.go b/collector/conntrack_linux.go index 4a0e3750c2..e4ea954900 100644 --- a/collector/conntrack_linux.go +++ b/collector/conntrack_linux.go @@ -11,17 +11,45 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !noconntrack // +build !noconntrack package collector import ( + "errors" + "fmt" + "os" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" ) type conntrackCollector struct { - current *prometheus.Desc - limit *prometheus.Desc + current *prometheus.Desc + limit *prometheus.Desc + found *prometheus.Desc + invalid *prometheus.Desc + ignore *prometheus.Desc + insert *prometheus.Desc + insertFailed *prometheus.Desc + drop *prometheus.Desc + earlyDrop *prometheus.Desc + searchRestart *prometheus.Desc + logger log.Logger +} + +type conntrackStatistics struct { + found uint64 // Number of searched entries which were successful + invalid uint64 // Number of packets seen which can not be tracked + ignore uint64 // Number of packets seen which are already connected to a conntrack entry + insert uint64 // Number of entries inserted into the list + insertFailed uint64 // Number of entries for which list insertion was attempted but failed (happens if the same entry is already present) + drop uint64 // Number of packets dropped due to conntrack failure. 
Either new conntrack entry allocation failed, or protocol helper dropped the packet + earlyDrop uint64 // Number of dropped conntrack entries to make room for new ones, if maximum table size was reached + searchRestart uint64 // Number of conntrack table lookups which had to be restarted due to hashtable resizes } func init() { @@ -29,7 +57,7 @@ func init() { } // NewConntrackCollector returns a new Collector exposing conntrack stats. -func NewConntrackCollector() (Collector, error) { +func NewConntrackCollector(logger log.Logger) (Collector, error) { return &conntrackCollector{ current: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "nf_conntrack_entries"), @@ -41,24 +69,120 @@ func NewConntrackCollector() (Collector, error) { "Maximum size of connection tracking table.", nil, nil, ), + found: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_found"), + "Number of searched entries which were successful.", + nil, nil, + ), + invalid: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_invalid"), + "Number of packets seen which can not be tracked.", + nil, nil, + ), + ignore: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_ignore"), + "Number of packets seen which are already connected to a conntrack entry.", + nil, nil, + ), + insert: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_insert"), + "Number of entries inserted into the list.", + nil, nil, + ), + insertFailed: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_insert_failed"), + "Number of entries for which list insertion was attempted but failed.", + nil, nil, + ), + drop: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_drop"), + "Number of packets dropped due to conntrack failure.", + nil, nil, + ), + earlyDrop: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_early_drop"), + "Number of 
dropped conntrack entries to make room for new ones, if maximum table size was reached.", + nil, nil, + ), + searchRestart: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_search_restart"), + "Number of conntrack table lookups which had to be restarted due to hashtable resizes.", + nil, nil, + ), + logger: logger, }, nil } func (c *conntrackCollector) Update(ch chan<- prometheus.Metric) error { value, err := readUintFromFile(procFilePath("sys/net/netfilter/nf_conntrack_count")) if err != nil { - // Conntrack probably not loaded into the kernel. - return nil + return c.handleErr(err) } ch <- prometheus.MustNewConstMetric( c.current, prometheus.GaugeValue, float64(value)) value, err = readUintFromFile(procFilePath("sys/net/netfilter/nf_conntrack_max")) if err != nil { - return nil + return c.handleErr(err) } ch <- prometheus.MustNewConstMetric( c.limit, prometheus.GaugeValue, float64(value)) + conntrackStats, err := getConntrackStatistics() + if err != nil { + return c.handleErr(err) + } + + ch <- prometheus.MustNewConstMetric( + c.found, prometheus.GaugeValue, float64(conntrackStats.found)) + ch <- prometheus.MustNewConstMetric( + c.invalid, prometheus.GaugeValue, float64(conntrackStats.invalid)) + ch <- prometheus.MustNewConstMetric( + c.ignore, prometheus.GaugeValue, float64(conntrackStats.ignore)) + ch <- prometheus.MustNewConstMetric( + c.insert, prometheus.GaugeValue, float64(conntrackStats.insert)) + ch <- prometheus.MustNewConstMetric( + c.insertFailed, prometheus.GaugeValue, float64(conntrackStats.insertFailed)) + ch <- prometheus.MustNewConstMetric( + c.drop, prometheus.GaugeValue, float64(conntrackStats.drop)) + ch <- prometheus.MustNewConstMetric( + c.earlyDrop, prometheus.GaugeValue, float64(conntrackStats.earlyDrop)) + ch <- prometheus.MustNewConstMetric( + c.searchRestart, prometheus.GaugeValue, float64(conntrackStats.searchRestart)) return nil } + +func (c *conntrackCollector) handleErr(err error) error { + if 
errors.Is(err, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "conntrack probably not loaded") + return ErrNoData + } + return fmt.Errorf("failed to retrieve conntrack stats: %w", err) +} + +func getConntrackStatistics() (*conntrackStatistics, error) { + c := conntrackStatistics{} + + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + + connStats, err := fs.ConntrackStat() + if err != nil { + return nil, err + } + + for _, connStat := range connStats { + c.found += connStat.Found + c.invalid += connStat.Invalid + c.ignore += connStat.Ignore + c.insert += connStat.Insert + c.insertFailed += connStat.InsertFailed + c.drop += connStat.Drop + c.earlyDrop += connStat.EarlyDrop + c.searchRestart += connStat.SearchRestart + } + + return &c, nil +} diff --git a/collector/uname_linux_uint8.go b/collector/cpu_common.go similarity index 59% rename from collector/uname_linux_uint8.go rename to collector/cpu_common.go index bd5385f18b..6f644516bf 100644 --- a/collector/uname_linux_uint8.go +++ b/collector/cpu_common.go @@ -1,4 +1,4 @@ -// Copyright 2015 The Prometheus Authors +// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,17 +11,23 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build !nouname,linux,arm !nouname,linux,ppc64 !nouname,linux,ppc64le !nouname,linux,s390x +//go:build !nocpu +// +build !nocpu package collector -func unameToString(input [65]uint8) string { - var str string - for _, a := range input { - if a == 0 { - break - } - str += string(a) - } - return str -} +import ( + "github.com/prometheus/client_golang/prometheus" +) + +const ( + cpuCollectorSubsystem = "cpu" +) + +var ( + nodeCPUSecondsDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "seconds_total"), + "Seconds the CPUs spent in each mode.", + []string{"cpu", "mode"}, nil, + ) +) diff --git a/collector/cpu_darwin.go b/collector/cpu_darwin.go index 37b014c2b7..6c461cc348 100644 --- a/collector/cpu_darwin.go +++ b/collector/cpu_darwin.go @@ -14,6 +14,7 @@ // Based on gopsutil/cpu/cpu_darwin_cgo.go @ ae251eb which is licensed under // BSD. See https://github.com/shirou/gopsutil/blob/master/LICENSE for details. +//go:build !nocpu // +build !nocpu package collector @@ -25,17 +26,20 @@ import ( "strconv" "unsafe" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) /* #cgo LDFLAGS: #include +#include #include #include #include #include #include +#include #if TARGET_OS_MAC #include #endif @@ -45,10 +49,11 @@ import ( import "C" // ClocksPerSec default value. from time.h -const ClocksPerSec = float64(128) +const ClocksPerSec = float64(C.CLK_TCK) type statCollector struct { - cpu *prometheus.Desc + cpu *prometheus.Desc + logger log.Logger } func init() { @@ -56,13 +61,10 @@ func init() { } // NewCPUCollector returns a new Collector exposing CPU stats. 
-func NewCPUCollector() (Collector, error) { +func NewCPUCollector(logger log.Logger) (Collector, error) { return &statCollector{ - cpu: prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "cpu"), - "Seconds the cpus spent in each mode.", - []string{"cpu", "mode"}, nil, - ), + cpu: nodeCPUSecondsDesc, + logger: logger, }, nil } @@ -111,7 +113,7 @@ func (c *statCollector) Update(ch chan<- prometheus.Metric) error { "nice": C.CPU_STATE_NICE, "idle": C.CPU_STATE_IDLE, } { - ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, float64(cpuTicks[v])/ClocksPerSec, "cpu"+strconv.Itoa(i), k) + ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, float64(cpuTicks[v])/ClocksPerSec, strconv.Itoa(i), k) } } return nil diff --git a/collector/cpu_dragonfly.go b/collector/cpu_dragonfly.go index 477a910c5f..61cba1eee4 100644 --- a/collector/cpu_dragonfly.go +++ b/collector/cpu_dragonfly.go @@ -11,15 +11,17 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nocpu // +build !nocpu package collector import ( "errors" - "fmt" + "strconv" "unsafe" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -31,7 +33,7 @@ import ( #include int -getCPUTimes(uint64_t **cputime, size_t *cpu_times_len, long *freq) { +getCPUTimes(uint64_t **cputime, size_t *cpu_times_len) { size_t len; // Get number of cpu cores. @@ -44,15 +46,6 @@ getCPUTimes(uint64_t **cputime, size_t *cpu_times_len, long *freq) { return -1; } - // The bump on each statclock is - // ((cur_systimer - prev_systimer) * systimer_freq) >> 32 - // where - // systimer_freq = sysctl kern.cputimer.freq - len = sizeof(*freq); - if (sysctlbyname("kern.cputimer.freq", freq, &len, NULL, 0)) { - return -1; - } - // Get the cpu times. 
struct kinfo_cputime cp_t[ncpu]; bzero(cp_t, sizeof(struct kinfo_cputime)*ncpu); @@ -84,7 +77,8 @@ import "C" const maxCPUTimesLen = C.MAXCPU * C.CPUSTATES type statCollector struct { - cpu *prometheus.Desc + cpu *prometheus.Desc + logger log.Logger } func init() { @@ -92,33 +86,28 @@ func init() { } // NewStatCollector returns a new Collector exposing CPU stats. -func NewStatCollector() (Collector, error) { +func NewStatCollector(logger log.Logger) (Collector, error) { return &statCollector{ - cpu: prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "cpu"), - "Seconds the cpus spent in each mode.", - []string{"cpu", "mode"}, nil, - ), + cpu: nodeCPUSecondsDesc, + logger: logger, }, nil } func getDragonFlyCPUTimes() ([]float64, error) { - // We want time spent per-cpu per CPUSTATE. + // We want time spent per-CPU per CPUSTATE. // CPUSTATES (number of CPUSTATES) is defined as 5U. // States: CP_USER | CP_NICE | CP_SYS | CP_IDLE | CP_INTR // - // Each value is a counter incremented at frequency - // kern.cputimer.freq + // Each value is in microseconds // // Look into sys/kern/kern_clock.c for details. 
var ( cpuTimesC *C.uint64_t - cpuTimerFreq C.long cpuTimesLength C.size_t ) - if C.getCPUTimes(&cpuTimesC, &cpuTimesLength, &cpuTimerFreq) == -1 { + if C.getCPUTimes(&cpuTimesC, &cpuTimesLength) == -1 { return nil, errors.New("could not retrieve CPU times") } defer C.free(unsafe.Pointer(cpuTimesC)) @@ -127,7 +116,7 @@ func getDragonFlyCPUTimes() ([]float64, error) { cpuTimes := make([]float64, cpuTimesLength) for i, value := range cput { - cpuTimes[i] = float64(value) / float64(cpuTimerFreq) + cpuTimes[i] = float64(value) / float64(1000000) } return cpuTimes, nil } @@ -143,7 +132,7 @@ func (c *statCollector) Update(ch chan<- prometheus.Metric) error { // Export order: user nice sys intr idle cpuFields := []string{"user", "nice", "sys", "interrupt", "idle"} for i, value := range cpuTimes { - cpux := fmt.Sprintf("cpu%d", i/fieldsCount) + cpux := strconv.Itoa(i / fieldsCount) ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, value, cpux, cpuFields[i%fieldsCount]) } diff --git a/collector/cpu_dragonfly_test.go b/collector/cpu_dragonfly_test.go index 4be0d5bbbd..31f55c2ea0 100644 --- a/collector/cpu_dragonfly_test.go +++ b/collector/cpu_dragonfly_test.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nocpu // +build !nocpu package collector diff --git a/collector/cpu_freebsd.go b/collector/cpu_freebsd.go index ce5dd19292..96b0f033cb 100644 --- a/collector/cpu_freebsd.go +++ b/collector/cpu_freebsd.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nocpu // +build !nocpu package collector @@ -21,8 +22,9 @@ import ( "strconv" "unsafe" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" "golang.org/x/sys/unix" ) @@ -81,8 +83,9 @@ func getCPUTimes() ([]cputime, error) { } type statCollector struct { - cpu typedDesc - temp typedDesc + cpu typedDesc + temp typedDesc + logger log.Logger } func init() { @@ -90,18 +93,15 @@ func init() { } // NewStatCollector returns a new Collector exposing CPU stats. -func NewStatCollector() (Collector, error) { +func NewStatCollector(logger log.Logger) (Collector, error) { return &statCollector{ - cpu: typedDesc{prometheus.NewDesc( - prometheus.BuildFQName(namespace, "cpu", "seconds_total"), - "Seconds the CPU spent in each mode.", - []string{"cpu", "mode"}, nil, - ), prometheus.CounterValue}, + cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, temp: typedDesc{prometheus.NewDesc( - prometheus.BuildFQName(namespace, "cpu", "temperature_celsius"), + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "temperature_celsius"), "CPU temperature", []string{"cpu"}, nil, ), prometheus.GaugeValue}, + logger: logger, }, nil } @@ -134,15 +134,31 @@ func (c *statCollector) Update(ch chan<- prometheus.Metric) error { if err != nil { if err == unix.ENOENT { // No temperature information for this CPU - log.Debugf("no temperature information for CPU %d", cpu) + level.Debug(c.logger).Log("msg", "no temperature information for CPU", "cpu", cpu) } else { // Unexpected error ch <- c.temp.mustNewConstMetric(math.NaN(), lcpu) - log.Errorf("failed to query CPU temperature for CPU %d: %s", cpu, err) + level.Error(c.logger).Log("msg", "failed to query CPU temperature for CPU", "cpu", cpu, "err", err) } continue } - ch <- c.temp.mustNewConstMetric(float64(temp-2732)/10, lcpu) + + // Temp is a signed integer in deci-degrees Kelvin. 
+ // Cast uint32 to int32 and convert to float64 degrees Celsius. + // + // 2732 is used as the conversion constant for deci-degrees + // Kelvin, in multiple places in the kernel that feed into this + // sysctl, so we want to maintain consistency: + // + // sys/dev/amdtemp/amdtemp.c + // #define AMDTEMP_ZERO_C_TO_K 2732 + // + // sys/dev/acpica/acpi_thermal.c + // #define TZ_ZEROC 2732 + // + // sys/dev/coretemp/coretemp.c + // #define TZ_ZEROC 2732 + ch <- c.temp.mustNewConstMetric(float64(int32(temp)-2732)/10, lcpu) } return err } diff --git a/collector/cpu_linux.go b/collector/cpu_linux.go index 9cbf2371d8..84ad373b00 100644 --- a/collector/cpu_linux.go +++ b/collector/cpu_linux.go @@ -11,195 +11,381 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nocpu // +build !nocpu package collector import ( "fmt" - "io/ioutil" - "os" "path/filepath" "regexp" - "strings" + "strconv" + "sync" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" "github.com/prometheus/procfs" -) - -const ( - cpuCollectorSubsystem = "cpu" -) - -var ( - digitRegexp = regexp.MustCompile("[0-9]+") + "gopkg.in/alecthomas/kingpin.v2" ) type cpuCollector struct { + fs procfs.FS cpu *prometheus.Desc - cpuFreq *prometheus.Desc - cpuFreqMin *prometheus.Desc - cpuFreqMax *prometheus.Desc + cpuInfo *prometheus.Desc + cpuFlagsInfo *prometheus.Desc + cpuBugsInfo *prometheus.Desc + cpuGuest *prometheus.Desc cpuCoreThrottle *prometheus.Desc cpuPackageThrottle *prometheus.Desc + logger log.Logger + cpuStats []procfs.CPUStat + cpuStatsMutex sync.Mutex + + cpuFlagsIncludeRegexp *regexp.Regexp + cpuBugsIncludeRegexp *regexp.Regexp } +// Idle jump back limit in seconds. 
+const jumpBackSeconds = 3.0 + +var ( + enableCPUGuest = kingpin.Flag("collector.cpu.guest", "Enables metric node_cpu_guest_seconds_total").Default("true").Bool() + enableCPUInfo = kingpin.Flag("collector.cpu.info", "Enables metric cpu_info").Bool() + flagsInclude = kingpin.Flag("collector.cpu.info.flags-include", "Filter the `flags` field in cpuInfo with a value that must be a regular expression").String() + bugsInclude = kingpin.Flag("collector.cpu.info.bugs-include", "Filter the `bugs` field in cpuInfo with a value that must be a regular expression").String() + jumpBackDebugMessage = fmt.Sprintf("CPU Idle counter jumped backwards more than %f seconds, possible hotplug event, resetting CPU stats", jumpBackSeconds) +) + func init() { registerCollector("cpu", defaultEnabled, NewCPUCollector) } // NewCPUCollector returns a new Collector exposing kernel/system statistics. -func NewCPUCollector() (Collector, error) { - return &cpuCollector{ - cpu: prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", cpuCollectorSubsystem), - "Seconds the cpus spent in each mode.", - []string{"cpu", "mode"}, nil, +func NewCPUCollector(logger log.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + c := &cpuCollector{ + fs: fs, + cpu: nodeCPUSecondsDesc, + cpuInfo: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "info"), + "CPU information from /proc/cpuinfo.", + []string{"package", "core", "cpu", "vendor", "family", "model", "model_name", "microcode", "stepping", "cachesize"}, nil, ), - cpuFreq: prometheus.NewDesc( - prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"), - "Current cpu thread frequency in hertz.", - []string{"cpu"}, nil, + cpuFlagsInfo: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "flag_info"), + "The `flags` field of CPU information from /proc/cpuinfo taken from the first core.", + 
[]string{"flag"}, nil, ), - cpuFreqMin: prometheus.NewDesc( - prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_min_hertz"), - "Minimum cpu thread frequency in hertz.", - []string{"cpu"}, nil, + cpuBugsInfo: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "bug_info"), + "The `bugs` field of CPU information from /proc/cpuinfo taken from the first core.", + []string{"bug"}, nil, ), - cpuFreqMax: prometheus.NewDesc( - prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_max_hertz"), - "Maximum cpu thread frequency in hertz.", - []string{"cpu"}, nil, + cpuGuest: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "guest_seconds_total"), + "Seconds the CPUs spent in guests (VMs) for each mode.", + []string{"cpu", "mode"}, nil, ), - // FIXME: This should be a per core metric, not per cpu! cpuCoreThrottle: prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "core_throttles_total"), - "Number of times this cpu core has been throttled.", - []string{"cpu"}, nil, + "Number of times this CPU core has been throttled.", + []string{"package", "core"}, nil, ), cpuPackageThrottle: prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "package_throttles_total"), - "Number of times this cpu package has been throttled.", - []string{"node"}, nil, + "Number of times this CPU package has been throttled.", + []string{"package"}, nil, ), - }, nil + logger: logger, + } + err = c.compileIncludeFlags(flagsInclude, bugsInclude) + if err != nil { + return nil, fmt.Errorf("fail to compile --collector.cpu.info.flags-include and --collector.cpu.info.bugs-include, the values of them must be regular expressions: %w", err) + } + return c, nil +} + +func (c *cpuCollector) compileIncludeFlags(flagsIncludeFlag, bugsIncludeFlag *string) error { + if (*flagsIncludeFlag != "" || *bugsIncludeFlag != "") && !*enableCPUInfo { + *enableCPUInfo = true + 
level.Info(c.logger).Log("msg", "--collector.cpu.info has been set to `true` because you set the following flags, like --collector.cpu.info.flags-include and --collector.cpu.info.bugs-include") + } + + var err error + if *flagsIncludeFlag != "" { + c.cpuFlagsIncludeRegexp, err = regexp.Compile(*flagsIncludeFlag) + if err != nil { + return err + } + } + if *bugsIncludeFlag != "" { + c.cpuBugsIncludeRegexp, err = regexp.Compile(*bugsIncludeFlag) + if err != nil { + return err + } + } + return nil } // Update implements Collector and exposes cpu related metrics from /proc/stat and /sys/.../cpu/. func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error { - if err := c.updateStat(ch); err != nil { - return err + if *enableCPUInfo { + if err := c.updateInfo(ch); err != nil { + return err + } } - if err := c.updateCPUfreq(ch); err != nil { + if err := c.updateStat(ch); err != nil { return err } - return nil + return c.updateThermalThrottle(ch) } -// updateCPUfreq reads /sys/bus/cpu/devices/cpu* and expose cpu frequency statistics. -func (c *cpuCollector) updateCPUfreq(ch chan<- prometheus.Metric) error { - cpus, err := filepath.Glob(sysFilePath("bus/cpu/devices/cpu[0-9]*")) +// updateInfo reads /proc/cpuinfo +func (c *cpuCollector) updateInfo(ch chan<- prometheus.Metric) error { + info, err := c.fs.CPUInfo() if err != nil { return err } + for _, cpu := range info { + ch <- prometheus.MustNewConstMetric(c.cpuInfo, + prometheus.GaugeValue, + 1, + cpu.PhysicalID, + cpu.CoreID, + strconv.Itoa(int(cpu.Processor)), + cpu.VendorID, + cpu.CPUFamily, + cpu.Model, + cpu.ModelName, + cpu.Microcode, + cpu.Stepping, + cpu.CacheSize) + } - var value uint64 - - // cpu loop - for _, cpu := range cpus { - _, cpuname := filepath.Split(cpu) - - if _, err := os.Stat(filepath.Join(cpu, "cpufreq")); os.IsNotExist(err) { - log.Debugf("CPU %q is missing cpufreq", cpu) - } else { - // sysfs cpufreq values are kHz, thus multiply by 1000 to export base units (hz). 
- // See https://www.kernel.org/doc/Documentation/cpu-freq/user-guide.txt - if value, err = readUintFromFile(filepath.Join(cpu, "cpufreq", "scaling_cur_freq")); err != nil { - return err - } - ch <- prometheus.MustNewConstMetric(c.cpuFreq, prometheus.GaugeValue, float64(value)*1000.0, cpuname) + if len(info) != 0 { + cpu := info[0] + if err := updateFieldInfo(cpu.Flags, c.cpuFlagsIncludeRegexp, c.cpuFlagsInfo, ch); err != nil { + return err + } + if err := updateFieldInfo(cpu.Bugs, c.cpuBugsIncludeRegexp, c.cpuBugsInfo, ch); err != nil { + return err + } + } - if value, err = readUintFromFile(filepath.Join(cpu, "cpufreq", "scaling_min_freq")); err != nil { - return err - } - ch <- prometheus.MustNewConstMetric(c.cpuFreqMin, prometheus.GaugeValue, float64(value)*1000.0, cpuname) + return nil +} - if value, err = readUintFromFile(filepath.Join(cpu, "cpufreq", "scaling_max_freq")); err != nil { - return err - } - ch <- prometheus.MustNewConstMetric(c.cpuFreqMax, prometheus.GaugeValue, float64(value)*1000.0, cpuname) - } +func updateFieldInfo(valueList []string, filter *regexp.Regexp, desc *prometheus.Desc, ch chan<- prometheus.Metric) error { + if filter == nil { + return nil + } - if _, err := os.Stat(filepath.Join(cpu, "thermal_throttle")); os.IsNotExist(err) { - log.Debugf("CPU %q is missing thermal_throttle", cpu) + for _, val := range valueList { + if !filter.MatchString(val) { continue } - if value, err = readUintFromFile(filepath.Join(cpu, "thermal_throttle", "core_throttle_count")); err != nil { - return err - } - ch <- prometheus.MustNewConstMetric(c.cpuCoreThrottle, prometheus.CounterValue, float64(value), cpuname) + ch <- prometheus.MustNewConstMetric(desc, + prometheus.GaugeValue, + 1, + val, + ) } + return nil +} - pkgs, err := filepath.Glob(sysFilePath("bus/node/devices/node[0-9]*")) +// updateThermalThrottle reads /sys/devices/system/cpu/cpu* and expose thermal throttle statistics. 
+func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error { + cpus, err := filepath.Glob(sysFilePath("devices/system/cpu/cpu[0-9]*")) if err != nil { return err } - // package/node loop - for _, pkg := range pkgs { - if _, err := os.Stat(filepath.Join(pkg, "cpulist")); os.IsNotExist(err) { - log.Debugf("package %q is missing cpulist", pkg) + packageThrottles := make(map[uint64]uint64) + packageCoreThrottles := make(map[uint64]map[uint64]uint64) + + // cpu loop + for _, cpu := range cpus { + // See + // https://www.kernel.org/doc/Documentation/x86/topology.txt + // https://www.kernel.org/doc/Documentation/cputopology.txt + // https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-devices-system-cpu + var err error + var physicalPackageID, coreID uint64 + + // topology/physical_package_id + if physicalPackageID, err = readUintFromFile(filepath.Join(cpu, "topology", "physical_package_id")); err != nil { + level.Debug(c.logger).Log("msg", "CPU is missing physical_package_id", "cpu", cpu) continue } - cpulist, err := ioutil.ReadFile(filepath.Join(pkg, "cpulist")) - if err != nil { - log.Debugf("could not read cpulist of package %q", pkg) - return err - } - // cpulist example of one package/node with HT: "0-11,24-35" - line := strings.Split(string(cpulist), "\n")[0] - firstCPU := strings.FieldsFunc(line, func(r rune) bool { - return r == '-' || r == ',' - })[0] - if _, err := os.Stat(filepath.Join(pkg, "cpu"+firstCPU, "thermal_throttle", "package_throttle_count")); os.IsNotExist(err) { - log.Debugf("Package %q CPU %q is missing package_throttle", pkg, firstCPU) + // topology/core_id + if coreID, err = readUintFromFile(filepath.Join(cpu, "topology", "core_id")); err != nil { + level.Debug(c.logger).Log("msg", "CPU is missing core_id", "cpu", cpu) continue } - if value, err = readUintFromFile(filepath.Join(pkg, "cpu"+firstCPU, "thermal_throttle", "package_throttle_count")); err != nil { - return err + + // metric node_cpu_core_throttles_total + 
// + // We process this metric before the package throttles as there + // are CPU+kernel combinations that only present core throttles + // but no package throttles. + // Seen e.g. on an Intel Xeon E5472 system with RHEL 6.9 kernel. + if _, present := packageCoreThrottles[physicalPackageID]; !present { + packageCoreThrottles[physicalPackageID] = make(map[uint64]uint64) + } + if _, present := packageCoreThrottles[physicalPackageID][coreID]; !present { + // Read thermal_throttle/core_throttle_count only once + if coreThrottleCount, err := readUintFromFile(filepath.Join(cpu, "thermal_throttle", "core_throttle_count")); err == nil { + packageCoreThrottles[physicalPackageID][coreID] = coreThrottleCount + } else { + level.Debug(c.logger).Log("msg", "CPU is missing core_throttle_count", "cpu", cpu) + } + } + + // metric node_cpu_package_throttles_total + if _, present := packageThrottles[physicalPackageID]; !present { + // Read thermal_throttle/package_throttle_count only once + if packageThrottleCount, err := readUintFromFile(filepath.Join(cpu, "thermal_throttle", "package_throttle_count")); err == nil { + packageThrottles[physicalPackageID] = packageThrottleCount + } else { + level.Debug(c.logger).Log("msg", "CPU is missing package_throttle_count", "cpu", cpu) + } } - pkgno := digitRegexp.FindAllString(pkg, 1)[0] - ch <- prometheus.MustNewConstMetric(c.cpuPackageThrottle, prometheus.CounterValue, float64(value), pkgno) } + for physicalPackageID, packageThrottleCount := range packageThrottles { + ch <- prometheus.MustNewConstMetric(c.cpuPackageThrottle, + prometheus.CounterValue, + float64(packageThrottleCount), + strconv.FormatUint(physicalPackageID, 10)) + } + + for physicalPackageID, coreMap := range packageCoreThrottles { + for coreID, coreThrottleCount := range coreMap { + ch <- prometheus.MustNewConstMetric(c.cpuCoreThrottle, + prometheus.CounterValue, + float64(coreThrottleCount), + strconv.FormatUint(physicalPackageID, 10), + strconv.FormatUint(coreID, 10)) + } + 
} return nil } -// updateStat reads /proc/stat through procfs and exports cpu related metrics. +// updateStat reads /proc/stat through procfs and exports CPU-related metrics. func (c *cpuCollector) updateStat(ch chan<- prometheus.Metric) error { - fs, err := procfs.NewFS(*procPath) - if err != nil { - return fmt.Errorf("failed to open procfs: %v", err) - } - stats, err := fs.NewStat() + stats, err := c.fs.Stat() if err != nil { return err } - for cpuID, cpuStat := range stats.CPU { - cpuName := fmt.Sprintf("cpu%d", cpuID) - ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.User, cpuName, "user") - ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Nice, cpuName, "nice") - ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.System, cpuName, "system") - ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Idle, cpuName, "idle") - ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Iowait, cpuName, "iowait") - ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.IRQ, cpuName, "irq") - ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.SoftIRQ, cpuName, "softirq") - ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Steal, cpuName, "steal") - ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Guest, cpuName, "guest") - ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.GuestNice, cpuName, "guest_nice") + c.updateCPUStats(stats.CPU) + + // Acquire a lock to read the stats. 
+ c.cpuStatsMutex.Lock() + defer c.cpuStatsMutex.Unlock() + for cpuID, cpuStat := range c.cpuStats { + cpuNum := strconv.Itoa(cpuID) + ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.User, cpuNum, "user") + ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Nice, cpuNum, "nice") + ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.System, cpuNum, "system") + ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Idle, cpuNum, "idle") + ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Iowait, cpuNum, "iowait") + ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.IRQ, cpuNum, "irq") + ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.SoftIRQ, cpuNum, "softirq") + ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Steal, cpuNum, "steal") + + if *enableCPUGuest { + // Guest CPU is also accounted for in cpuStat.User and cpuStat.Nice, expose these as separate metrics. + ch <- prometheus.MustNewConstMetric(c.cpuGuest, prometheus.CounterValue, cpuStat.Guest, cpuNum, "user") + ch <- prometheus.MustNewConstMetric(c.cpuGuest, prometheus.CounterValue, cpuStat.GuestNice, cpuNum, "nice") + } } return nil } + +// updateCPUStats updates the internal cache of CPU stats. +func (c *cpuCollector) updateCPUStats(newStats []procfs.CPUStat) { + + // Acquire a lock to update the stats. + c.cpuStatsMutex.Lock() + defer c.cpuStatsMutex.Unlock() + + // Reset the cache if the list of CPUs has changed. + if len(c.cpuStats) != len(newStats) { + c.cpuStats = make([]procfs.CPUStat, len(newStats)) + } + + for i, n := range newStats { + // If idle jumps backwards by more than X seconds, assume we had a hotplug event and reset the stats for this CPU. 
+ if (c.cpuStats[i].Idle - n.Idle) >= jumpBackSeconds { + level.Debug(c.logger).Log("msg", jumpBackDebugMessage, "cpu", i, "old_value", c.cpuStats[i].Idle, "new_value", n.Idle) + c.cpuStats[i] = procfs.CPUStat{} + } + + if n.Idle >= c.cpuStats[i].Idle { + c.cpuStats[i].Idle = n.Idle + } else { + level.Debug(c.logger).Log("msg", "CPU Idle counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].Idle, "new_value", n.Idle) + } + + if n.User >= c.cpuStats[i].User { + c.cpuStats[i].User = n.User + } else { + level.Debug(c.logger).Log("msg", "CPU User counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].User, "new_value", n.User) + } + + if n.Nice >= c.cpuStats[i].Nice { + c.cpuStats[i].Nice = n.Nice + } else { + level.Debug(c.logger).Log("msg", "CPU Nice counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].Nice, "new_value", n.Nice) + } + + if n.System >= c.cpuStats[i].System { + c.cpuStats[i].System = n.System + } else { + level.Debug(c.logger).Log("msg", "CPU System counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].System, "new_value", n.System) + } + + if n.Iowait >= c.cpuStats[i].Iowait { + c.cpuStats[i].Iowait = n.Iowait + } else { + level.Debug(c.logger).Log("msg", "CPU Iowait counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].Iowait, "new_value", n.Iowait) + } + + if n.IRQ >= c.cpuStats[i].IRQ { + c.cpuStats[i].IRQ = n.IRQ + } else { + level.Debug(c.logger).Log("msg", "CPU IRQ counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].IRQ, "new_value", n.IRQ) + } + + if n.SoftIRQ >= c.cpuStats[i].SoftIRQ { + c.cpuStats[i].SoftIRQ = n.SoftIRQ + } else { + level.Debug(c.logger).Log("msg", "CPU SoftIRQ counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].SoftIRQ, "new_value", n.SoftIRQ) + } + + if n.Steal >= c.cpuStats[i].Steal { + c.cpuStats[i].Steal = n.Steal + } else { + level.Debug(c.logger).Log("msg", "CPU Steal counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].Steal, 
"new_value", n.Steal) + } + + if n.Guest >= c.cpuStats[i].Guest { + c.cpuStats[i].Guest = n.Guest + } else { + level.Debug(c.logger).Log("msg", "CPU Guest counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].Guest, "new_value", n.Guest) + } + + if n.GuestNice >= c.cpuStats[i].GuestNice { + c.cpuStats[i].GuestNice = n.GuestNice + } else { + level.Debug(c.logger).Log("msg", "CPU GuestNice counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].GuestNice, "new_value", n.GuestNice) + } + } +} diff --git a/collector/cpu_linux_test.go b/collector/cpu_linux_test.go new file mode 100644 index 0000000000..93b493b2e9 --- /dev/null +++ b/collector/cpu_linux_test.go @@ -0,0 +1,106 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !nocpu +// +build !nocpu + +package collector + +import ( + "reflect" + "testing" + + "github.com/go-kit/log" + "github.com/prometheus/procfs" +) + +func makeTestCPUCollector(s []procfs.CPUStat) *cpuCollector { + dup := make([]procfs.CPUStat, len(s)) + copy(dup, s) + return &cpuCollector{ + logger: log.NewNopLogger(), + cpuStats: dup, + } +} + +func TestCPU(t *testing.T) { + firstCPUStat := []procfs.CPUStat{{ + User: 100.0, + Nice: 100.0, + System: 100.0, + Idle: 100.0, + Iowait: 100.0, + IRQ: 100.0, + SoftIRQ: 100.0, + Steal: 100.0, + Guest: 100.0, + GuestNice: 100.0, + }} + + c := makeTestCPUCollector(firstCPUStat) + want := []procfs.CPUStat{{ + User: 101.0, + Nice: 101.0, + System: 101.0, + Idle: 101.0, + Iowait: 101.0, + IRQ: 101.0, + SoftIRQ: 101.0, + Steal: 101.0, + Guest: 101.0, + GuestNice: 101.0, + }} + c.updateCPUStats(want) + got := c.cpuStats + if !reflect.DeepEqual(want, got) { + t.Fatalf("should have %v CPU Stat: got %v", want, got) + } + + c = makeTestCPUCollector(firstCPUStat) + jumpBack := []procfs.CPUStat{{ + User: 99.9, + Nice: 99.9, + System: 99.9, + Idle: 99.9, + Iowait: 99.9, + IRQ: 99.9, + SoftIRQ: 99.9, + Steal: 99.9, + Guest: 99.9, + GuestNice: 99.9, + }} + c.updateCPUStats(jumpBack) + got = c.cpuStats + if reflect.DeepEqual(jumpBack, got) { + t.Fatalf("should have %v CPU Stat: got %v", firstCPUStat, got) + } + + c = makeTestCPUCollector(firstCPUStat) + resetIdle := []procfs.CPUStat{{ + User: 102.0, + Nice: 102.0, + System: 102.0, + Idle: 1.0, + Iowait: 102.0, + IRQ: 102.0, + SoftIRQ: 102.0, + Steal: 102.0, + Guest: 102.0, + GuestNice: 102.0, + }} + c.updateCPUStats(resetIdle) + got = c.cpuStats + if !reflect.DeepEqual(resetIdle, got) { + t.Fatalf("should have %v CPU Stat: got %v", resetIdle, got) + } +} diff --git a/collector/cpu_openbsd.go b/collector/cpu_openbsd.go new file mode 100644 index 0000000000..f025c08f8f --- /dev/null +++ b/collector/cpu_openbsd.go @@ -0,0 +1,83 @@ +// Copyright 2018 The Prometheus Authors +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build openbsd && !amd64 && !nocpu +// +build openbsd,!amd64,!nocpu + +package collector + +import ( + "strconv" + "unsafe" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" +) + +/* +#include +#include +*/ +import "C" + +type cpuCollector struct { + cpu typedDesc + logger log.Logger +} + +func init() { + registerCollector("cpu", defaultEnabled, NewCPUCollector) +} + +func NewCPUCollector(logger log.Logger) (Collector, error) { + return &cpuCollector{ + cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, + logger: logger, + }, nil +} + +func (c *cpuCollector) Update(ch chan<- prometheus.Metric) (err error) { + clockb, err := unix.SysctlRaw("kern.clockrate") + if err != nil { + return err + } + clock := *(*C.struct_clockinfo)(unsafe.Pointer(&clockb[0])) + hz := float64(clock.stathz) + + ncpus, err := unix.SysctlUint32("hw.ncpu") + if err != nil { + return err + } + + var cpTime [][C.CPUSTATES]C.int64_t + for i := 0; i < int(ncpus); i++ { + cpb, err := unix.SysctlRaw("kern.cp_time2", i) + if err != nil && err != unix.ENODEV { + return err + } + if err != unix.ENODEV { + cpTime = append(cpTime, *(*[C.CPUSTATES]C.int64_t)(unsafe.Pointer(&cpb[0]))) + } + } + + for cpu, time := range cpTime { + lcpu := strconv.Itoa(cpu) + ch <- c.cpu.mustNewConstMetric(float64(time[C.CP_USER])/hz, lcpu, "user") + ch <- 
c.cpu.mustNewConstMetric(float64(time[C.CP_NICE])/hz, lcpu, "nice") + ch <- c.cpu.mustNewConstMetric(float64(time[C.CP_SYS])/hz, lcpu, "system") + ch <- c.cpu.mustNewConstMetric(float64(time[C.CP_INTR])/hz, lcpu, "interrupt") + ch <- c.cpu.mustNewConstMetric(float64(time[C.CP_IDLE])/hz, lcpu, "idle") + } + return err +} diff --git a/collector/cpu_openbsd_amd64.go b/collector/cpu_openbsd_amd64.go new file mode 100644 index 0000000000..b07d2ca702 --- /dev/null +++ b/collector/cpu_openbsd_amd64.go @@ -0,0 +1,96 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !nocpu +// +build !nocpu + +package collector + +import ( + "strconv" + "unsafe" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" +) + +type clockinfo struct { + hz int32 + tick int32 + tickadj int32 + stathz int32 + profhz int32 +} + +const ( + CP_USER = iota + CP_NICE + CP_SYS + CP_SPIN + CP_INTR + CP_IDLE + CPUSTATES +) + +type cpuCollector struct { + cpu typedDesc + logger log.Logger +} + +func init() { + registerCollector("cpu", defaultEnabled, NewCPUCollector) +} + +func NewCPUCollector(logger log.Logger) (Collector, error) { + return &cpuCollector{ + cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, + logger: logger, + }, nil +} + +func (c *cpuCollector) Update(ch chan<- prometheus.Metric) (err error) { + clockb, err := unix.SysctlRaw("kern.clockrate") + if err != nil { + return err + } + clock := *(*clockinfo)(unsafe.Pointer(&clockb[0])) + hz := float64(clock.stathz) + + ncpus, err := unix.SysctlUint32("hw.ncpu") + if err != nil { + return err + } + + var cpTime [][CPUSTATES]int64 + for i := 0; i < int(ncpus); i++ { + cpb, err := unix.SysctlRaw("kern.cp_time2", i) + if err != nil && err != unix.ENODEV { + return err + } + if err != unix.ENODEV { + cpTime = append(cpTime, *(*[CPUSTATES]int64)(unsafe.Pointer(&cpb[0]))) + } + } + + for cpu, time := range cpTime { + lcpu := strconv.Itoa(cpu) + ch <- c.cpu.mustNewConstMetric(float64(time[CP_USER])/hz, lcpu, "user") + ch <- c.cpu.mustNewConstMetric(float64(time[CP_NICE])/hz, lcpu, "nice") + ch <- c.cpu.mustNewConstMetric(float64(time[CP_SYS])/hz, lcpu, "system") + ch <- c.cpu.mustNewConstMetric(float64(time[CP_SPIN])/hz, lcpu, "spin") + ch <- c.cpu.mustNewConstMetric(float64(time[CP_INTR])/hz, lcpu, "interrupt") + ch <- c.cpu.mustNewConstMetric(float64(time[CP_IDLE])/hz, lcpu, "idle") + } + return err +} diff --git a/collector/cpu_solaris.go b/collector/cpu_solaris.go new file mode 100644 index 0000000000..c28b4d6dfc --- /dev/null 
+++ b/collector/cpu_solaris.go @@ -0,0 +1,77 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build solaris && !nocpu +// +build solaris,!nocpu + +package collector + +import ( + "strconv" + + "github.com/go-kit/log" + "github.com/illumos/go-kstat" + "github.com/prometheus/client_golang/prometheus" +) + +// #include +import "C" + +type cpuCollector struct { + cpu typedDesc + logger log.Logger +} + +func init() { + registerCollector("cpu", defaultEnabled, NewCpuCollector) +} + +func NewCpuCollector(logger log.Logger) (Collector, error) { + return &cpuCollector{ + cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, + logger: logger, + }, nil +} + +func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error { + ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN) + + tok, err := kstat.Open() + if err != nil { + return err + } + + defer tok.Close() + + for cpu := 0; cpu < int(ncpus); cpu++ { + ksCPU, err := tok.Lookup("cpu", cpu, "sys") + if err != nil { + return err + } + + for k, v := range map[string]string{ + "idle": "cpu_ticks_idle", + "kernel": "cpu_ticks_kernel", + "user": "cpu_ticks_user", + "wait": "cpu_ticks_wait", + } { + kstatValue, err := ksCPU.GetNamed(v) + if err != nil { + return err + } + + ch <- c.cpu.mustNewConstMetric(float64(kstatValue.UintVal), strconv.Itoa(cpu), k) + } + } + return nil +} diff --git a/collector/cpufreq_linux.go b/collector/cpufreq_linux.go new file mode 100644 index 
0000000000..ce431cc06e --- /dev/null +++ b/collector/cpufreq_linux.go @@ -0,0 +1,145 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nocpu +// +build !nocpu + +package collector + +import ( + "fmt" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +type cpuFreqCollector struct { + fs sysfs.FS + cpuFreq *prometheus.Desc + cpuFreqMin *prometheus.Desc + cpuFreqMax *prometheus.Desc + scalingFreq *prometheus.Desc + scalingFreqMin *prometheus.Desc + scalingFreqMax *prometheus.Desc + logger log.Logger +} + +func init() { + registerCollector("cpufreq", defaultEnabled, NewCPUFreqCollector) +} + +// NewCPUFreqCollector returns a new Collector exposing kernel/system statistics. 
+func NewCPUFreqCollector(logger log.Logger) (Collector, error) { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + + return &cpuFreqCollector{ + fs: fs, + cpuFreq: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"), + "Current cpu thread frequency in hertz.", + []string{"cpu"}, nil, + ), + cpuFreqMin: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_min_hertz"), + "Minimum cpu thread frequency in hertz.", + []string{"cpu"}, nil, + ), + cpuFreqMax: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_max_hertz"), + "Maximum cpu thread frequency in hertz.", + []string{"cpu"}, nil, + ), + scalingFreq: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_hertz"), + "Current scaled CPU thread frequency in hertz.", + []string{"cpu"}, nil, + ), + scalingFreqMin: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_min_hertz"), + "Minimum scaled CPU thread frequency in hertz.", + []string{"cpu"}, nil, + ), + scalingFreqMax: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_max_hertz"), + "Maximum scaled CPU thread frequency in hertz.", + []string{"cpu"}, nil, + ), + logger: logger, + }, nil +} + +// Update implements Collector and exposes cpu related metrics from /proc/stat and /sys/.../cpu/. +func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error { + cpuFreqs, err := c.fs.SystemCpufreq() + if err != nil { + return err + } + + // sysfs cpufreq values are kHz, thus multiply by 1000 to export base units (hz). 
+ // See https://www.kernel.org/doc/Documentation/cpu-freq/user-guide.txt + for _, stats := range cpuFreqs { + if stats.CpuinfoCurrentFrequency != nil { + ch <- prometheus.MustNewConstMetric( + c.cpuFreq, + prometheus.GaugeValue, + float64(*stats.CpuinfoCurrentFrequency)*1000.0, + stats.Name, + ) + } + if stats.CpuinfoMinimumFrequency != nil { + ch <- prometheus.MustNewConstMetric( + c.cpuFreqMin, + prometheus.GaugeValue, + float64(*stats.CpuinfoMinimumFrequency)*1000.0, + stats.Name, + ) + } + if stats.CpuinfoMaximumFrequency != nil { + ch <- prometheus.MustNewConstMetric( + c.cpuFreqMax, + prometheus.GaugeValue, + float64(*stats.CpuinfoMaximumFrequency)*1000.0, + stats.Name, + ) + } + if stats.ScalingCurrentFrequency != nil { + ch <- prometheus.MustNewConstMetric( + c.scalingFreq, + prometheus.GaugeValue, + float64(*stats.ScalingCurrentFrequency)*1000.0, + stats.Name, + ) + } + if stats.ScalingMinimumFrequency != nil { + ch <- prometheus.MustNewConstMetric( + c.scalingFreqMin, + prometheus.GaugeValue, + float64(*stats.ScalingMinimumFrequency)*1000.0, + stats.Name, + ) + } + if stats.ScalingMaximumFrequency != nil { + ch <- prometheus.MustNewConstMetric( + c.scalingFreqMax, + prometheus.GaugeValue, + float64(*stats.ScalingMaximumFrequency)*1000.0, + stats.Name, + ) + } + } + return nil +} diff --git a/collector/cpufreq_solaris.go b/collector/cpufreq_solaris.go new file mode 100644 index 0000000000..10883ce404 --- /dev/null +++ b/collector/cpufreq_solaris.go @@ -0,0 +1,98 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build solaris && !nocpu +// +build solaris,!nocpu + +package collector + +import ( + "fmt" + "strconv" + + "github.com/go-kit/log" + "github.com/illumos/go-kstat" + "github.com/prometheus/client_golang/prometheus" +) + +// #include +import "C" + +type cpuFreqCollector struct { + cpuFreq *prometheus.Desc + cpuFreqMax *prometheus.Desc + logger log.Logger +} + +func init() { + registerCollector("cpufreq", defaultEnabled, NewCpuFreqCollector) +} + +func NewCpuFreqCollector(logger log.Logger) (Collector, error) { + return &cpuFreqCollector{ + cpuFreq: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"), + "Current CPU thread frequency in hertz.", + []string{"cpu"}, nil, + ), + cpuFreqMax: prometheus.NewDesc( + prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_max_hertz"), + "Maximum CPU thread frequency in hertz.", + []string{"cpu"}, nil, + ), + logger: logger, + }, nil +} + +func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error { + ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN) + + tok, err := kstat.Open() + if err != nil { + return err + } + + defer tok.Close() + + for cpu := 0; cpu < int(ncpus); cpu++ { + ksCPUInfo, err := tok.Lookup("cpu_info", cpu, fmt.Sprintf("cpu_info%d", cpu)) + if err != nil { + return err + } + cpuFreqV, err := ksCPUInfo.GetNamed("current_clock_Hz") + if err != nil { + return err + } + + cpuFreqMaxV, err := ksCPUInfo.GetNamed("clock_MHz") + if err != nil { + return err + } + + lcpu := strconv.Itoa(cpu) + ch <- prometheus.MustNewConstMetric( + c.cpuFreq, + prometheus.GaugeValue, + float64(cpuFreqV.UintVal), + lcpu, + ) + // Multiply by 1e+6 to convert MHz to Hz. 
+ ch <- prometheus.MustNewConstMetric( + c.cpuFreqMax, + prometheus.GaugeValue, + float64(cpuFreqMaxV.IntVal)*1e+6, + lcpu, + ) + } + return nil +} diff --git a/collector/devstat_dragonfly.go b/collector/devstat_dragonfly.go index d2cc6cd222..11678054f7 100644 --- a/collector/devstat_dragonfly.go +++ b/collector/devstat_dragonfly.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nodevstat // +build !nodevstat package collector @@ -19,6 +20,7 @@ import ( "errors" "fmt" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -96,6 +98,7 @@ type devstatCollector struct { bytesDesc *prometheus.Desc transfersDesc *prometheus.Desc blocksDesc *prometheus.Desc + logger log.Logger } func init() { @@ -103,7 +106,7 @@ func init() { } // NewDevstatCollector returns a new Collector exposing Device stats. -func NewDevstatCollector() (Collector, error) { +func NewDevstatCollector(logger log.Logger) (Collector, error) { return &devstatCollector{ bytesDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, devstatSubsystem, "bytes_total"), @@ -120,6 +123,7 @@ func NewDevstatCollector() (Collector, error) { "The total number of bytes given in terms of the devices blocksize.", []string{"device"}, nil, ), + logger: logger, }, nil } diff --git a/collector/devstat_freebsd.go b/collector/devstat_freebsd.go index d394f341e4..20cdc27674 100644 --- a/collector/devstat_freebsd.go +++ b/collector/devstat_freebsd.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nodevstat // +build !nodevstat package collector @@ -21,6 +22,7 @@ import ( "sync" "unsafe" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -41,6 +43,7 @@ type devstatCollector struct { duration typedDesc busyTime typedDesc blocks typedDesc + logger log.Logger } func init() { @@ -48,7 +51,7 @@ func init() { } // NewDevstatCollector returns a new Collector exposing Device stats. -func NewDevstatCollector() (Collector, error) { +func NewDevstatCollector(logger log.Logger) (Collector, error) { return &devstatCollector{ devinfo: &C.struct_devinfo{}, bytes: typedDesc{prometheus.NewDesc( @@ -76,6 +79,7 @@ func NewDevstatCollector() (Collector, error) { "The total number of blocks transferred.", []string{"device"}, nil, ), prometheus.CounterValue}, + logger: logger, }, nil } diff --git a/collector/diskstats_common.go b/collector/diskstats_common.go new file mode 100644 index 0000000000..0f6bbe4694 --- /dev/null +++ b/collector/diskstats_common.go @@ -0,0 +1,74 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !nodiskstats && (openbsd || linux || darwin) +// +build !nodiskstats +// +build openbsd linux darwin + +package collector + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +const ( + diskSubsystem = "disk" +) + +var ( + diskLabelNames = []string{"device"} + + readsCompletedDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, diskSubsystem, "reads_completed_total"), + "The total number of reads completed successfully.", + diskLabelNames, nil, + ) + + readBytesDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, diskSubsystem, "read_bytes_total"), + "The total number of bytes read successfully.", + diskLabelNames, nil, + ) + + writesCompletedDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, diskSubsystem, "writes_completed_total"), + "The total number of writes completed successfully.", + diskLabelNames, nil, + ) + + writtenBytesDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, diskSubsystem, "written_bytes_total"), + "The total number of bytes written successfully.", + diskLabelNames, nil, + ) + + ioTimeSecondsDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, diskSubsystem, "io_time_seconds_total"), + "Total seconds spent doing I/Os.", + diskLabelNames, nil, + ) + + readTimeSecondsDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, diskSubsystem, "read_time_seconds_total"), + "The total number of seconds spent by all reads.", + diskLabelNames, + nil, + ) + + writeTimeSecondsDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, diskSubsystem, "write_time_seconds_total"), + "This is the total number of seconds spent by all writes.", + diskLabelNames, + nil, + ) +) diff --git a/collector/diskstats_darwin.go b/collector/diskstats_darwin.go index f94d98423b..350c44f1cc 100644 --- a/collector/diskstats_darwin.go +++ b/collector/diskstats_darwin.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the 
License. +//go:build !nodiskstats // +build !nodiskstats package collector @@ -18,21 +19,19 @@ package collector import ( "fmt" + "github.com/go-kit/log" "github.com/lufia/iostat" "github.com/prometheus/client_golang/prometheus" ) -const ( - diskSubsystem = "disk" -) - type typedDescFunc struct { typedDesc value func(stat *iostat.DriveStats) float64 } type diskstatsCollector struct { - descs []typedDescFunc + descs []typedDescFunc + logger log.Logger } func init() { @@ -40,19 +39,14 @@ func init() { } // NewDiskstatsCollector returns a new Collector exposing disk device stats. -func NewDiskstatsCollector() (Collector, error) { +func NewDiskstatsCollector(logger log.Logger) (Collector, error) { var diskLabelNames = []string{"device"} return &diskstatsCollector{ descs: []typedDescFunc{ { typedDesc: typedDesc{ - desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "reads_completed_total"), - "The total number of reads completed successfully.", - diskLabelNames, - nil, - ), + desc: readsCompletedDesc, valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { @@ -75,12 +69,7 @@ func NewDiskstatsCollector() (Collector, error) { }, { typedDesc: typedDesc{ - desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "read_seconds_total"), - "The total number of seconds spent by all reads.", - diskLabelNames, - nil, - ), + desc: readTimeSecondsDesc, valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { @@ -89,12 +78,7 @@ func NewDiskstatsCollector() (Collector, error) { }, { typedDesc: typedDesc{ - desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "writes_completed_total"), - "The total number of writes completed successfully.", - diskLabelNames, - nil, - ), + desc: writesCompletedDesc, valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { @@ -115,56 +99,98 @@ func NewDiskstatsCollector() (Collector, error) { return 
float64(stat.NumWrite) / float64(stat.BlockSize) }, }, + { + typedDesc: typedDesc{ + desc: writeTimeSecondsDesc, + valueType: prometheus.CounterValue, + }, + value: func(stat *iostat.DriveStats) float64 { + return stat.TotalWriteTime.Seconds() + }, + }, + { + typedDesc: typedDesc{ + desc: readBytesDesc, + valueType: prometheus.CounterValue, + }, + value: func(stat *iostat.DriveStats) float64 { + return float64(stat.BytesRead) + }, + }, + { + typedDesc: typedDesc{ + desc: writtenBytesDesc, + valueType: prometheus.CounterValue, + }, + value: func(stat *iostat.DriveStats) float64 { + return float64(stat.BytesWritten) + }, + }, { typedDesc: typedDesc{ desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "write_seconds_total"), - "This is the total number of seconds spent by all writes.", + prometheus.BuildFQName(namespace, diskSubsystem, "read_errors_total"), + "The total number of read errors.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { - return stat.TotalWriteTime.Seconds() + return float64(stat.ReadErrors) }, }, { typedDesc: typedDesc{ desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "read_bytes_total"), - "The total number of bytes read successfully.", + prometheus.BuildFQName(namespace, diskSubsystem, "write_errors_total"), + "The total number of write errors.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { - return float64(stat.BytesRead) + return float64(stat.WriteErrors) }, }, { typedDesc: typedDesc{ desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "written_bytes_total"), - "The total number of bytes written successfully.", + prometheus.BuildFQName(namespace, diskSubsystem, "read_retries_total"), + "The total number of read retries.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { - return 
float64(stat.BytesWritten) + return float64(stat.ReadRetries) + }, + }, + { + typedDesc: typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, diskSubsystem, "write_retries_total"), + "The total number of write retries.", + diskLabelNames, + nil, + ), + valueType: prometheus.CounterValue, + }, + value: func(stat *iostat.DriveStats) float64 { + return float64(stat.WriteRetries) }, }, }, + logger: logger, }, nil } func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error { diskStats, err := iostat.ReadDriveStats() if err != nil { - return fmt.Errorf("couldn't get diskstats: %s", err) + return fmt.Errorf("couldn't get diskstats: %w", err) } for _, stats := range diskStats { diff --git a/collector/diskstats_linux.go b/collector/diskstats_linux.go index 8c1e5ccc9b..b79d6bd398 100644 --- a/collector/diskstats_linux.go +++ b/collector/diskstats_linux.go @@ -11,36 +11,45 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nodiskstats // +build !nodiskstats package collector import ( - "bufio" "fmt" - "io" - "os" "regexp" - "strconv" - "strings" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" + "github.com/prometheus/procfs/blockdevice" "gopkg.in/alecthomas/kingpin.v2" ) const ( - diskSubsystem = "disk" - diskSectorSize uint64 = 512 + secondsPerTick = 1.0 / 1000.0 ) var ( ignoredDevices = kingpin.Flag("collector.diskstats.ignored-devices", "Regexp of devices to ignore for diskstats.").Default("^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$").String() ) +type typedFactorDesc struct { + desc *prometheus.Desc + valueType prometheus.ValueType +} + +func (d *typedFactorDesc) mustNewConstMetric(value float64, labels ...string) prometheus.Metric { + return prometheus.MustNewConstMetric(d.desc, d.valueType, value, labels...) 
+} + type diskstatsCollector struct { ignoredDevicesPattern *regexp.Regexp - descs []typedDesc + fs blockdevice.FS + infoDesc typedFactorDesc + descs []typedFactorDesc + logger log.Logger } func init() { @@ -48,196 +57,180 @@ func init() { } // NewDiskstatsCollector returns a new Collector exposing disk device stats. -func NewDiskstatsCollector() (Collector, error) { +// Docs from https://www.kernel.org/doc/Documentation/iostats.txt +func NewDiskstatsCollector(logger log.Logger) (Collector, error) { var diskLabelNames = []string{"device"} + fs, err := blockdevice.NewFS(*procPath, *sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } return &diskstatsCollector{ ignoredDevicesPattern: regexp.MustCompile(*ignoredDevices), - // Docs from https://www.kernel.org/doc/Documentation/iostats.txt - descs: []typedDesc{ + fs: fs, + infoDesc: typedFactorDesc{ + desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "info"), + "Info of /sys/block/.", + []string{"device", "major", "minor"}, + nil, + ), valueType: prometheus.GaugeValue, + }, + descs: []typedFactorDesc{ { - desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "reads_completed"), - "The total number of reads completed successfully.", - diskLabelNames, - nil, - ), valueType: prometheus.CounterValue, + desc: readsCompletedDesc, valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "reads_merged"), - "The total number of reads merged. 
See https://www.kernel.org/doc/Documentation/iostats.txt.", + prometheus.BuildFQName(namespace, diskSubsystem, "reads_merged_total"), + "The total number of reads merged.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, { - desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "sectors_read"), - "The total number of sectors read successfully.", - diskLabelNames, - nil, - ), valueType: prometheus.CounterValue, + desc: readBytesDesc, valueType: prometheus.CounterValue, }, { - desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "read_time_ms"), - "The total number of milliseconds spent by all reads.", - diskLabelNames, - nil, - ), valueType: prometheus.CounterValue, + desc: readTimeSecondsDesc, valueType: prometheus.CounterValue, + }, + { + desc: writesCompletedDesc, valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "writes_completed"), - "The total number of writes completed successfully.", + prometheus.BuildFQName(namespace, diskSubsystem, "writes_merged_total"), + "The number of writes merged.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, + { + desc: writtenBytesDesc, valueType: prometheus.CounterValue, + }, + { + desc: writeTimeSecondsDesc, valueType: prometheus.CounterValue, + }, { desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "writes_merged"), - "The number of writes merged. 
See https://www.kernel.org/doc/Documentation/iostats.txt.", + prometheus.BuildFQName(namespace, diskSubsystem, "io_now"), + "The number of I/Os currently in progress.", diskLabelNames, nil, - ), valueType: prometheus.CounterValue, + ), valueType: prometheus.GaugeValue, + }, + { + desc: ioTimeSecondsDesc, valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "sectors_written"), - "The total number of sectors written successfully.", + prometheus.BuildFQName(namespace, diskSubsystem, "io_time_weighted_seconds_total"), + "The weighted # of seconds spent doing I/Os.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "write_time_ms"), - "This is the total number of milliseconds spent by all writes.", + prometheus.BuildFQName(namespace, diskSubsystem, "discards_completed_total"), + "The total number of discards completed successfully.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "io_now"), - "The number of I/Os currently in progress.", + prometheus.BuildFQName(namespace, diskSubsystem, "discards_merged_total"), + "The total number of discards merged.", diskLabelNames, nil, - ), valueType: prometheus.GaugeValue, + ), valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "io_time_ms"), - "Total Milliseconds spent doing I/Os.", + prometheus.BuildFQName(namespace, diskSubsystem, "discarded_sectors_total"), + "The total number of sectors discarded successfully.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "io_time_weighted"), - "The weighted # of milliseconds spent doing I/Os. 
See https://www.kernel.org/doc/Documentation/iostats.txt.", + prometheus.BuildFQName(namespace, diskSubsystem, "discard_time_seconds_total"), + "This is the total number of seconds spent by all discards.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "bytes_read"), - "The total number of bytes read successfully.", + prometheus.BuildFQName(namespace, diskSubsystem, "flush_requests_total"), + "The total number of flush requests completed successfully", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, diskSubsystem, "bytes_written"), - "The total number of bytes written successfully.", + prometheus.BuildFQName(namespace, diskSubsystem, "flush_requests_time_seconds_total"), + "This is the total number of seconds spent by all flush requests.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, }, + logger: logger, }, nil } func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error { - procDiskStats := procFilePath("diskstats") - diskStats, err := getDiskStats() + diskStats, err := c.fs.ProcDiskstats() if err != nil { - return fmt.Errorf("couldn't get diskstats: %s", err) + return fmt.Errorf("couldn't get diskstats: %w", err) } - for dev, stats := range diskStats { + for _, stats := range diskStats { + dev := stats.DeviceName if c.ignoredDevicesPattern.MatchString(dev) { - log.Debugf("Ignoring device: %s", dev) + level.Debug(c.logger).Log("msg", "Ignoring device", "device", dev, "pattern", c.ignoredDevicesPattern) continue } - if len(stats) != len(c.descs) { - return fmt.Errorf("invalid line for %s for %s", procDiskStats, dev) + diskSectorSize := 512.0 + blockQueue, err := c.fs.SysBlockDeviceQueueStats(dev) + if err != nil { + level.Debug(c.logger).Log("msg", "Error getting queue stats", "device", dev, "err", err) + } else { + diskSectorSize = 
float64(blockQueue.LogicalBlockSize) } - for i, value := range stats { - v, err := strconv.ParseFloat(value, 64) - if err != nil { - return fmt.Errorf("invalid value %s in diskstats: %s", value, err) + ch <- c.infoDesc.mustNewConstMetric(1.0, dev, fmt.Sprint(stats.MajorNumber), fmt.Sprint(stats.MinorNumber)) + + statCount := stats.IoStatsCount - 3 // Total diskstats record count, less MajorNumber, MinorNumber and DeviceName + + for i, val := range []float64{ + float64(stats.ReadIOs), + float64(stats.ReadMerges), + float64(stats.ReadSectors) * diskSectorSize, + float64(stats.ReadTicks) * secondsPerTick, + float64(stats.WriteIOs), + float64(stats.WriteMerges), + float64(stats.WriteSectors) * diskSectorSize, + float64(stats.WriteTicks) * secondsPerTick, + float64(stats.IOsInProgress), + float64(stats.IOsTotalTicks) * secondsPerTick, + float64(stats.WeightedIOTicks) * secondsPerTick, + float64(stats.DiscardIOs), + float64(stats.DiscardMerges), + float64(stats.DiscardSectors), + float64(stats.DiscardTicks) * secondsPerTick, + float64(stats.FlushRequestsCompleted), + float64(stats.TimeSpentFlushing) * secondsPerTick, + } { + if i >= statCount { + break } - ch <- c.descs[i].mustNewConstMetric(v, dev) + ch <- c.descs[i].mustNewConstMetric(val, dev) } } return nil } - -func getDiskStats() (map[string]map[int]string, error) { - file, err := os.Open(procFilePath("diskstats")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseDiskStats(file) -} - -func convertDiskSectorsToBytes(sectorCount string) (string, error) { - sectors, err := strconv.ParseUint(sectorCount, 10, 64) - if err != nil { - return "", err - } - - return strconv.FormatUint(sectors*diskSectorSize, 10), nil -} - -func parseDiskStats(r io.Reader) (map[string]map[int]string, error) { - var ( - diskStats = map[string]map[int]string{} - scanner = bufio.NewScanner(r) - ) - - for scanner.Scan() { - parts := strings.Fields(scanner.Text()) - if len(parts) < 4 { // we strip major, minor and dev 
- return nil, fmt.Errorf("invalid line in %s: %s", procFilePath("diskstats"), scanner.Text()) - } - dev := parts[2] - diskStats[dev] = map[int]string{} - for i, v := range parts[3:] { - diskStats[dev][i] = v - } - bytesRead, err := convertDiskSectorsToBytes(diskStats[dev][2]) - if err != nil { - return nil, fmt.Errorf("invalid value for sectors read in %s: %s", procFilePath("diskstats"), scanner.Text()) - } - diskStats[dev][11] = bytesRead - - bytesWritten, err := convertDiskSectorsToBytes(diskStats[dev][6]) - if err != nil { - return nil, fmt.Errorf("invalid value for sectors written in %s: %s", procFilePath("diskstats"), scanner.Text()) - } - diskStats[dev][12] = bytesWritten - } - - return diskStats, scanner.Err() -} diff --git a/collector/diskstats_linux_test.go b/collector/diskstats_linux_test.go index 21e537873b..0e8dc84ff2 100644 --- a/collector/diskstats_linux_test.go +++ b/collector/diskstats_linux_test.go @@ -14,35 +14,293 @@ package collector import ( + "fmt" "os" + "strings" "testing" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" ) -func TestDiskStats(t *testing.T) { - file, err := os.Open("fixtures/proc/diskstats") - if err != nil { - t.Fatal(err) - } - defer file.Close() +type testDiskStatsCollector struct { + dsc Collector +} - diskStats, err := parseDiskStats(file) +func (c testDiskStatsCollector) Collect(ch chan<- prometheus.Metric) { + c.dsc.Update(ch) +} + +func (c testDiskStatsCollector) Describe(ch chan<- *prometheus.Desc) { + prometheus.DescribeByCollect(c, ch) +} + +func NewTestDiskStatsCollector(logger log.Logger) (prometheus.Collector, error) { + dsc, err := NewDiskstatsCollector(logger) if err != nil { - t.Fatal(err) + return testDiskStatsCollector{}, err } + return testDiskStatsCollector{ + dsc: dsc, + }, err +} - if want, got := "25353629", diskStats["sda4"][0]; want != got { - t.Errorf("want diskstats sda4 %s, got %s", want, got) - } +func 
TestDiskStats(t *testing.T) { + *sysPath = "fixtures/sys" + *procPath = "fixtures/proc" + *ignoredDevices = "^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$" + testcase := `# HELP node_disk_discard_time_seconds_total This is the total number of seconds spent by all discards. +# TYPE node_disk_discard_time_seconds_total counter +node_disk_discard_time_seconds_total{device="sdb"} 11.13 +node_disk_discard_time_seconds_total{device="sdc"} 11.13 +# HELP node_disk_discarded_sectors_total The total number of sectors discarded successfully. +# TYPE node_disk_discarded_sectors_total counter +node_disk_discarded_sectors_total{device="sdb"} 1.925173784e+09 +node_disk_discarded_sectors_total{device="sdc"} 1.25173784e+08 +# HELP node_disk_discards_completed_total The total number of discards completed successfully. +# TYPE node_disk_discards_completed_total counter +node_disk_discards_completed_total{device="sdb"} 68851 +node_disk_discards_completed_total{device="sdc"} 18851 +# HELP node_disk_discards_merged_total The total number of discards merged. +# TYPE node_disk_discards_merged_total counter +node_disk_discards_merged_total{device="sdb"} 0 +node_disk_discards_merged_total{device="sdc"} 0 +# HELP node_disk_flush_requests_time_seconds_total This is the total number of seconds spent by all flush requests. +# TYPE node_disk_flush_requests_time_seconds_total counter +node_disk_flush_requests_time_seconds_total{device="sdc"} 1.944 +# HELP node_disk_flush_requests_total The total number of flush requests completed successfully +# TYPE node_disk_flush_requests_total counter +node_disk_flush_requests_total{device="sdc"} 1555 +# HELP node_disk_info Info of /sys/block/. 
+# TYPE node_disk_info gauge +node_disk_info{device="dm-0",major="252",minor="0"} 1 +node_disk_info{device="dm-1",major="252",minor="1"} 1 +node_disk_info{device="dm-2",major="252",minor="2"} 1 +node_disk_info{device="dm-3",major="252",minor="3"} 1 +node_disk_info{device="dm-4",major="252",minor="4"} 1 +node_disk_info{device="dm-5",major="252",minor="5"} 1 +node_disk_info{device="mmcblk0",major="179",minor="0"} 1 +node_disk_info{device="mmcblk0p1",major="179",minor="1"} 1 +node_disk_info{device="mmcblk0p2",major="179",minor="2"} 1 +node_disk_info{device="nvme0n1",major="259",minor="0"} 1 +node_disk_info{device="sda",major="8",minor="0"} 1 +node_disk_info{device="sdb",major="8",minor="0"} 1 +node_disk_info{device="sdc",major="8",minor="0"} 1 +node_disk_info{device="sr0",major="11",minor="0"} 1 +node_disk_info{device="vda",major="254",minor="0"} 1 +# HELP node_disk_io_now The number of I/Os currently in progress. +# TYPE node_disk_io_now gauge +node_disk_io_now{device="dm-0"} 0 +node_disk_io_now{device="dm-1"} 0 +node_disk_io_now{device="dm-2"} 0 +node_disk_io_now{device="dm-3"} 0 +node_disk_io_now{device="dm-4"} 0 +node_disk_io_now{device="dm-5"} 0 +node_disk_io_now{device="mmcblk0"} 0 +node_disk_io_now{device="mmcblk0p1"} 0 +node_disk_io_now{device="mmcblk0p2"} 0 +node_disk_io_now{device="nvme0n1"} 0 +node_disk_io_now{device="sda"} 0 +node_disk_io_now{device="sdb"} 0 +node_disk_io_now{device="sdc"} 0 +node_disk_io_now{device="sr0"} 0 +node_disk_io_now{device="vda"} 0 +# HELP node_disk_io_time_seconds_total Total seconds spent doing I/Os. 
+# TYPE node_disk_io_time_seconds_total counter +node_disk_io_time_seconds_total{device="dm-0"} 11325.968 +node_disk_io_time_seconds_total{device="dm-1"} 0.076 +node_disk_io_time_seconds_total{device="dm-2"} 65.4 +node_disk_io_time_seconds_total{device="dm-3"} 0.016 +node_disk_io_time_seconds_total{device="dm-4"} 0.024 +node_disk_io_time_seconds_total{device="dm-5"} 58.848 +node_disk_io_time_seconds_total{device="mmcblk0"} 0.136 +node_disk_io_time_seconds_total{device="mmcblk0p1"} 0.024 +node_disk_io_time_seconds_total{device="mmcblk0p2"} 0.068 +node_disk_io_time_seconds_total{device="nvme0n1"} 222.766 +node_disk_io_time_seconds_total{device="sda"} 9653.880000000001 +node_disk_io_time_seconds_total{device="sdb"} 60.730000000000004 +node_disk_io_time_seconds_total{device="sdc"} 10.73 +node_disk_io_time_seconds_total{device="sr0"} 0 +node_disk_io_time_seconds_total{device="vda"} 41614.592000000004 +# HELP node_disk_io_time_weighted_seconds_total The weighted # of seconds spent doing I/Os. 
+# TYPE node_disk_io_time_weighted_seconds_total counter +node_disk_io_time_weighted_seconds_total{device="dm-0"} 1.206301256e+06 +node_disk_io_time_weighted_seconds_total{device="dm-1"} 0.084 +node_disk_io_time_weighted_seconds_total{device="dm-2"} 129.416 +node_disk_io_time_weighted_seconds_total{device="dm-3"} 0.10400000000000001 +node_disk_io_time_weighted_seconds_total{device="dm-4"} 0.044 +node_disk_io_time_weighted_seconds_total{device="dm-5"} 105.632 +node_disk_io_time_weighted_seconds_total{device="mmcblk0"} 0.156 +node_disk_io_time_weighted_seconds_total{device="mmcblk0p1"} 0.024 +node_disk_io_time_weighted_seconds_total{device="mmcblk0p2"} 0.068 +node_disk_io_time_weighted_seconds_total{device="nvme0n1"} 1032.546 +node_disk_io_time_weighted_seconds_total{device="sda"} 82621.804 +node_disk_io_time_weighted_seconds_total{device="sdb"} 67.07000000000001 +node_disk_io_time_weighted_seconds_total{device="sdc"} 17.07 +node_disk_io_time_weighted_seconds_total{device="sr0"} 0 +node_disk_io_time_weighted_seconds_total{device="vda"} 2.0778722280000001e+06 +# HELP node_disk_read_bytes_total The total number of bytes read successfully. 
+# TYPE node_disk_read_bytes_total counter +node_disk_read_bytes_total{device="dm-0"} 5.13708655616e+11 +node_disk_read_bytes_total{device="dm-1"} 1.589248e+06 +node_disk_read_bytes_total{device="dm-2"} 1.578752e+08 +node_disk_read_bytes_total{device="dm-3"} 1.98144e+06 +node_disk_read_bytes_total{device="dm-4"} 529408 +node_disk_read_bytes_total{device="dm-5"} 4.3150848e+07 +node_disk_read_bytes_total{device="mmcblk0"} 798720 +node_disk_read_bytes_total{device="mmcblk0p1"} 81920 +node_disk_read_bytes_total{device="mmcblk0p2"} 389120 +node_disk_read_bytes_total{device="nvme0n1"} 2.377714176e+09 +node_disk_read_bytes_total{device="sda"} 5.13713216512e+11 +node_disk_read_bytes_total{device="sdb"} 4.944782848e+09 +node_disk_read_bytes_total{device="sdc"} 8.48782848e+08 +node_disk_read_bytes_total{device="sr0"} 0 +node_disk_read_bytes_total{device="vda"} 1.6727491584e+10 +# HELP node_disk_read_time_seconds_total The total number of seconds spent by all reads. +# TYPE node_disk_read_time_seconds_total counter +node_disk_read_time_seconds_total{device="dm-0"} 46229.572 +node_disk_read_time_seconds_total{device="dm-1"} 0.084 +node_disk_read_time_seconds_total{device="dm-2"} 6.5360000000000005 +node_disk_read_time_seconds_total{device="dm-3"} 0.10400000000000001 +node_disk_read_time_seconds_total{device="dm-4"} 0.028 +node_disk_read_time_seconds_total{device="dm-5"} 0.924 +node_disk_read_time_seconds_total{device="mmcblk0"} 0.156 +node_disk_read_time_seconds_total{device="mmcblk0p1"} 0.024 +node_disk_read_time_seconds_total{device="mmcblk0p2"} 0.068 +node_disk_read_time_seconds_total{device="nvme0n1"} 21.650000000000002 +node_disk_read_time_seconds_total{device="sda"} 18492.372 +node_disk_read_time_seconds_total{device="sdb"} 0.084 +node_disk_read_time_seconds_total{device="sdc"} 0.014 +node_disk_read_time_seconds_total{device="sr0"} 0 +node_disk_read_time_seconds_total{device="vda"} 8655.768 +# HELP node_disk_reads_completed_total The total number of reads completed 
successfully. +# TYPE node_disk_reads_completed_total counter +node_disk_reads_completed_total{device="dm-0"} 5.9910002e+07 +node_disk_reads_completed_total{device="dm-1"} 388 +node_disk_reads_completed_total{device="dm-2"} 11571 +node_disk_reads_completed_total{device="dm-3"} 3870 +node_disk_reads_completed_total{device="dm-4"} 392 +node_disk_reads_completed_total{device="dm-5"} 3729 +node_disk_reads_completed_total{device="mmcblk0"} 192 +node_disk_reads_completed_total{device="mmcblk0p1"} 17 +node_disk_reads_completed_total{device="mmcblk0p2"} 95 +node_disk_reads_completed_total{device="nvme0n1"} 47114 +node_disk_reads_completed_total{device="sda"} 2.5354637e+07 +node_disk_reads_completed_total{device="sdb"} 326552 +node_disk_reads_completed_total{device="sdc"} 126552 +node_disk_reads_completed_total{device="sr0"} 0 +node_disk_reads_completed_total{device="vda"} 1.775784e+06 +# HELP node_disk_reads_merged_total The total number of reads merged. +# TYPE node_disk_reads_merged_total counter +node_disk_reads_merged_total{device="dm-0"} 0 +node_disk_reads_merged_total{device="dm-1"} 0 +node_disk_reads_merged_total{device="dm-2"} 0 +node_disk_reads_merged_total{device="dm-3"} 0 +node_disk_reads_merged_total{device="dm-4"} 0 +node_disk_reads_merged_total{device="dm-5"} 0 +node_disk_reads_merged_total{device="mmcblk0"} 3 +node_disk_reads_merged_total{device="mmcblk0p1"} 3 +node_disk_reads_merged_total{device="mmcblk0p2"} 0 +node_disk_reads_merged_total{device="nvme0n1"} 4 +node_disk_reads_merged_total{device="sda"} 3.4367663e+07 +node_disk_reads_merged_total{device="sdb"} 841 +node_disk_reads_merged_total{device="sdc"} 141 +node_disk_reads_merged_total{device="sr0"} 0 +node_disk_reads_merged_total{device="vda"} 15386 +# HELP node_disk_write_time_seconds_total This is the total number of seconds spent by all writes. 
+# TYPE node_disk_write_time_seconds_total counter +node_disk_write_time_seconds_total{device="dm-0"} 1.1585578e+06 +node_disk_write_time_seconds_total{device="dm-1"} 0 +node_disk_write_time_seconds_total{device="dm-2"} 122.884 +node_disk_write_time_seconds_total{device="dm-3"} 0 +node_disk_write_time_seconds_total{device="dm-4"} 0.016 +node_disk_write_time_seconds_total{device="dm-5"} 104.684 +node_disk_write_time_seconds_total{device="mmcblk0"} 0 +node_disk_write_time_seconds_total{device="mmcblk0p1"} 0 +node_disk_write_time_seconds_total{device="mmcblk0p2"} 0 +node_disk_write_time_seconds_total{device="nvme0n1"} 1011.053 +node_disk_write_time_seconds_total{device="sda"} 63877.96 +node_disk_write_time_seconds_total{device="sdb"} 5.007 +node_disk_write_time_seconds_total{device="sdc"} 1.0070000000000001 +node_disk_write_time_seconds_total{device="sr0"} 0 +node_disk_write_time_seconds_total{device="vda"} 2.069221364e+06 +# HELP node_disk_writes_completed_total The total number of writes completed successfully. +# TYPE node_disk_writes_completed_total counter +node_disk_writes_completed_total{device="dm-0"} 3.9231014e+07 +node_disk_writes_completed_total{device="dm-1"} 74 +node_disk_writes_completed_total{device="dm-2"} 153522 +node_disk_writes_completed_total{device="dm-3"} 0 +node_disk_writes_completed_total{device="dm-4"} 38 +node_disk_writes_completed_total{device="dm-5"} 98918 +node_disk_writes_completed_total{device="mmcblk0"} 0 +node_disk_writes_completed_total{device="mmcblk0p1"} 0 +node_disk_writes_completed_total{device="mmcblk0p2"} 0 +node_disk_writes_completed_total{device="nvme0n1"} 1.07832e+06 +node_disk_writes_completed_total{device="sda"} 2.8444756e+07 +node_disk_writes_completed_total{device="sdb"} 41822 +node_disk_writes_completed_total{device="sdc"} 11822 +node_disk_writes_completed_total{device="sr0"} 0 +node_disk_writes_completed_total{device="vda"} 6.038856e+06 +# HELP node_disk_writes_merged_total The number of writes merged. 
+# TYPE node_disk_writes_merged_total counter +node_disk_writes_merged_total{device="dm-0"} 0 +node_disk_writes_merged_total{device="dm-1"} 0 +node_disk_writes_merged_total{device="dm-2"} 0 +node_disk_writes_merged_total{device="dm-3"} 0 +node_disk_writes_merged_total{device="dm-4"} 0 +node_disk_writes_merged_total{device="dm-5"} 0 +node_disk_writes_merged_total{device="mmcblk0"} 0 +node_disk_writes_merged_total{device="mmcblk0p1"} 0 +node_disk_writes_merged_total{device="mmcblk0p2"} 0 +node_disk_writes_merged_total{device="nvme0n1"} 43950 +node_disk_writes_merged_total{device="sda"} 1.1134226e+07 +node_disk_writes_merged_total{device="sdb"} 2895 +node_disk_writes_merged_total{device="sdc"} 1895 +node_disk_writes_merged_total{device="sr0"} 0 +node_disk_writes_merged_total{device="vda"} 2.0711856e+07 +# HELP node_disk_written_bytes_total The total number of bytes written successfully. +# TYPE node_disk_written_bytes_total counter +node_disk_written_bytes_total{device="dm-0"} 2.5891680256e+11 +node_disk_written_bytes_total{device="dm-1"} 303104 +node_disk_written_bytes_total{device="dm-2"} 2.607828992e+09 +node_disk_written_bytes_total{device="dm-3"} 0 +node_disk_written_bytes_total{device="dm-4"} 70144 +node_disk_written_bytes_total{device="dm-5"} 5.89664256e+08 +node_disk_written_bytes_total{device="mmcblk0"} 0 +node_disk_written_bytes_total{device="mmcblk0p1"} 0 +node_disk_written_bytes_total{device="mmcblk0p2"} 0 +node_disk_written_bytes_total{device="nvme0n1"} 2.0199236096e+10 +node_disk_written_bytes_total{device="sda"} 2.58916880384e+11 +node_disk_written_bytes_total{device="sdb"} 1.01012736e+09 +node_disk_written_bytes_total{device="sdc"} 8.852736e+07 +node_disk_written_bytes_total{device="sr0"} 0 +node_disk_written_bytes_total{device="vda"} 1.0938236928e+11 +` - if want, got := "68", diskStats["mmcblk0p2"][10]; want != got { - t.Errorf("want diskstats mmcblk0p2 %s, got %s", want, got) + logger := log.NewLogfmtLogger(os.Stderr) + collector, err := 
NewDiskstatsCollector(logger) + if err != nil { + panic(err) } - - if want, got := "513713216512", diskStats["sda"][11]; want != got { - t.Errorf("want diskstats sda read bytes %s, got %s", want, got) + c, err := NewTestDiskStatsCollector(logger) + if err != nil { + t.Fatal(err) } + reg := prometheus.NewRegistry() + reg.MustRegister(c) + + sink := make(chan prometheus.Metric) + go func() { + err = collector.Update(sink) + if err != nil { + panic(fmt.Errorf("failed to update collector: %s", err)) + } + close(sink) + }() - if want, got := "258916880384", diskStats["sda"][12]; want != got { - t.Errorf("want diskstats sda write bytes %s, got %s", want, got) + err = testutil.GatherAndCompare(reg, strings.NewReader(testcase)) + if err != nil { + t.Fatal(err) } } diff --git a/collector/diskstats_openbsd.go b/collector/diskstats_openbsd.go new file mode 100644 index 0000000000..dd4f0b037c --- /dev/null +++ b/collector/diskstats_openbsd.go @@ -0,0 +1,78 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build openbsd && !amd64 && !nodiskstats +// +build openbsd,!amd64,!nodiskstats + +package collector + +import ( + "unsafe" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" +) + +/* +#include +#include +*/ +import "C" + +type diskstatsCollector struct { + rxfer typedDesc + rbytes typedDesc + wxfer typedDesc + wbytes typedDesc + time typedDesc + logger log.Logger +} + +func init() { + registerCollector("diskstats", defaultEnabled, NewDiskstatsCollector) +} + +// NewDiskstatsCollector returns a new Collector exposing disk device stats. +func NewDiskstatsCollector(logger log.Logger) (Collector, error) { + return &diskstatsCollector{ + rxfer: typedDesc{readsCompletedDesc, prometheus.CounterValue}, + rbytes: typedDesc{readBytesDesc, prometheus.CounterValue}, + wxfer: typedDesc{writesCompletedDesc, prometheus.CounterValue}, + wbytes: typedDesc{writtenBytesDesc, prometheus.CounterValue}, + time: typedDesc{ioTimeSecondsDesc, prometheus.CounterValue}, + logger: logger, + }, nil +} + +func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) (err error) { + diskstatsb, err := unix.SysctlRaw("hw.diskstats") + if err != nil { + return err + } + + ndisks := len(diskstatsb) / C.sizeof_struct_diskstats + diskstats := *(*[]C.struct_diskstats)(unsafe.Pointer(&diskstatsb)) + + for i := 0; i < ndisks; i++ { + diskname := C.GoString(&diskstats[i].ds_name[0]) + + ch <- c.rxfer.mustNewConstMetric(float64(diskstats[i].ds_rxfer), diskname) + ch <- c.rbytes.mustNewConstMetric(float64(diskstats[i].ds_rbytes), diskname) + ch <- c.wxfer.mustNewConstMetric(float64(diskstats[i].ds_wxfer), diskname) + ch <- c.wbytes.mustNewConstMetric(float64(diskstats[i].ds_wbytes), diskname) + time := float64(diskstats[i].ds_time.tv_sec) + float64(diskstats[i].ds_time.tv_usec)/1000000 + ch <- c.time.mustNewConstMetric(time, diskname) + } + return nil +} diff --git a/collector/diskstats_openbsd_amd64.go b/collector/diskstats_openbsd_amd64.go 
new file mode 100644 index 0000000000..41265e233b --- /dev/null +++ b/collector/diskstats_openbsd_amd64.go @@ -0,0 +1,90 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nodiskstats +// +build !nodiskstats + +package collector + +import ( + "unsafe" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" +) + +const ( + DS_DISKNAMELEN = 16 +) + +type DiskStats struct { + Name [DS_DISKNAMELEN]int8 + Busy int32 + Rxfer uint64 + Wxfer uint64 + Seek uint64 + Rbytes uint64 + Wbytes uint64 + Attachtime unix.Timeval + Timestamp unix.Timeval + Time unix.Timeval +} + +type diskstatsCollector struct { + rxfer typedDesc + rbytes typedDesc + wxfer typedDesc + wbytes typedDesc + time typedDesc + logger log.Logger +} + +func init() { + registerCollector("diskstats", defaultEnabled, NewDiskstatsCollector) +} + +// NewDiskstatsCollector returns a new Collector exposing disk device stats. 
+func NewDiskstatsCollector(logger log.Logger) (Collector, error) { + return &diskstatsCollector{ + rxfer: typedDesc{readsCompletedDesc, prometheus.CounterValue}, + rbytes: typedDesc{readBytesDesc, prometheus.CounterValue}, + wxfer: typedDesc{writesCompletedDesc, prometheus.CounterValue}, + wbytes: typedDesc{writtenBytesDesc, prometheus.CounterValue}, + time: typedDesc{ioTimeSecondsDesc, prometheus.CounterValue}, + logger: logger, + }, nil +} + +func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) (err error) { + diskstatsb, err := unix.SysctlRaw("hw.diskstats") + if err != nil { + return err + } + + ndisks := len(diskstatsb) / int(unsafe.Sizeof(DiskStats{})) + diskstats := *(*[]DiskStats)(unsafe.Pointer(&diskstatsb)) + + for i := 0; i < ndisks; i++ { + dn := *(*[DS_DISKNAMELEN]int8)(unsafe.Pointer(&diskstats[i].Name[0])) + diskname := int8ToString(dn[:]) + + ch <- c.rxfer.mustNewConstMetric(float64(diskstats[i].Rxfer), diskname) + ch <- c.rbytes.mustNewConstMetric(float64(diskstats[i].Rbytes), diskname) + ch <- c.wxfer.mustNewConstMetric(float64(diskstats[i].Wxfer), diskname) + ch <- c.wbytes.mustNewConstMetric(float64(diskstats[i].Wbytes), diskname) + time := float64(diskstats[i].Time.Sec) + float64(diskstats[i].Time.Usec)/1000000 + ch <- c.time.mustNewConstMetric(time, diskname) + } + return nil +} diff --git a/collector/dmi.go b/collector/dmi.go new file mode 100644 index 0000000000..3cc2a8a517 --- /dev/null +++ b/collector/dmi.go @@ -0,0 +1,105 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux && !nodmi +// +build linux,!nodmi + +package collector + +import ( + "errors" + "fmt" + "os" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +type dmiCollector struct { + infoDesc *prometheus.Desc + values []string +} + +func init() { + registerCollector("dmi", defaultEnabled, NewDMICollector) +} + +// NewDMICollector returns a new Collector exposing DMI information. +func NewDMICollector(logger log.Logger) (Collector, error) { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + + dmi, err := fs.DMIClass() + if err != nil { + if errors.Is(err, os.ErrNotExist) { + level.Debug(logger).Log("msg", "Platform does not support Desktop Management Interface (DMI) information", "err", err) + dmi = &sysfs.DMIClass{} + } else { + return nil, fmt.Errorf("failed to read Desktop Management Interface (DMI) information: %w", err) + } + } + + var labels, values []string + for label, value := range map[string]*string{ + "bios_date": dmi.BiosDate, + "bios_release": dmi.BiosRelease, + "bios_vendor": dmi.BiosVendor, + "bios_version": dmi.BiosVersion, + "board_asset_tag": dmi.BoardAssetTag, + "board_name": dmi.BoardName, + "board_serial": dmi.BoardSerial, + "board_vendor": dmi.BoardVendor, + "board_version": dmi.BoardVersion, + "chassis_asset_tag": dmi.ChassisAssetTag, + "chassis_serial": dmi.ChassisSerial, + "chassis_vendor": dmi.ChassisVendor, + "chassis_version": dmi.ChassisVersion, + "product_family": dmi.ProductFamily, + "product_name": dmi.ProductName, + "product_serial": dmi.ProductSerial, + "product_sku": dmi.ProductSKU, + "product_uuid": dmi.ProductUUID, + "product_version": dmi.ProductVersion, + "system_vendor": dmi.SystemVendor, + } { + if value != nil { + labels = append(labels, 
label) + values = append(values, *value) + } + } + + // Construct DMI metric only once since it will not change until the next reboot. + return &dmiCollector{ + infoDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "dmi", "info"), + "A metric with a constant '1' value labeled by bios_date, bios_release, bios_vendor, bios_version, "+ + "board_asset_tag, board_name, board_serial, board_vendor, board_version, chassis_asset_tag, "+ + "chassis_serial, chassis_vendor, chassis_version, product_family, product_name, product_serial, "+ + "product_sku, product_uuid, product_version, system_vendor if provided by DMI.", + labels, nil, + ), + values: values, + }, nil +} + +func (c *dmiCollector) Update(ch chan<- prometheus.Metric) error { + if len(c.values) == 0 { + return ErrNoData + } + ch <- prometheus.MustNewConstMetric(c.infoDesc, prometheus.GaugeValue, 1.0, c.values...) + return nil +} diff --git a/collector/drbd_linux.go b/collector/drbd_linux.go index 6fd3d353e3..f192a24c6f 100644 --- a/collector/drbd_linux.go +++ b/collector/drbd_linux.go @@ -11,17 +11,22 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nodrbd +// +build !nodrbd + package collector import ( "bufio" + "errors" "fmt" "os" "strconv" "strings" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" ) // Numerical metric provided by /proc/drbd. 
@@ -31,12 +36,14 @@ type drbdNumericalMetric struct { multiplier float64 } -func newDRBDNumericalMetric(name string, desc string, valueType prometheus.ValueType, multiplier float64) drbdNumericalMetric { +func newDRBDNumericalMetric(name, desc string, valueType prometheus.ValueType, multiplier float64) drbdNumericalMetric { return drbdNumericalMetric{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "drbd", name), desc, - []string{"device"}, nil), + []string{"device"}, + nil, + ), valueType: valueType, multiplier: multiplier, } @@ -44,125 +51,150 @@ func newDRBDNumericalMetric(name string, desc string, valueType prometheus.Value // String pair metric provided by /proc/drbd. type drbdStringPairMetric struct { - desc *prometheus.Desc - valueOkay string + desc *prometheus.Desc + valueOK string } -func (metric *drbdStringPairMetric) isOkay(value string) float64 { - if value == metric.valueOkay { +func (m *drbdStringPairMetric) isOkay(v string) float64 { + if v == m.valueOK { return 1 } + return 0 } -func newDRBDStringPairMetric(name string, desc string, valueOkay string) drbdStringPairMetric { +func newDRBDStringPairMetric(name, desc, valueOK string) drbdStringPairMetric { return drbdStringPairMetric{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "drbd", name), desc, - []string{"device", "node"}, nil), - valueOkay: valueOkay, + []string{"device", "node"}, + nil, + ), + valueOK: valueOK, } } -var ( - drbdNumericalMetrics = map[string]drbdNumericalMetric{ - "ns": newDRBDNumericalMetric( - "network_sent_bytes_total", - "Total number of bytes sent via the network.", - prometheus.CounterValue, - 1024), - "nr": newDRBDNumericalMetric( - "network_received_bytes_total", - "Total number of bytes received via the network.", - prometheus.CounterValue, - 1), - "dw": newDRBDNumericalMetric( - "disk_written_bytes_total", - "Net data written on local hard disk; in bytes.", - prometheus.CounterValue, - 1024), - "dr": newDRBDNumericalMetric( - 
"disk_read_bytes_total", - "Net data read from local hard disk; in bytes.", - prometheus.CounterValue, - 1024), - "al": newDRBDNumericalMetric( - "activitylog_writes_total", - "Number of updates of the activity log area of the meta data.", - prometheus.CounterValue, - 1), - "bm": newDRBDNumericalMetric( - "bitmap_writes_total", - "Number of updates of the bitmap area of the meta data.", - prometheus.CounterValue, - 1), - "lo": newDRBDNumericalMetric( - "local_pending", - "Number of open requests to the local I/O sub-system.", - prometheus.GaugeValue, - 1), - "pe": newDRBDNumericalMetric( - "remote_pending", - "Number of requests sent to the peer, but that have not yet been answered by the latter.", - prometheus.GaugeValue, - 1), - "ua": newDRBDNumericalMetric( - "remote_unacknowledged", - "Number of requests received by the peer via the network connection, but that have not yet been answered.", - prometheus.GaugeValue, - 1), - "ap": newDRBDNumericalMetric( - "application_pending", - "Number of block I/O requests forwarded to DRBD, but not yet answered by DRBD.", - prometheus.GaugeValue, - 1), - "ep": newDRBDNumericalMetric( - "epochs", - "Number of Epochs currently on the fly.", - prometheus.GaugeValue, - 1), - "oos": newDRBDNumericalMetric( - "out_of_sync_bytes", - "Amount of data known to be out of sync; in bytes.", - prometheus.GaugeValue, - 1024), - } - drbdStringPairMetrics = map[string]drbdStringPairMetric{ - "ro": newDRBDStringPairMetric( - "node_role_is_primary", - "Whether the role of the node is in the primary state.", - "Primary"), - "ds": newDRBDStringPairMetric( - "disk_state_is_up_to_date", - "Whether the disk of the node is up to date.", - "UpToDate"), - } - - drbdConnected = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "drbd", "connected"), - "Whether DRBD is connected to the peer.", - []string{"device"}, nil) -) - -type drbdCollector struct{} +type drbdCollector struct { + numerical map[string]drbdNumericalMetric + stringPair 
map[string]drbdStringPairMetric + connected *prometheus.Desc + logger log.Logger +} func init() { registerCollector("drbd", defaultDisabled, newDRBDCollector) } -func newDRBDCollector() (Collector, error) { - return &drbdCollector{}, nil +func newDRBDCollector(logger log.Logger) (Collector, error) { + return &drbdCollector{ + numerical: map[string]drbdNumericalMetric{ + "ns": newDRBDNumericalMetric( + "network_sent_bytes_total", + "Total number of bytes sent via the network.", + prometheus.CounterValue, + 1024, + ), + "nr": newDRBDNumericalMetric( + "network_received_bytes_total", + "Total number of bytes received via the network.", + prometheus.CounterValue, + 1, + ), + "dw": newDRBDNumericalMetric( + "disk_written_bytes_total", + "Net data written on local hard disk; in bytes.", + prometheus.CounterValue, + 1024, + ), + "dr": newDRBDNumericalMetric( + "disk_read_bytes_total", + "Net data read from local hard disk; in bytes.", + prometheus.CounterValue, + 1024, + ), + "al": newDRBDNumericalMetric( + "activitylog_writes_total", + "Number of updates of the activity log area of the meta data.", + prometheus.CounterValue, + 1, + ), + "bm": newDRBDNumericalMetric( + "bitmap_writes_total", + "Number of updates of the bitmap area of the meta data.", + prometheus.CounterValue, + 1, + ), + "lo": newDRBDNumericalMetric( + "local_pending", + "Number of open requests to the local I/O sub-system.", + prometheus.GaugeValue, + 1, + ), + "pe": newDRBDNumericalMetric( + "remote_pending", + "Number of requests sent to the peer, but that have not yet been answered by the latter.", + prometheus.GaugeValue, + 1, + ), + "ua": newDRBDNumericalMetric( + "remote_unacknowledged", + "Number of requests received by the peer via the network connection, but that have not yet been answered.", + prometheus.GaugeValue, + 1, + ), + "ap": newDRBDNumericalMetric( + "application_pending", + "Number of block I/O requests forwarded to DRBD, but not yet answered by DRBD.", + prometheus.GaugeValue, + 1, 
+ ), + "ep": newDRBDNumericalMetric( + "epochs", + "Number of Epochs currently on the fly.", + prometheus.GaugeValue, + 1, + ), + "oos": newDRBDNumericalMetric( + "out_of_sync_bytes", + "Amount of data known to be out of sync; in bytes.", + prometheus.GaugeValue, + 1024, + ), + }, + + stringPair: map[string]drbdStringPairMetric{ + "ro": newDRBDStringPairMetric( + "node_role_is_primary", + "Whether the role of the node is in the primary state.", + "Primary", + ), + "ds": newDRBDStringPairMetric( + "disk_state_is_up_to_date", + "Whether the disk of the node is up to date.", + "UpToDate", + ), + }, + + connected: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "drbd", "connected"), + "Whether DRBD is connected to the peer.", + []string{"device"}, + nil, + ), + logger: logger, + }, nil } func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error { statsFile := procFilePath("drbd") file, err := os.Open(statsFile) if err != nil { - if os.IsNotExist(err) { - log.Debugf("Not collecting DRBD statistics, as %s does not exist: %s", statsFile, err) - return nil + if errors.Is(err, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "stats file does not exist, skipping", "file", statsFile, "err", err) + return ErrNoData } + return err } defer file.Close() @@ -170,44 +202,80 @@ func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error { scanner := bufio.NewScanner(file) scanner.Split(bufio.ScanWords) device := "unknown" + for scanner.Scan() { field := scanner.Text() - if kv := strings.Split(field, ":"); len(kv) == 2 { - if id, err := strconv.ParseUint(kv[0], 10, 64); err == nil && kv[1] == "" { - device = fmt.Sprintf("drbd%d", id) - } else if metric, ok := drbdNumericalMetrics[kv[0]]; ok { - // Numerical value. 
- value, err := strconv.ParseFloat(kv[1], 64) - if err != nil { - return err - } - ch <- prometheus.MustNewConstMetric( - metric.desc, metric.valueType, - value*metric.multiplier, device) - } else if metric, ok := drbdStringPairMetrics[kv[0]]; ok { - // String pair value. - values := strings.Split(kv[1], "/") - ch <- prometheus.MustNewConstMetric( - metric.desc, prometheus.GaugeValue, - metric.isOkay(values[0]), device, "local") - ch <- prometheus.MustNewConstMetric( - metric.desc, prometheus.GaugeValue, - metric.isOkay(values[1]), device, "remote") - } else if kv[0] == "cs" { - // Connection state. - var connected float64 - if kv[1] == "Connected" { - connected = 1 - } - ch <- prometheus.MustNewConstMetric( - drbdConnected, prometheus.GaugeValue, - connected, device) - } else { - log.Debugf("Don't know how to process key-value pair [%s: %q]", kv[0], kv[1]) + + kv := strings.Split(field, ":") + if len(kv) != 2 { + level.Debug(c.logger).Log("msg", "skipping invalid key:value pair", "field", field) + continue + } + + if id, err := strconv.ParseUint(kv[0], 10, 64); err == nil && kv[1] == "" { + // New DRBD device encountered. + device = fmt.Sprintf("drbd%d", id) + continue + } + + if m, ok := c.numerical[kv[0]]; ok { + // Numerical value. + v, err := strconv.ParseFloat(kv[1], 64) + if err != nil { + return err } - } else { - log.Debugf("Don't know how to process string %q", field) + + ch <- prometheus.MustNewConstMetric( + m.desc, + m.valueType, + v*m.multiplier, + device, + ) + + continue + } + + if m, ok := c.stringPair[kv[0]]; ok { + // String pair value. + values := strings.Split(kv[1], "/") + ch <- prometheus.MustNewConstMetric( + m.desc, + prometheus.GaugeValue, + m.isOkay(values[0]), + device, + "local", + ) + + ch <- prometheus.MustNewConstMetric( + m.desc, + prometheus.GaugeValue, + m.isOkay(values[1]), + device, + "remote", + ) + + continue } + + if kv[0] == "cs" { + // Connection state. 
+ var connected float64 + if kv[1] == "Connected" { + connected = 1 + } + + ch <- prometheus.MustNewConstMetric( + c.connected, + prometheus.GaugeValue, + connected, + device, + ) + + continue + } + + level.Debug(c.logger).Log("msg", "unhandled key-value pair", "key", kv[0], "value", kv[1]) } + return scanner.Err() } diff --git a/collector/drm_linux.go b/collector/drm_linux.go new file mode 100644 index 0000000000..11b8c6283b --- /dev/null +++ b/collector/drm_linux.go @@ -0,0 +1,140 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nogpu +// +build !nogpu + +package collector + +import ( + "fmt" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +const ( + drmCollectorSubsystem = "drm" +) + +type drmCollector struct { + fs sysfs.FS + logger log.Logger + CardInfo *prometheus.Desc + GPUBusyPercent *prometheus.Desc + MemoryGTTSize *prometheus.Desc + MemoryGTTUsed *prometheus.Desc + MemoryVisibleVRAMSize *prometheus.Desc + MemoryVisibleVRAMUsed *prometheus.Desc + MemoryVRAMSize *prometheus.Desc + MemoryVRAMUsed *prometheus.Desc +} + +func init() { + registerCollector("drm", defaultDisabled, NewDrmCollector) +} + +// NewDrmCollector returns a new Collector exposing /sys/class/drm/card?/device stats. 
+func NewDrmCollector(logger log.Logger) (Collector, error) { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + + return &drmCollector{ + fs: fs, + logger: logger, + CardInfo: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "card_info"), + "Card information", + []string{"card", "memory_vendor", "power_performance_level", "unique_id", "vendor"}, nil, + ), + GPUBusyPercent: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "gpu_busy_percent"), + "How busy the GPU is as a percentage.", + []string{"card"}, nil, + ), + MemoryGTTSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_gtt_size_bytes"), + "The size of the graphics translation table (GTT) block in bytes.", + []string{"card"}, nil, + ), + MemoryGTTUsed: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_gtt_used_bytes"), + "The used amount of the graphics translation table (GTT) block in bytes.", + []string{"card"}, nil, + ), + MemoryVisibleVRAMSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_vis_vram_size_bytes"), + "The size of visible VRAM in bytes.", + []string{"card"}, nil, + ), + MemoryVisibleVRAMUsed: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_vis_vram_used_bytes"), + "The used amount of visible VRAM in bytes.", + []string{"card"}, nil, + ), + MemoryVRAMSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_vram_size_bytes"), + "The size of VRAM in bytes.", + []string{"card"}, nil, + ), + MemoryVRAMUsed: prometheus.NewDesc( + prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_vram_used_bytes"), + "The used amount of VRAM in bytes.", + []string{"card"}, nil, + ), + }, nil +} + +func (c *drmCollector) Update(ch chan<- prometheus.Metric) error { + return 
c.updateAMDCards(ch) +} + +func (c *drmCollector) updateAMDCards(ch chan<- prometheus.Metric) error { + vendor := "amd" + stats, err := c.fs.ClassDRMCardAMDGPUStats() + if err != nil { + return err + } + + for _, s := range stats { + ch <- prometheus.MustNewConstMetric( + c.CardInfo, prometheus.GaugeValue, 1, + s.Name, s.MemoryVRAMVendor, s.PowerDPMForcePerformanceLevel, s.UniqueID, vendor) + + ch <- prometheus.MustNewConstMetric( + c.GPUBusyPercent, prometheus.GaugeValue, float64(s.GPUBusyPercent), s.Name) + + ch <- prometheus.MustNewConstMetric( + c.MemoryGTTSize, prometheus.GaugeValue, float64(s.MemoryGTTSize), s.Name) + + ch <- prometheus.MustNewConstMetric( + c.MemoryGTTUsed, prometheus.GaugeValue, float64(s.MemoryGTTUsed), s.Name) + + ch <- prometheus.MustNewConstMetric( + c.MemoryVRAMSize, prometheus.GaugeValue, float64(s.MemoryVRAMSize), s.Name) + + ch <- prometheus.MustNewConstMetric( + c.MemoryVRAMUsed, prometheus.GaugeValue, float64(s.MemoryVRAMUsed), s.Name) + + ch <- prometheus.MustNewConstMetric( + c.MemoryVisibleVRAMSize, prometheus.GaugeValue, float64(s.MemoryVisibleVRAMSize), s.Name) + + ch <- prometheus.MustNewConstMetric( + c.MemoryVisibleVRAMUsed, prometheus.GaugeValue, float64(s.MemoryVisibleVRAMUsed), s.Name) + } + + return nil +} diff --git a/collector/edac_linux.go b/collector/edac_linux.go index f4fef51303..c7719b7a64 100644 --- a/collector/edac_linux.go +++ b/collector/edac_linux.go @@ -11,16 +11,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !noedac // +build !noedac package collector import ( "fmt" - "path" "path/filepath" "regexp" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -38,6 +39,7 @@ type edacCollector struct { ueCount *prometheus.Desc csRowCECount *prometheus.Desc csRowUECount *prometheus.Desc + logger log.Logger } func init() { @@ -45,7 +47,7 @@ func init() { } // NewEdacCollector returns a new Collector exposing edac stats. -func NewEdacCollector() (Collector, error) { +func NewEdacCollector(logger log.Logger) (Collector, error) { return &edacCollector{ ceCount: prometheus.NewDesc( prometheus.BuildFQName(namespace, edacSubsystem, "correctable_errors_total"), @@ -67,6 +69,7 @@ func NewEdacCollector() (Collector, error) { "Total uncorrectable memory errors for this csrow.", []string{"controller", "csrow"}, nil, ), + logger: logger, }, nil } @@ -82,30 +85,30 @@ func (c *edacCollector) Update(ch chan<- prometheus.Metric) error { } controllerNumber := controllerMatch[1] - value, err := readUintFromFile(path.Join(controller, "ce_count")) + value, err := readUintFromFile(filepath.Join(controller, "ce_count")) if err != nil { - return fmt.Errorf("couldn't get ce_count for controller %s: %s", controllerNumber, err) + return fmt.Errorf("couldn't get ce_count for controller %s: %w", controllerNumber, err) } ch <- prometheus.MustNewConstMetric( c.ceCount, prometheus.CounterValue, float64(value), controllerNumber) - value, err = readUintFromFile(path.Join(controller, "ce_noinfo_count")) + value, err = readUintFromFile(filepath.Join(controller, "ce_noinfo_count")) if err != nil { - return fmt.Errorf("couldn't get ce_noinfo_count for controller %s: %s", controllerNumber, err) + return fmt.Errorf("couldn't get ce_noinfo_count for controller %s: %w", controllerNumber, err) } ch <- prometheus.MustNewConstMetric( c.csRowCECount, prometheus.CounterValue, float64(value), controllerNumber, "unknown") - value, err = readUintFromFile(path.Join(controller, "ue_count")) 
+ value, err = readUintFromFile(filepath.Join(controller, "ue_count")) if err != nil { - return fmt.Errorf("couldn't get ue_count for controller %s: %s", controllerNumber, err) + return fmt.Errorf("couldn't get ue_count for controller %s: %w", controllerNumber, err) } ch <- prometheus.MustNewConstMetric( c.ueCount, prometheus.CounterValue, float64(value), controllerNumber) - value, err = readUintFromFile(path.Join(controller, "ue_noinfo_count")) + value, err = readUintFromFile(filepath.Join(controller, "ue_noinfo_count")) if err != nil { - return fmt.Errorf("couldn't get ue_noinfo_count for controller %s: %s", controllerNumber, err) + return fmt.Errorf("couldn't get ue_noinfo_count for controller %s: %w", controllerNumber, err) } ch <- prometheus.MustNewConstMetric( c.csRowUECount, prometheus.CounterValue, float64(value), controllerNumber, "unknown") @@ -122,16 +125,16 @@ func (c *edacCollector) Update(ch chan<- prometheus.Metric) error { } csrowNumber := csrowMatch[1] - value, err = readUintFromFile(path.Join(csrow, "ce_count")) + value, err = readUintFromFile(filepath.Join(csrow, "ce_count")) if err != nil { - return fmt.Errorf("couldn't get ce_count for controller/csrow %s/%s: %s", controllerNumber, csrowNumber, err) + return fmt.Errorf("couldn't get ce_count for controller/csrow %s/%s: %w", controllerNumber, csrowNumber, err) } ch <- prometheus.MustNewConstMetric( c.csRowCECount, prometheus.CounterValue, float64(value), controllerNumber, csrowNumber) - value, err = readUintFromFile(path.Join(csrow, "ue_count")) + value, err = readUintFromFile(filepath.Join(csrow, "ue_count")) if err != nil { - return fmt.Errorf("couldn't get ue_count for controller/csrow %s/%s: %s", controllerNumber, csrowNumber, err) + return fmt.Errorf("couldn't get ue_count for controller/csrow %s/%s: %w", controllerNumber, csrowNumber, err) } ch <- prometheus.MustNewConstMetric( c.csRowUECount, prometheus.CounterValue, float64(value), controllerNumber, csrowNumber) diff --git 
a/collector/entropy_linux.go b/collector/entropy_linux.go index 455615bc0d..909d2ff4c2 100644 --- a/collector/entropy_linux.go +++ b/collector/entropy_linux.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !noentropy // +build !noentropy package collector @@ -18,11 +19,16 @@ package collector import ( "fmt" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" ) type entropyCollector struct { - entropyAvail *prometheus.Desc + fs procfs.FS + entropyAvail *prometheus.Desc + entropyPoolSize *prometheus.Desc + logger log.Logger } func init() { @@ -30,23 +36,45 @@ func init() { } // NewEntropyCollector returns a new Collector exposing entropy stats. -func NewEntropyCollector() (Collector, error) { +func NewEntropyCollector(logger log.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + return &entropyCollector{ + fs: fs, entropyAvail: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "entropy_available_bits"), "Bits of available entropy.", nil, nil, ), + entropyPoolSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "", "entropy_pool_size_bits"), + "Bits of entropy pool.", + nil, nil, + ), + logger: logger, }, nil } func (c *entropyCollector) Update(ch chan<- prometheus.Metric) error { - value, err := readUintFromFile(procFilePath("sys/kernel/random/entropy_avail")) + stats, err := c.fs.KernelRandom() if err != nil { - return fmt.Errorf("couldn't get entropy_avail: %s", err) + return fmt.Errorf("failed to get kernel random stats: %w", err) + } + + if stats.EntropyAvaliable == nil { + return fmt.Errorf("couldn't get entropy_avail") + } + ch <- prometheus.MustNewConstMetric( + c.entropyAvail, prometheus.GaugeValue, float64(*stats.EntropyAvaliable)) + + if stats.PoolSize == nil { + return fmt.Errorf("couldn't get entropy 
poolsize") } ch <- prometheus.MustNewConstMetric( - c.entropyAvail, prometheus.GaugeValue, float64(value)) + c.entropyPoolSize, prometheus.GaugeValue, float64(*stats.PoolSize)) return nil } diff --git a/collector/ethtool_linux.go b/collector/ethtool_linux.go new file mode 100644 index 0000000000..c68b3e9f8c --- /dev/null +++ b/collector/ethtool_linux.go @@ -0,0 +1,438 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noethtool +// +build !noethtool + +// The hard work of collecting data from the kernel via the ethtool interfaces is done by +// https://github.com/safchain/ethtool/ +// by Sylvain Afchain. Used under the Apache license. 
+ +package collector + +import ( + "errors" + "fmt" + "os" + "regexp" + "sort" + "strings" + "syscall" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" + "github.com/safchain/ethtool" + "golang.org/x/sys/unix" + "gopkg.in/alecthomas/kingpin.v2" +) + +var ( + ethtoolDeviceInclude = kingpin.Flag("collector.ethtool.device-include", "Regexp of ethtool devices to include (mutually exclusive to device-exclude).").String() + ethtoolDeviceExclude = kingpin.Flag("collector.ethtool.device-exclude", "Regexp of ethtool devices to exclude (mutually exclusive to device-include).").String() + ethtoolIncludedMetrics = kingpin.Flag("collector.ethtool.metrics-include", "Regexp of ethtool stats to include.").Default(".*").String() + ethtoolReceivedRegex = regexp.MustCompile(`(^|_)rx(_|$)`) + ethtoolTransmitRegex = regexp.MustCompile(`(^|_)tx(_|$)`) +) + +type Ethtool interface { + DriverInfo(string) (ethtool.DrvInfo, error) + Stats(string) (map[string]uint64, error) + LinkInfo(string) (ethtool.EthtoolCmd, error) +} + +type ethtoolLibrary struct { + ethtool *ethtool.Ethtool +} + +func (e *ethtoolLibrary) DriverInfo(intf string) (ethtool.DrvInfo, error) { + return e.ethtool.DriverInfo(intf) +} + +func (e *ethtoolLibrary) Stats(intf string) (map[string]uint64, error) { + return e.ethtool.Stats(intf) +} + +func (e *ethtoolLibrary) LinkInfo(intf string) (ethtool.EthtoolCmd, error) { + var ethtoolCmd ethtool.EthtoolCmd + _, err := ethtoolCmd.CmdGet(intf) + return ethtoolCmd, err +} + +type ethtoolCollector struct { + fs sysfs.FS + entries map[string]*prometheus.Desc + ethtool Ethtool + deviceFilter netDevFilter + infoDesc *prometheus.Desc + metricsPattern *regexp.Regexp + logger log.Logger +} + +// makeEthtoolCollector is the internal constructor for EthtoolCollector. +// This allows NewEthtoolTestCollector to override its .ethtool interface +// for testing. 
+func makeEthtoolCollector(logger log.Logger) (*ethtoolCollector, error) {
+	fs, err := sysfs.NewFS(*sysPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open sysfs: %w", err)
+	}
+
+	e, err := ethtool.NewEthtool()
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize ethtool library: %w", err)
+	}
+
+	// Pre-populate some common ethtool metrics.
+	return &ethtoolCollector{
+		fs:             fs,
+		ethtool:        &ethtoolLibrary{e},
+		deviceFilter:   newNetDevFilter(*ethtoolDeviceExclude, *ethtoolDeviceInclude),
+		metricsPattern: regexp.MustCompile(*ethtoolIncludedMetrics),
+		logger:         logger,
+		entries: map[string]*prometheus.Desc{
+			"rx_bytes": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "ethtool", "received_bytes_total"),
+				"Network interface bytes received",
+				[]string{"device"}, nil,
+			),
+			"rx_dropped": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "ethtool", "received_dropped_total"),
+				"Number of received frames dropped",
+				[]string{"device"}, nil,
+			),
+			"rx_errors": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "ethtool", "received_errors_total"),
+				"Number of received frames with errors",
+				[]string{"device"}, nil,
+			),
+			"rx_packets": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "ethtool", "received_packets_total"),
+				"Network interface packets received",
+				[]string{"device"}, nil,
+			),
+			"tx_bytes": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "ethtool", "transmitted_bytes_total"),
+				"Network interface bytes sent",
+				[]string{"device"}, nil,
+			),
+			"tx_errors": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "ethtool", "transmitted_errors_total"),
+				"Number of sent frames with errors",
+				[]string{"device"}, nil,
+			),
+			"tx_packets": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "ethtool", "transmitted_packets_total"),
+				"Network interface packets sent",
+				[]string{"device"}, nil,
+			),
+
+			// link info
+			"supported_port": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "network", "supported_port_info"),
+				"Type of ports or PHYs supported by network device",
+				[]string{"device", "type"}, nil,
+			),
+			"supported_speed": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "network", "supported_speed_bytes"),
+				"Combination of speeds and features supported by network device",
+				[]string{"device", "duplex", "mode"}, nil,
+			),
+			"supported_autonegotiate": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "network", "autonegotiate_supported"),
+				"If this port device supports autonegotiate",
+				[]string{"device"}, nil,
+			),
+			"supported_pause": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "network", "pause_supported"),
+				"If this port device supports pause frames",
+				[]string{"device"}, nil,
+			),
+			"supported_asymmetricpause": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "network", "asymmetricpause_supported"),
+				"If this port device supports asymmetric pause frames",
+				[]string{"device"}, nil,
+			),
+			"advertised_speed": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "network", "advertised_speed_bytes"),
+				"Combination of speeds and features offered by network device",
+				[]string{"device", "duplex", "mode"}, nil,
+			),
+			"advertised_autonegotiate": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "network", "autonegotiate_advertised"),
+				"If this port device offers autonegotiate",
+				[]string{"device"}, nil,
+			),
+			"advertised_pause": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "network", "pause_advertised"),
+				"If this port device offers pause capability",
+				[]string{"device"}, nil,
+			),
+			"advertised_asymmetricpause": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "network", "asymmetricpause_advertised"),
+				"If this port device offers asymmetric pause capability",
+				[]string{"device"}, nil,
+			),
+			"autonegotiate": prometheus.NewDesc(
+				prometheus.BuildFQName(namespace, "network", "autonegotiate"),
+				"If this port is using autonegotiate",
+				[]string{"device"}, nil,
+			),
+		},
+		infoDesc: prometheus.NewDesc(
+			prometheus.BuildFQName(namespace, "ethtool", "info"),
+			"A metric with a constant '1' value labeled by bus_info, device, driver, expansion_rom_version, firmware_version, version.",
+			[]string{"bus_info", "device", "driver", "expansion_rom_version", "firmware_version", "version"}, nil,
+		),
+	}, nil
+}
+
+func init() {
+	registerCollector("ethtool", defaultDisabled, NewEthtoolCollector)
+}
+
+// Generate the fully-qualified metric name for the ethtool metric.
+func buildEthtoolFQName(metric string) string {
+	metricName := strings.TrimLeft(strings.ToLower(SanitizeMetricName(metric)), "_")
+	metricName = ethtoolReceivedRegex.ReplaceAllString(metricName, "${1}received${2}")
+	metricName = ethtoolTransmitRegex.ReplaceAllString(metricName, "${1}transmitted${2}")
+	return prometheus.BuildFQName(namespace, "ethtool", metricName)
+}
+
+// NewEthtoolCollector returns a new Collector exposing ethtool stats.
+func NewEthtoolCollector(logger log.Logger) (Collector, error) {
+	return makeEthtoolCollector(logger)
+}
+
+// updatePortCapabilities generates metrics for autonegotiate, pause and asymmetricpause.
+// The bit offsets here correspond to ethtool_link_mode_bit_indices in linux/include/uapi/linux/ethtool.h +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/ethtool.h +func (c *ethtoolCollector) updatePortCapabilities(ch chan<- prometheus.Metric, prefix string, device string, linkModes uint32) { + var ( + autonegotiate = 0.0 + pause = 0.0 + asymmetricPause = 0.0 + ) + if linkModes&(1<, value is always 1.", + []string{"fc_host", "speed", "port_state", "port_type", "port_id", "port_name", "fabric_name", "symbolic_name", "supported_classes", "supported_speeds", "dev_loss_tmo"}, + nil, + ) + infoValue := 1.0 + + // First push the Host values + ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, host.Name, host.Speed, host.PortState, host.PortType, host.PortID, host.PortName, host.FabricName, host.SymbolicName, host.SupportedClasses, host.SupportedSpeeds, host.DevLossTMO) + + // Then the counters + c.pushCounter(ch, "dumped_frames_total", host.Counters.DumpedFrames, host.Name) + c.pushCounter(ch, "error_frames_total", host.Counters.ErrorFrames, host.Name) + c.pushCounter(ch, "invalid_crc_total", host.Counters.InvalidCRCCount, host.Name) + c.pushCounter(ch, "rx_frames_total", host.Counters.RXFrames, host.Name) + c.pushCounter(ch, "rx_words_total", host.Counters.RXWords, host.Name) + c.pushCounter(ch, "tx_frames_total", host.Counters.TXFrames, host.Name) + c.pushCounter(ch, "tx_words_total", host.Counters.TXWords, host.Name) + c.pushCounter(ch, "seconds_since_last_reset_total", host.Counters.SecondsSinceLastReset, host.Name) + c.pushCounter(ch, "invalid_tx_words_total", host.Counters.InvalidTXWordCount, host.Name) + c.pushCounter(ch, "link_failure_total", host.Counters.LinkFailureCount, host.Name) + c.pushCounter(ch, "loss_of_sync_total", host.Counters.LossOfSyncCount, host.Name) + c.pushCounter(ch, "loss_of_signal_total", host.Counters.LossOfSignalCount, host.Name) + c.pushCounter(ch, "nos_total", 
host.Counters.NosCount, host.Name) + c.pushCounter(ch, "fcp_packet_aborts_total", host.Counters.FCPPacketAborts, host.Name) + } + + return nil +} diff --git a/collector/filefd_linux.go b/collector/filefd_linux.go index 1d34889a1d..2e7cd1bbbc 100644 --- a/collector/filefd_linux.go +++ b/collector/filefd_linux.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nofilefd // +build !nofilefd package collector @@ -22,6 +23,7 @@ import ( "os" "strconv" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -29,26 +31,28 @@ const ( fileFDStatSubsystem = "filefd" ) -type fileFDStatCollector struct{} +type fileFDStatCollector struct { + logger log.Logger +} func init() { registerCollector(fileFDStatSubsystem, defaultEnabled, NewFileFDStatCollector) } // NewFileFDStatCollector returns a new Collector exposing file-nr stats. -func NewFileFDStatCollector() (Collector, error) { - return &fileFDStatCollector{}, nil +func NewFileFDStatCollector(logger log.Logger) (Collector, error) { + return &fileFDStatCollector{logger}, nil } func (c *fileFDStatCollector) Update(ch chan<- prometheus.Metric) error { fileFDStat, err := parseFileFDStats(procFilePath("sys/fs/file-nr")) if err != nil { - return fmt.Errorf("couldn't get file-nr: %s", err) + return fmt.Errorf("couldn't get file-nr: %w", err) } for name, value := range fileFDStat { v, err := strconv.ParseFloat(value, 64) if err != nil { - return fmt.Errorf("invalid value %s in file-nr: %s", value, err) + return fmt.Errorf("invalid value %s in file-nr: %w", value, err) } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( diff --git a/collector/filesystem_bsd.go b/collector/filesystem_bsd.go index 9f20ec05db..dc35c4f8df 100644 --- a/collector/filesystem_bsd.go +++ b/collector/filesystem_bsd.go @@ -11,7 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build openbsd darwin,amd64 dragonfly +//go:build ((openbsd && !amd64) || darwin || dragonfly) && !nofilesystem +// +build openbsd,!amd64 darwin dragonfly // +build !nofilesystem package collector @@ -20,7 +21,7 @@ import ( "errors" "unsafe" - "github.com/prometheus/common/log" + "github.com/go-kit/log/level" ) /* @@ -32,9 +33,9 @@ import ( import "C" const ( - defIgnoredMountPoints = "^/(dev)($|/)" - defIgnoredFSTypes = "^devfs$" - readOnly = 0x1 // MNT_RDONLY + defMountPointsExcluded = "^/(dev)($|/)" + defFSTypesExcluded = "^devfs$" + readOnly = 0x1 // MNT_RDONLY ) // Expose filesystem fullness. @@ -49,15 +50,15 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) { stats = []filesystemStats{} for i := 0; i < int(count); i++ { mountpoint := C.GoString(&mnt[i].f_mntonname[0]) - if c.ignoredMountPointsPattern.MatchString(mountpoint) { - log.Debugf("Ignoring mount point: %s", mountpoint) + if c.excludedMountPointsPattern.MatchString(mountpoint) { + level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint) continue } device := C.GoString(&mnt[i].f_mntfromname[0]) fstype := C.GoString(&mnt[i].f_fstypename[0]) - if c.ignoredFSTypesPattern.MatchString(fstype) { - log.Debugf("Ignoring fs type: %s", fstype) + if c.excludedFSTypesPattern.MatchString(fstype) { + level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype) continue } @@ -69,7 +70,7 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) { stats = append(stats, filesystemStats{ labels: filesystemLabels{ device: device, - mountPoint: mountpoint, + mountPoint: rootfsStripPrefix(mountpoint), fsType: fstype, }, size: float64(mnt[i].f_blocks) * float64(mnt[i].f_bsize), diff --git a/collector/filesystem_common.go b/collector/filesystem_common.go index ceac815f92..930ba24650 100644 --- a/collector/filesystem_common.go +++ b/collector/filesystem_common.go @@ -11,47 +11,69 @@ // See the License for the specific language governing 
permissions and // limitations under the License. +//go:build !nofilesystem && (linux || freebsd || openbsd || darwin || dragonfly) // +build !nofilesystem -// +build linux freebsd openbsd darwin,amd64 dragonfly +// +build linux freebsd openbsd darwin dragonfly package collector import ( + "errors" "regexp" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "gopkg.in/alecthomas/kingpin.v2" ) // Arch-dependent implementation must define: -// * defIgnoredMountPoints -// * defIgnoredFSTypes +// * defMountPointsExcluded +// * defFSTypesExcluded // * filesystemLabelNames // * filesystemCollector.GetStats var ( - ignoredMountPoints = kingpin.Flag( + mountPointsExcludeSet bool + mountPointsExclude = kingpin.Flag( + "collector.filesystem.mount-points-exclude", + "Regexp of mount points to exclude for filesystem collector.", + ).Default(defMountPointsExcluded).PreAction(func(c *kingpin.ParseContext) error { + mountPointsExcludeSet = true + return nil + }).String() + oldMountPointsExcluded = kingpin.Flag( "collector.filesystem.ignored-mount-points", "Regexp of mount points to ignore for filesystem collector.", - ).Default(defIgnoredMountPoints).String() - ignoredFSTypes = kingpin.Flag( + ).Hidden().String() + + fsTypesExcludeSet bool + fsTypesExclude = kingpin.Flag( + "collector.filesystem.fs-types-exclude", + "Regexp of filesystem types to exclude for filesystem collector.", + ).Default(defFSTypesExcluded).PreAction(func(c *kingpin.ParseContext) error { + fsTypesExcludeSet = true + return nil + }).String() + oldFSTypesExcluded = kingpin.Flag( "collector.filesystem.ignored-fs-types", "Regexp of filesystem types to ignore for filesystem collector.", - ).Default(defIgnoredFSTypes).String() + ).Hidden().String() filesystemLabelNames = []string{"device", "mountpoint", "fstype"} ) type filesystemCollector struct { - ignoredMountPointsPattern *regexp.Regexp - ignoredFSTypesPattern *regexp.Regexp + excludedMountPointsPattern 
*regexp.Regexp + excludedFSTypesPattern *regexp.Regexp sizeDesc, freeDesc, availDesc *prometheus.Desc filesDesc, filesFreeDesc *prometheus.Desc roDesc, deviceErrorDesc *prometheus.Desc + logger log.Logger } type filesystemLabels struct { - device, mountPoint, fsType string + device, mountPoint, fsType, options string } type filesystemStats struct { @@ -66,25 +88,45 @@ func init() { } // NewFilesystemCollector returns a new Collector exposing filesystems stats. -func NewFilesystemCollector() (Collector, error) { +func NewFilesystemCollector(logger log.Logger) (Collector, error) { + if *oldMountPointsExcluded != "" { + if !mountPointsExcludeSet { + level.Warn(logger).Log("msg", "--collector.filesystem.ignored-mount-points is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.mount-points-exclude") + *mountPointsExclude = *oldMountPointsExcluded + } else { + return nil, errors.New("--collector.filesystem.ignored-mount-points and --collector.filesystem.mount-points-exclude are mutually exclusive") + } + } + + if *oldFSTypesExcluded != "" { + if !fsTypesExcludeSet { + level.Warn(logger).Log("msg", "--collector.filesystem.ignored-fs-types is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.fs-types-exclude") + *fsTypesExclude = *oldFSTypesExcluded + } else { + return nil, errors.New("--collector.filesystem.ignored-fs-types and --collector.filesystem.fs-types-exclude are mutually exclusive") + } + } + subsystem := "filesystem" - mountPointPattern := regexp.MustCompile(*ignoredMountPoints) - filesystemsTypesPattern := regexp.MustCompile(*ignoredFSTypes) + level.Info(logger).Log("msg", "Parsed flag --collector.filesystem.mount-points-exclude", "flag", *mountPointsExclude) + mountPointPattern := regexp.MustCompile(*mountPointsExclude) + level.Info(logger).Log("msg", "Parsed flag --collector.filesystem.fs-types-exclude", "flag", *fsTypesExclude) + filesystemsTypesPattern := regexp.MustCompile(*fsTypesExclude) sizeDesc := prometheus.NewDesc( 
- prometheus.BuildFQName(namespace, subsystem, "size"), + prometheus.BuildFQName(namespace, subsystem, "size_bytes"), "Filesystem size in bytes.", filesystemLabelNames, nil, ) freeDesc := prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, "free"), + prometheus.BuildFQName(namespace, subsystem, "free_bytes"), "Filesystem free space in bytes.", filesystemLabelNames, nil, ) availDesc := prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, "avail"), + prometheus.BuildFQName(namespace, subsystem, "avail_bytes"), "Filesystem space available to non-root users in bytes.", filesystemLabelNames, nil, ) @@ -114,15 +156,16 @@ func NewFilesystemCollector() (Collector, error) { ) return &filesystemCollector{ - ignoredMountPointsPattern: mountPointPattern, - ignoredFSTypesPattern: filesystemsTypesPattern, - sizeDesc: sizeDesc, - freeDesc: freeDesc, - availDesc: availDesc, - filesDesc: filesDesc, - filesFreeDesc: filesFreeDesc, - roDesc: roDesc, - deviceErrorDesc: deviceErrorDesc, + excludedMountPointsPattern: mountPointPattern, + excludedFSTypesPattern: filesystemsTypesPattern, + sizeDesc: sizeDesc, + freeDesc: freeDesc, + availDesc: availDesc, + filesDesc: filesDesc, + filesFreeDesc: filesFreeDesc, + roDesc: roDesc, + deviceErrorDesc: deviceErrorDesc, + logger: logger, }, nil } diff --git a/collector/filesystem_freebsd.go b/collector/filesystem_freebsd.go index 071671615f..768eb797b0 100644 --- a/collector/filesystem_freebsd.go +++ b/collector/filesystem_freebsd.go @@ -11,60 +11,46 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nofilesystem // +build !nofilesystem package collector import ( - "bytes" - "unsafe" - - "github.com/prometheus/common/log" + "github.com/go-kit/log/level" "golang.org/x/sys/unix" ) const ( - defIgnoredMountPoints = "^/(dev)($|/)" - defIgnoredFSTypes = "^devfs$" - readOnly = 0x1 // MNT_RDONLY - noWait = 0x2 // MNT_NOWAIT + defMountPointsExcluded = "^/(dev)($|/)" + defFSTypesExcluded = "^devfs$" + readOnly = 0x1 // MNT_RDONLY + noWait = 0x2 // MNT_NOWAIT ) -func gostring(b []int8) string { - bb := *(*[]byte)(unsafe.Pointer(&b)) - idx := bytes.IndexByte(bb, 0) - if idx < 0 { - return "" - } - return string(bb[:idx]) -} - // Expose filesystem fullness. func (c *filesystemCollector) GetStats() ([]filesystemStats, error) { - buf := make([]unix.Statfs_t, 16) - for { - n, err := unix.Getfsstat(buf, noWait) - if err != nil { - return nil, err - } - if n < len(buf) { - buf = buf[:n] - break - } - buf = make([]unix.Statfs_t, len(buf)*2) + n, err := unix.Getfsstat(nil, noWait) + if err != nil { + return nil, err + } + buf := make([]unix.Statfs_t, n) + _, err = unix.Getfsstat(buf, noWait) + if err != nil { + return nil, err } stats := []filesystemStats{} for _, fs := range buf { - mountpoint := gostring(fs.Mntonname[:]) - if c.ignoredMountPointsPattern.MatchString(mountpoint) { - log.Debugf("Ignoring mount point: %s", mountpoint) + mountpoint := bytesToString(fs.Mntonname[:]) + if c.excludedMountPointsPattern.MatchString(mountpoint) { + level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint) continue } - device := gostring(fs.Mntfromname[:]) - fstype := gostring(fs.Fstypename[:]) - if c.ignoredFSTypesPattern.MatchString(fstype) { - log.Debugf("Ignoring fs type: %s", fstype) + device := bytesToString(fs.Mntfromname[:]) + fstype := bytesToString(fs.Fstypename[:]) + if c.excludedFSTypesPattern.MatchString(fstype) { + level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype) continue } @@ -76,7 +62,7 @@ func (c 
*filesystemCollector) GetStats() ([]filesystemStats, error) { stats = append(stats, filesystemStats{ labels: filesystemLabels{ device: device, - mountPoint: mountpoint, + mountPoint: rootfsStripPrefix(mountpoint), fsType: fstype, }, size: float64(fs.Blocks) * float64(fs.Bsize), diff --git a/collector/filesystem_linux.go b/collector/filesystem_linux.go index d9a4432046..a5c6bf3192 100644 --- a/collector/filesystem_linux.go +++ b/collector/filesystem_linux.go @@ -11,56 +11,98 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nofilesystem // +build !nofilesystem package collector import ( "bufio" + "errors" + "fmt" + "io" "os" "strings" - "syscall" + "sync" + "time" - "github.com/prometheus/common/log" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "golang.org/x/sys/unix" + "gopkg.in/alecthomas/kingpin.v2" ) const ( - defIgnoredMountPoints = "^/(sys|proc|dev)($|/)" - defIgnoredFSTypes = "^(sys|proc|auto)fs$" - readOnly = 0x1 // ST_RDONLY + defMountPointsExcluded = "^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/)" + defFSTypesExcluded = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$" ) +var mountTimeout = kingpin.Flag("collector.filesystem.mount-timeout", + "how long to wait for a mount to respond before marking it as stale"). + Hidden().Default("5s").Duration() +var stuckMounts = make(map[string]struct{}) +var stuckMountsMtx = &sync.Mutex{} + // GetStats returns filesystem stats. 
func (c *filesystemCollector) GetStats() ([]filesystemStats, error) { - mps, err := mountPointDetails() + mps, err := mountPointDetails(c.logger) if err != nil { return nil, err } stats := []filesystemStats{} for _, labels := range mps { - if c.ignoredMountPointsPattern.MatchString(labels.mountPoint) { - log.Debugf("Ignoring mount point: %s", labels.mountPoint) + if c.excludedMountPointsPattern.MatchString(labels.mountPoint) { + level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", labels.mountPoint) + continue + } + if c.excludedFSTypesPattern.MatchString(labels.fsType) { + level.Debug(c.logger).Log("msg", "Ignoring fs", "type", labels.fsType) continue } - if c.ignoredFSTypesPattern.MatchString(labels.fsType) { - log.Debugf("Ignoring fs type: %s", labels.fsType) + stuckMountsMtx.Lock() + if _, ok := stuckMounts[labels.mountPoint]; ok { + stats = append(stats, filesystemStats{ + labels: labels, + deviceError: 1, + }) + level.Debug(c.logger).Log("msg", "Mount point is in an unresponsive state", "mountpoint", labels.mountPoint) + stuckMountsMtx.Unlock() continue } + stuckMountsMtx.Unlock() + + // The success channel is used do tell the "watcher" that the stat + // finished successfully. The channel is closed on success. + success := make(chan struct{}) + go stuckMountWatcher(labels.mountPoint, success, c.logger) + + buf := new(unix.Statfs_t) + err = unix.Statfs(rootfsFilePath(labels.mountPoint), buf) + stuckMountsMtx.Lock() + close(success) + // If the mount has been marked as stuck, unmark it and log it's recovery. 
+ if _, ok := stuckMounts[labels.mountPoint]; ok { + level.Debug(c.logger).Log("msg", "Mount point has recovered, monitoring will resume", "mountpoint", labels.mountPoint) + delete(stuckMounts, labels.mountPoint) + } + stuckMountsMtx.Unlock() - buf := new(syscall.Statfs_t) - err := syscall.Statfs(labels.mountPoint, buf) if err != nil { stats = append(stats, filesystemStats{ labels: labels, deviceError: 1, }) - log.Errorf("Error on statfs() system call for %q: %s", labels.mountPoint, err) + + level.Debug(c.logger).Log("msg", "Error on statfs() system call", "rootfs", rootfsFilePath(labels.mountPoint), "err", err) continue } var ro float64 - if (buf.Flags & readOnly) != 0 { - ro = 1 + for _, option := range strings.Split(labels.options, ",") { + if option == "ro" { + ro = 1 + break + } } stats = append(stats, filesystemStats{ @@ -76,22 +118,67 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) { return stats, nil } -func mountPointDetails() ([]filesystemLabels, error) { - file, err := os.Open(procFilePath("mounts")) +// stuckMountWatcher listens on the given success channel and if the channel closes +// then the watcher does nothing. If instead the timeout is reached, the +// mount point that is being watched is marked as stuck. 
+func stuckMountWatcher(mountPoint string, success chan struct{}, logger log.Logger) { + mountCheckTimer := time.NewTimer(*mountTimeout) + defer mountCheckTimer.Stop() + select { + case <-success: + // Success + case <-mountCheckTimer.C: + // Timed out, mark mount as stuck + stuckMountsMtx.Lock() + select { + case <-success: + // Success came in just after the timeout was reached, don't label the mount as stuck + default: + level.Debug(logger).Log("msg", "Mount point timed out, it is being labeled as stuck and will not be monitored", "mountpoint", mountPoint) + stuckMounts[mountPoint] = struct{}{} + } + stuckMountsMtx.Unlock() + } +} + +func mountPointDetails(logger log.Logger) ([]filesystemLabels, error) { + file, err := os.Open(procFilePath("1/mounts")) + if errors.Is(err, os.ErrNotExist) { + // Fallback to `/proc/mounts` if `/proc/1/mounts` is missing due hidepid. + level.Debug(logger).Log("msg", "Reading root mounts failed, falling back to system mounts", "err", err) + file, err = os.Open(procFilePath("mounts")) + } if err != nil { return nil, err } defer file.Close() - filesystems := []filesystemLabels{} - scanner := bufio.NewScanner(file) + return parseFilesystemLabels(file) +} + +func parseFilesystemLabels(r io.Reader) ([]filesystemLabels, error) { + var filesystems []filesystemLabels + + scanner := bufio.NewScanner(r) for scanner.Scan() { parts := strings.Fields(scanner.Text()) + + if len(parts) < 4 { + return nil, fmt.Errorf("malformed mount point information: %q", scanner.Text()) + } + + // Ensure we handle the translation of \040 and \011 + // as per fstab(5). 
+ parts[1] = strings.Replace(parts[1], "\\040", " ", -1) + parts[1] = strings.Replace(parts[1], "\\011", "\t", -1) + filesystems = append(filesystems, filesystemLabels{ device: parts[0], - mountPoint: parts[1], + mountPoint: rootfsStripPrefix(parts[1]), fsType: parts[2], + options: parts[3], }) } + return filesystems, scanner.Err() } diff --git a/collector/filesystem_linux_test.go b/collector/filesystem_linux_test.go new file mode 100644 index 0000000000..1d18fd5920 --- /dev/null +++ b/collector/filesystem_linux_test.go @@ -0,0 +1,141 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "github.com/go-kit/log" + "strings" + "testing" + + kingpin "gopkg.in/alecthomas/kingpin.v2" +) + +func Test_parseFilesystemLabelsError(t *testing.T) { + tests := []struct { + name string + in string + }{ + { + name: "too few fields", + in: "hello world", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if _, err := parseFilesystemLabels(strings.NewReader(tt.in)); err == nil { + t.Fatal("expected an error, but none occurred") + } + }) + } +} + +func TestMountPointDetails(t *testing.T) { + if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", "./fixtures/proc"}); err != nil { + t.Fatal(err) + } + + expected := map[string]string{ + "/": "", + "/sys": "", + "/proc": "", + "/dev": "", + "/dev/pts": "", + "/run": "", + "/sys/kernel/security": "", + "/dev/shm": "", + "/run/lock": "", + "/sys/fs/cgroup": "", + "/sys/fs/cgroup/systemd": "", + "/sys/fs/pstore": "", + "/sys/fs/cgroup/cpuset": "", + "/sys/fs/cgroup/cpu,cpuacct": "", + "/sys/fs/cgroup/devices": "", + "/sys/fs/cgroup/freezer": "", + "/sys/fs/cgroup/net_cls,net_prio": "", + "/sys/fs/cgroup/blkio": "", + "/sys/fs/cgroup/perf_event": "", + "/proc/sys/fs/binfmt_misc": "", + "/dev/mqueue": "", + "/sys/kernel/debug": "", + "/dev/hugepages": "", + "/sys/fs/fuse/connections": "", + "/boot": "", + "/run/rpc_pipefs": "", + "/run/user/1000": "", + "/run/user/1000/gvfs": "", + "/var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore] bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk": "", + "/var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore] bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk": "", + } + + filesystems, err := mountPointDetails(log.NewNopLogger()) + if err != nil { + t.Log(err) + } + + for _, fs := range filesystems { + if _, ok := expected[fs.mountPoint]; !ok { + t.Errorf("Got 
unexpected %s", fs.mountPoint) + } + } +} + +func TestMountsFallback(t *testing.T) { + if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", "./fixtures_hidepid/proc"}); err != nil { + t.Fatal(err) + } + + expected := map[string]string{ + "/": "", + } + + filesystems, err := mountPointDetails(log.NewNopLogger()) + if err != nil { + t.Log(err) + } + + for _, fs := range filesystems { + if _, ok := expected[fs.mountPoint]; !ok { + t.Errorf("Got unexpected %s", fs.mountPoint) + } + } +} + +func TestPathRootfs(t *testing.T) { + if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", "./fixtures_bindmount/proc", "--path.rootfs", "/host"}); err != nil { + t.Fatal(err) + } + + expected := map[string]string{ + // should modify these mountpoints (removes /host, see fixture proc file) + "/": "", + "/media/volume1": "", + "/media/volume2": "", + // should not modify these mountpoints + "/dev/shm": "", + "/run/lock": "", + "/sys/fs/cgroup": "", + } + + filesystems, err := mountPointDetails(log.NewNopLogger()) + if err != nil { + t.Log(err) + } + + for _, fs := range filesystems { + if _, ok := expected[fs.mountPoint]; !ok { + t.Errorf("Got unexpected %s", fs.mountPoint) + } + } +} diff --git a/collector/filesystem_openbsd_amd64.go b/collector/filesystem_openbsd_amd64.go new file mode 100644 index 0000000000..448b3ef36e --- /dev/null +++ b/collector/filesystem_openbsd_amd64.go @@ -0,0 +1,77 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build openbsd && !nofilesystem +// +build openbsd,!nofilesystem + +package collector + +import ( + "github.com/go-kit/log/level" + "golang.org/x/sys/unix" +) + +const ( + defMountPointsExcluded = "^/(dev)($|/)" + defFSTypesExcluded = "^devfs$" +) + +// Expose filesystem fullness. +func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) { + var mnt []unix.Statfs_t + size, err := unix.Getfsstat(mnt, unix.MNT_NOWAIT) + if err != nil { + return nil, err + } + mnt = make([]unix.Statfs_t, size) + _, err = unix.Getfsstat(mnt, unix.MNT_NOWAIT) + if err != nil { + return nil, err + } + + stats = []filesystemStats{} + for _, v := range mnt { + mountpoint := int8ToString(v.F_mntonname[:]) + if c.excludedMountPointsPattern.MatchString(mountpoint) { + level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint) + continue + } + + device := int8ToString(v.F_mntfromname[:]) + fstype := int8ToString(v.F_fstypename[:]) + if c.excludedFSTypesPattern.MatchString(fstype) { + level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype) + continue + } + + var ro float64 + if (v.F_flags & unix.MNT_RDONLY) != 0 { + ro = 1 + } + + stats = append(stats, filesystemStats{ + labels: filesystemLabels{ + device: device, + mountPoint: mountpoint, + fsType: fstype, + }, + size: float64(v.F_blocks) * float64(v.F_bsize), + free: float64(v.F_bfree) * float64(v.F_bsize), + avail: float64(v.F_bavail) * float64(v.F_bsize), + files: float64(v.F_files), + filesFree: float64(v.F_ffree), + ro: ro, + }) + } + return stats, nil +} diff --git a/collector/fixtures/e2e-64k-page-output.txt b/collector/fixtures/e2e-64k-page-output.txt new file mode 100644 index 0000000000..845a9089b9 --- /dev/null +++ b/collector/fixtures/e2e-64k-page-output.txt @@ -0,0 +1,3768 @@ +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection 
cycles. +# TYPE go_gc_duration_seconds summary +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started. +# TYPE go_memstats_gc_cpu_fraction gauge +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +# HELP go_memstats_lookups_total Total number of pointer lookups. 
+# TYPE go_memstats_lookups_total counter +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +# HELP node_arp_entries ARP entries by device +# TYPE node_arp_entries gauge +node_arp_entries{device="eth0"} 3 +node_arp_entries{device="eth1"} 3 +# HELP node_bcache_active_journal_entries Number of journal entries that are newer than the index. +# TYPE node_bcache_active_journal_entries gauge +node_bcache_active_journal_entries{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 1 +# HELP node_bcache_average_key_size_sectors Average data per key in the btree (sectors). 
+# TYPE node_bcache_average_key_size_sectors gauge +node_bcache_average_key_size_sectors{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_btree_cache_size_bytes Amount of memory currently used by the btree cache. +# TYPE node_bcache_btree_cache_size_bytes gauge +node_bcache_btree_cache_size_bytes{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_btree_nodes Total nodes in the btree. +# TYPE node_bcache_btree_nodes gauge +node_bcache_btree_nodes{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_btree_read_average_duration_seconds Average btree read duration. +# TYPE node_bcache_btree_read_average_duration_seconds gauge +node_bcache_btree_read_average_duration_seconds{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 1.305e-06 +# HELP node_bcache_bypassed_bytes_total Amount of IO (both reads and writes) that has bypassed the cache. +# TYPE node_bcache_bypassed_bytes_total counter +node_bcache_bypassed_bytes_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_cache_available_percent Percentage of cache device without dirty data, usable for writeback (may contain clean cached data). +# TYPE node_bcache_cache_available_percent gauge +node_bcache_cache_available_percent{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 100 +# HELP node_bcache_cache_bypass_hits_total Hits for IO intended to skip the cache. +# TYPE node_bcache_cache_bypass_hits_total counter +node_bcache_cache_bypass_hits_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_cache_bypass_misses_total Misses for IO intended to skip the cache. +# TYPE node_bcache_cache_bypass_misses_total counter +node_bcache_cache_bypass_misses_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_cache_hits_total Hits counted per individual IO as bcache sees them. 
+# TYPE node_bcache_cache_hits_total counter +node_bcache_cache_hits_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 546 +# HELP node_bcache_cache_miss_collisions_total Instances where data insertion from cache miss raced with write (data already present). +# TYPE node_bcache_cache_miss_collisions_total counter +node_bcache_cache_miss_collisions_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_cache_misses_total Misses counted per individual IO as bcache sees them. +# TYPE node_bcache_cache_misses_total counter +node_bcache_cache_misses_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_cache_read_races_total Counts instances where while data was being read from the cache, the bucket was reused and invalidated - i.e. where the pointer was stale after the read completed. +# TYPE node_bcache_cache_read_races_total counter +node_bcache_cache_read_races_total{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_cache_readaheads_total Count of times readahead occurred. +# TYPE node_bcache_cache_readaheads_total counter +node_bcache_cache_readaheads_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_congested Congestion. +# TYPE node_bcache_congested gauge +node_bcache_congested{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_dirty_data_bytes Amount of dirty data for this backing device in the cache. +# TYPE node_bcache_dirty_data_bytes gauge +node_bcache_dirty_data_bytes{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_io_errors Number of errors that have occurred, decayed by io_error_halflife. +# TYPE node_bcache_io_errors gauge +node_bcache_io_errors{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_metadata_written_bytes_total Sum of all non data writes (btree writes and all other metadata). 
+# TYPE node_bcache_metadata_written_bytes_total counter +node_bcache_metadata_written_bytes_total{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 512 +# HELP node_bcache_priority_stats_metadata_percent Bcache's metadata overhead. +# TYPE node_bcache_priority_stats_metadata_percent gauge +node_bcache_priority_stats_metadata_percent{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_priority_stats_unused_percent The percentage of the cache that doesn't contain any data. +# TYPE node_bcache_priority_stats_unused_percent gauge +node_bcache_priority_stats_unused_percent{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 99 +# HELP node_bcache_root_usage_percent Percentage of the root btree node in use (tree depth increases if too high). +# TYPE node_bcache_root_usage_percent gauge +node_bcache_root_usage_percent{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_tree_depth Depth of the btree. +# TYPE node_bcache_tree_depth gauge +node_bcache_tree_depth{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_written_bytes_total Sum of all data that has been written to the cache. +# TYPE node_bcache_written_bytes_total counter +node_bcache_written_bytes_total{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bonding_active Number of active slaves per bonding interface. +# TYPE node_bonding_active gauge +node_bonding_active{master="bond0"} 0 +node_bonding_active{master="dmz"} 2 +node_bonding_active{master="int"} 1 +# HELP node_bonding_slaves Number of configured slaves per bonding interface. +# TYPE node_bonding_slaves gauge +node_bonding_slaves{master="bond0"} 0 +node_bonding_slaves{master="dmz"} 2 +node_bonding_slaves{master="int"} 2 +# HELP node_boot_time_seconds Node boot time, in unixtime. +# TYPE node_boot_time_seconds gauge +node_boot_time_seconds 1.418183276e+09 +# HELP node_buddyinfo_blocks Count of free blocks according to size. 
+# TYPE node_buddyinfo_blocks gauge +node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759 +node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381 +node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572 +node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093 +node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3 +node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791 +node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185 +node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475 +node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530 +node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2 +node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194 +node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567 +node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45 +node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102 +node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12 +node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4 +node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0 +# HELP node_context_switches_total Total number of context 
switches. +# TYPE node_context_switches_total counter +node_context_switches_total 3.8014093e+07 +# HELP node_cooling_device_cur_state Current throttle state of the cooling device +# TYPE node_cooling_device_cur_state gauge +node_cooling_device_cur_state{name="0",type="Processor"} 0 +# HELP node_cooling_device_max_state Maximum throttle state of the cooling device +# TYPE node_cooling_device_max_state gauge +node_cooling_device_max_state{name="0",type="Processor"} 3 +# HELP node_cpu_bug_info The `bugs` field of CPU information from /proc/cpuinfo. +# TYPE node_cpu_bug_info gauge +node_cpu_bug_info{bug="cpu_meltdown"} 1 +node_cpu_bug_info{bug="mds"} 1 +node_cpu_bug_info{bug="spectre_v1"} 1 +node_cpu_bug_info{bug="spectre_v2"} 1 +# HELP node_cpu_core_throttles_total Number of times this CPU core has been throttled. +# TYPE node_cpu_core_throttles_total counter +node_cpu_core_throttles_total{core="0",package="0"} 5 +node_cpu_core_throttles_total{core="0",package="1"} 0 +node_cpu_core_throttles_total{core="1",package="0"} 0 +node_cpu_core_throttles_total{core="1",package="1"} 9 +# HELP node_cpu_flag_info The `flags` field of CPU information from /proc/cpuinfo. +# TYPE node_cpu_flag_info gauge +node_cpu_flag_info{flag="aes"} 1 +node_cpu_flag_info{flag="avx"} 1 +node_cpu_flag_info{flag="avx2"} 1 +node_cpu_flag_info{flag="constant_tsc"} 1 +# HELP node_cpu_guest_seconds_total Seconds the CPUs spent in guests (VMs) for each mode. 
+# TYPE node_cpu_guest_seconds_total counter +node_cpu_guest_seconds_total{cpu="0",mode="nice"} 0.01 +node_cpu_guest_seconds_total{cpu="0",mode="user"} 0.02 +node_cpu_guest_seconds_total{cpu="1",mode="nice"} 0.02 +node_cpu_guest_seconds_total{cpu="1",mode="user"} 0.03 +node_cpu_guest_seconds_total{cpu="2",mode="nice"} 0.03 +node_cpu_guest_seconds_total{cpu="2",mode="user"} 0.04 +node_cpu_guest_seconds_total{cpu="3",mode="nice"} 0.04 +node_cpu_guest_seconds_total{cpu="3",mode="user"} 0.05 +node_cpu_guest_seconds_total{cpu="4",mode="nice"} 0.05 +node_cpu_guest_seconds_total{cpu="4",mode="user"} 0.06 +node_cpu_guest_seconds_total{cpu="5",mode="nice"} 0.06 +node_cpu_guest_seconds_total{cpu="5",mode="user"} 0.07 +node_cpu_guest_seconds_total{cpu="6",mode="nice"} 0.07 +node_cpu_guest_seconds_total{cpu="6",mode="user"} 0.08 +node_cpu_guest_seconds_total{cpu="7",mode="nice"} 0.08 +node_cpu_guest_seconds_total{cpu="7",mode="user"} 0.09 +# HELP node_cpu_info CPU information from /proc/cpuinfo. +# TYPE node_cpu_info gauge +node_cpu_info{cachesize="8192 KB",core="0",cpu="0",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +node_cpu_info{cachesize="8192 KB",core="0",cpu="4",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +node_cpu_info{cachesize="8192 KB",core="1",cpu="1",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +node_cpu_info{cachesize="8192 KB",core="1",cpu="5",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +node_cpu_info{cachesize="8192 KB",core="2",cpu="2",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 
1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +node_cpu_info{cachesize="8192 KB",core="2",cpu="6",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +node_cpu_info{cachesize="8192 KB",core="3",cpu="3",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +node_cpu_info{cachesize="8192 KB",core="3",cpu="7",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +# HELP node_cpu_package_throttles_total Number of times this CPU package has been throttled. +# TYPE node_cpu_package_throttles_total counter +node_cpu_package_throttles_total{package="0"} 30 +node_cpu_package_throttles_total{package="1"} 6 +# HELP node_cpu_scaling_frequency_hertz Current scaled CPU thread frequency in hertz. +# TYPE node_cpu_scaling_frequency_hertz gauge +node_cpu_scaling_frequency_hertz{cpu="0"} 1.699981e+09 +node_cpu_scaling_frequency_hertz{cpu="1"} 1.699981e+09 +node_cpu_scaling_frequency_hertz{cpu="2"} 8e+06 +node_cpu_scaling_frequency_hertz{cpu="3"} 8e+06 +# HELP node_cpu_scaling_frequency_max_hertz Maximum scaled CPU thread frequency in hertz. +# TYPE node_cpu_scaling_frequency_max_hertz gauge +node_cpu_scaling_frequency_max_hertz{cpu="0"} 3.7e+09 +node_cpu_scaling_frequency_max_hertz{cpu="1"} 3.7e+09 +node_cpu_scaling_frequency_max_hertz{cpu="2"} 4.2e+09 +node_cpu_scaling_frequency_max_hertz{cpu="3"} 4.2e+09 +# HELP node_cpu_scaling_frequency_min_hertz Minimum scaled CPU thread frequency in hertz. 
+# TYPE node_cpu_scaling_frequency_min_hertz gauge +node_cpu_scaling_frequency_min_hertz{cpu="0"} 8e+08 +node_cpu_scaling_frequency_min_hertz{cpu="1"} 8e+08 +node_cpu_scaling_frequency_min_hertz{cpu="2"} 1e+06 +node_cpu_scaling_frequency_min_hertz{cpu="3"} 1e+06 +# HELP node_cpu_seconds_total Seconds the CPUs spent in each mode. +# TYPE node_cpu_seconds_total counter +node_cpu_seconds_total{cpu="0",mode="idle"} 10870.69 +node_cpu_seconds_total{cpu="0",mode="iowait"} 2.2 +node_cpu_seconds_total{cpu="0",mode="irq"} 0.01 +node_cpu_seconds_total{cpu="0",mode="nice"} 0.19 +node_cpu_seconds_total{cpu="0",mode="softirq"} 34.1 +node_cpu_seconds_total{cpu="0",mode="steal"} 0 +node_cpu_seconds_total{cpu="0",mode="system"} 210.45 +node_cpu_seconds_total{cpu="0",mode="user"} 444.9 +node_cpu_seconds_total{cpu="1",mode="idle"} 11107.87 +node_cpu_seconds_total{cpu="1",mode="iowait"} 5.91 +node_cpu_seconds_total{cpu="1",mode="irq"} 0 +node_cpu_seconds_total{cpu="1",mode="nice"} 0.23 +node_cpu_seconds_total{cpu="1",mode="softirq"} 0.46 +node_cpu_seconds_total{cpu="1",mode="steal"} 0 +node_cpu_seconds_total{cpu="1",mode="system"} 164.74 +node_cpu_seconds_total{cpu="1",mode="user"} 478.69 +node_cpu_seconds_total{cpu="2",mode="idle"} 11123.21 +node_cpu_seconds_total{cpu="2",mode="iowait"} 4.41 +node_cpu_seconds_total{cpu="2",mode="irq"} 0 +node_cpu_seconds_total{cpu="2",mode="nice"} 0.36 +node_cpu_seconds_total{cpu="2",mode="softirq"} 3.26 +node_cpu_seconds_total{cpu="2",mode="steal"} 0 +node_cpu_seconds_total{cpu="2",mode="system"} 159.16 +node_cpu_seconds_total{cpu="2",mode="user"} 465.04 +node_cpu_seconds_total{cpu="3",mode="idle"} 11132.3 +node_cpu_seconds_total{cpu="3",mode="iowait"} 5.33 +node_cpu_seconds_total{cpu="3",mode="irq"} 0 +node_cpu_seconds_total{cpu="3",mode="nice"} 1.02 +node_cpu_seconds_total{cpu="3",mode="softirq"} 0.6 +node_cpu_seconds_total{cpu="3",mode="steal"} 0 +node_cpu_seconds_total{cpu="3",mode="system"} 156.83 +node_cpu_seconds_total{cpu="3",mode="user"} 
470.54 +node_cpu_seconds_total{cpu="4",mode="idle"} 11403.21 +node_cpu_seconds_total{cpu="4",mode="iowait"} 2.17 +node_cpu_seconds_total{cpu="4",mode="irq"} 0 +node_cpu_seconds_total{cpu="4",mode="nice"} 0.25 +node_cpu_seconds_total{cpu="4",mode="softirq"} 0.08 +node_cpu_seconds_total{cpu="4",mode="steal"} 0 +node_cpu_seconds_total{cpu="4",mode="system"} 107.76 +node_cpu_seconds_total{cpu="4",mode="user"} 284.13 +node_cpu_seconds_total{cpu="5",mode="idle"} 11362.7 +node_cpu_seconds_total{cpu="5",mode="iowait"} 6.72 +node_cpu_seconds_total{cpu="5",mode="irq"} 0 +node_cpu_seconds_total{cpu="5",mode="nice"} 1.01 +node_cpu_seconds_total{cpu="5",mode="softirq"} 0.3 +node_cpu_seconds_total{cpu="5",mode="steal"} 0 +node_cpu_seconds_total{cpu="5",mode="system"} 115.86 +node_cpu_seconds_total{cpu="5",mode="user"} 292.71 +node_cpu_seconds_total{cpu="6",mode="idle"} 11397.21 +node_cpu_seconds_total{cpu="6",mode="iowait"} 3.19 +node_cpu_seconds_total{cpu="6",mode="irq"} 0 +node_cpu_seconds_total{cpu="6",mode="nice"} 0.36 +node_cpu_seconds_total{cpu="6",mode="softirq"} 0.29 +node_cpu_seconds_total{cpu="6",mode="steal"} 0 +node_cpu_seconds_total{cpu="6",mode="system"} 102.76 +node_cpu_seconds_total{cpu="6",mode="user"} 291.52 +node_cpu_seconds_total{cpu="7",mode="idle"} 11392.82 +node_cpu_seconds_total{cpu="7",mode="iowait"} 5.55 +node_cpu_seconds_total{cpu="7",mode="irq"} 0 +node_cpu_seconds_total{cpu="7",mode="nice"} 2.68 +node_cpu_seconds_total{cpu="7",mode="softirq"} 0.31 +node_cpu_seconds_total{cpu="7",mode="steal"} 0 +node_cpu_seconds_total{cpu="7",mode="system"} 101.64 +node_cpu_seconds_total{cpu="7",mode="user"} 290.98 +# HELP node_disk_discard_time_seconds_total This is the total number of seconds spent by all discards. +# TYPE node_disk_discard_time_seconds_total counter +node_disk_discard_time_seconds_total{device="sdb"} 11.13 +# HELP node_disk_discarded_sectors_total The total number of sectors discarded successfully. 
+# TYPE node_disk_discarded_sectors_total counter +node_disk_discarded_sectors_total{device="sdb"} 1.925173784e+09 +# HELP node_disk_discards_completed_total The total number of discards completed successfully. +# TYPE node_disk_discards_completed_total counter +node_disk_discards_completed_total{device="sdb"} 68851 +# HELP node_disk_discards_merged_total The total number of discards merged. +# TYPE node_disk_discards_merged_total counter +node_disk_discards_merged_total{device="sdb"} 0 +# HELP node_disk_io_now The number of I/Os currently in progress. +# TYPE node_disk_io_now gauge +node_disk_io_now{device="dm-0"} 0 +node_disk_io_now{device="dm-1"} 0 +node_disk_io_now{device="dm-2"} 0 +node_disk_io_now{device="dm-3"} 0 +node_disk_io_now{device="dm-4"} 0 +node_disk_io_now{device="dm-5"} 0 +node_disk_io_now{device="mmcblk0"} 0 +node_disk_io_now{device="mmcblk0p1"} 0 +node_disk_io_now{device="mmcblk0p2"} 0 +node_disk_io_now{device="nvme0n1"} 0 +node_disk_io_now{device="sda"} 0 +node_disk_io_now{device="sdb"} 0 +node_disk_io_now{device="sr0"} 0 +node_disk_io_now{device="vda"} 0 +# HELP node_disk_io_time_seconds_total Total seconds spent doing I/Os. 
+# TYPE node_disk_io_time_seconds_total counter +node_disk_io_time_seconds_total{device="dm-0"} 11325.968 +node_disk_io_time_seconds_total{device="dm-1"} 0.076 +node_disk_io_time_seconds_total{device="dm-2"} 65.4 +node_disk_io_time_seconds_total{device="dm-3"} 0.016 +node_disk_io_time_seconds_total{device="dm-4"} 0.024 +node_disk_io_time_seconds_total{device="dm-5"} 58.848 +node_disk_io_time_seconds_total{device="mmcblk0"} 0.136 +node_disk_io_time_seconds_total{device="mmcblk0p1"} 0.024 +node_disk_io_time_seconds_total{device="mmcblk0p2"} 0.068 +node_disk_io_time_seconds_total{device="nvme0n1"} 222.766 +node_disk_io_time_seconds_total{device="sda"} 9653.880000000001 +node_disk_io_time_seconds_total{device="sdb"} 60.730000000000004 +node_disk_io_time_seconds_total{device="sr0"} 0 +node_disk_io_time_seconds_total{device="vda"} 41614.592000000004 +# HELP node_disk_io_time_weighted_seconds_total The weighted # of seconds spent doing I/Os. +# TYPE node_disk_io_time_weighted_seconds_total counter +node_disk_io_time_weighted_seconds_total{device="dm-0"} 1.206301256e+06 +node_disk_io_time_weighted_seconds_total{device="dm-1"} 0.084 +node_disk_io_time_weighted_seconds_total{device="dm-2"} 129.416 +node_disk_io_time_weighted_seconds_total{device="dm-3"} 0.10400000000000001 +node_disk_io_time_weighted_seconds_total{device="dm-4"} 0.044 +node_disk_io_time_weighted_seconds_total{device="dm-5"} 105.632 +node_disk_io_time_weighted_seconds_total{device="mmcblk0"} 0.156 +node_disk_io_time_weighted_seconds_total{device="mmcblk0p1"} 0.024 +node_disk_io_time_weighted_seconds_total{device="mmcblk0p2"} 0.068 +node_disk_io_time_weighted_seconds_total{device="nvme0n1"} 1032.546 +node_disk_io_time_weighted_seconds_total{device="sda"} 82621.804 +node_disk_io_time_weighted_seconds_total{device="sdb"} 67.07000000000001 +node_disk_io_time_weighted_seconds_total{device="sr0"} 0 +node_disk_io_time_weighted_seconds_total{device="vda"} 2.0778722280000001e+06 +# HELP node_disk_read_bytes_total The 
total number of bytes read successfully. +# TYPE node_disk_read_bytes_total counter +node_disk_read_bytes_total{device="dm-0"} 5.13708655616e+11 +node_disk_read_bytes_total{device="dm-1"} 1.589248e+06 +node_disk_read_bytes_total{device="dm-2"} 1.578752e+08 +node_disk_read_bytes_total{device="dm-3"} 1.98144e+06 +node_disk_read_bytes_total{device="dm-4"} 529408 +node_disk_read_bytes_total{device="dm-5"} 4.3150848e+07 +node_disk_read_bytes_total{device="mmcblk0"} 798720 +node_disk_read_bytes_total{device="mmcblk0p1"} 81920 +node_disk_read_bytes_total{device="mmcblk0p2"} 389120 +node_disk_read_bytes_total{device="nvme0n1"} 2.377714176e+09 +node_disk_read_bytes_total{device="sda"} 5.13713216512e+11 +node_disk_read_bytes_total{device="sdb"} 4.944782848e+09 +node_disk_read_bytes_total{device="sr0"} 0 +node_disk_read_bytes_total{device="vda"} 1.6727491584e+10 +# HELP node_disk_read_time_seconds_total The total number of seconds spent by all reads. +# TYPE node_disk_read_time_seconds_total counter +node_disk_read_time_seconds_total{device="dm-0"} 46229.572 +node_disk_read_time_seconds_total{device="dm-1"} 0.084 +node_disk_read_time_seconds_total{device="dm-2"} 6.5360000000000005 +node_disk_read_time_seconds_total{device="dm-3"} 0.10400000000000001 +node_disk_read_time_seconds_total{device="dm-4"} 0.028 +node_disk_read_time_seconds_total{device="dm-5"} 0.924 +node_disk_read_time_seconds_total{device="mmcblk0"} 0.156 +node_disk_read_time_seconds_total{device="mmcblk0p1"} 0.024 +node_disk_read_time_seconds_total{device="mmcblk0p2"} 0.068 +node_disk_read_time_seconds_total{device="nvme0n1"} 21.650000000000002 +node_disk_read_time_seconds_total{device="sda"} 18492.372 +node_disk_read_time_seconds_total{device="sdb"} 0.084 +node_disk_read_time_seconds_total{device="sr0"} 0 +node_disk_read_time_seconds_total{device="vda"} 8655.768 +# HELP node_disk_reads_completed_total The total number of reads completed successfully. 
+# TYPE node_disk_reads_completed_total counter +node_disk_reads_completed_total{device="dm-0"} 5.9910002e+07 +node_disk_reads_completed_total{device="dm-1"} 388 +node_disk_reads_completed_total{device="dm-2"} 11571 +node_disk_reads_completed_total{device="dm-3"} 3870 +node_disk_reads_completed_total{device="dm-4"} 392 +node_disk_reads_completed_total{device="dm-5"} 3729 +node_disk_reads_completed_total{device="mmcblk0"} 192 +node_disk_reads_completed_total{device="mmcblk0p1"} 17 +node_disk_reads_completed_total{device="mmcblk0p2"} 95 +node_disk_reads_completed_total{device="nvme0n1"} 47114 +node_disk_reads_completed_total{device="sda"} 2.5354637e+07 +node_disk_reads_completed_total{device="sdb"} 326552 +node_disk_reads_completed_total{device="sr0"} 0 +node_disk_reads_completed_total{device="vda"} 1.775784e+06 +# HELP node_disk_reads_merged_total The total number of reads merged. +# TYPE node_disk_reads_merged_total counter +node_disk_reads_merged_total{device="dm-0"} 0 +node_disk_reads_merged_total{device="dm-1"} 0 +node_disk_reads_merged_total{device="dm-2"} 0 +node_disk_reads_merged_total{device="dm-3"} 0 +node_disk_reads_merged_total{device="dm-4"} 0 +node_disk_reads_merged_total{device="dm-5"} 0 +node_disk_reads_merged_total{device="mmcblk0"} 3 +node_disk_reads_merged_total{device="mmcblk0p1"} 3 +node_disk_reads_merged_total{device="mmcblk0p2"} 0 +node_disk_reads_merged_total{device="nvme0n1"} 4 +node_disk_reads_merged_total{device="sda"} 3.4367663e+07 +node_disk_reads_merged_total{device="sdb"} 841 +node_disk_reads_merged_total{device="sr0"} 0 +node_disk_reads_merged_total{device="vda"} 15386 +# HELP node_disk_write_time_seconds_total This is the total number of seconds spent by all writes. 
+# TYPE node_disk_write_time_seconds_total counter +node_disk_write_time_seconds_total{device="dm-0"} 1.1585578e+06 +node_disk_write_time_seconds_total{device="dm-1"} 0 +node_disk_write_time_seconds_total{device="dm-2"} 122.884 +node_disk_write_time_seconds_total{device="dm-3"} 0 +node_disk_write_time_seconds_total{device="dm-4"} 0.016 +node_disk_write_time_seconds_total{device="dm-5"} 104.684 +node_disk_write_time_seconds_total{device="mmcblk0"} 0 +node_disk_write_time_seconds_total{device="mmcblk0p1"} 0 +node_disk_write_time_seconds_total{device="mmcblk0p2"} 0 +node_disk_write_time_seconds_total{device="nvme0n1"} 1011.053 +node_disk_write_time_seconds_total{device="sda"} 63877.96 +node_disk_write_time_seconds_total{device="sdb"} 5.007 +node_disk_write_time_seconds_total{device="sr0"} 0 +node_disk_write_time_seconds_total{device="vda"} 2.069221364e+06 +# HELP node_disk_writes_completed_total The total number of writes completed successfully. +# TYPE node_disk_writes_completed_total counter +node_disk_writes_completed_total{device="dm-0"} 3.9231014e+07 +node_disk_writes_completed_total{device="dm-1"} 74 +node_disk_writes_completed_total{device="dm-2"} 153522 +node_disk_writes_completed_total{device="dm-3"} 0 +node_disk_writes_completed_total{device="dm-4"} 38 +node_disk_writes_completed_total{device="dm-5"} 98918 +node_disk_writes_completed_total{device="mmcblk0"} 0 +node_disk_writes_completed_total{device="mmcblk0p1"} 0 +node_disk_writes_completed_total{device="mmcblk0p2"} 0 +node_disk_writes_completed_total{device="nvme0n1"} 1.07832e+06 +node_disk_writes_completed_total{device="sda"} 2.8444756e+07 +node_disk_writes_completed_total{device="sdb"} 41822 +node_disk_writes_completed_total{device="sr0"} 0 +node_disk_writes_completed_total{device="vda"} 6.038856e+06 +# HELP node_disk_writes_merged_total The number of writes merged. 
+# TYPE node_disk_writes_merged_total counter +node_disk_writes_merged_total{device="dm-0"} 0 +node_disk_writes_merged_total{device="dm-1"} 0 +node_disk_writes_merged_total{device="dm-2"} 0 +node_disk_writes_merged_total{device="dm-3"} 0 +node_disk_writes_merged_total{device="dm-4"} 0 +node_disk_writes_merged_total{device="dm-5"} 0 +node_disk_writes_merged_total{device="mmcblk0"} 0 +node_disk_writes_merged_total{device="mmcblk0p1"} 0 +node_disk_writes_merged_total{device="mmcblk0p2"} 0 +node_disk_writes_merged_total{device="nvme0n1"} 43950 +node_disk_writes_merged_total{device="sda"} 1.1134226e+07 +node_disk_writes_merged_total{device="sdb"} 2895 +node_disk_writes_merged_total{device="sr0"} 0 +node_disk_writes_merged_total{device="vda"} 2.0711856e+07 +# HELP node_disk_written_bytes_total The total number of bytes written successfully. +# TYPE node_disk_written_bytes_total counter +node_disk_written_bytes_total{device="dm-0"} 2.5891680256e+11 +node_disk_written_bytes_total{device="dm-1"} 303104 +node_disk_written_bytes_total{device="dm-2"} 2.607828992e+09 +node_disk_written_bytes_total{device="dm-3"} 0 +node_disk_written_bytes_total{device="dm-4"} 70144 +node_disk_written_bytes_total{device="dm-5"} 5.89664256e+08 +node_disk_written_bytes_total{device="mmcblk0"} 0 +node_disk_written_bytes_total{device="mmcblk0p1"} 0 +node_disk_written_bytes_total{device="mmcblk0p2"} 0 +node_disk_written_bytes_total{device="nvme0n1"} 2.0199236096e+10 +node_disk_written_bytes_total{device="sda"} 2.58916880384e+11 +node_disk_written_bytes_total{device="sdb"} 1.01012736e+09 +node_disk_written_bytes_total{device="sr0"} 0 +node_disk_written_bytes_total{device="vda"} 1.0938236928e+11 +# HELP node_dmi_info A metric with a constant '1' value labeled by bios_date, bios_release, bios_vendor, bios_version, board_asset_tag, board_name, board_serial, board_vendor, board_version, chassis_asset_tag, chassis_serial, chassis_vendor, chassis_version, product_family, product_name, product_serial, 
product_sku, product_uuid, product_version, system_vendor if provided by DMI. +# TYPE node_dmi_info gauge +node_dmi_info{bios_date="04/12/2021",bios_release="2.2",bios_vendor="Dell Inc.",bios_version="2.2.4",board_name="07PXPY",board_serial=".7N62AI2.GRTCL6944100GP.",board_vendor="Dell Inc.",board_version="A01",chassis_asset_tag="",chassis_serial="7N62AI2",chassis_vendor="Dell Inc.",chassis_version="",product_family="PowerEdge",product_name="PowerEdge R6515",product_serial="7N62AI2",product_sku="SKU=NotProvided;ModelName=PowerEdge R6515",product_uuid="83340ca8-cb49-4474-8c29-d2088ca84dd9",product_version="",system_vendor="Dell Inc."} 1 +# HELP node_drbd_activitylog_writes_total Number of updates of the activity log area of the meta data. +# TYPE node_drbd_activitylog_writes_total counter +node_drbd_activitylog_writes_total{device="drbd1"} 1100 +# HELP node_drbd_application_pending Number of block I/O requests forwarded to DRBD, but not yet answered by DRBD. +# TYPE node_drbd_application_pending gauge +node_drbd_application_pending{device="drbd1"} 12348 +# HELP node_drbd_bitmap_writes_total Number of updates of the bitmap area of the meta data. +# TYPE node_drbd_bitmap_writes_total counter +node_drbd_bitmap_writes_total{device="drbd1"} 221 +# HELP node_drbd_connected Whether DRBD is connected to the peer. +# TYPE node_drbd_connected gauge +node_drbd_connected{device="drbd1"} 1 +# HELP node_drbd_disk_read_bytes_total Net data read from local hard disk; in bytes. +# TYPE node_drbd_disk_read_bytes_total counter +node_drbd_disk_read_bytes_total{device="drbd1"} 1.2154539008e+11 +# HELP node_drbd_disk_state_is_up_to_date Whether the disk of the node is up to date. +# TYPE node_drbd_disk_state_is_up_to_date gauge +node_drbd_disk_state_is_up_to_date{device="drbd1",node="local"} 1 +node_drbd_disk_state_is_up_to_date{device="drbd1",node="remote"} 1 +# HELP node_drbd_disk_written_bytes_total Net data written on local hard disk; in bytes. 
+# TYPE node_drbd_disk_written_bytes_total counter +node_drbd_disk_written_bytes_total{device="drbd1"} 2.8941845504e+10 +# HELP node_drbd_epochs Number of Epochs currently on the fly. +# TYPE node_drbd_epochs gauge +node_drbd_epochs{device="drbd1"} 1 +# HELP node_drbd_local_pending Number of open requests to the local I/O sub-system. +# TYPE node_drbd_local_pending gauge +node_drbd_local_pending{device="drbd1"} 12345 +# HELP node_drbd_network_received_bytes_total Total number of bytes received via the network. +# TYPE node_drbd_network_received_bytes_total counter +node_drbd_network_received_bytes_total{device="drbd1"} 1.0961011e+07 +# HELP node_drbd_network_sent_bytes_total Total number of bytes sent via the network. +# TYPE node_drbd_network_sent_bytes_total counter +node_drbd_network_sent_bytes_total{device="drbd1"} 1.7740228608e+10 +# HELP node_drbd_node_role_is_primary Whether the role of the node is in the primary state. +# TYPE node_drbd_node_role_is_primary gauge +node_drbd_node_role_is_primary{device="drbd1",node="local"} 1 +node_drbd_node_role_is_primary{device="drbd1",node="remote"} 1 +# HELP node_drbd_out_of_sync_bytes Amount of data known to be out of sync; in bytes. +# TYPE node_drbd_out_of_sync_bytes gauge +node_drbd_out_of_sync_bytes{device="drbd1"} 1.2645376e+07 +# HELP node_drbd_remote_pending Number of requests sent to the peer, but that have not yet been answered by the latter. +# TYPE node_drbd_remote_pending gauge +node_drbd_remote_pending{device="drbd1"} 12346 +# HELP node_drbd_remote_unacknowledged Number of requests received by the peer via the network connection, but that have not yet been answered. +# TYPE node_drbd_remote_unacknowledged gauge +node_drbd_remote_unacknowledged{device="drbd1"} 12347 +# HELP node_edac_correctable_errors_total Total correctable memory errors. 
+# TYPE node_edac_correctable_errors_total counter +node_edac_correctable_errors_total{controller="0"} 1 +# HELP node_edac_csrow_correctable_errors_total Total correctable memory errors for this csrow. +# TYPE node_edac_csrow_correctable_errors_total counter +node_edac_csrow_correctable_errors_total{controller="0",csrow="0"} 3 +node_edac_csrow_correctable_errors_total{controller="0",csrow="unknown"} 2 +# HELP node_edac_csrow_uncorrectable_errors_total Total uncorrectable memory errors for this csrow. +# TYPE node_edac_csrow_uncorrectable_errors_total counter +node_edac_csrow_uncorrectable_errors_total{controller="0",csrow="0"} 4 +node_edac_csrow_uncorrectable_errors_total{controller="0",csrow="unknown"} 6 +# HELP node_edac_uncorrectable_errors_total Total uncorrectable memory errors. +# TYPE node_edac_uncorrectable_errors_total counter +node_edac_uncorrectable_errors_total{controller="0"} 5 +# HELP node_entropy_available_bits Bits of available entropy. +# TYPE node_entropy_available_bits gauge +node_entropy_available_bits 1337 +# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which node_exporter was built. +# TYPE node_exporter_build_info gauge +# HELP node_filefd_allocated File descriptor statistics: allocated. +# TYPE node_filefd_allocated gauge +node_filefd_allocated 1024 +# HELP node_filefd_maximum File descriptor statistics: maximum. +# TYPE node_filefd_maximum gauge +node_filefd_maximum 1.631329e+06 +# HELP node_forks_total Total number of forks. 
+# TYPE node_forks_total counter +node_forks_total 26442 +# HELP node_hwmon_chip_names Annotation metric for human-readable chip names +# TYPE node_hwmon_chip_names gauge +node_hwmon_chip_names{chip="nct6779",chip_name="nct6779"} 1 +node_hwmon_chip_names{chip="platform_coretemp_0",chip_name="coretemp"} 1 +node_hwmon_chip_names{chip="platform_coretemp_1",chip_name="coretemp"} 1 +# HELP node_hwmon_fan_alarm Hardware sensor alarm status (fan) +# TYPE node_hwmon_fan_alarm gauge +node_hwmon_fan_alarm{chip="nct6779",sensor="fan2"} 0 +# HELP node_hwmon_fan_beep_enabled Hardware monitor sensor has beeping enabled +# TYPE node_hwmon_fan_beep_enabled gauge +node_hwmon_fan_beep_enabled{chip="nct6779",sensor="fan2"} 0 +# HELP node_hwmon_fan_manual Hardware monitor fan element manual +# TYPE node_hwmon_fan_manual gauge +node_hwmon_fan_manual{chip="platform_applesmc_768",sensor="fan1"} 0 +node_hwmon_fan_manual{chip="platform_applesmc_768",sensor="fan2"} 0 +# HELP node_hwmon_fan_max_rpm Hardware monitor for fan revolutions per minute (max) +# TYPE node_hwmon_fan_max_rpm gauge +node_hwmon_fan_max_rpm{chip="platform_applesmc_768",sensor="fan1"} 6156 +node_hwmon_fan_max_rpm{chip="platform_applesmc_768",sensor="fan2"} 5700 +# HELP node_hwmon_fan_min_rpm Hardware monitor for fan revolutions per minute (min) +# TYPE node_hwmon_fan_min_rpm gauge +node_hwmon_fan_min_rpm{chip="nct6779",sensor="fan2"} 0 +node_hwmon_fan_min_rpm{chip="platform_applesmc_768",sensor="fan1"} 2160 +node_hwmon_fan_min_rpm{chip="platform_applesmc_768",sensor="fan2"} 2000 +# HELP node_hwmon_fan_output Hardware monitor fan element output +# TYPE node_hwmon_fan_output gauge +node_hwmon_fan_output{chip="platform_applesmc_768",sensor="fan1"} 2160 +node_hwmon_fan_output{chip="platform_applesmc_768",sensor="fan2"} 2000 +# HELP node_hwmon_fan_pulses Hardware monitor fan element pulses +# TYPE node_hwmon_fan_pulses gauge +node_hwmon_fan_pulses{chip="nct6779",sensor="fan2"} 2 +# HELP node_hwmon_fan_rpm Hardware monitor for 
fan revolutions per minute (input) +# TYPE node_hwmon_fan_rpm gauge +node_hwmon_fan_rpm{chip="nct6779",sensor="fan2"} 1098 +node_hwmon_fan_rpm{chip="platform_applesmc_768",sensor="fan1"} 0 +node_hwmon_fan_rpm{chip="platform_applesmc_768",sensor="fan2"} 1998 +# HELP node_hwmon_fan_target_rpm Hardware monitor for fan revolutions per minute (target) +# TYPE node_hwmon_fan_target_rpm gauge +node_hwmon_fan_target_rpm{chip="nct6779",sensor="fan2"} 27000 +# HELP node_hwmon_fan_tolerance Hardware monitor fan element tolerance +# TYPE node_hwmon_fan_tolerance gauge +node_hwmon_fan_tolerance{chip="nct6779",sensor="fan2"} 0 +# HELP node_hwmon_in_alarm Hardware sensor alarm status (in) +# TYPE node_hwmon_in_alarm gauge +node_hwmon_in_alarm{chip="nct6779",sensor="in0"} 0 +node_hwmon_in_alarm{chip="nct6779",sensor="in1"} 1 +# HELP node_hwmon_in_beep_enabled Hardware monitor sensor has beeping enabled +# TYPE node_hwmon_in_beep_enabled gauge +node_hwmon_in_beep_enabled{chip="nct6779",sensor="in0"} 0 +node_hwmon_in_beep_enabled{chip="nct6779",sensor="in1"} 0 +# HELP node_hwmon_in_max_volts Hardware monitor for voltage (max) +# TYPE node_hwmon_in_max_volts gauge +node_hwmon_in_max_volts{chip="nct6779",sensor="in0"} 1.744 +node_hwmon_in_max_volts{chip="nct6779",sensor="in1"} 0 +# HELP node_hwmon_in_min_volts Hardware monitor for voltage (min) +# TYPE node_hwmon_in_min_volts gauge +node_hwmon_in_min_volts{chip="nct6779",sensor="in0"} 0 +node_hwmon_in_min_volts{chip="nct6779",sensor="in1"} 0 +# HELP node_hwmon_in_volts Hardware monitor for voltage (input) +# TYPE node_hwmon_in_volts gauge +node_hwmon_in_volts{chip="nct6779",sensor="in0"} 0.792 +node_hwmon_in_volts{chip="nct6779",sensor="in1"} 1.024 +# HELP node_hwmon_intrusion_alarm Hardware sensor alarm status (intrusion) +# TYPE node_hwmon_intrusion_alarm gauge +node_hwmon_intrusion_alarm{chip="nct6779",sensor="intrusion0"} 1 +node_hwmon_intrusion_alarm{chip="nct6779",sensor="intrusion1"} 1 +# HELP node_hwmon_intrusion_beep_enabled 
Hardware monitor sensor has beeping enabled +# TYPE node_hwmon_intrusion_beep_enabled gauge +node_hwmon_intrusion_beep_enabled{chip="nct6779",sensor="intrusion0"} 0 +node_hwmon_intrusion_beep_enabled{chip="nct6779",sensor="intrusion1"} 0 +# HELP node_hwmon_pwm_auto_point1_pwm Hardware monitor pwm element auto_point1_pwm +# TYPE node_hwmon_pwm_auto_point1_pwm gauge +node_hwmon_pwm_auto_point1_pwm{chip="nct6779",sensor="pwm1"} 153 +# HELP node_hwmon_pwm_auto_point1_temp Hardware monitor pwm element auto_point1_temp +# TYPE node_hwmon_pwm_auto_point1_temp gauge +node_hwmon_pwm_auto_point1_temp{chip="nct6779",sensor="pwm1"} 30000 +# HELP node_hwmon_pwm_auto_point2_pwm Hardware monitor pwm element auto_point2_pwm +# TYPE node_hwmon_pwm_auto_point2_pwm gauge +node_hwmon_pwm_auto_point2_pwm{chip="nct6779",sensor="pwm1"} 255 +# HELP node_hwmon_pwm_auto_point2_temp Hardware monitor pwm element auto_point2_temp +# TYPE node_hwmon_pwm_auto_point2_temp gauge +node_hwmon_pwm_auto_point2_temp{chip="nct6779",sensor="pwm1"} 70000 +# HELP node_hwmon_pwm_auto_point3_pwm Hardware monitor pwm element auto_point3_pwm +# TYPE node_hwmon_pwm_auto_point3_pwm gauge +node_hwmon_pwm_auto_point3_pwm{chip="nct6779",sensor="pwm1"} 255 +# HELP node_hwmon_pwm_auto_point3_temp Hardware monitor pwm element auto_point3_temp +# TYPE node_hwmon_pwm_auto_point3_temp gauge +node_hwmon_pwm_auto_point3_temp{chip="nct6779",sensor="pwm1"} 70000 +# HELP node_hwmon_pwm_auto_point4_pwm Hardware monitor pwm element auto_point4_pwm +# TYPE node_hwmon_pwm_auto_point4_pwm gauge +node_hwmon_pwm_auto_point4_pwm{chip="nct6779",sensor="pwm1"} 255 +# HELP node_hwmon_pwm_auto_point4_temp Hardware monitor pwm element auto_point4_temp +# TYPE node_hwmon_pwm_auto_point4_temp gauge +node_hwmon_pwm_auto_point4_temp{chip="nct6779",sensor="pwm1"} 70000 +# HELP node_hwmon_pwm_auto_point5_pwm Hardware monitor pwm element auto_point5_pwm +# TYPE node_hwmon_pwm_auto_point5_pwm gauge 
+node_hwmon_pwm_auto_point5_pwm{chip="nct6779",sensor="pwm1"} 255 +# HELP node_hwmon_pwm_auto_point5_temp Hardware monitor pwm element auto_point5_temp +# TYPE node_hwmon_pwm_auto_point5_temp gauge +node_hwmon_pwm_auto_point5_temp{chip="nct6779",sensor="pwm1"} 75000 +# HELP node_hwmon_pwm_crit_temp_tolerance Hardware monitor pwm element crit_temp_tolerance +# TYPE node_hwmon_pwm_crit_temp_tolerance gauge +node_hwmon_pwm_crit_temp_tolerance{chip="nct6779",sensor="pwm1"} 2000 +# HELP node_hwmon_pwm_enable Hardware monitor pwm element enable +# TYPE node_hwmon_pwm_enable gauge +node_hwmon_pwm_enable{chip="nct6779",sensor="pwm1"} 5 +# HELP node_hwmon_pwm_floor Hardware monitor pwm element floor +# TYPE node_hwmon_pwm_floor gauge +node_hwmon_pwm_floor{chip="nct6779",sensor="pwm1"} 1 +# HELP node_hwmon_pwm_mode Hardware monitor pwm element mode +# TYPE node_hwmon_pwm_mode gauge +node_hwmon_pwm_mode{chip="nct6779",sensor="pwm1"} 1 +# HELP node_hwmon_pwm_start Hardware monitor pwm element start +# TYPE node_hwmon_pwm_start gauge +node_hwmon_pwm_start{chip="nct6779",sensor="pwm1"} 1 +# HELP node_hwmon_pwm_step_down_time Hardware monitor pwm element step_down_time +# TYPE node_hwmon_pwm_step_down_time gauge +node_hwmon_pwm_step_down_time{chip="nct6779",sensor="pwm1"} 100 +# HELP node_hwmon_pwm_step_up_time Hardware monitor pwm element step_up_time +# TYPE node_hwmon_pwm_step_up_time gauge +node_hwmon_pwm_step_up_time{chip="nct6779",sensor="pwm1"} 100 +# HELP node_hwmon_pwm_stop_time Hardware monitor pwm element stop_time +# TYPE node_hwmon_pwm_stop_time gauge +node_hwmon_pwm_stop_time{chip="nct6779",sensor="pwm1"} 6000 +# HELP node_hwmon_pwm_target_temp Hardware monitor pwm element target_temp +# TYPE node_hwmon_pwm_target_temp gauge +node_hwmon_pwm_target_temp{chip="nct6779",sensor="pwm1"} 0 +# HELP node_hwmon_pwm_temp_sel Hardware monitor pwm element temp_sel +# TYPE node_hwmon_pwm_temp_sel gauge +node_hwmon_pwm_temp_sel{chip="nct6779",sensor="pwm1"} 7 +# HELP 
node_hwmon_pwm_temp_tolerance Hardware monitor pwm element temp_tolerance +# TYPE node_hwmon_pwm_temp_tolerance gauge +node_hwmon_pwm_temp_tolerance{chip="nct6779",sensor="pwm1"} 0 +# HELP node_hwmon_pwm_weight_duty_base Hardware monitor pwm element weight_duty_base +# TYPE node_hwmon_pwm_weight_duty_base gauge +node_hwmon_pwm_weight_duty_base{chip="nct6779",sensor="pwm1"} 0 +# HELP node_hwmon_pwm_weight_duty_step Hardware monitor pwm element weight_duty_step +# TYPE node_hwmon_pwm_weight_duty_step gauge +node_hwmon_pwm_weight_duty_step{chip="nct6779",sensor="pwm1"} 0 +# HELP node_hwmon_pwm_weight_temp_sel Hardware monitor pwm element weight_temp_sel +# TYPE node_hwmon_pwm_weight_temp_sel gauge +node_hwmon_pwm_weight_temp_sel{chip="nct6779",sensor="pwm1"} 1 +# HELP node_hwmon_pwm_weight_temp_step Hardware monitor pwm element weight_temp_step +# TYPE node_hwmon_pwm_weight_temp_step gauge +node_hwmon_pwm_weight_temp_step{chip="nct6779",sensor="pwm1"} 0 +# HELP node_hwmon_pwm_weight_temp_step_base Hardware monitor pwm element weight_temp_step_base +# TYPE node_hwmon_pwm_weight_temp_step_base gauge +node_hwmon_pwm_weight_temp_step_base{chip="nct6779",sensor="pwm1"} 0 +# HELP node_hwmon_pwm_weight_temp_step_tol Hardware monitor pwm element weight_temp_step_tol +# TYPE node_hwmon_pwm_weight_temp_step_tol gauge +node_hwmon_pwm_weight_temp_step_tol{chip="nct6779",sensor="pwm1"} 0 +# HELP node_hwmon_sensor_label Label for given chip and sensor +# TYPE node_hwmon_sensor_label gauge +node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp1"} 1 +node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp2"} 1 +node_hwmon_sensor_label{chip="platform_applesmc_768",label="left_side",sensor="fan1"} 1 +node_hwmon_sensor_label{chip="platform_applesmc_768",label="right_side",sensor="fan2"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_0",label="core_0",sensor="temp2"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_0",label="core_1",sensor="temp3"} 1 
+node_hwmon_sensor_label{chip="platform_coretemp_0",label="core_2",sensor="temp4"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_0",label="core_3",sensor="temp5"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_0",label="physical_id_0",sensor="temp1"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="core_0",sensor="temp2"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="core_1",sensor="temp3"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="core_2",sensor="temp4"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="core_3",sensor="temp5"} 1 +node_hwmon_sensor_label{chip="platform_coretemp_1",label="physical_id_0",sensor="temp1"} 1 +# HELP node_hwmon_temp_celsius Hardware monitor for temperature (input) +# TYPE node_hwmon_temp_celsius gauge +node_hwmon_temp_celsius{chip="hwmon4",sensor="temp1"} 55 +node_hwmon_temp_celsius{chip="hwmon4",sensor="temp2"} 54 +node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp1"} 55 +node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp2"} 54 +node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp3"} 52 +node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp4"} 53 +node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp5"} 50 +node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp1"} 55 +node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp2"} 54 +node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp3"} 52 +node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp4"} 53 +node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp5"} 50 +# HELP node_hwmon_temp_crit_alarm_celsius Hardware monitor for temperature (crit_alarm) +# TYPE node_hwmon_temp_crit_alarm_celsius gauge +node_hwmon_temp_crit_alarm_celsius{chip="hwmon4",sensor="temp1"} 0 +node_hwmon_temp_crit_alarm_celsius{chip="hwmon4",sensor="temp2"} 0 +node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp1"} 0 
+node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp2"} 0 +node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp3"} 0 +node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp4"} 0 +node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp5"} 0 +node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp1"} 0 +node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp2"} 0 +node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp3"} 0 +node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp4"} 0 +node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp5"} 0 +# HELP node_hwmon_temp_crit_celsius Hardware monitor for temperature (crit) +# TYPE node_hwmon_temp_crit_celsius gauge +node_hwmon_temp_crit_celsius{chip="hwmon4",sensor="temp1"} 100 +node_hwmon_temp_crit_celsius{chip="hwmon4",sensor="temp2"} 100 +node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp1"} 100 +node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp2"} 100 +node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp3"} 100 +node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp4"} 100 +node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp5"} 100 +node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp1"} 100 +node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp2"} 100 +node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp3"} 100 +node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp4"} 100 +node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp5"} 100 +# HELP node_hwmon_temp_max_celsius Hardware monitor for temperature (max) +# TYPE node_hwmon_temp_max_celsius gauge +node_hwmon_temp_max_celsius{chip="hwmon4",sensor="temp1"} 100 +node_hwmon_temp_max_celsius{chip="hwmon4",sensor="temp2"} 100 
+node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp1"} 84 +node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp2"} 84 +node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp3"} 84 +node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp4"} 84 +node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp5"} 84 +node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp1"} 84 +node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp2"} 84 +node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp3"} 84 +node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp4"} 84 +node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp5"} 84 +# HELP node_infiniband_info Non-numeric data from /sys/class/infiniband/, value is always 1. +# TYPE node_infiniband_info gauge +node_infiniband_info{board_id="I40IW Board ID",device="i40iw0",firmware_version="0.2",hca_type="I40IW"} 1 +node_infiniband_info{board_id="SM_1141000001000",device="mlx4_0",firmware_version="2.31.5050",hca_type="MT4099"} 1 +# HELP node_infiniband_legacy_data_received_bytes_total Number of data octets received on all links +# TYPE node_infiniband_legacy_data_received_bytes_total counter +node_infiniband_legacy_data_received_bytes_total{device="mlx4_0",port="1"} 1.8527668e+07 +node_infiniband_legacy_data_received_bytes_total{device="mlx4_0",port="2"} 1.8527668e+07 +# HELP node_infiniband_legacy_data_transmitted_bytes_total Number of data octets transmitted on all links +# TYPE node_infiniband_legacy_data_transmitted_bytes_total counter +node_infiniband_legacy_data_transmitted_bytes_total{device="mlx4_0",port="1"} 1.493376e+07 +node_infiniband_legacy_data_transmitted_bytes_total{device="mlx4_0",port="2"} 1.493376e+07 +# HELP node_infiniband_legacy_multicast_packets_received_total Number of multicast packets received +# TYPE node_infiniband_legacy_multicast_packets_received_total counter 
+node_infiniband_legacy_multicast_packets_received_total{device="mlx4_0",port="1"} 93 +node_infiniband_legacy_multicast_packets_received_total{device="mlx4_0",port="2"} 93 +# HELP node_infiniband_legacy_multicast_packets_transmitted_total Number of multicast packets transmitted +# TYPE node_infiniband_legacy_multicast_packets_transmitted_total counter +node_infiniband_legacy_multicast_packets_transmitted_total{device="mlx4_0",port="1"} 16 +node_infiniband_legacy_multicast_packets_transmitted_total{device="mlx4_0",port="2"} 16 +# HELP node_infiniband_legacy_packets_received_total Number of data packets received on all links +# TYPE node_infiniband_legacy_packets_received_total counter +node_infiniband_legacy_packets_received_total{device="mlx4_0",port="1"} 0 +node_infiniband_legacy_packets_received_total{device="mlx4_0",port="2"} 0 +# HELP node_infiniband_legacy_packets_transmitted_total Number of data packets received on all links +# TYPE node_infiniband_legacy_packets_transmitted_total counter +node_infiniband_legacy_packets_transmitted_total{device="mlx4_0",port="1"} 0 +node_infiniband_legacy_packets_transmitted_total{device="mlx4_0",port="2"} 0 +# HELP node_infiniband_legacy_unicast_packets_received_total Number of unicast packets received +# TYPE node_infiniband_legacy_unicast_packets_received_total counter +node_infiniband_legacy_unicast_packets_received_total{device="mlx4_0",port="1"} 61148 +node_infiniband_legacy_unicast_packets_received_total{device="mlx4_0",port="2"} 61148 +# HELP node_infiniband_legacy_unicast_packets_transmitted_total Number of unicast packets transmitted +# TYPE node_infiniband_legacy_unicast_packets_transmitted_total counter +node_infiniband_legacy_unicast_packets_transmitted_total{device="mlx4_0",port="1"} 61239 +node_infiniband_legacy_unicast_packets_transmitted_total{device="mlx4_0",port="2"} 61239 +# HELP node_infiniband_link_downed_total Number of times the link failed to recover from an error state and went down +# TYPE 
node_infiniband_link_downed_total counter +node_infiniband_link_downed_total{device="mlx4_0",port="1"} 0 +node_infiniband_link_downed_total{device="mlx4_0",port="2"} 0 +# HELP node_infiniband_link_error_recovery_total Number of times the link successfully recovered from an error state +# TYPE node_infiniband_link_error_recovery_total counter +node_infiniband_link_error_recovery_total{device="mlx4_0",port="1"} 0 +node_infiniband_link_error_recovery_total{device="mlx4_0",port="2"} 0 +# HELP node_infiniband_multicast_packets_received_total Number of multicast packets received (including errors) +# TYPE node_infiniband_multicast_packets_received_total counter +node_infiniband_multicast_packets_received_total{device="mlx4_0",port="1"} 93 +node_infiniband_multicast_packets_received_total{device="mlx4_0",port="2"} 0 +# HELP node_infiniband_multicast_packets_transmitted_total Number of multicast packets transmitted (including errors) +# TYPE node_infiniband_multicast_packets_transmitted_total counter +node_infiniband_multicast_packets_transmitted_total{device="mlx4_0",port="1"} 16 +node_infiniband_multicast_packets_transmitted_total{device="mlx4_0",port="2"} 0 +# HELP node_infiniband_physical_state_id Physical state of the InfiniBand port (0: no change, 1: sleep, 2: polling, 3: disable, 4: shift, 5: link up, 6: link error recover, 7: phytest) +# TYPE node_infiniband_physical_state_id gauge +node_infiniband_physical_state_id{device="i40iw0",port="1"} 5 +node_infiniband_physical_state_id{device="mlx4_0",port="1"} 5 +node_infiniband_physical_state_id{device="mlx4_0",port="2"} 5 +# HELP node_infiniband_port_constraint_errors_received_total Number of packets received on the switch physical port that are discarded +# TYPE node_infiniband_port_constraint_errors_received_total counter +node_infiniband_port_constraint_errors_received_total{device="mlx4_0",port="1"} 0 +# HELP node_infiniband_port_constraint_errors_transmitted_total Number of packets not transmitted from the switch 
physical port +# TYPE node_infiniband_port_constraint_errors_transmitted_total counter +node_infiniband_port_constraint_errors_transmitted_total{device="mlx4_0",port="1"} 0 +# HELP node_infiniband_port_data_received_bytes_total Number of data octets received on all links +# TYPE node_infiniband_port_data_received_bytes_total counter +node_infiniband_port_data_received_bytes_total{device="mlx4_0",port="1"} 1.8527668e+07 +node_infiniband_port_data_received_bytes_total{device="mlx4_0",port="2"} 0 +# HELP node_infiniband_port_data_transmitted_bytes_total Number of data octets transmitted on all links +# TYPE node_infiniband_port_data_transmitted_bytes_total counter +node_infiniband_port_data_transmitted_bytes_total{device="mlx4_0",port="1"} 1.493376e+07 +node_infiniband_port_data_transmitted_bytes_total{device="mlx4_0",port="2"} 0 +# HELP node_infiniband_port_discards_received_total Number of inbound packets discarded by the port because the port is down or congested +# TYPE node_infiniband_port_discards_received_total counter +node_infiniband_port_discards_received_total{device="mlx4_0",port="1"} 0 +# HELP node_infiniband_port_discards_transmitted_total Number of outbound packets discarded by the port because the port is down or congested +# TYPE node_infiniband_port_discards_transmitted_total counter +node_infiniband_port_discards_transmitted_total{device="mlx4_0",port="1"} 5 +# HELP node_infiniband_port_errors_received_total Number of packets containing an error that were received on this port +# TYPE node_infiniband_port_errors_received_total counter +node_infiniband_port_errors_received_total{device="mlx4_0",port="1"} 0 +# HELP node_infiniband_port_packets_received_total Number of packets received on all VLs by this port (including errors) +# TYPE node_infiniband_port_packets_received_total counter +node_infiniband_port_packets_received_total{device="mlx4_0",port="1"} 6.825908347e+09 +# HELP node_infiniband_port_packets_transmitted_total Number of packets 
transmitted on all VLs from this port (including errors) +# TYPE node_infiniband_port_packets_transmitted_total counter +node_infiniband_port_packets_transmitted_total{device="mlx4_0",port="1"} 6.235865e+06 +# HELP node_infiniband_port_transmit_wait_total Number of ticks during which the port had data to transmit but no data was sent during the entire tick +# TYPE node_infiniband_port_transmit_wait_total counter +node_infiniband_port_transmit_wait_total{device="mlx4_0",port="1"} 4.294967295e+09 +# HELP node_infiniband_rate_bytes_per_second Maximum signal transfer rate +# TYPE node_infiniband_rate_bytes_per_second gauge +node_infiniband_rate_bytes_per_second{device="i40iw0",port="1"} 1.25e+09 +node_infiniband_rate_bytes_per_second{device="mlx4_0",port="1"} 5e+09 +node_infiniband_rate_bytes_per_second{device="mlx4_0",port="2"} 5e+09 +# HELP node_infiniband_state_id State of the InfiniBand port (0: no change, 1: down, 2: init, 3: armed, 4: active, 5: act defer) +# TYPE node_infiniband_state_id gauge +node_infiniband_state_id{device="i40iw0",port="1"} 4 +node_infiniband_state_id{device="mlx4_0",port="1"} 4 +node_infiniband_state_id{device="mlx4_0",port="2"} 4 +# HELP node_infiniband_unicast_packets_received_total Number of unicast packets received (including errors) +# TYPE node_infiniband_unicast_packets_received_total counter +node_infiniband_unicast_packets_received_total{device="mlx4_0",port="1"} 61148 +node_infiniband_unicast_packets_received_total{device="mlx4_0",port="2"} 0 +# HELP node_infiniband_unicast_packets_transmitted_total Number of unicast packets transmitted (including errors) +# TYPE node_infiniband_unicast_packets_transmitted_total counter +node_infiniband_unicast_packets_transmitted_total{device="mlx4_0",port="1"} 61239 +node_infiniband_unicast_packets_transmitted_total{device="mlx4_0",port="2"} 0 +# HELP node_interrupts_total Interrupt details. 
+# TYPE node_interrupts_total counter +node_interrupts_total{cpu="0",devices="",info="APIC ICR read retries",type="RTR"} 0 +node_interrupts_total{cpu="0",devices="",info="Function call interrupts",type="CAL"} 148554 +node_interrupts_total{cpu="0",devices="",info="IRQ work interrupts",type="IWI"} 1.509379e+06 +node_interrupts_total{cpu="0",devices="",info="Local timer interrupts",type="LOC"} 1.74326351e+08 +node_interrupts_total{cpu="0",devices="",info="Machine check exceptions",type="MCE"} 0 +node_interrupts_total{cpu="0",devices="",info="Machine check polls",type="MCP"} 2406 +node_interrupts_total{cpu="0",devices="",info="Non-maskable interrupts",type="NMI"} 47 +node_interrupts_total{cpu="0",devices="",info="Performance monitoring interrupts",type="PMI"} 47 +node_interrupts_total{cpu="0",devices="",info="Rescheduling interrupts",type="RES"} 1.0847134e+07 +node_interrupts_total{cpu="0",devices="",info="Spurious interrupts",type="SPU"} 0 +node_interrupts_total{cpu="0",devices="",info="TLB shootdowns",type="TLB"} 1.0460334e+07 +node_interrupts_total{cpu="0",devices="",info="Thermal event interrupts",type="TRM"} 0 +node_interrupts_total{cpu="0",devices="",info="Threshold APIC interrupts",type="THR"} 0 +node_interrupts_total{cpu="0",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 398553 +node_interrupts_total{cpu="0",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 7.434032e+06 +node_interrupts_total{cpu="0",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 +node_interrupts_total{cpu="0",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 +node_interrupts_total{cpu="0",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 328511 +node_interrupts_total{cpu="0",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 1.451445e+06 +node_interrupts_total{cpu="0",devices="i8042",info="IR-IO-APIC-edge",type="1"} 17960 +node_interrupts_total{cpu="0",devices="i8042",info="IR-IO-APIC-edge",type="12"} 380847 
+node_interrupts_total{cpu="0",devices="i915",info="IR-PCI-MSI-edge",type="44"} 140636 +node_interrupts_total{cpu="0",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 4.3078464e+07 +node_interrupts_total{cpu="0",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 4 +node_interrupts_total{cpu="0",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 1 +node_interrupts_total{cpu="0",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 350 +node_interrupts_total{cpu="0",devices="timer",info="IR-IO-APIC-edge",type="0"} 18 +node_interrupts_total{cpu="0",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 378324 +node_interrupts_total{cpu="1",devices="",info="APIC ICR read retries",type="RTR"} 0 +node_interrupts_total{cpu="1",devices="",info="Function call interrupts",type="CAL"} 157441 +node_interrupts_total{cpu="1",devices="",info="IRQ work interrupts",type="IWI"} 2.411776e+06 +node_interrupts_total{cpu="1",devices="",info="Local timer interrupts",type="LOC"} 1.35776678e+08 +node_interrupts_total{cpu="1",devices="",info="Machine check exceptions",type="MCE"} 0 +node_interrupts_total{cpu="1",devices="",info="Machine check polls",type="MCP"} 2399 +node_interrupts_total{cpu="1",devices="",info="Non-maskable interrupts",type="NMI"} 5031 +node_interrupts_total{cpu="1",devices="",info="Performance monitoring interrupts",type="PMI"} 5031 +node_interrupts_total{cpu="1",devices="",info="Rescheduling interrupts",type="RES"} 9.111507e+06 +node_interrupts_total{cpu="1",devices="",info="Spurious interrupts",type="SPU"} 0 +node_interrupts_total{cpu="1",devices="",info="TLB shootdowns",type="TLB"} 9.918429e+06 +node_interrupts_total{cpu="1",devices="",info="Thermal event interrupts",type="TRM"} 0 +node_interrupts_total{cpu="1",devices="",info="Threshold APIC interrupts",type="THR"} 0 +node_interrupts_total{cpu="1",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 2320 +node_interrupts_total{cpu="1",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 8.092205e+06 
+node_interrupts_total{cpu="1",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 +node_interrupts_total{cpu="1",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 +node_interrupts_total{cpu="1",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 322879 +node_interrupts_total{cpu="1",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 3.333499e+06 +node_interrupts_total{cpu="1",devices="i8042",info="IR-IO-APIC-edge",type="1"} 105 +node_interrupts_total{cpu="1",devices="i8042",info="IR-IO-APIC-edge",type="12"} 1021 +node_interrupts_total{cpu="1",devices="i915",info="IR-PCI-MSI-edge",type="44"} 226313 +node_interrupts_total{cpu="1",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 130 +node_interrupts_total{cpu="1",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 22 +node_interrupts_total{cpu="1",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 0 +node_interrupts_total{cpu="1",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 224 +node_interrupts_total{cpu="1",devices="timer",info="IR-IO-APIC-edge",type="0"} 0 +node_interrupts_total{cpu="1",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 1.734637e+06 +node_interrupts_total{cpu="2",devices="",info="APIC ICR read retries",type="RTR"} 0 +node_interrupts_total{cpu="2",devices="",info="Function call interrupts",type="CAL"} 142912 +node_interrupts_total{cpu="2",devices="",info="IRQ work interrupts",type="IWI"} 1.512975e+06 +node_interrupts_total{cpu="2",devices="",info="Local timer interrupts",type="LOC"} 1.68393257e+08 +node_interrupts_total{cpu="2",devices="",info="Machine check exceptions",type="MCE"} 0 +node_interrupts_total{cpu="2",devices="",info="Machine check polls",type="MCP"} 2399 +node_interrupts_total{cpu="2",devices="",info="Non-maskable interrupts",type="NMI"} 6211 +node_interrupts_total{cpu="2",devices="",info="Performance monitoring interrupts",type="PMI"} 6211 +node_interrupts_total{cpu="2",devices="",info="Rescheduling interrupts",type="RES"} 1.5999335e+07 
+node_interrupts_total{cpu="2",devices="",info="Spurious interrupts",type="SPU"} 0 +node_interrupts_total{cpu="2",devices="",info="TLB shootdowns",type="TLB"} 1.0494258e+07 +node_interrupts_total{cpu="2",devices="",info="Thermal event interrupts",type="TRM"} 0 +node_interrupts_total{cpu="2",devices="",info="Threshold APIC interrupts",type="THR"} 0 +node_interrupts_total{cpu="2",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 824 +node_interrupts_total{cpu="2",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 6.478877e+06 +node_interrupts_total{cpu="2",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 +node_interrupts_total{cpu="2",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 +node_interrupts_total{cpu="2",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 293782 +node_interrupts_total{cpu="2",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 1.092032e+06 +node_interrupts_total{cpu="2",devices="i8042",info="IR-IO-APIC-edge",type="1"} 28 +node_interrupts_total{cpu="2",devices="i8042",info="IR-IO-APIC-edge",type="12"} 240 +node_interrupts_total{cpu="2",devices="i915",info="IR-PCI-MSI-edge",type="44"} 347 +node_interrupts_total{cpu="2",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 460171 +node_interrupts_total{cpu="2",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 0 +node_interrupts_total{cpu="2",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 0 +node_interrupts_total{cpu="2",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 0 +node_interrupts_total{cpu="2",devices="timer",info="IR-IO-APIC-edge",type="0"} 0 +node_interrupts_total{cpu="2",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 440240 +node_interrupts_total{cpu="3",devices="",info="APIC ICR read retries",type="RTR"} 0 +node_interrupts_total{cpu="3",devices="",info="Function call interrupts",type="CAL"} 155528 +node_interrupts_total{cpu="3",devices="",info="IRQ work interrupts",type="IWI"} 2.428828e+06 +node_interrupts_total{cpu="3",devices="",info="Local 
timer interrupts",type="LOC"} 1.30980079e+08 +node_interrupts_total{cpu="3",devices="",info="Machine check exceptions",type="MCE"} 0 +node_interrupts_total{cpu="3",devices="",info="Machine check polls",type="MCP"} 2399 +node_interrupts_total{cpu="3",devices="",info="Non-maskable interrupts",type="NMI"} 4968 +node_interrupts_total{cpu="3",devices="",info="Performance monitoring interrupts",type="PMI"} 4968 +node_interrupts_total{cpu="3",devices="",info="Rescheduling interrupts",type="RES"} 7.45726e+06 +node_interrupts_total{cpu="3",devices="",info="Spurious interrupts",type="SPU"} 0 +node_interrupts_total{cpu="3",devices="",info="TLB shootdowns",type="TLB"} 1.0345022e+07 +node_interrupts_total{cpu="3",devices="",info="Thermal event interrupts",type="TRM"} 0 +node_interrupts_total{cpu="3",devices="",info="Threshold APIC interrupts",type="THR"} 0 +node_interrupts_total{cpu="3",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 863 +node_interrupts_total{cpu="3",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 7.492252e+06 +node_interrupts_total{cpu="3",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 +node_interrupts_total{cpu="3",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 +node_interrupts_total{cpu="3",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 351412 +node_interrupts_total{cpu="3",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 2.644609e+06 +node_interrupts_total{cpu="3",devices="i8042",info="IR-IO-APIC-edge",type="1"} 28 +node_interrupts_total{cpu="3",devices="i8042",info="IR-IO-APIC-edge",type="12"} 198 +node_interrupts_total{cpu="3",devices="i915",info="IR-PCI-MSI-edge",type="44"} 633 +node_interrupts_total{cpu="3",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 290 +node_interrupts_total{cpu="3",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 0 +node_interrupts_total{cpu="3",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 0 +node_interrupts_total{cpu="3",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 
0 +node_interrupts_total{cpu="3",devices="timer",info="IR-IO-APIC-edge",type="0"} 0 +node_interrupts_total{cpu="3",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 2.434308e+06 +# HELP node_intr_total Total number of interrupts serviced. +# TYPE node_intr_total counter +node_intr_total 8.885917e+06 +# HELP node_ipvs_backend_connections_active The current active connections by local and remote address. +# TYPE node_ipvs_backend_connections_active gauge +node_ipvs_backend_connections_active{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 321 +node_ipvs_backend_connections_active{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 64 +node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 248 +node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 248 +node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 248 +node_ipvs_backend_connections_active{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 +node_ipvs_backend_connections_active{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 +node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 1498 +node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 1499 
+node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 +# HELP node_ipvs_backend_connections_inactive The current inactive connections by local and remote address. +# TYPE node_ipvs_backend_connections_inactive gauge +node_ipvs_backend_connections_inactive{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 5 +node_ipvs_backend_connections_inactive{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 1 +node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 2 +node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 1 +node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 2 +node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 +# HELP 
node_ipvs_backend_weight The current backend weight by local and remote address. +# TYPE node_ipvs_backend_weight gauge +node_ipvs_backend_weight{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 20 +node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 +node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 +# HELP node_ipvs_connections_total The total number of connections made. +# TYPE node_ipvs_connections_total counter +node_ipvs_connections_total 2.3765872e+07 +# HELP node_ipvs_incoming_bytes_total The total amount of incoming data. 
+# TYPE node_ipvs_incoming_bytes_total counter +node_ipvs_incoming_bytes_total 8.9991519156915e+13 +# HELP node_ipvs_incoming_packets_total The total number of incoming packets. +# TYPE node_ipvs_incoming_packets_total counter +node_ipvs_incoming_packets_total 3.811989221e+09 +# HELP node_ipvs_outgoing_bytes_total The total amount of outgoing data. +# TYPE node_ipvs_outgoing_bytes_total counter +node_ipvs_outgoing_bytes_total 0 +# HELP node_ipvs_outgoing_packets_total The total number of outgoing packets. +# TYPE node_ipvs_outgoing_packets_total counter +node_ipvs_outgoing_packets_total 0 +# HELP node_ksmd_full_scans_total ksmd 'full_scans' file. +# TYPE node_ksmd_full_scans_total counter +node_ksmd_full_scans_total 323 +# HELP node_ksmd_merge_across_nodes ksmd 'merge_across_nodes' file. +# TYPE node_ksmd_merge_across_nodes gauge +node_ksmd_merge_across_nodes 1 +# HELP node_ksmd_pages_shared ksmd 'pages_shared' file. +# TYPE node_ksmd_pages_shared gauge +node_ksmd_pages_shared 1 +# HELP node_ksmd_pages_sharing ksmd 'pages_sharing' file. +# TYPE node_ksmd_pages_sharing gauge +node_ksmd_pages_sharing 255 +# HELP node_ksmd_pages_to_scan ksmd 'pages_to_scan' file. +# TYPE node_ksmd_pages_to_scan gauge +node_ksmd_pages_to_scan 100 +# HELP node_ksmd_pages_unshared ksmd 'pages_unshared' file. +# TYPE node_ksmd_pages_unshared gauge +node_ksmd_pages_unshared 0 +# HELP node_ksmd_pages_volatile ksmd 'pages_volatile' file. +# TYPE node_ksmd_pages_volatile gauge +node_ksmd_pages_volatile 0 +# HELP node_ksmd_run ksmd 'run' file. +# TYPE node_ksmd_run gauge +node_ksmd_run 1 +# HELP node_ksmd_sleep_seconds ksmd 'sleep_millisecs' file. +# TYPE node_ksmd_sleep_seconds gauge +node_ksmd_sleep_seconds 0.02 +# HELP node_load1 1m load average. +# TYPE node_load1 gauge +node_load1 0.21 +# HELP node_load15 15m load average. +# TYPE node_load15 gauge +node_load15 0.39 +# HELP node_load5 5m load average. 
+# TYPE node_load5 gauge +node_load5 0.37 +# HELP node_md_blocks Total number of blocks on device. +# TYPE node_md_blocks gauge +node_md_blocks{device="md0"} 248896 +node_md_blocks{device="md00"} 4.186624e+06 +node_md_blocks{device="md10"} 3.14159265e+08 +node_md_blocks{device="md101"} 322560 +node_md_blocks{device="md11"} 4.190208e+06 +node_md_blocks{device="md12"} 3.886394368e+09 +node_md_blocks{device="md120"} 2.095104e+06 +node_md_blocks{device="md126"} 1.855870976e+09 +node_md_blocks{device="md127"} 3.12319552e+08 +node_md_blocks{device="md219"} 7932 +node_md_blocks{device="md3"} 5.853468288e+09 +node_md_blocks{device="md4"} 4.883648e+06 +node_md_blocks{device="md6"} 1.95310144e+08 +node_md_blocks{device="md7"} 7.813735424e+09 +node_md_blocks{device="md8"} 1.95310144e+08 +node_md_blocks{device="md9"} 523968 +# HELP node_md_blocks_synced Number of blocks synced on device. +# TYPE node_md_blocks_synced gauge +node_md_blocks_synced{device="md0"} 248896 +node_md_blocks_synced{device="md00"} 4.186624e+06 +node_md_blocks_synced{device="md10"} 3.14159265e+08 +node_md_blocks_synced{device="md101"} 322560 +node_md_blocks_synced{device="md11"} 0 +node_md_blocks_synced{device="md12"} 3.886394368e+09 +node_md_blocks_synced{device="md120"} 2.095104e+06 +node_md_blocks_synced{device="md126"} 1.855870976e+09 +node_md_blocks_synced{device="md127"} 3.12319552e+08 +node_md_blocks_synced{device="md219"} 7932 +node_md_blocks_synced{device="md3"} 5.853468288e+09 +node_md_blocks_synced{device="md4"} 4.883648e+06 +node_md_blocks_synced{device="md6"} 1.6775552e+07 +node_md_blocks_synced{device="md7"} 7.813735424e+09 +node_md_blocks_synced{device="md8"} 1.6775552e+07 +node_md_blocks_synced{device="md9"} 0 +# HELP node_md_disks Number of active/failed/spare disks of device. 
+# TYPE node_md_disks gauge +node_md_disks{device="md0",state="active"} 2 +node_md_disks{device="md0",state="failed"} 0 +node_md_disks{device="md0",state="spare"} 0 +node_md_disks{device="md00",state="active"} 1 +node_md_disks{device="md00",state="failed"} 0 +node_md_disks{device="md00",state="spare"} 0 +node_md_disks{device="md10",state="active"} 2 +node_md_disks{device="md10",state="failed"} 0 +node_md_disks{device="md10",state="spare"} 0 +node_md_disks{device="md101",state="active"} 3 +node_md_disks{device="md101",state="failed"} 0 +node_md_disks{device="md101",state="spare"} 0 +node_md_disks{device="md11",state="active"} 2 +node_md_disks{device="md11",state="failed"} 1 +node_md_disks{device="md11",state="spare"} 2 +node_md_disks{device="md12",state="active"} 2 +node_md_disks{device="md12",state="failed"} 0 +node_md_disks{device="md12",state="spare"} 0 +node_md_disks{device="md120",state="active"} 2 +node_md_disks{device="md120",state="failed"} 0 +node_md_disks{device="md120",state="spare"} 0 +node_md_disks{device="md126",state="active"} 2 +node_md_disks{device="md126",state="failed"} 0 +node_md_disks{device="md126",state="spare"} 0 +node_md_disks{device="md127",state="active"} 2 +node_md_disks{device="md127",state="failed"} 0 +node_md_disks{device="md127",state="spare"} 0 +node_md_disks{device="md219",state="active"} 0 +node_md_disks{device="md219",state="failed"} 0 +node_md_disks{device="md219",state="spare"} 3 +node_md_disks{device="md3",state="active"} 8 +node_md_disks{device="md3",state="failed"} 0 +node_md_disks{device="md3",state="spare"} 2 +node_md_disks{device="md4",state="active"} 0 +node_md_disks{device="md4",state="failed"} 1 +node_md_disks{device="md4",state="spare"} 1 +node_md_disks{device="md6",state="active"} 1 +node_md_disks{device="md6",state="failed"} 1 +node_md_disks{device="md6",state="spare"} 1 +node_md_disks{device="md7",state="active"} 3 +node_md_disks{device="md7",state="failed"} 1 +node_md_disks{device="md7",state="spare"} 0 
+node_md_disks{device="md8",state="active"} 2 +node_md_disks{device="md8",state="failed"} 0 +node_md_disks{device="md8",state="spare"} 2 +node_md_disks{device="md9",state="active"} 4 +node_md_disks{device="md9",state="failed"} 2 +node_md_disks{device="md9",state="spare"} 1 +# HELP node_md_disks_required Total number of disks of device. +# TYPE node_md_disks_required gauge +node_md_disks_required{device="md0"} 2 +node_md_disks_required{device="md00"} 1 +node_md_disks_required{device="md10"} 2 +node_md_disks_required{device="md101"} 3 +node_md_disks_required{device="md11"} 2 +node_md_disks_required{device="md12"} 2 +node_md_disks_required{device="md120"} 2 +node_md_disks_required{device="md126"} 2 +node_md_disks_required{device="md127"} 2 +node_md_disks_required{device="md219"} 0 +node_md_disks_required{device="md3"} 8 +node_md_disks_required{device="md4"} 0 +node_md_disks_required{device="md6"} 2 +node_md_disks_required{device="md7"} 4 +node_md_disks_required{device="md8"} 2 +node_md_disks_required{device="md9"} 4 +# HELP node_md_state Indicates the state of md-device. 
+# TYPE node_md_state gauge +node_md_state{device="md0",state="active"} 1 +node_md_state{device="md0",state="inactive"} 0 +node_md_state{device="md0",state="recovering"} 0 +node_md_state{device="md0",state="resync"} 0 +node_md_state{device="md00",state="active"} 1 +node_md_state{device="md00",state="inactive"} 0 +node_md_state{device="md00",state="recovering"} 0 +node_md_state{device="md00",state="resync"} 0 +node_md_state{device="md10",state="active"} 1 +node_md_state{device="md10",state="inactive"} 0 +node_md_state{device="md10",state="recovering"} 0 +node_md_state{device="md10",state="resync"} 0 +node_md_state{device="md101",state="active"} 1 +node_md_state{device="md101",state="inactive"} 0 +node_md_state{device="md101",state="recovering"} 0 +node_md_state{device="md101",state="resync"} 0 +node_md_state{device="md11",state="active"} 0 +node_md_state{device="md11",state="inactive"} 0 +node_md_state{device="md11",state="recovering"} 0 +node_md_state{device="md11",state="resync"} 1 +node_md_state{device="md12",state="active"} 1 +node_md_state{device="md12",state="inactive"} 0 +node_md_state{device="md12",state="recovering"} 0 +node_md_state{device="md12",state="resync"} 0 +node_md_state{device="md120",state="active"} 1 +node_md_state{device="md120",state="inactive"} 0 +node_md_state{device="md120",state="recovering"} 0 +node_md_state{device="md120",state="resync"} 0 +node_md_state{device="md126",state="active"} 1 +node_md_state{device="md126",state="inactive"} 0 +node_md_state{device="md126",state="recovering"} 0 +node_md_state{device="md126",state="resync"} 0 +node_md_state{device="md127",state="active"} 1 +node_md_state{device="md127",state="inactive"} 0 +node_md_state{device="md127",state="recovering"} 0 +node_md_state{device="md127",state="resync"} 0 +node_md_state{device="md219",state="active"} 0 +node_md_state{device="md219",state="inactive"} 1 +node_md_state{device="md219",state="recovering"} 0 +node_md_state{device="md219",state="resync"} 0 
+node_md_state{device="md3",state="active"} 1 +node_md_state{device="md3",state="inactive"} 0 +node_md_state{device="md3",state="recovering"} 0 +node_md_state{device="md3",state="resync"} 0 +node_md_state{device="md4",state="active"} 0 +node_md_state{device="md4",state="inactive"} 1 +node_md_state{device="md4",state="recovering"} 0 +node_md_state{device="md4",state="resync"} 0 +node_md_state{device="md6",state="active"} 0 +node_md_state{device="md6",state="inactive"} 0 +node_md_state{device="md6",state="recovering"} 1 +node_md_state{device="md6",state="resync"} 0 +node_md_state{device="md7",state="active"} 1 +node_md_state{device="md7",state="inactive"} 0 +node_md_state{device="md7",state="recovering"} 0 +node_md_state{device="md7",state="resync"} 0 +node_md_state{device="md8",state="active"} 0 +node_md_state{device="md8",state="inactive"} 0 +node_md_state{device="md8",state="recovering"} 0 +node_md_state{device="md8",state="resync"} 1 +node_md_state{device="md9",state="active"} 0 +node_md_state{device="md9",state="inactive"} 0 +node_md_state{device="md9",state="recovering"} 0 +node_md_state{device="md9",state="resync"} 1 +# HELP node_memory_Active_anon_bytes Memory information field Active_anon_bytes. +# TYPE node_memory_Active_anon_bytes gauge +node_memory_Active_anon_bytes 2.068484096e+09 +# HELP node_memory_Active_bytes Memory information field Active_bytes. +# TYPE node_memory_Active_bytes gauge +node_memory_Active_bytes 2.287017984e+09 +# HELP node_memory_Active_file_bytes Memory information field Active_file_bytes. +# TYPE node_memory_Active_file_bytes gauge +node_memory_Active_file_bytes 2.18533888e+08 +# HELP node_memory_AnonHugePages_bytes Memory information field AnonHugePages_bytes. +# TYPE node_memory_AnonHugePages_bytes gauge +node_memory_AnonHugePages_bytes 0 +# HELP node_memory_AnonPages_bytes Memory information field AnonPages_bytes. 
+# TYPE node_memory_AnonPages_bytes gauge +node_memory_AnonPages_bytes 2.298032128e+09 +# HELP node_memory_Bounce_bytes Memory information field Bounce_bytes. +# TYPE node_memory_Bounce_bytes gauge +node_memory_Bounce_bytes 0 +# HELP node_memory_Buffers_bytes Memory information field Buffers_bytes. +# TYPE node_memory_Buffers_bytes gauge +node_memory_Buffers_bytes 2.256896e+07 +# HELP node_memory_Cached_bytes Memory information field Cached_bytes. +# TYPE node_memory_Cached_bytes gauge +node_memory_Cached_bytes 9.53229312e+08 +# HELP node_memory_CommitLimit_bytes Memory information field CommitLimit_bytes. +# TYPE node_memory_CommitLimit_bytes gauge +node_memory_CommitLimit_bytes 6.210940928e+09 +# HELP node_memory_Committed_AS_bytes Memory information field Committed_AS_bytes. +# TYPE node_memory_Committed_AS_bytes gauge +node_memory_Committed_AS_bytes 8.023486464e+09 +# HELP node_memory_DirectMap2M_bytes Memory information field DirectMap2M_bytes. +# TYPE node_memory_DirectMap2M_bytes gauge +node_memory_DirectMap2M_bytes 3.787456512e+09 +# HELP node_memory_DirectMap4k_bytes Memory information field DirectMap4k_bytes. +# TYPE node_memory_DirectMap4k_bytes gauge +node_memory_DirectMap4k_bytes 1.9011584e+08 +# HELP node_memory_Dirty_bytes Memory information field Dirty_bytes. +# TYPE node_memory_Dirty_bytes gauge +node_memory_Dirty_bytes 1.077248e+06 +# HELP node_memory_HardwareCorrupted_bytes Memory information field HardwareCorrupted_bytes. +# TYPE node_memory_HardwareCorrupted_bytes gauge +node_memory_HardwareCorrupted_bytes 0 +# HELP node_memory_HugePages_Free Memory information field HugePages_Free. +# TYPE node_memory_HugePages_Free gauge +node_memory_HugePages_Free 0 +# HELP node_memory_HugePages_Rsvd Memory information field HugePages_Rsvd. +# TYPE node_memory_HugePages_Rsvd gauge +node_memory_HugePages_Rsvd 0 +# HELP node_memory_HugePages_Surp Memory information field HugePages_Surp. 
+# TYPE node_memory_HugePages_Surp gauge +node_memory_HugePages_Surp 0 +# HELP node_memory_HugePages_Total Memory information field HugePages_Total. +# TYPE node_memory_HugePages_Total gauge +node_memory_HugePages_Total 0 +# HELP node_memory_Hugepagesize_bytes Memory information field Hugepagesize_bytes. +# TYPE node_memory_Hugepagesize_bytes gauge +node_memory_Hugepagesize_bytes 2.097152e+06 +# HELP node_memory_Inactive_anon_bytes Memory information field Inactive_anon_bytes. +# TYPE node_memory_Inactive_anon_bytes gauge +node_memory_Inactive_anon_bytes 9.04245248e+08 +# HELP node_memory_Inactive_bytes Memory information field Inactive_bytes. +# TYPE node_memory_Inactive_bytes gauge +node_memory_Inactive_bytes 1.053417472e+09 +# HELP node_memory_Inactive_file_bytes Memory information field Inactive_file_bytes. +# TYPE node_memory_Inactive_file_bytes gauge +node_memory_Inactive_file_bytes 1.49172224e+08 +# HELP node_memory_KernelStack_bytes Memory information field KernelStack_bytes. +# TYPE node_memory_KernelStack_bytes gauge +node_memory_KernelStack_bytes 5.9392e+06 +# HELP node_memory_Mapped_bytes Memory information field Mapped_bytes. +# TYPE node_memory_Mapped_bytes gauge +node_memory_Mapped_bytes 2.4496128e+08 +# HELP node_memory_MemFree_bytes Memory information field MemFree_bytes. +# TYPE node_memory_MemFree_bytes gauge +node_memory_MemFree_bytes 2.30883328e+08 +# HELP node_memory_MemTotal_bytes Memory information field MemTotal_bytes. +# TYPE node_memory_MemTotal_bytes gauge +node_memory_MemTotal_bytes 3.831959552e+09 +# HELP node_memory_Mlocked_bytes Memory information field Mlocked_bytes. +# TYPE node_memory_Mlocked_bytes gauge +node_memory_Mlocked_bytes 32768 +# HELP node_memory_NFS_Unstable_bytes Memory information field NFS_Unstable_bytes. +# TYPE node_memory_NFS_Unstable_bytes gauge +node_memory_NFS_Unstable_bytes 0 +# HELP node_memory_PageTables_bytes Memory information field PageTables_bytes. 
+# TYPE node_memory_PageTables_bytes gauge +node_memory_PageTables_bytes 7.7017088e+07 +# HELP node_memory_SReclaimable_bytes Memory information field SReclaimable_bytes. +# TYPE node_memory_SReclaimable_bytes gauge +node_memory_SReclaimable_bytes 4.5846528e+07 +# HELP node_memory_SUnreclaim_bytes Memory information field SUnreclaim_bytes. +# TYPE node_memory_SUnreclaim_bytes gauge +node_memory_SUnreclaim_bytes 5.545984e+07 +# HELP node_memory_Shmem_bytes Memory information field Shmem_bytes. +# TYPE node_memory_Shmem_bytes gauge +node_memory_Shmem_bytes 6.0809216e+08 +# HELP node_memory_Slab_bytes Memory information field Slab_bytes. +# TYPE node_memory_Slab_bytes gauge +node_memory_Slab_bytes 1.01306368e+08 +# HELP node_memory_SwapCached_bytes Memory information field SwapCached_bytes. +# TYPE node_memory_SwapCached_bytes gauge +node_memory_SwapCached_bytes 1.97124096e+08 +# HELP node_memory_SwapFree_bytes Memory information field SwapFree_bytes. +# TYPE node_memory_SwapFree_bytes gauge +node_memory_SwapFree_bytes 3.23108864e+09 +# HELP node_memory_SwapTotal_bytes Memory information field SwapTotal_bytes. +# TYPE node_memory_SwapTotal_bytes gauge +node_memory_SwapTotal_bytes 4.2949632e+09 +# HELP node_memory_Unevictable_bytes Memory information field Unevictable_bytes. +# TYPE node_memory_Unevictable_bytes gauge +node_memory_Unevictable_bytes 32768 +# HELP node_memory_VmallocChunk_bytes Memory information field VmallocChunk_bytes. +# TYPE node_memory_VmallocChunk_bytes gauge +node_memory_VmallocChunk_bytes 3.5183963009024e+13 +# HELP node_memory_VmallocTotal_bytes Memory information field VmallocTotal_bytes. +# TYPE node_memory_VmallocTotal_bytes gauge +node_memory_VmallocTotal_bytes 3.5184372087808e+13 +# HELP node_memory_VmallocUsed_bytes Memory information field VmallocUsed_bytes. +# TYPE node_memory_VmallocUsed_bytes gauge +node_memory_VmallocUsed_bytes 3.6130816e+08 +# HELP node_memory_WritebackTmp_bytes Memory information field WritebackTmp_bytes. 
+# TYPE node_memory_WritebackTmp_bytes gauge +node_memory_WritebackTmp_bytes 0 +# HELP node_memory_Writeback_bytes Memory information field Writeback_bytes. +# TYPE node_memory_Writeback_bytes gauge +node_memory_Writeback_bytes 0 +# HELP node_memory_numa_Active Memory information field Active. +# TYPE node_memory_numa_Active gauge +node_memory_numa_Active{node="0"} 5.58733312e+09 +node_memory_numa_Active{node="1"} 5.739003904e+09 +node_memory_numa_Active{node="2"} 5.739003904e+09 +# HELP node_memory_numa_Active_anon Memory information field Active_anon. +# TYPE node_memory_numa_Active_anon gauge +node_memory_numa_Active_anon{node="0"} 7.07915776e+08 +node_memory_numa_Active_anon{node="1"} 6.04635136e+08 +node_memory_numa_Active_anon{node="2"} 6.04635136e+08 +# HELP node_memory_numa_Active_file Memory information field Active_file. +# TYPE node_memory_numa_Active_file gauge +node_memory_numa_Active_file{node="0"} 4.879417344e+09 +node_memory_numa_Active_file{node="1"} 5.134368768e+09 +node_memory_numa_Active_file{node="2"} 5.134368768e+09 +# HELP node_memory_numa_AnonHugePages Memory information field AnonHugePages. +# TYPE node_memory_numa_AnonHugePages gauge +node_memory_numa_AnonHugePages{node="0"} 1.50994944e+08 +node_memory_numa_AnonHugePages{node="1"} 9.2274688e+07 +node_memory_numa_AnonHugePages{node="2"} 9.2274688e+07 +# HELP node_memory_numa_AnonPages Memory information field AnonPages. +# TYPE node_memory_numa_AnonPages gauge +node_memory_numa_AnonPages{node="0"} 8.07112704e+08 +node_memory_numa_AnonPages{node="1"} 6.88058368e+08 +node_memory_numa_AnonPages{node="2"} 6.88058368e+08 +# HELP node_memory_numa_Bounce Memory information field Bounce. +# TYPE node_memory_numa_Bounce gauge +node_memory_numa_Bounce{node="0"} 0 +node_memory_numa_Bounce{node="1"} 0 +node_memory_numa_Bounce{node="2"} 0 +# HELP node_memory_numa_Dirty Memory information field Dirty. 
+# TYPE node_memory_numa_Dirty gauge +node_memory_numa_Dirty{node="0"} 20480 +node_memory_numa_Dirty{node="1"} 122880 +node_memory_numa_Dirty{node="2"} 122880 +# HELP node_memory_numa_FilePages Memory information field FilePages. +# TYPE node_memory_numa_FilePages gauge +node_memory_numa_FilePages{node="0"} 7.1855017984e+10 +node_memory_numa_FilePages{node="1"} 8.5585088512e+10 +node_memory_numa_FilePages{node="2"} 8.5585088512e+10 +# HELP node_memory_numa_HugePages_Free Memory information field HugePages_Free. +# TYPE node_memory_numa_HugePages_Free gauge +node_memory_numa_HugePages_Free{node="0"} 0 +node_memory_numa_HugePages_Free{node="1"} 0 +node_memory_numa_HugePages_Free{node="2"} 0 +# HELP node_memory_numa_HugePages_Surp Memory information field HugePages_Surp. +# TYPE node_memory_numa_HugePages_Surp gauge +node_memory_numa_HugePages_Surp{node="0"} 0 +node_memory_numa_HugePages_Surp{node="1"} 0 +node_memory_numa_HugePages_Surp{node="2"} 0 +# HELP node_memory_numa_HugePages_Total Memory information field HugePages_Total. +# TYPE node_memory_numa_HugePages_Total gauge +node_memory_numa_HugePages_Total{node="0"} 0 +node_memory_numa_HugePages_Total{node="1"} 0 +node_memory_numa_HugePages_Total{node="2"} 0 +# HELP node_memory_numa_Inactive Memory information field Inactive. +# TYPE node_memory_numa_Inactive gauge +node_memory_numa_Inactive{node="0"} 6.0569788416e+10 +node_memory_numa_Inactive{node="1"} 7.3165406208e+10 +node_memory_numa_Inactive{node="2"} 7.3165406208e+10 +# HELP node_memory_numa_Inactive_anon Memory information field Inactive_anon. +# TYPE node_memory_numa_Inactive_anon gauge +node_memory_numa_Inactive_anon{node="0"} 3.48626944e+08 +node_memory_numa_Inactive_anon{node="1"} 2.91930112e+08 +node_memory_numa_Inactive_anon{node="2"} 2.91930112e+08 +# HELP node_memory_numa_Inactive_file Memory information field Inactive_file. 
+# TYPE node_memory_numa_Inactive_file gauge +node_memory_numa_Inactive_file{node="0"} 6.0221161472e+10 +node_memory_numa_Inactive_file{node="1"} 7.2873476096e+10 +node_memory_numa_Inactive_file{node="2"} 7.2873476096e+10 +# HELP node_memory_numa_KernelStack Memory information field KernelStack. +# TYPE node_memory_numa_KernelStack gauge +node_memory_numa_KernelStack{node="0"} 3.4832384e+07 +node_memory_numa_KernelStack{node="1"} 3.1850496e+07 +node_memory_numa_KernelStack{node="2"} 3.1850496e+07 +# HELP node_memory_numa_Mapped Memory information field Mapped. +# TYPE node_memory_numa_Mapped gauge +node_memory_numa_Mapped{node="0"} 9.1570176e+08 +node_memory_numa_Mapped{node="1"} 8.84850688e+08 +node_memory_numa_Mapped{node="2"} 8.84850688e+08 +# HELP node_memory_numa_MemFree Memory information field MemFree. +# TYPE node_memory_numa_MemFree gauge +node_memory_numa_MemFree{node="0"} 5.4303100928e+10 +node_memory_numa_MemFree{node="1"} 4.0586022912e+10 +node_memory_numa_MemFree{node="2"} 4.0586022912e+10 +# HELP node_memory_numa_MemTotal Memory information field MemTotal. +# TYPE node_memory_numa_MemTotal gauge +node_memory_numa_MemTotal{node="0"} 1.3740271616e+11 +node_memory_numa_MemTotal{node="1"} 1.37438953472e+11 +node_memory_numa_MemTotal{node="2"} 1.37438953472e+11 +# HELP node_memory_numa_MemUsed Memory information field MemUsed. +# TYPE node_memory_numa_MemUsed gauge +node_memory_numa_MemUsed{node="0"} 8.3099615232e+10 +node_memory_numa_MemUsed{node="1"} 9.685293056e+10 +node_memory_numa_MemUsed{node="2"} 9.685293056e+10 +# HELP node_memory_numa_Mlocked Memory information field Mlocked. +# TYPE node_memory_numa_Mlocked gauge +node_memory_numa_Mlocked{node="0"} 0 +node_memory_numa_Mlocked{node="1"} 0 +node_memory_numa_Mlocked{node="2"} 0 +# HELP node_memory_numa_NFS_Unstable Memory information field NFS_Unstable. 
+# TYPE node_memory_numa_NFS_Unstable gauge +node_memory_numa_NFS_Unstable{node="0"} 0 +node_memory_numa_NFS_Unstable{node="1"} 0 +node_memory_numa_NFS_Unstable{node="2"} 0 +# HELP node_memory_numa_PageTables Memory information field PageTables. +# TYPE node_memory_numa_PageTables gauge +node_memory_numa_PageTables{node="0"} 1.46743296e+08 +node_memory_numa_PageTables{node="1"} 1.27254528e+08 +node_memory_numa_PageTables{node="2"} 1.27254528e+08 +# HELP node_memory_numa_SReclaimable Memory information field SReclaimable. +# TYPE node_memory_numa_SReclaimable gauge +node_memory_numa_SReclaimable{node="0"} 4.580478976e+09 +node_memory_numa_SReclaimable{node="1"} 4.724822016e+09 +node_memory_numa_SReclaimable{node="2"} 4.724822016e+09 +# HELP node_memory_numa_SUnreclaim Memory information field SUnreclaim. +# TYPE node_memory_numa_SUnreclaim gauge +node_memory_numa_SUnreclaim{node="0"} 2.23352832e+09 +node_memory_numa_SUnreclaim{node="1"} 2.464391168e+09 +node_memory_numa_SUnreclaim{node="2"} 2.464391168e+09 +# HELP node_memory_numa_Shmem Memory information field Shmem. +# TYPE node_memory_numa_Shmem gauge +node_memory_numa_Shmem{node="0"} 4.900864e+07 +node_memory_numa_Shmem{node="1"} 8.968192e+07 +node_memory_numa_Shmem{node="2"} 8.968192e+07 +# HELP node_memory_numa_Slab Memory information field Slab. +# TYPE node_memory_numa_Slab gauge +node_memory_numa_Slab{node="0"} 6.814007296e+09 +node_memory_numa_Slab{node="1"} 7.189213184e+09 +node_memory_numa_Slab{node="2"} 7.189213184e+09 +# HELP node_memory_numa_Unevictable Memory information field Unevictable. +# TYPE node_memory_numa_Unevictable gauge +node_memory_numa_Unevictable{node="0"} 0 +node_memory_numa_Unevictable{node="1"} 0 +node_memory_numa_Unevictable{node="2"} 0 +# HELP node_memory_numa_Writeback Memory information field Writeback. 
+# TYPE node_memory_numa_Writeback gauge +node_memory_numa_Writeback{node="0"} 0 +node_memory_numa_Writeback{node="1"} 0 +node_memory_numa_Writeback{node="2"} 0 +# HELP node_memory_numa_WritebackTmp Memory information field WritebackTmp. +# TYPE node_memory_numa_WritebackTmp gauge +node_memory_numa_WritebackTmp{node="0"} 0 +node_memory_numa_WritebackTmp{node="1"} 0 +node_memory_numa_WritebackTmp{node="2"} 0 +# HELP node_memory_numa_interleave_hit_total Memory information field interleave_hit_total. +# TYPE node_memory_numa_interleave_hit_total counter +node_memory_numa_interleave_hit_total{node="0"} 57146 +node_memory_numa_interleave_hit_total{node="1"} 57286 +node_memory_numa_interleave_hit_total{node="2"} 7286 +# HELP node_memory_numa_local_node_total Memory information field local_node_total. +# TYPE node_memory_numa_local_node_total counter +node_memory_numa_local_node_total{node="0"} 1.93454780853e+11 +node_memory_numa_local_node_total{node="1"} 3.2671904655e+11 +node_memory_numa_local_node_total{node="2"} 2.671904655e+10 +# HELP node_memory_numa_numa_foreign_total Memory information field numa_foreign_total. +# TYPE node_memory_numa_numa_foreign_total counter +node_memory_numa_numa_foreign_total{node="0"} 5.98586233e+10 +node_memory_numa_numa_foreign_total{node="1"} 1.2624528e+07 +node_memory_numa_numa_foreign_total{node="2"} 2.624528e+06 +# HELP node_memory_numa_numa_hit_total Memory information field numa_hit_total. +# TYPE node_memory_numa_numa_hit_total counter +node_memory_numa_numa_hit_total{node="0"} 1.93460335812e+11 +node_memory_numa_numa_hit_total{node="1"} 3.26720946761e+11 +node_memory_numa_numa_hit_total{node="2"} 2.6720946761e+10 +# HELP node_memory_numa_numa_miss_total Memory information field numa_miss_total. 
+# TYPE node_memory_numa_numa_miss_total counter +node_memory_numa_numa_miss_total{node="0"} 1.2624528e+07 +node_memory_numa_numa_miss_total{node="1"} 5.9858626709e+10 +node_memory_numa_numa_miss_total{node="2"} 9.858626709e+09 +# HELP node_memory_numa_other_node_total Memory information field other_node_total. +# TYPE node_memory_numa_other_node_total counter +node_memory_numa_other_node_total{node="0"} 1.8179487e+07 +node_memory_numa_other_node_total{node="1"} 5.986052692e+10 +node_memory_numa_other_node_total{node="2"} 9.86052692e+09 +# HELP node_mountstats_nfs_age_seconds_total The age of the NFS mount in seconds. +# TYPE node_mountstats_nfs_age_seconds_total counter +node_mountstats_nfs_age_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 13968 +node_mountstats_nfs_age_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 13968 +# HELP node_mountstats_nfs_direct_read_bytes_total Number of bytes read using the read() syscall in O_DIRECT mode. +# TYPE node_mountstats_nfs_direct_read_bytes_total counter +node_mountstats_nfs_direct_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_direct_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_direct_write_bytes_total Number of bytes written using the write() syscall in O_DIRECT mode. +# TYPE node_mountstats_nfs_direct_write_bytes_total counter +node_mountstats_nfs_direct_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_direct_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_attribute_invalidate_total Number of times cached inode attributes are invalidated. 
+# TYPE node_mountstats_nfs_event_attribute_invalidate_total counter +node_mountstats_nfs_event_attribute_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_attribute_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_data_invalidate_total Number of times an inode cache is cleared. +# TYPE node_mountstats_nfs_event_data_invalidate_total counter +node_mountstats_nfs_event_data_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_data_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_dnode_revalidate_total Number of times cached dentry nodes are re-validated from the server. +# TYPE node_mountstats_nfs_event_dnode_revalidate_total counter +node_mountstats_nfs_event_dnode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 226 +node_mountstats_nfs_event_dnode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 226 +# HELP node_mountstats_nfs_event_inode_revalidate_total Number of times cached inode attributes are re-validated from the server. +# TYPE node_mountstats_nfs_event_inode_revalidate_total counter +node_mountstats_nfs_event_inode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 52 +node_mountstats_nfs_event_inode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 52 +# HELP node_mountstats_nfs_event_jukebox_delay_total Number of times the NFS server indicated EJUKEBOX; retrieving data from offline storage. 
+# TYPE node_mountstats_nfs_event_jukebox_delay_total counter +node_mountstats_nfs_event_jukebox_delay_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_jukebox_delay_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_pnfs_read_total Number of NFS v4.1+ pNFS reads. +# TYPE node_mountstats_nfs_event_pnfs_read_total counter +node_mountstats_nfs_event_pnfs_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_pnfs_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_pnfs_write_total Number of NFS v4.1+ pNFS writes. +# TYPE node_mountstats_nfs_event_pnfs_write_total counter +node_mountstats_nfs_event_pnfs_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_pnfs_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_short_read_total Number of times the NFS server gave less data than expected while reading. +# TYPE node_mountstats_nfs_event_short_read_total counter +node_mountstats_nfs_event_short_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_short_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_short_write_total Number of times the NFS server wrote less data than expected while writing. 
+# TYPE node_mountstats_nfs_event_short_write_total counter +node_mountstats_nfs_event_short_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_short_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_silly_rename_total Number of times a file was removed while still open by another process. +# TYPE node_mountstats_nfs_event_silly_rename_total counter +node_mountstats_nfs_event_silly_rename_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_silly_rename_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_truncation_total Number of times files have been truncated. +# TYPE node_mountstats_nfs_event_truncation_total counter +node_mountstats_nfs_event_truncation_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_truncation_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_vfs_access_total Number of times permissions have been checked. +# TYPE node_mountstats_nfs_event_vfs_access_total counter +node_mountstats_nfs_event_vfs_access_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 398 +node_mountstats_nfs_event_vfs_access_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 398 +# HELP node_mountstats_nfs_event_vfs_file_release_total Number of times files have been closed and released. 
+# TYPE node_mountstats_nfs_event_vfs_file_release_total counter +node_mountstats_nfs_event_vfs_file_release_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 77 +node_mountstats_nfs_event_vfs_file_release_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 77 +# HELP node_mountstats_nfs_event_vfs_flush_total Number of pending writes that have been forcefully flushed to the server. +# TYPE node_mountstats_nfs_event_vfs_flush_total counter +node_mountstats_nfs_event_vfs_flush_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 77 +node_mountstats_nfs_event_vfs_flush_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 77 +# HELP node_mountstats_nfs_event_vfs_fsync_total Number of times fsync() has been called on directories and files. +# TYPE node_mountstats_nfs_event_vfs_fsync_total counter +node_mountstats_nfs_event_vfs_fsync_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_vfs_fsync_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_vfs_getdents_total Number of times directory entries have been read with getdents(). +# TYPE node_mountstats_nfs_event_vfs_getdents_total counter +node_mountstats_nfs_event_vfs_getdents_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_vfs_getdents_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_vfs_lock_total Number of times locking has been attempted on a file. 
+# TYPE node_mountstats_nfs_event_vfs_lock_total counter +node_mountstats_nfs_event_vfs_lock_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_vfs_lock_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_vfs_lookup_total Number of times a directory lookup has occurred. +# TYPE node_mountstats_nfs_event_vfs_lookup_total counter +node_mountstats_nfs_event_vfs_lookup_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 13 +node_mountstats_nfs_event_vfs_lookup_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 13 +# HELP node_mountstats_nfs_event_vfs_open_total Number of times cached inode attributes are invalidated. +# TYPE node_mountstats_nfs_event_vfs_open_total counter +node_mountstats_nfs_event_vfs_open_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1 +node_mountstats_nfs_event_vfs_open_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 1 +# HELP node_mountstats_nfs_event_vfs_read_page_total Number of pages read directly via mmap()'d files. +# TYPE node_mountstats_nfs_event_vfs_read_page_total counter +node_mountstats_nfs_event_vfs_read_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_vfs_read_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_vfs_read_pages_total Number of times a group of pages have been read. 
+# TYPE node_mountstats_nfs_event_vfs_read_pages_total counter +node_mountstats_nfs_event_vfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 331 +node_mountstats_nfs_event_vfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 331 +# HELP node_mountstats_nfs_event_vfs_setattr_total Number of times directory entries have been read with getdents(). +# TYPE node_mountstats_nfs_event_vfs_setattr_total counter +node_mountstats_nfs_event_vfs_setattr_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_vfs_setattr_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_vfs_update_page_total Number of updates (and potential writes) to pages. +# TYPE node_mountstats_nfs_event_vfs_update_page_total counter +node_mountstats_nfs_event_vfs_update_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_vfs_update_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_vfs_write_page_total Number of pages written directly via mmap()'d files. +# TYPE node_mountstats_nfs_event_vfs_write_page_total counter +node_mountstats_nfs_event_vfs_write_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_vfs_write_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_event_vfs_write_pages_total Number of times a group of pages have been written. 
+# TYPE node_mountstats_nfs_event_vfs_write_pages_total counter +node_mountstats_nfs_event_vfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 47 +node_mountstats_nfs_event_vfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 47 +# HELP node_mountstats_nfs_event_write_extension_total Number of times a file has been grown due to writes beyond its existing end. +# TYPE node_mountstats_nfs_event_write_extension_total counter +node_mountstats_nfs_event_write_extension_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_write_extension_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_operations_major_timeouts_total Number of times a request has had a major timeout for a given operation. +# TYPE node_mountstats_nfs_operations_major_timeouts_total counter +node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 0 +node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 0 +node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 0 +node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 +# HELP 
node_mountstats_nfs_operations_queue_time_seconds_total Duration all requests spent queued for transmission for a given operation before they were sent, in seconds. +# TYPE node_mountstats_nfs_operations_queue_time_seconds_total counter +node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 9.007044786793922e+12 +node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 0.006 +node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 0.006 +node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 +# HELP node_mountstats_nfs_operations_received_bytes_total Number of bytes received for a given operation, including RPC headers and payload. 
+# TYPE node_mountstats_nfs_operations_received_bytes_total counter +node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 3.62996810236e+11 +node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 1.210292152e+09 +node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 1.210292152e+09 +node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 +# HELP node_mountstats_nfs_operations_request_time_seconds_total Duration all requests took from when a request was enqueued to when it was completely handled for a given operation, in seconds. 
+# TYPE node_mountstats_nfs_operations_request_time_seconds_total counter +node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 1.953587717e+06 +node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 79.407 +node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 79.407 +node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 +# HELP node_mountstats_nfs_operations_requests_total Number of requests performed for a given operation. 
+# TYPE node_mountstats_nfs_operations_requests_total counter +node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 2.927395007e+09 +node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 1298 +node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 1298 +node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 +# HELP node_mountstats_nfs_operations_response_time_seconds_total Duration all requests took to get a reply back after a request for a given operation was transmitted, in seconds. 
+# TYPE node_mountstats_nfs_operations_response_time_seconds_total counter +node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 1.667369447e+06 +node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 79.386 +node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 79.386 +node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 +# HELP node_mountstats_nfs_operations_sent_bytes_total Number of bytes sent for a given operation, including RPC headers and payload. 
+# TYPE node_mountstats_nfs_operations_sent_bytes_total counter +node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 5.26931094212e+11 +node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 207680 +node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 207680 +node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 +# HELP node_mountstats_nfs_operations_transmissions_total Number of times an actual RPC request has been transmitted for a given operation. 
+# TYPE node_mountstats_nfs_operations_transmissions_total counter +node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 2.927394995e+09 +node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 1298 +node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 1298 +node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 +# HELP node_mountstats_nfs_read_bytes_total Number of bytes read using the read() syscall. +# TYPE node_mountstats_nfs_read_bytes_total counter +node_mountstats_nfs_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1.20764023e+09 +node_mountstats_nfs_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 1.20764023e+09 +# HELP node_mountstats_nfs_read_pages_total Number of pages read directly via mmap()'d files. +# TYPE node_mountstats_nfs_read_pages_total counter +node_mountstats_nfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 295483 +node_mountstats_nfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 295483 +# HELP node_mountstats_nfs_total_read_bytes_total Number of bytes read from the NFS server, in total. 
+# TYPE node_mountstats_nfs_total_read_bytes_total counter +node_mountstats_nfs_total_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1.210214218e+09 +node_mountstats_nfs_total_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 1.210214218e+09 +# HELP node_mountstats_nfs_total_write_bytes_total Number of bytes written to the NFS server, in total. +# TYPE node_mountstats_nfs_total_write_bytes_total counter +node_mountstats_nfs_total_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_total_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_transport_backlog_queue_total Total number of items added to the RPC backlog queue. +# TYPE node_mountstats_nfs_transport_backlog_queue_total counter +node_mountstats_nfs_transport_backlog_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_transport_backlog_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_transport_bad_transaction_ids_total Number of times the NFS server sent a response with a transaction ID unknown to this client. +# TYPE node_mountstats_nfs_transport_bad_transaction_ids_total counter +node_mountstats_nfs_transport_bad_transaction_ids_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_transport_bad_transaction_ids_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_transport_bind_total Number of times the client has had to establish a connection from scratch to the NFS server. 
+# TYPE node_mountstats_nfs_transport_bind_total counter +node_mountstats_nfs_transport_bind_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_transport_bind_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_transport_connect_total Number of times the client has made a TCP connection to the NFS server. +# TYPE node_mountstats_nfs_transport_connect_total counter +node_mountstats_nfs_transport_connect_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1 +node_mountstats_nfs_transport_connect_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_transport_idle_time_seconds Duration since the NFS mount last saw any RPC traffic, in seconds. +# TYPE node_mountstats_nfs_transport_idle_time_seconds gauge +node_mountstats_nfs_transport_idle_time_seconds{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 11 +node_mountstats_nfs_transport_idle_time_seconds{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_transport_maximum_rpc_slots Maximum number of simultaneously active RPC requests ever used. +# TYPE node_mountstats_nfs_transport_maximum_rpc_slots gauge +node_mountstats_nfs_transport_maximum_rpc_slots{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 24 +node_mountstats_nfs_transport_maximum_rpc_slots{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 24 +# HELP node_mountstats_nfs_transport_pending_queue_total Total number of items added to the RPC transmission pending queue. 
+# TYPE node_mountstats_nfs_transport_pending_queue_total counter +node_mountstats_nfs_transport_pending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 5726 +node_mountstats_nfs_transport_pending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 5726 +# HELP node_mountstats_nfs_transport_receives_total Number of RPC responses for this mount received from the NFS server. +# TYPE node_mountstats_nfs_transport_receives_total counter +node_mountstats_nfs_transport_receives_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 6428 +node_mountstats_nfs_transport_receives_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 6428 +# HELP node_mountstats_nfs_transport_sending_queue_total Total number of items added to the RPC transmission sending queue. +# TYPE node_mountstats_nfs_transport_sending_queue_total counter +node_mountstats_nfs_transport_sending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 26 +node_mountstats_nfs_transport_sending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 26 +# HELP node_mountstats_nfs_transport_sends_total Number of RPC requests for this mount sent to the NFS server. +# TYPE node_mountstats_nfs_transport_sends_total counter +node_mountstats_nfs_transport_sends_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 6428 +node_mountstats_nfs_transport_sends_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 6428 +# HELP node_mountstats_nfs_write_bytes_total Number of bytes written using the write() syscall. 
+# TYPE node_mountstats_nfs_write_bytes_total counter +node_mountstats_nfs_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_mountstats_nfs_write_pages_total Number of pages written directly via mmap()'d files. +# TYPE node_mountstats_nfs_write_pages_total counter +node_mountstats_nfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 +# HELP node_netstat_Icmp6_InErrors Statistic Icmp6InErrors. +# TYPE node_netstat_Icmp6_InErrors untyped +node_netstat_Icmp6_InErrors 0 +# HELP node_netstat_Icmp6_InMsgs Statistic Icmp6InMsgs. +# TYPE node_netstat_Icmp6_InMsgs untyped +node_netstat_Icmp6_InMsgs 0 +# HELP node_netstat_Icmp6_OutMsgs Statistic Icmp6OutMsgs. +# TYPE node_netstat_Icmp6_OutMsgs untyped +node_netstat_Icmp6_OutMsgs 8 +# HELP node_netstat_Icmp_InErrors Statistic IcmpInErrors. +# TYPE node_netstat_Icmp_InErrors untyped +node_netstat_Icmp_InErrors 0 +# HELP node_netstat_Icmp_InMsgs Statistic IcmpInMsgs. +# TYPE node_netstat_Icmp_InMsgs untyped +node_netstat_Icmp_InMsgs 104 +# HELP node_netstat_Icmp_OutMsgs Statistic IcmpOutMsgs. +# TYPE node_netstat_Icmp_OutMsgs untyped +node_netstat_Icmp_OutMsgs 120 +# HELP node_netstat_Ip6_InOctets Statistic Ip6InOctets. +# TYPE node_netstat_Ip6_InOctets untyped +node_netstat_Ip6_InOctets 460 +# HELP node_netstat_Ip6_OutOctets Statistic Ip6OutOctets. +# TYPE node_netstat_Ip6_OutOctets untyped +node_netstat_Ip6_OutOctets 536 +# HELP node_netstat_IpExt_InOctets Statistic IpExtInOctets. +# TYPE node_netstat_IpExt_InOctets untyped +node_netstat_IpExt_InOctets 6.28639697e+09 +# HELP node_netstat_IpExt_OutOctets Statistic IpExtOutOctets. 
+# TYPE node_netstat_IpExt_OutOctets untyped +node_netstat_IpExt_OutOctets 2.786264347e+09 +# HELP node_netstat_Ip_Forwarding Statistic IpForwarding. +# TYPE node_netstat_Ip_Forwarding untyped +node_netstat_Ip_Forwarding 1 +# HELP node_netstat_TcpExt_ListenDrops Statistic TcpExtListenDrops. +# TYPE node_netstat_TcpExt_ListenDrops untyped +node_netstat_TcpExt_ListenDrops 0 +# HELP node_netstat_TcpExt_ListenOverflows Statistic TcpExtListenOverflows. +# TYPE node_netstat_TcpExt_ListenOverflows untyped +node_netstat_TcpExt_ListenOverflows 0 +# HELP node_netstat_TcpExt_SyncookiesFailed Statistic TcpExtSyncookiesFailed. +# TYPE node_netstat_TcpExt_SyncookiesFailed untyped +node_netstat_TcpExt_SyncookiesFailed 2 +# HELP node_netstat_TcpExt_SyncookiesRecv Statistic TcpExtSyncookiesRecv. +# TYPE node_netstat_TcpExt_SyncookiesRecv untyped +node_netstat_TcpExt_SyncookiesRecv 0 +# HELP node_netstat_TcpExt_SyncookiesSent Statistic TcpExtSyncookiesSent. +# TYPE node_netstat_TcpExt_SyncookiesSent untyped +node_netstat_TcpExt_SyncookiesSent 0 +# HELP node_netstat_Tcp_ActiveOpens Statistic TcpActiveOpens. +# TYPE node_netstat_Tcp_ActiveOpens untyped +node_netstat_Tcp_ActiveOpens 3556 +# HELP node_netstat_Tcp_CurrEstab Statistic TcpCurrEstab. +# TYPE node_netstat_Tcp_CurrEstab untyped +node_netstat_Tcp_CurrEstab 0 +# HELP node_netstat_Tcp_InErrs Statistic TcpInErrs. +# TYPE node_netstat_Tcp_InErrs untyped +node_netstat_Tcp_InErrs 5 +# HELP node_netstat_Tcp_InSegs Statistic TcpInSegs. +# TYPE node_netstat_Tcp_InSegs untyped +node_netstat_Tcp_InSegs 5.7252008e+07 +# HELP node_netstat_Tcp_OutRsts Statistic TcpOutRsts. +# TYPE node_netstat_Tcp_OutRsts untyped +node_netstat_Tcp_OutRsts 1003 +# HELP node_netstat_Tcp_OutSegs Statistic TcpOutSegs. +# TYPE node_netstat_Tcp_OutSegs untyped +node_netstat_Tcp_OutSegs 5.4915039e+07 +# HELP node_netstat_Tcp_PassiveOpens Statistic TcpPassiveOpens. 
+# TYPE node_netstat_Tcp_PassiveOpens untyped +node_netstat_Tcp_PassiveOpens 230 +# HELP node_netstat_Tcp_RetransSegs Statistic TcpRetransSegs. +# TYPE node_netstat_Tcp_RetransSegs untyped +node_netstat_Tcp_RetransSegs 227 +# HELP node_netstat_Udp6_InDatagrams Statistic Udp6InDatagrams. +# TYPE node_netstat_Udp6_InDatagrams untyped +node_netstat_Udp6_InDatagrams 0 +# HELP node_netstat_Udp6_InErrors Statistic Udp6InErrors. +# TYPE node_netstat_Udp6_InErrors untyped +node_netstat_Udp6_InErrors 0 +# HELP node_netstat_Udp6_NoPorts Statistic Udp6NoPorts. +# TYPE node_netstat_Udp6_NoPorts untyped +node_netstat_Udp6_NoPorts 0 +# HELP node_netstat_Udp6_OutDatagrams Statistic Udp6OutDatagrams. +# TYPE node_netstat_Udp6_OutDatagrams untyped +node_netstat_Udp6_OutDatagrams 0 +# HELP node_netstat_Udp6_RcvbufErrors Statistic Udp6RcvbufErrors. +# TYPE node_netstat_Udp6_RcvbufErrors untyped +node_netstat_Udp6_RcvbufErrors 9 +# HELP node_netstat_Udp6_SndbufErrors Statistic Udp6SndbufErrors. +# TYPE node_netstat_Udp6_SndbufErrors untyped +node_netstat_Udp6_SndbufErrors 8 +# HELP node_netstat_UdpLite6_InErrors Statistic UdpLite6InErrors. +# TYPE node_netstat_UdpLite6_InErrors untyped +node_netstat_UdpLite6_InErrors 0 +# HELP node_netstat_UdpLite_InErrors Statistic UdpLiteInErrors. +# TYPE node_netstat_UdpLite_InErrors untyped +node_netstat_UdpLite_InErrors 0 +# HELP node_netstat_Udp_InDatagrams Statistic UdpInDatagrams. +# TYPE node_netstat_Udp_InDatagrams untyped +node_netstat_Udp_InDatagrams 88542 +# HELP node_netstat_Udp_InErrors Statistic UdpInErrors. +# TYPE node_netstat_Udp_InErrors untyped +node_netstat_Udp_InErrors 0 +# HELP node_netstat_Udp_NoPorts Statistic UdpNoPorts. +# TYPE node_netstat_Udp_NoPorts untyped +node_netstat_Udp_NoPorts 120 +# HELP node_netstat_Udp_OutDatagrams Statistic UdpOutDatagrams. +# TYPE node_netstat_Udp_OutDatagrams untyped +node_netstat_Udp_OutDatagrams 53028 +# HELP node_netstat_Udp_RcvbufErrors Statistic UdpRcvbufErrors. 
+# TYPE node_netstat_Udp_RcvbufErrors untyped +node_netstat_Udp_RcvbufErrors 9 +# HELP node_netstat_Udp_SndbufErrors Statistic UdpSndbufErrors. +# TYPE node_netstat_Udp_SndbufErrors untyped +node_netstat_Udp_SndbufErrors 8 +# HELP node_network_address_assign_type address_assign_type value of /sys/class/net/. +# TYPE node_network_address_assign_type gauge +node_network_address_assign_type{device="eth0"} 3 +# HELP node_network_carrier carrier value of /sys/class/net/. +# TYPE node_network_carrier gauge +node_network_carrier{device="eth0"} 1 +# HELP node_network_carrier_changes_total carrier_changes_total value of /sys/class/net/. +# TYPE node_network_carrier_changes_total counter +node_network_carrier_changes_total{device="eth0"} 2 +# HELP node_network_carrier_down_changes_total carrier_down_changes_total value of /sys/class/net/. +# TYPE node_network_carrier_down_changes_total counter +node_network_carrier_down_changes_total{device="eth0"} 1 +# HELP node_network_carrier_up_changes_total carrier_up_changes_total value of /sys/class/net/. +# TYPE node_network_carrier_up_changes_total counter +node_network_carrier_up_changes_total{device="eth0"} 1 +# HELP node_network_device_id device_id value of /sys/class/net/. +# TYPE node_network_device_id gauge +node_network_device_id{device="eth0"} 32 +# HELP node_network_dormant dormant value of /sys/class/net/. +# TYPE node_network_dormant gauge +node_network_dormant{device="eth0"} 1 +# HELP node_network_flags flags value of /sys/class/net/. +# TYPE node_network_flags gauge +node_network_flags{device="eth0"} 4867 +# HELP node_network_iface_id iface_id value of /sys/class/net/. +# TYPE node_network_iface_id gauge +node_network_iface_id{device="eth0"} 2 +# HELP node_network_iface_link iface_link value of /sys/class/net/. +# TYPE node_network_iface_link gauge +node_network_iface_link{device="eth0"} 2 +# HELP node_network_iface_link_mode iface_link_mode value of /sys/class/net/. 
+# TYPE node_network_iface_link_mode gauge +node_network_iface_link_mode{device="eth0"} 1 +# HELP node_network_info Non-numeric data from /sys/class/net/, value is always 1. +# TYPE node_network_info gauge +node_network_info{address="01:01:01:01:01:01",broadcast="ff:ff:ff:ff:ff:ff",device="eth0",duplex="full",ifalias="",operstate="up"} 1 +# HELP node_network_mtu_bytes mtu_bytes value of /sys/class/net/. +# TYPE node_network_mtu_bytes gauge +node_network_mtu_bytes{device="eth0"} 1500 +# HELP node_network_name_assign_type name_assign_type value of /sys/class/net/. +# TYPE node_network_name_assign_type gauge +node_network_name_assign_type{device="eth0"} 2 +# HELP node_network_net_dev_group net_dev_group value of /sys/class/net/. +# TYPE node_network_net_dev_group gauge +node_network_net_dev_group{device="eth0"} 0 +# HELP node_network_protocol_type protocol_type value of /sys/class/net/. +# TYPE node_network_protocol_type gauge +node_network_protocol_type{device="eth0"} 1 +# HELP node_network_receive_bytes_total Network device statistic receive_bytes. +# TYPE node_network_receive_bytes_total counter +node_network_receive_bytes_total{device="docker0"} 6.4910168e+07 +node_network_receive_bytes_total{device="eth0"} 6.8210035552e+10 +node_network_receive_bytes_total{device="flannel.1"} 1.8144009813e+10 +node_network_receive_bytes_total{device="ibr10:30"} 0 +node_network_receive_bytes_total{device="lo"} 4.35303245e+08 +node_network_receive_bytes_total{device="lxcbr0"} 0 +node_network_receive_bytes_total{device="tun0"} 1888 +node_network_receive_bytes_total{device="veth4B09XN"} 648 +node_network_receive_bytes_total{device="wlan0"} 1.0437182923e+10 +node_network_receive_bytes_total{device="💩0"} 5.7750104e+07 +# HELP node_network_receive_compressed_total Network device statistic receive_compressed. 
+# TYPE node_network_receive_compressed_total counter +node_network_receive_compressed_total{device="docker0"} 0 +node_network_receive_compressed_total{device="eth0"} 0 +node_network_receive_compressed_total{device="flannel.1"} 0 +node_network_receive_compressed_total{device="ibr10:30"} 0 +node_network_receive_compressed_total{device="lo"} 0 +node_network_receive_compressed_total{device="lxcbr0"} 0 +node_network_receive_compressed_total{device="tun0"} 0 +node_network_receive_compressed_total{device="veth4B09XN"} 0 +node_network_receive_compressed_total{device="wlan0"} 0 +node_network_receive_compressed_total{device="💩0"} 0 +# HELP node_network_receive_drop_total Network device statistic receive_drop. +# TYPE node_network_receive_drop_total counter +node_network_receive_drop_total{device="docker0"} 0 +node_network_receive_drop_total{device="eth0"} 0 +node_network_receive_drop_total{device="flannel.1"} 0 +node_network_receive_drop_total{device="ibr10:30"} 0 +node_network_receive_drop_total{device="lo"} 0 +node_network_receive_drop_total{device="lxcbr0"} 0 +node_network_receive_drop_total{device="tun0"} 0 +node_network_receive_drop_total{device="veth4B09XN"} 0 +node_network_receive_drop_total{device="wlan0"} 0 +node_network_receive_drop_total{device="💩0"} 0 +# HELP node_network_receive_errs_total Network device statistic receive_errs. +# TYPE node_network_receive_errs_total counter +node_network_receive_errs_total{device="docker0"} 0 +node_network_receive_errs_total{device="eth0"} 0 +node_network_receive_errs_total{device="flannel.1"} 0 +node_network_receive_errs_total{device="ibr10:30"} 0 +node_network_receive_errs_total{device="lo"} 0 +node_network_receive_errs_total{device="lxcbr0"} 0 +node_network_receive_errs_total{device="tun0"} 0 +node_network_receive_errs_total{device="veth4B09XN"} 0 +node_network_receive_errs_total{device="wlan0"} 0 +node_network_receive_errs_total{device="💩0"} 0 +# HELP node_network_receive_fifo_total Network device statistic receive_fifo. 
+# TYPE node_network_receive_fifo_total counter +node_network_receive_fifo_total{device="docker0"} 0 +node_network_receive_fifo_total{device="eth0"} 0 +node_network_receive_fifo_total{device="flannel.1"} 0 +node_network_receive_fifo_total{device="ibr10:30"} 0 +node_network_receive_fifo_total{device="lo"} 0 +node_network_receive_fifo_total{device="lxcbr0"} 0 +node_network_receive_fifo_total{device="tun0"} 0 +node_network_receive_fifo_total{device="veth4B09XN"} 0 +node_network_receive_fifo_total{device="wlan0"} 0 +node_network_receive_fifo_total{device="💩0"} 0 +# HELP node_network_receive_frame_total Network device statistic receive_frame. +# TYPE node_network_receive_frame_total counter +node_network_receive_frame_total{device="docker0"} 0 +node_network_receive_frame_total{device="eth0"} 0 +node_network_receive_frame_total{device="flannel.1"} 0 +node_network_receive_frame_total{device="ibr10:30"} 0 +node_network_receive_frame_total{device="lo"} 0 +node_network_receive_frame_total{device="lxcbr0"} 0 +node_network_receive_frame_total{device="tun0"} 0 +node_network_receive_frame_total{device="veth4B09XN"} 0 +node_network_receive_frame_total{device="wlan0"} 0 +node_network_receive_frame_total{device="💩0"} 0 +# HELP node_network_receive_multicast_total Network device statistic receive_multicast. 
+# TYPE node_network_receive_multicast_total counter +node_network_receive_multicast_total{device="docker0"} 0 +node_network_receive_multicast_total{device="eth0"} 0 +node_network_receive_multicast_total{device="flannel.1"} 0 +node_network_receive_multicast_total{device="ibr10:30"} 0 +node_network_receive_multicast_total{device="lo"} 0 +node_network_receive_multicast_total{device="lxcbr0"} 0 +node_network_receive_multicast_total{device="tun0"} 0 +node_network_receive_multicast_total{device="veth4B09XN"} 0 +node_network_receive_multicast_total{device="wlan0"} 0 +node_network_receive_multicast_total{device="💩0"} 72 +# HELP node_network_receive_packets_total Network device statistic receive_packets. +# TYPE node_network_receive_packets_total counter +node_network_receive_packets_total{device="docker0"} 1.065585e+06 +node_network_receive_packets_total{device="eth0"} 5.20993275e+08 +node_network_receive_packets_total{device="flannel.1"} 2.28499337e+08 +node_network_receive_packets_total{device="ibr10:30"} 0 +node_network_receive_packets_total{device="lo"} 1.832522e+06 +node_network_receive_packets_total{device="lxcbr0"} 0 +node_network_receive_packets_total{device="tun0"} 24 +node_network_receive_packets_total{device="veth4B09XN"} 8 +node_network_receive_packets_total{device="wlan0"} 1.3899359e+07 +node_network_receive_packets_total{device="💩0"} 105557 +# HELP node_network_speed_bytes speed_bytes value of /sys/class/net/. +# TYPE node_network_speed_bytes gauge +node_network_speed_bytes{device="eth0"} 1.25e+08 +# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes. 
+# TYPE node_network_transmit_bytes_total counter +node_network_transmit_bytes_total{device="docker0"} 2.681662018e+09 +node_network_transmit_bytes_total{device="eth0"} 9.315587528e+09 +node_network_transmit_bytes_total{device="flannel.1"} 2.0758990068e+10 +node_network_transmit_bytes_total{device="ibr10:30"} 0 +node_network_transmit_bytes_total{device="lo"} 4.35303245e+08 +node_network_transmit_bytes_total{device="lxcbr0"} 2.630299e+06 +node_network_transmit_bytes_total{device="tun0"} 67120 +node_network_transmit_bytes_total{device="veth4B09XN"} 1.943284e+06 +node_network_transmit_bytes_total{device="wlan0"} 2.85164936e+09 +node_network_transmit_bytes_total{device="💩0"} 4.04570255e+08 +# HELP node_network_transmit_carrier_total Network device statistic transmit_carrier. +# TYPE node_network_transmit_carrier_total counter +node_network_transmit_carrier_total{device="docker0"} 0 +node_network_transmit_carrier_total{device="eth0"} 0 +node_network_transmit_carrier_total{device="flannel.1"} 0 +node_network_transmit_carrier_total{device="ibr10:30"} 0 +node_network_transmit_carrier_total{device="lo"} 0 +node_network_transmit_carrier_total{device="lxcbr0"} 0 +node_network_transmit_carrier_total{device="tun0"} 0 +node_network_transmit_carrier_total{device="veth4B09XN"} 0 +node_network_transmit_carrier_total{device="wlan0"} 0 +node_network_transmit_carrier_total{device="💩0"} 0 +# HELP node_network_transmit_colls_total Network device statistic transmit_colls. 
+# TYPE node_network_transmit_colls_total counter +node_network_transmit_colls_total{device="docker0"} 0 +node_network_transmit_colls_total{device="eth0"} 0 +node_network_transmit_colls_total{device="flannel.1"} 0 +node_network_transmit_colls_total{device="ibr10:30"} 0 +node_network_transmit_colls_total{device="lo"} 0 +node_network_transmit_colls_total{device="lxcbr0"} 0 +node_network_transmit_colls_total{device="tun0"} 0 +node_network_transmit_colls_total{device="veth4B09XN"} 0 +node_network_transmit_colls_total{device="wlan0"} 0 +node_network_transmit_colls_total{device="💩0"} 0 +# HELP node_network_transmit_compressed_total Network device statistic transmit_compressed. +# TYPE node_network_transmit_compressed_total counter +node_network_transmit_compressed_total{device="docker0"} 0 +node_network_transmit_compressed_total{device="eth0"} 0 +node_network_transmit_compressed_total{device="flannel.1"} 0 +node_network_transmit_compressed_total{device="ibr10:30"} 0 +node_network_transmit_compressed_total{device="lo"} 0 +node_network_transmit_compressed_total{device="lxcbr0"} 0 +node_network_transmit_compressed_total{device="tun0"} 0 +node_network_transmit_compressed_total{device="veth4B09XN"} 0 +node_network_transmit_compressed_total{device="wlan0"} 0 +node_network_transmit_compressed_total{device="💩0"} 0 +# HELP node_network_transmit_drop_total Network device statistic transmit_drop. 
+# TYPE node_network_transmit_drop_total counter +node_network_transmit_drop_total{device="docker0"} 0 +node_network_transmit_drop_total{device="eth0"} 0 +node_network_transmit_drop_total{device="flannel.1"} 64 +node_network_transmit_drop_total{device="ibr10:30"} 0 +node_network_transmit_drop_total{device="lo"} 0 +node_network_transmit_drop_total{device="lxcbr0"} 0 +node_network_transmit_drop_total{device="tun0"} 0 +node_network_transmit_drop_total{device="veth4B09XN"} 0 +node_network_transmit_drop_total{device="wlan0"} 0 +node_network_transmit_drop_total{device="💩0"} 0 +# HELP node_network_transmit_errs_total Network device statistic transmit_errs. +# TYPE node_network_transmit_errs_total counter +node_network_transmit_errs_total{device="docker0"} 0 +node_network_transmit_errs_total{device="eth0"} 0 +node_network_transmit_errs_total{device="flannel.1"} 0 +node_network_transmit_errs_total{device="ibr10:30"} 0 +node_network_transmit_errs_total{device="lo"} 0 +node_network_transmit_errs_total{device="lxcbr0"} 0 +node_network_transmit_errs_total{device="tun0"} 0 +node_network_transmit_errs_total{device="veth4B09XN"} 0 +node_network_transmit_errs_total{device="wlan0"} 0 +node_network_transmit_errs_total{device="💩0"} 0 +# HELP node_network_transmit_fifo_total Network device statistic transmit_fifo. +# TYPE node_network_transmit_fifo_total counter +node_network_transmit_fifo_total{device="docker0"} 0 +node_network_transmit_fifo_total{device="eth0"} 0 +node_network_transmit_fifo_total{device="flannel.1"} 0 +node_network_transmit_fifo_total{device="ibr10:30"} 0 +node_network_transmit_fifo_total{device="lo"} 0 +node_network_transmit_fifo_total{device="lxcbr0"} 0 +node_network_transmit_fifo_total{device="tun0"} 0 +node_network_transmit_fifo_total{device="veth4B09XN"} 0 +node_network_transmit_fifo_total{device="wlan0"} 0 +node_network_transmit_fifo_total{device="💩0"} 0 +# HELP node_network_transmit_packets_total Network device statistic transmit_packets. 
+# TYPE node_network_transmit_packets_total counter +node_network_transmit_packets_total{device="docker0"} 1.929779e+06 +node_network_transmit_packets_total{device="eth0"} 4.3451486e+07 +node_network_transmit_packets_total{device="flannel.1"} 2.58369223e+08 +node_network_transmit_packets_total{device="ibr10:30"} 0 +node_network_transmit_packets_total{device="lo"} 1.832522e+06 +node_network_transmit_packets_total{device="lxcbr0"} 28339 +node_network_transmit_packets_total{device="tun0"} 934 +node_network_transmit_packets_total{device="veth4B09XN"} 10640 +node_network_transmit_packets_total{device="wlan0"} 1.17262e+07 +node_network_transmit_packets_total{device="💩0"} 304261 +# HELP node_network_transmit_queue_length transmit_queue_length value of /sys/class/net/. +# TYPE node_network_transmit_queue_length gauge +node_network_transmit_queue_length{device="eth0"} 1000 +# HELP node_network_up Value is 1 if operstate is 'up', 0 otherwise. +# TYPE node_network_up gauge +node_network_up{device="eth0"} 1 +# HELP node_nf_conntrack_entries Number of currently allocated flow entries for connection tracking. +# TYPE node_nf_conntrack_entries gauge +node_nf_conntrack_entries 123 +# HELP node_nf_conntrack_entries_limit Maximum size of connection tracking table. +# TYPE node_nf_conntrack_entries_limit gauge +node_nf_conntrack_entries_limit 65536 +# HELP node_nfs_connections_total Total number of NFSd TCP connections. +# TYPE node_nfs_connections_total counter +node_nfs_connections_total 45 +# HELP node_nfs_packets_total Total NFSd network packets (sent+received) by protocol type. +# TYPE node_nfs_packets_total counter +node_nfs_packets_total{protocol="tcp"} 69 +node_nfs_packets_total{protocol="udp"} 70 +# HELP node_nfs_requests_total Number of NFS procedures invoked. 
+# TYPE node_nfs_requests_total counter +node_nfs_requests_total{method="Access",proto="3"} 1.17661341e+08 +node_nfs_requests_total{method="Access",proto="4"} 58 +node_nfs_requests_total{method="Allocate",proto="4"} 0 +node_nfs_requests_total{method="BindConnToSession",proto="4"} 0 +node_nfs_requests_total{method="Clone",proto="4"} 0 +node_nfs_requests_total{method="Close",proto="4"} 28 +node_nfs_requests_total{method="Commit",proto="3"} 23729 +node_nfs_requests_total{method="Commit",proto="4"} 83 +node_nfs_requests_total{method="Create",proto="2"} 52 +node_nfs_requests_total{method="Create",proto="3"} 2.993289e+06 +node_nfs_requests_total{method="Create",proto="4"} 15 +node_nfs_requests_total{method="CreateSession",proto="4"} 32 +node_nfs_requests_total{method="DeAllocate",proto="4"} 0 +node_nfs_requests_total{method="DelegReturn",proto="4"} 97 +node_nfs_requests_total{method="DestroyClientID",proto="4"} 0 +node_nfs_requests_total{method="DestroySession",proto="4"} 67 +node_nfs_requests_total{method="ExchangeID",proto="4"} 58 +node_nfs_requests_total{method="FreeStateID",proto="4"} 0 +node_nfs_requests_total{method="FsInfo",proto="3"} 2 +node_nfs_requests_total{method="FsInfo",proto="4"} 68 +node_nfs_requests_total{method="FsLocations",proto="4"} 32 +node_nfs_requests_total{method="FsStat",proto="2"} 82 +node_nfs_requests_total{method="FsStat",proto="3"} 13332 +node_nfs_requests_total{method="FsidPresent",proto="4"} 11 +node_nfs_requests_total{method="GetACL",proto="4"} 36 +node_nfs_requests_total{method="GetAttr",proto="2"} 57 +node_nfs_requests_total{method="GetAttr",proto="3"} 1.061909262e+09 +node_nfs_requests_total{method="GetDeviceInfo",proto="4"} 1 +node_nfs_requests_total{method="GetDeviceList",proto="4"} 0 +node_nfs_requests_total{method="GetLeaseTime",proto="4"} 28 +node_nfs_requests_total{method="Getattr",proto="4"} 88 +node_nfs_requests_total{method="LayoutCommit",proto="4"} 26 +node_nfs_requests_total{method="LayoutGet",proto="4"} 90 
+node_nfs_requests_total{method="LayoutReturn",proto="4"} 0 +node_nfs_requests_total{method="LayoutStats",proto="4"} 0 +node_nfs_requests_total{method="Link",proto="2"} 17 +node_nfs_requests_total{method="Link",proto="3"} 0 +node_nfs_requests_total{method="Link",proto="4"} 21 +node_nfs_requests_total{method="Lock",proto="4"} 39 +node_nfs_requests_total{method="Lockt",proto="4"} 68 +node_nfs_requests_total{method="Locku",proto="4"} 59 +node_nfs_requests_total{method="Lookup",proto="2"} 71 +node_nfs_requests_total{method="Lookup",proto="3"} 4.077635e+06 +node_nfs_requests_total{method="Lookup",proto="4"} 29 +node_nfs_requests_total{method="LookupRoot",proto="4"} 74 +node_nfs_requests_total{method="MkDir",proto="2"} 50 +node_nfs_requests_total{method="MkDir",proto="3"} 590 +node_nfs_requests_total{method="MkNod",proto="3"} 0 +node_nfs_requests_total{method="Null",proto="2"} 16 +node_nfs_requests_total{method="Null",proto="3"} 0 +node_nfs_requests_total{method="Null",proto="4"} 98 +node_nfs_requests_total{method="Open",proto="4"} 85 +node_nfs_requests_total{method="OpenConfirm",proto="4"} 23 +node_nfs_requests_total{method="OpenDowngrade",proto="4"} 1 +node_nfs_requests_total{method="OpenNoattr",proto="4"} 24 +node_nfs_requests_total{method="PathConf",proto="3"} 1 +node_nfs_requests_total{method="Pathconf",proto="4"} 53 +node_nfs_requests_total{method="Read",proto="2"} 45 +node_nfs_requests_total{method="Read",proto="3"} 2.9391916e+07 +node_nfs_requests_total{method="Read",proto="4"} 51 +node_nfs_requests_total{method="ReadDir",proto="2"} 70 +node_nfs_requests_total{method="ReadDir",proto="3"} 3983 +node_nfs_requests_total{method="ReadDir",proto="4"} 66 +node_nfs_requests_total{method="ReadDirPlus",proto="3"} 92385 +node_nfs_requests_total{method="ReadLink",proto="2"} 73 +node_nfs_requests_total{method="ReadLink",proto="3"} 5 +node_nfs_requests_total{method="ReadLink",proto="4"} 54 +node_nfs_requests_total{method="ReclaimComplete",proto="4"} 35 
+node_nfs_requests_total{method="ReleaseLockowner",proto="4"} 85 +node_nfs_requests_total{method="Remove",proto="2"} 83 +node_nfs_requests_total{method="Remove",proto="3"} 7815 +node_nfs_requests_total{method="Remove",proto="4"} 69 +node_nfs_requests_total{method="Rename",proto="2"} 61 +node_nfs_requests_total{method="Rename",proto="3"} 1130 +node_nfs_requests_total{method="Rename",proto="4"} 96 +node_nfs_requests_total{method="Renew",proto="4"} 83 +node_nfs_requests_total{method="RmDir",proto="2"} 23 +node_nfs_requests_total{method="RmDir",proto="3"} 15 +node_nfs_requests_total{method="Root",proto="2"} 52 +node_nfs_requests_total{method="Secinfo",proto="4"} 81 +node_nfs_requests_total{method="SecinfoNoName",proto="4"} 0 +node_nfs_requests_total{method="Seek",proto="4"} 0 +node_nfs_requests_total{method="Sequence",proto="4"} 13 +node_nfs_requests_total{method="ServerCaps",proto="4"} 56 +node_nfs_requests_total{method="SetACL",proto="4"} 49 +node_nfs_requests_total{method="SetAttr",proto="2"} 74 +node_nfs_requests_total{method="SetAttr",proto="3"} 48906 +node_nfs_requests_total{method="SetClientID",proto="4"} 12 +node_nfs_requests_total{method="SetClientIDConfirm",proto="4"} 84 +node_nfs_requests_total{method="Setattr",proto="4"} 73 +node_nfs_requests_total{method="StatFs",proto="4"} 86 +node_nfs_requests_total{method="SymLink",proto="2"} 53 +node_nfs_requests_total{method="SymLink",proto="3"} 0 +node_nfs_requests_total{method="Symlink",proto="4"} 84 +node_nfs_requests_total{method="TestStateID",proto="4"} 0 +node_nfs_requests_total{method="WrCache",proto="2"} 86 +node_nfs_requests_total{method="Write",proto="2"} 0 +node_nfs_requests_total{method="Write",proto="3"} 2.570425e+06 +node_nfs_requests_total{method="Write",proto="4"} 54 +# HELP node_nfs_rpc_authentication_refreshes_total Number of RPC authentication refreshes performed. 
+# TYPE node_nfs_rpc_authentication_refreshes_total counter +node_nfs_rpc_authentication_refreshes_total 1.218815394e+09 +# HELP node_nfs_rpc_retransmissions_total Number of RPC transmissions performed. +# TYPE node_nfs_rpc_retransmissions_total counter +node_nfs_rpc_retransmissions_total 374636 +# HELP node_nfs_rpcs_total Total number of RPCs performed. +# TYPE node_nfs_rpcs_total counter +node_nfs_rpcs_total 1.218785755e+09 +# HELP node_nfsd_connections_total Total number of NFSd TCP connections. +# TYPE node_nfsd_connections_total counter +node_nfsd_connections_total 1 +# HELP node_nfsd_disk_bytes_read_total Total NFSd bytes read. +# TYPE node_nfsd_disk_bytes_read_total counter +node_nfsd_disk_bytes_read_total 1.572864e+08 +# HELP node_nfsd_disk_bytes_written_total Total NFSd bytes written. +# TYPE node_nfsd_disk_bytes_written_total counter +node_nfsd_disk_bytes_written_total 72864 +# HELP node_nfsd_file_handles_stale_total Total number of NFSd stale file handles +# TYPE node_nfsd_file_handles_stale_total counter +node_nfsd_file_handles_stale_total 0 +# HELP node_nfsd_packets_total Total NFSd network packets (sent+received) by protocol type. +# TYPE node_nfsd_packets_total counter +node_nfsd_packets_total{proto="tcp"} 917 +node_nfsd_packets_total{proto="udp"} 55 +# HELP node_nfsd_read_ahead_cache_not_found_total Total number of NFSd read ahead cache not found. +# TYPE node_nfsd_read_ahead_cache_not_found_total counter +node_nfsd_read_ahead_cache_not_found_total 0 +# HELP node_nfsd_read_ahead_cache_size_blocks How large the read ahead cache is in blocks. +# TYPE node_nfsd_read_ahead_cache_size_blocks gauge +node_nfsd_read_ahead_cache_size_blocks 32 +# HELP node_nfsd_reply_cache_hits_total Total number of NFSd Reply Cache hits (client lost server response). +# TYPE node_nfsd_reply_cache_hits_total counter +node_nfsd_reply_cache_hits_total 0 +# HELP node_nfsd_reply_cache_misses_total Total number of NFSd Reply Cache an operation that requires caching (idempotent). 
+# TYPE node_nfsd_reply_cache_misses_total counter +node_nfsd_reply_cache_misses_total 6 +# HELP node_nfsd_reply_cache_nocache_total Total number of NFSd Reply Cache non-idempotent operations (rename/delete/…). +# TYPE node_nfsd_reply_cache_nocache_total counter +node_nfsd_reply_cache_nocache_total 18622 +# HELP node_nfsd_requests_total Total number NFSd Requests by method and protocol. +# TYPE node_nfsd_requests_total counter +node_nfsd_requests_total{method="Access",proto="3"} 111 +node_nfsd_requests_total{method="Access",proto="4"} 1098 +node_nfsd_requests_total{method="Close",proto="4"} 2 +node_nfsd_requests_total{method="Commit",proto="3"} 0 +node_nfsd_requests_total{method="Commit",proto="4"} 0 +node_nfsd_requests_total{method="Create",proto="2"} 0 +node_nfsd_requests_total{method="Create",proto="3"} 0 +node_nfsd_requests_total{method="Create",proto="4"} 0 +node_nfsd_requests_total{method="DelegPurge",proto="4"} 0 +node_nfsd_requests_total{method="DelegReturn",proto="4"} 0 +node_nfsd_requests_total{method="FsInfo",proto="3"} 2 +node_nfsd_requests_total{method="FsStat",proto="2"} 2 +node_nfsd_requests_total{method="FsStat",proto="3"} 0 +node_nfsd_requests_total{method="GetAttr",proto="2"} 69 +node_nfsd_requests_total{method="GetAttr",proto="3"} 112 +node_nfsd_requests_total{method="GetAttr",proto="4"} 8179 +node_nfsd_requests_total{method="GetFH",proto="4"} 5896 +node_nfsd_requests_total{method="Link",proto="2"} 0 +node_nfsd_requests_total{method="Link",proto="3"} 0 +node_nfsd_requests_total{method="Link",proto="4"} 0 +node_nfsd_requests_total{method="Lock",proto="4"} 0 +node_nfsd_requests_total{method="Lockt",proto="4"} 0 +node_nfsd_requests_total{method="Locku",proto="4"} 0 +node_nfsd_requests_total{method="Lookup",proto="2"} 4410 +node_nfsd_requests_total{method="Lookup",proto="3"} 2719 +node_nfsd_requests_total{method="Lookup",proto="4"} 5900 +node_nfsd_requests_total{method="LookupRoot",proto="4"} 0 +node_nfsd_requests_total{method="MkDir",proto="2"} 0 
+node_nfsd_requests_total{method="MkDir",proto="3"} 0 +node_nfsd_requests_total{method="MkNod",proto="3"} 0 +node_nfsd_requests_total{method="Nverify",proto="4"} 0 +node_nfsd_requests_total{method="Open",proto="4"} 2 +node_nfsd_requests_total{method="OpenAttr",proto="4"} 0 +node_nfsd_requests_total{method="OpenConfirm",proto="4"} 2 +node_nfsd_requests_total{method="OpenDgrd",proto="4"} 0 +node_nfsd_requests_total{method="PathConf",proto="3"} 1 +node_nfsd_requests_total{method="PutFH",proto="4"} 9609 +node_nfsd_requests_total{method="Read",proto="2"} 0 +node_nfsd_requests_total{method="Read",proto="3"} 0 +node_nfsd_requests_total{method="Read",proto="4"} 150 +node_nfsd_requests_total{method="ReadDir",proto="2"} 99 +node_nfsd_requests_total{method="ReadDir",proto="3"} 27 +node_nfsd_requests_total{method="ReadDir",proto="4"} 1272 +node_nfsd_requests_total{method="ReadDirPlus",proto="3"} 216 +node_nfsd_requests_total{method="ReadLink",proto="2"} 0 +node_nfsd_requests_total{method="ReadLink",proto="3"} 0 +node_nfsd_requests_total{method="ReadLink",proto="4"} 0 +node_nfsd_requests_total{method="RelLockOwner",proto="4"} 0 +node_nfsd_requests_total{method="Remove",proto="2"} 0 +node_nfsd_requests_total{method="Remove",proto="3"} 0 +node_nfsd_requests_total{method="Remove",proto="4"} 0 +node_nfsd_requests_total{method="Rename",proto="2"} 0 +node_nfsd_requests_total{method="Rename",proto="3"} 0 +node_nfsd_requests_total{method="Rename",proto="4"} 0 +node_nfsd_requests_total{method="Renew",proto="4"} 1236 +node_nfsd_requests_total{method="RestoreFH",proto="4"} 0 +node_nfsd_requests_total{method="RmDir",proto="2"} 0 +node_nfsd_requests_total{method="RmDir",proto="3"} 0 +node_nfsd_requests_total{method="Root",proto="2"} 0 +node_nfsd_requests_total{method="SaveFH",proto="4"} 0 +node_nfsd_requests_total{method="SecInfo",proto="4"} 0 +node_nfsd_requests_total{method="SetAttr",proto="2"} 0 +node_nfsd_requests_total{method="SetAttr",proto="3"} 0 
+node_nfsd_requests_total{method="SetAttr",proto="4"} 0 +node_nfsd_requests_total{method="SymLink",proto="2"} 0 +node_nfsd_requests_total{method="SymLink",proto="3"} 0 +node_nfsd_requests_total{method="Verify",proto="4"} 3 +node_nfsd_requests_total{method="WrCache",proto="2"} 0 +node_nfsd_requests_total{method="Write",proto="2"} 0 +node_nfsd_requests_total{method="Write",proto="3"} 0 +node_nfsd_requests_total{method="Write",proto="4"} 3 +# HELP node_nfsd_rpc_errors_total Total number of NFSd RPC errors by error type. +# TYPE node_nfsd_rpc_errors_total counter +node_nfsd_rpc_errors_total{error="auth"} 2 +node_nfsd_rpc_errors_total{error="cInt"} 0 +node_nfsd_rpc_errors_total{error="fmt"} 1 +# HELP node_nfsd_server_rpcs_total Total number of NFSd RPCs. +# TYPE node_nfsd_server_rpcs_total counter +node_nfsd_server_rpcs_total 18628 +# HELP node_nfsd_server_threads Total number of NFSd kernel threads that are running. +# TYPE node_nfsd_server_threads gauge +node_nfsd_server_threads 8 +# HELP node_nvme_info Non-numeric data from /sys/class/nvme/, value is always 1. +# TYPE node_nvme_info gauge +node_nvme_info{device="nvme0",firmware_revision="1B2QEXP7",model="Samsung SSD 970 PRO 512GB",serial="S680HF8N190894I",state="live"} 1 +# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id. +# TYPE node_os_info gauge +node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1 +# HELP node_os_version Metric containing the major.minor part of the OS version. +# TYPE node_os_version gauge +node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04 +# HELP node_power_supply_capacity capacity value of /sys/class/power_supply/. 
+# TYPE node_power_supply_capacity gauge +node_power_supply_capacity{power_supply="BAT0"} 81 +# HELP node_power_supply_cyclecount cyclecount value of /sys/class/power_supply/. +# TYPE node_power_supply_cyclecount gauge +node_power_supply_cyclecount{power_supply="BAT0"} 0 +# HELP node_power_supply_energy_full energy_full value of /sys/class/power_supply/. +# TYPE node_power_supply_energy_full gauge +node_power_supply_energy_full{power_supply="BAT0"} 4.507e+07 +# HELP node_power_supply_energy_full_design energy_full_design value of /sys/class/power_supply/. +# TYPE node_power_supply_energy_full_design gauge +node_power_supply_energy_full_design{power_supply="BAT0"} 4.752e+07 +# HELP node_power_supply_energy_now energy_now value of /sys/class/power_supply/. +# TYPE node_power_supply_energy_now gauge +node_power_supply_energy_now{power_supply="BAT0"} 3.658e+07 +# HELP node_power_supply_info info of /sys/class/power_supply/. +# TYPE node_power_supply_info gauge +node_power_supply_info{power_supply="AC",type="Mains"} 1 +node_power_supply_info{capacity_level="Normal",manufacturer="LGC",model_name="LNV-45N1",power_supply="BAT0",serial_number="38109",status="Discharging",technology="Li-ion",type="Battery"} 1 +# HELP node_power_supply_online online value of /sys/class/power_supply/. +# TYPE node_power_supply_online gauge +node_power_supply_online{power_supply="AC"} 0 +# HELP node_power_supply_power_now power_now value of /sys/class/power_supply/. +# TYPE node_power_supply_power_now gauge +node_power_supply_power_now{power_supply="BAT0"} 5.002e+06 +# HELP node_power_supply_present present value of /sys/class/power_supply/. +# TYPE node_power_supply_present gauge +node_power_supply_present{power_supply="BAT0"} 1 +# HELP node_power_supply_voltage_min_design voltage_min_design value of /sys/class/power_supply/. 
+# TYPE node_power_supply_voltage_min_design gauge +node_power_supply_voltage_min_design{power_supply="BAT0"} 1.08e+07 +# HELP node_power_supply_voltage_now voltage_now value of /sys/class/power_supply/. +# TYPE node_power_supply_voltage_now gauge +node_power_supply_voltage_now{power_supply="BAT0"} 1.166e+07 +# HELP node_pressure_cpu_waiting_seconds_total Total time in seconds that processes have waited for CPU time +# TYPE node_pressure_cpu_waiting_seconds_total counter +node_pressure_cpu_waiting_seconds_total 14.036781000000001 +# HELP node_pressure_io_stalled_seconds_total Total time in seconds no process could make progress due to IO congestion +# TYPE node_pressure_io_stalled_seconds_total counter +node_pressure_io_stalled_seconds_total 159.229614 +# HELP node_pressure_io_waiting_seconds_total Total time in seconds that processes have waited due to IO congestion +# TYPE node_pressure_io_waiting_seconds_total counter +node_pressure_io_waiting_seconds_total 159.886802 +# HELP node_pressure_memory_stalled_seconds_total Total time in seconds no process could make progress due to memory congestion +# TYPE node_pressure_memory_stalled_seconds_total counter +node_pressure_memory_stalled_seconds_total 0 +# HELP node_pressure_memory_waiting_seconds_total Total time in seconds that processes have waited for memory +# TYPE node_pressure_memory_waiting_seconds_total counter +node_pressure_memory_waiting_seconds_total 0 +# HELP node_processes_max_processes Number of max PIDs limit +# TYPE node_processes_max_processes gauge +node_processes_max_processes 123 +# HELP node_processes_max_threads Limit of threads in the system +# TYPE node_processes_max_threads gauge +node_processes_max_threads 7801 +# HELP node_processes_pids Number of PIDs +# TYPE node_processes_pids gauge +node_processes_pids 1 +# HELP node_processes_state Number of processes in each state. 
+# TYPE node_processes_state gauge +node_processes_state{state="S"} 1 +# HELP node_processes_threads Allocated threads in system +# TYPE node_processes_threads gauge +node_processes_threads 1 +# HELP node_procs_blocked Number of processes blocked waiting for I/O to complete. +# TYPE node_procs_blocked gauge +node_procs_blocked 0 +# HELP node_procs_running Number of processes in runnable state. +# TYPE node_procs_running gauge +node_procs_running 2 +# HELP node_qdisc_backlog Number of bytes currently in queue to be sent. +# TYPE node_qdisc_backlog gauge +node_qdisc_backlog{device="eth0",kind="pfifo_fast"} 0 +node_qdisc_backlog{device="wlan0",kind="fq"} 0 +# HELP node_qdisc_bytes_total Number of bytes sent. +# TYPE node_qdisc_bytes_total counter +node_qdisc_bytes_total{device="eth0",kind="pfifo_fast"} 83 +node_qdisc_bytes_total{device="wlan0",kind="fq"} 42 +# HELP node_qdisc_current_queue_length Number of packets currently in queue to be sent. +# TYPE node_qdisc_current_queue_length gauge +node_qdisc_current_queue_length{device="eth0",kind="pfifo_fast"} 0 +node_qdisc_current_queue_length{device="wlan0",kind="fq"} 0 +# HELP node_qdisc_drops_total Number of packets dropped. +# TYPE node_qdisc_drops_total counter +node_qdisc_drops_total{device="eth0",kind="pfifo_fast"} 0 +node_qdisc_drops_total{device="wlan0",kind="fq"} 1 +# HELP node_qdisc_overlimits_total Number of overlimit packets. +# TYPE node_qdisc_overlimits_total counter +node_qdisc_overlimits_total{device="eth0",kind="pfifo_fast"} 0 +node_qdisc_overlimits_total{device="wlan0",kind="fq"} 0 +# HELP node_qdisc_packets_total Number of packets sent. +# TYPE node_qdisc_packets_total counter +node_qdisc_packets_total{device="eth0",kind="pfifo_fast"} 83 +node_qdisc_packets_total{device="wlan0",kind="fq"} 42 +# HELP node_qdisc_requeues_total Number of packets dequeued, not transmitted, and requeued. 
+# TYPE node_qdisc_requeues_total counter +node_qdisc_requeues_total{device="eth0",kind="pfifo_fast"} 2 +node_qdisc_requeues_total{device="wlan0",kind="fq"} 1 +# HELP node_rapl_core_joules_total Current RAPL core value in joules +# TYPE node_rapl_core_joules_total counter +node_rapl_core_joules_total{index="0"} 118821.284256 +# HELP node_rapl_package_joules_total Current RAPL package value in joules +# TYPE node_rapl_package_joules_total counter +node_rapl_package_joules_total{index="0"} 240422.366267 +# HELP node_schedstat_running_seconds_total Number of seconds CPU spent running a process. +# TYPE node_schedstat_running_seconds_total counter +node_schedstat_running_seconds_total{cpu="0"} 2.045936778163039e+06 +node_schedstat_running_seconds_total{cpu="1"} 1.904686152592476e+06 +# HELP node_schedstat_timeslices_total Number of timeslices executed by CPU. +# TYPE node_schedstat_timeslices_total counter +node_schedstat_timeslices_total{cpu="0"} 4.767485306e+09 +node_schedstat_timeslices_total{cpu="1"} 5.145567945e+09 +# HELP node_schedstat_waiting_seconds_total Number of seconds spent by processing waiting for this CPU. +# TYPE node_schedstat_waiting_seconds_total counter +node_schedstat_waiting_seconds_total{cpu="0"} 343796.328169361 +node_schedstat_waiting_seconds_total{cpu="1"} 364107.263788241 +# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape. +# TYPE node_scrape_collector_duration_seconds gauge +# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded. 
+# TYPE node_scrape_collector_success gauge +node_scrape_collector_success{collector="arp"} 1 +node_scrape_collector_success{collector="bcache"} 1 +node_scrape_collector_success{collector="bonding"} 1 +node_scrape_collector_success{collector="buddyinfo"} 1 +node_scrape_collector_success{collector="conntrack"} 1 +node_scrape_collector_success{collector="cpu"} 1 +node_scrape_collector_success{collector="cpufreq"} 1 +node_scrape_collector_success{collector="diskstats"} 1 +node_scrape_collector_success{collector="dmi"} 1 +node_scrape_collector_success{collector="drbd"} 1 +node_scrape_collector_success{collector="edac"} 1 +node_scrape_collector_success{collector="entropy"} 1 +node_scrape_collector_success{collector="filefd"} 1 +node_scrape_collector_success{collector="hwmon"} 1 +node_scrape_collector_success{collector="infiniband"} 1 +node_scrape_collector_success{collector="interrupts"} 1 +node_scrape_collector_success{collector="ipvs"} 1 +node_scrape_collector_success{collector="ksmd"} 1 +node_scrape_collector_success{collector="loadavg"} 1 +node_scrape_collector_success{collector="mdadm"} 1 +node_scrape_collector_success{collector="meminfo"} 1 +node_scrape_collector_success{collector="meminfo_numa"} 1 +node_scrape_collector_success{collector="mountstats"} 1 +node_scrape_collector_success{collector="netclass"} 1 +node_scrape_collector_success{collector="netdev"} 1 +node_scrape_collector_success{collector="netstat"} 1 +node_scrape_collector_success{collector="nfs"} 1 +node_scrape_collector_success{collector="nfsd"} 1 +node_scrape_collector_success{collector="nvme"} 1 +node_scrape_collector_success{collector="os"} 1 +node_scrape_collector_success{collector="powersupplyclass"} 1 +node_scrape_collector_success{collector="pressure"} 1 +node_scrape_collector_success{collector="processes"} 1 +node_scrape_collector_success{collector="qdisc"} 1 +node_scrape_collector_success{collector="rapl"} 1 +node_scrape_collector_success{collector="schedstat"} 1 
+node_scrape_collector_success{collector="sockstat"} 1 +node_scrape_collector_success{collector="softnet"} 1 +node_scrape_collector_success{collector="stat"} 1 +node_scrape_collector_success{collector="textfile"} 1 +node_scrape_collector_success{collector="thermal_zone"} 1 +node_scrape_collector_success{collector="vmstat"} 1 +node_scrape_collector_success{collector="wifi"} 1 +node_scrape_collector_success{collector="xfs"} 1 +node_scrape_collector_success{collector="zfs"} 1 +# HELP node_sockstat_FRAG_inuse Number of FRAG sockets in state inuse. +# TYPE node_sockstat_FRAG_inuse gauge +node_sockstat_FRAG_inuse 0 +# HELP node_sockstat_FRAG_memory Number of FRAG sockets in state memory. +# TYPE node_sockstat_FRAG_memory gauge +node_sockstat_FRAG_memory 0 +# HELP node_sockstat_RAW_inuse Number of RAW sockets in state inuse. +# TYPE node_sockstat_RAW_inuse gauge +node_sockstat_RAW_inuse 0 +# HELP node_sockstat_TCP_alloc Number of TCP sockets in state alloc. +# TYPE node_sockstat_TCP_alloc gauge +node_sockstat_TCP_alloc 17 +# HELP node_sockstat_TCP_inuse Number of TCP sockets in state inuse. +# TYPE node_sockstat_TCP_inuse gauge +node_sockstat_TCP_inuse 4 +# HELP node_sockstat_TCP_mem Number of TCP sockets in state mem. +# TYPE node_sockstat_TCP_mem gauge +node_sockstat_TCP_mem 1 +# HELP node_sockstat_TCP_mem_bytes Number of TCP sockets in state mem_bytes. +# TYPE node_sockstat_TCP_mem_bytes gauge +node_sockstat_TCP_mem_bytes 65536 +# HELP node_sockstat_TCP_orphan Number of TCP sockets in state orphan. +# TYPE node_sockstat_TCP_orphan gauge +node_sockstat_TCP_orphan 0 +# HELP node_sockstat_TCP_tw Number of TCP sockets in state tw. +# TYPE node_sockstat_TCP_tw gauge +node_sockstat_TCP_tw 4 +# HELP node_sockstat_UDPLITE_inuse Number of UDPLITE sockets in state inuse. +# TYPE node_sockstat_UDPLITE_inuse gauge +node_sockstat_UDPLITE_inuse 0 +# HELP node_sockstat_UDP_inuse Number of UDP sockets in state inuse. 
+# TYPE node_sockstat_UDP_inuse gauge +node_sockstat_UDP_inuse 0 +# HELP node_sockstat_UDP_mem Number of UDP sockets in state mem. +# TYPE node_sockstat_UDP_mem gauge +node_sockstat_UDP_mem 0 +# HELP node_sockstat_UDP_mem_bytes Number of UDP sockets in state mem_bytes. +# TYPE node_sockstat_UDP_mem_bytes gauge +node_sockstat_UDP_mem_bytes 0 +# HELP node_sockstat_sockets_used Number of sockets sockets in state used. +# TYPE node_sockstat_sockets_used gauge +node_sockstat_sockets_used 229 +# HELP node_softnet_dropped_total Number of dropped packets +# TYPE node_softnet_dropped_total counter +node_softnet_dropped_total{cpu="0"} 0 +node_softnet_dropped_total{cpu="1"} 41 +node_softnet_dropped_total{cpu="2"} 0 +node_softnet_dropped_total{cpu="3"} 0 +# HELP node_softnet_processed_total Number of processed packets +# TYPE node_softnet_processed_total counter +node_softnet_processed_total{cpu="0"} 299641 +node_softnet_processed_total{cpu="1"} 916354 +node_softnet_processed_total{cpu="2"} 5.577791e+06 +node_softnet_processed_total{cpu="3"} 3.113785e+06 +# HELP node_softnet_times_squeezed_total Number of times processing packets ran out of quota +# TYPE node_softnet_times_squeezed_total counter +node_softnet_times_squeezed_total{cpu="0"} 1 +node_softnet_times_squeezed_total{cpu="1"} 10 +node_softnet_times_squeezed_total{cpu="2"} 85 +node_softnet_times_squeezed_total{cpu="3"} 50 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. +# TYPE node_textfile_mtime_seconds gauge +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 +# HELP node_thermal_zone_temp Zone temperature in Celsius +# TYPE node_thermal_zone_temp gauge +node_thermal_zone_temp{type="cpu-thermal",zone="0"} 12.376 +# HELP node_vmstat_oom_kill /proc/vmstat information field oom_kill. 
+# TYPE node_vmstat_oom_kill untyped +node_vmstat_oom_kill 0 +# HELP node_vmstat_pgfault /proc/vmstat information field pgfault. +# TYPE node_vmstat_pgfault untyped +node_vmstat_pgfault 2.320168809e+09 +# HELP node_vmstat_pgmajfault /proc/vmstat information field pgmajfault. +# TYPE node_vmstat_pgmajfault untyped +node_vmstat_pgmajfault 507162 +# HELP node_vmstat_pgpgin /proc/vmstat information field pgpgin. +# TYPE node_vmstat_pgpgin untyped +node_vmstat_pgpgin 7.344136e+06 +# HELP node_vmstat_pgpgout /proc/vmstat information field pgpgout. +# TYPE node_vmstat_pgpgout untyped +node_vmstat_pgpgout 1.541180581e+09 +# HELP node_vmstat_pswpin /proc/vmstat information field pswpin. +# TYPE node_vmstat_pswpin untyped +node_vmstat_pswpin 1476 +# HELP node_vmstat_pswpout /proc/vmstat information field pswpout. +# TYPE node_vmstat_pswpout untyped +node_vmstat_pswpout 35045 +# HELP node_wifi_interface_frequency_hertz The current frequency a WiFi interface is operating at, in hertz. +# TYPE node_wifi_interface_frequency_hertz gauge +node_wifi_interface_frequency_hertz{device="wlan0"} 2.412e+09 +node_wifi_interface_frequency_hertz{device="wlan1"} 2.412e+09 +# HELP node_wifi_station_beacon_loss_total The total number of times a station has detected a beacon loss. +# TYPE node_wifi_station_beacon_loss_total counter +node_wifi_station_beacon_loss_total{device="wlan0",mac_address="01:02:03:04:05:06"} 2 +node_wifi_station_beacon_loss_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 1 +# HELP node_wifi_station_connected_seconds_total The total number of seconds a station has been connected to an access point. +# TYPE node_wifi_station_connected_seconds_total counter +node_wifi_station_connected_seconds_total{device="wlan0",mac_address="01:02:03:04:05:06"} 60 +node_wifi_station_connected_seconds_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 30 +# HELP node_wifi_station_inactive_seconds The number of seconds since any wireless activity has occurred on a station. 
+# TYPE node_wifi_station_inactive_seconds gauge +node_wifi_station_inactive_seconds{device="wlan0",mac_address="01:02:03:04:05:06"} 0.8 +node_wifi_station_inactive_seconds{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0.4 +# HELP node_wifi_station_info Labeled WiFi interface station information as provided by the operating system. +# TYPE node_wifi_station_info gauge +node_wifi_station_info{bssid="00:11:22:33:44:55",device="wlan0",mode="client",ssid="Example"} 1 +# HELP node_wifi_station_receive_bits_per_second The current WiFi receive bitrate of a station, in bits per second. +# TYPE node_wifi_station_receive_bits_per_second gauge +node_wifi_station_receive_bits_per_second{device="wlan0",mac_address="01:02:03:04:05:06"} 2.56e+08 +node_wifi_station_receive_bits_per_second{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 1.28e+08 +# HELP node_wifi_station_receive_bytes_total The total number of bytes received by a WiFi station. +# TYPE node_wifi_station_receive_bytes_total counter +node_wifi_station_receive_bytes_total{device="wlan0",mac_address="01:02:03:04:05:06"} 0 +node_wifi_station_receive_bytes_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0 +# HELP node_wifi_station_signal_dbm The current WiFi signal strength, in decibel-milliwatts (dBm). +# TYPE node_wifi_station_signal_dbm gauge +node_wifi_station_signal_dbm{device="wlan0",mac_address="01:02:03:04:05:06"} -26 +node_wifi_station_signal_dbm{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} -52 +# HELP node_wifi_station_transmit_bits_per_second The current WiFi transmit bitrate of a station, in bits per second. +# TYPE node_wifi_station_transmit_bits_per_second gauge +node_wifi_station_transmit_bits_per_second{device="wlan0",mac_address="01:02:03:04:05:06"} 3.28e+08 +node_wifi_station_transmit_bits_per_second{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 1.64e+08 +# HELP node_wifi_station_transmit_bytes_total The total number of bytes transmitted by a WiFi station. 
+# TYPE node_wifi_station_transmit_bytes_total counter +node_wifi_station_transmit_bytes_total{device="wlan0",mac_address="01:02:03:04:05:06"} 0 +node_wifi_station_transmit_bytes_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0 +# HELP node_wifi_station_transmit_failed_total The total number of times a station has failed to send a packet. +# TYPE node_wifi_station_transmit_failed_total counter +node_wifi_station_transmit_failed_total{device="wlan0",mac_address="01:02:03:04:05:06"} 4 +node_wifi_station_transmit_failed_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 2 +# HELP node_wifi_station_transmit_retries_total The total number of times a station has had to retry while sending a packet. +# TYPE node_wifi_station_transmit_retries_total counter +node_wifi_station_transmit_retries_total{device="wlan0",mac_address="01:02:03:04:05:06"} 20 +node_wifi_station_transmit_retries_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 10 +# HELP node_xfs_allocation_btree_compares_total Number of allocation B-tree compares for a filesystem. +# TYPE node_xfs_allocation_btree_compares_total counter +node_xfs_allocation_btree_compares_total{device="sda1"} 0 +# HELP node_xfs_allocation_btree_lookups_total Number of allocation B-tree lookups for a filesystem. +# TYPE node_xfs_allocation_btree_lookups_total counter +node_xfs_allocation_btree_lookups_total{device="sda1"} 0 +# HELP node_xfs_allocation_btree_records_deleted_total Number of allocation B-tree records deleted for a filesystem. +# TYPE node_xfs_allocation_btree_records_deleted_total counter +node_xfs_allocation_btree_records_deleted_total{device="sda1"} 0 +# HELP node_xfs_allocation_btree_records_inserted_total Number of allocation B-tree records inserted for a filesystem. +# TYPE node_xfs_allocation_btree_records_inserted_total counter +node_xfs_allocation_btree_records_inserted_total{device="sda1"} 0 +# HELP node_xfs_block_map_btree_compares_total Number of block map B-tree compares for a filesystem. 
+# TYPE node_xfs_block_map_btree_compares_total counter +node_xfs_block_map_btree_compares_total{device="sda1"} 0 +# HELP node_xfs_block_map_btree_lookups_total Number of block map B-tree lookups for a filesystem. +# TYPE node_xfs_block_map_btree_lookups_total counter +node_xfs_block_map_btree_lookups_total{device="sda1"} 0 +# HELP node_xfs_block_map_btree_records_deleted_total Number of block map B-tree records deleted for a filesystem. +# TYPE node_xfs_block_map_btree_records_deleted_total counter +node_xfs_block_map_btree_records_deleted_total{device="sda1"} 0 +# HELP node_xfs_block_map_btree_records_inserted_total Number of block map B-tree records inserted for a filesystem. +# TYPE node_xfs_block_map_btree_records_inserted_total counter +node_xfs_block_map_btree_records_inserted_total{device="sda1"} 0 +# HELP node_xfs_block_mapping_extent_list_compares_total Number of extent list compares for a filesystem. +# TYPE node_xfs_block_mapping_extent_list_compares_total counter +node_xfs_block_mapping_extent_list_compares_total{device="sda1"} 0 +# HELP node_xfs_block_mapping_extent_list_deletions_total Number of extent list deletions for a filesystem. +# TYPE node_xfs_block_mapping_extent_list_deletions_total counter +node_xfs_block_mapping_extent_list_deletions_total{device="sda1"} 1 +# HELP node_xfs_block_mapping_extent_list_insertions_total Number of extent list insertions for a filesystem. +# TYPE node_xfs_block_mapping_extent_list_insertions_total counter +node_xfs_block_mapping_extent_list_insertions_total{device="sda1"} 1 +# HELP node_xfs_block_mapping_extent_list_lookups_total Number of extent list lookups for a filesystem. +# TYPE node_xfs_block_mapping_extent_list_lookups_total counter +node_xfs_block_mapping_extent_list_lookups_total{device="sda1"} 91 +# HELP node_xfs_block_mapping_reads_total Number of block map for read operations for a filesystem. 
+# TYPE node_xfs_block_mapping_reads_total counter +node_xfs_block_mapping_reads_total{device="sda1"} 61 +# HELP node_xfs_block_mapping_unmaps_total Number of block unmaps (deletes) for a filesystem. +# TYPE node_xfs_block_mapping_unmaps_total counter +node_xfs_block_mapping_unmaps_total{device="sda1"} 1 +# HELP node_xfs_block_mapping_writes_total Number of block map for write operations for a filesystem. +# TYPE node_xfs_block_mapping_writes_total counter +node_xfs_block_mapping_writes_total{device="sda1"} 29 +# HELP node_xfs_directory_operation_create_total Number of times a new directory entry was created for a filesystem. +# TYPE node_xfs_directory_operation_create_total counter +node_xfs_directory_operation_create_total{device="sda1"} 2 +# HELP node_xfs_directory_operation_getdents_total Number of times the directory getdents operation was performed for a filesystem. +# TYPE node_xfs_directory_operation_getdents_total counter +node_xfs_directory_operation_getdents_total{device="sda1"} 52 +# HELP node_xfs_directory_operation_lookup_total Number of file name directory lookups which miss the operating systems directory name lookup cache. +# TYPE node_xfs_directory_operation_lookup_total counter +node_xfs_directory_operation_lookup_total{device="sda1"} 3 +# HELP node_xfs_directory_operation_remove_total Number of times an existing directory entry was created for a filesystem. +# TYPE node_xfs_directory_operation_remove_total counter +node_xfs_directory_operation_remove_total{device="sda1"} 1 +# HELP node_xfs_extent_allocation_blocks_allocated_total Number of blocks allocated for a filesystem. +# TYPE node_xfs_extent_allocation_blocks_allocated_total counter +node_xfs_extent_allocation_blocks_allocated_total{device="sda1"} 872 +# HELP node_xfs_extent_allocation_blocks_freed_total Number of blocks freed for a filesystem. 
+# TYPE node_xfs_extent_allocation_blocks_freed_total counter +node_xfs_extent_allocation_blocks_freed_total{device="sda1"} 0 +# HELP node_xfs_extent_allocation_extents_allocated_total Number of extents allocated for a filesystem. +# TYPE node_xfs_extent_allocation_extents_allocated_total counter +node_xfs_extent_allocation_extents_allocated_total{device="sda1"} 1 +# HELP node_xfs_extent_allocation_extents_freed_total Number of extents freed for a filesystem. +# TYPE node_xfs_extent_allocation_extents_freed_total counter +node_xfs_extent_allocation_extents_freed_total{device="sda1"} 0 +# HELP node_xfs_inode_operation_attempts_total Number of times the OS looked for an XFS inode in the inode cache. +# TYPE node_xfs_inode_operation_attempts_total counter +node_xfs_inode_operation_attempts_total{device="sda1"} 5 +# HELP node_xfs_inode_operation_attribute_changes_total Number of times the OS explicitly changed the attributes of an XFS inode. +# TYPE node_xfs_inode_operation_attribute_changes_total counter +node_xfs_inode_operation_attribute_changes_total{device="sda1"} 1 +# HELP node_xfs_inode_operation_duplicates_total Number of times the OS tried to add a missing XFS inode to the inode cache, but found it had already been added by another process. +# TYPE node_xfs_inode_operation_duplicates_total counter +node_xfs_inode_operation_duplicates_total{device="sda1"} 0 +# HELP node_xfs_inode_operation_found_total Number of times the OS looked for and found an XFS inode in the inode cache. +# TYPE node_xfs_inode_operation_found_total counter +node_xfs_inode_operation_found_total{device="sda1"} 1 +# HELP node_xfs_inode_operation_missed_total Number of times the OS looked for an XFS inode in the cache, but did not find it. 
+# TYPE node_xfs_inode_operation_missed_total counter +node_xfs_inode_operation_missed_total{device="sda1"} 4 +# HELP node_xfs_inode_operation_reclaims_total Number of times the OS reclaimed an XFS inode from the inode cache to free memory for another purpose. +# TYPE node_xfs_inode_operation_reclaims_total counter +node_xfs_inode_operation_reclaims_total{device="sda1"} 0 +# HELP node_xfs_inode_operation_recycled_total Number of times the OS found an XFS inode in the cache, but could not use it as it was being recycled. +# TYPE node_xfs_inode_operation_recycled_total counter +node_xfs_inode_operation_recycled_total{device="sda1"} 0 +# HELP node_xfs_read_calls_total Number of read(2) system calls made to files in a filesystem. +# TYPE node_xfs_read_calls_total counter +node_xfs_read_calls_total{device="sda1"} 28 +# HELP node_xfs_vnode_active_total Number of vnodes not on free lists for a filesystem. +# TYPE node_xfs_vnode_active_total counter +node_xfs_vnode_active_total{device="sda1"} 4 +# HELP node_xfs_vnode_allocate_total Number of times vn_alloc called for a filesystem. +# TYPE node_xfs_vnode_allocate_total counter +node_xfs_vnode_allocate_total{device="sda1"} 0 +# HELP node_xfs_vnode_get_total Number of times vn_get called for a filesystem. +# TYPE node_xfs_vnode_get_total counter +node_xfs_vnode_get_total{device="sda1"} 0 +# HELP node_xfs_vnode_hold_total Number of times vn_hold called for a filesystem. +# TYPE node_xfs_vnode_hold_total counter +node_xfs_vnode_hold_total{device="sda1"} 0 +# HELP node_xfs_vnode_reclaim_total Number of times vn_reclaim called for a filesystem. +# TYPE node_xfs_vnode_reclaim_total counter +node_xfs_vnode_reclaim_total{device="sda1"} 1 +# HELP node_xfs_vnode_release_total Number of times vn_rele called for a filesystem. 
+# TYPE node_xfs_vnode_release_total counter +node_xfs_vnode_release_total{device="sda1"} 1 +# HELP node_xfs_vnode_remove_total Number of times vn_remove called for a filesystem. +# TYPE node_xfs_vnode_remove_total counter +node_xfs_vnode_remove_total{device="sda1"} 1 +# HELP node_xfs_write_calls_total Number of write(2) system calls made to files in a filesystem. +# TYPE node_xfs_write_calls_total counter +node_xfs_write_calls_total{device="sda1"} 0 +# HELP node_zfs_abd_linear_cnt kstat.zfs.misc.abdstats.linear_cnt +# TYPE node_zfs_abd_linear_cnt untyped +node_zfs_abd_linear_cnt 62 +# HELP node_zfs_abd_linear_data_size kstat.zfs.misc.abdstats.linear_data_size +# TYPE node_zfs_abd_linear_data_size untyped +node_zfs_abd_linear_data_size 223232 +# HELP node_zfs_abd_scatter_chunk_waste kstat.zfs.misc.abdstats.scatter_chunk_waste +# TYPE node_zfs_abd_scatter_chunk_waste untyped +node_zfs_abd_scatter_chunk_waste 0 +# HELP node_zfs_abd_scatter_cnt kstat.zfs.misc.abdstats.scatter_cnt +# TYPE node_zfs_abd_scatter_cnt untyped +node_zfs_abd_scatter_cnt 1 +# HELP node_zfs_abd_scatter_data_size kstat.zfs.misc.abdstats.scatter_data_size +# TYPE node_zfs_abd_scatter_data_size untyped +node_zfs_abd_scatter_data_size 16384 +# HELP node_zfs_abd_scatter_order_0 kstat.zfs.misc.abdstats.scatter_order_0 +# TYPE node_zfs_abd_scatter_order_0 untyped +node_zfs_abd_scatter_order_0 0 +# HELP node_zfs_abd_scatter_order_1 kstat.zfs.misc.abdstats.scatter_order_1 +# TYPE node_zfs_abd_scatter_order_1 untyped +node_zfs_abd_scatter_order_1 0 +# HELP node_zfs_abd_scatter_order_10 kstat.zfs.misc.abdstats.scatter_order_10 +# TYPE node_zfs_abd_scatter_order_10 untyped +node_zfs_abd_scatter_order_10 0 +# HELP node_zfs_abd_scatter_order_2 kstat.zfs.misc.abdstats.scatter_order_2 +# TYPE node_zfs_abd_scatter_order_2 untyped +node_zfs_abd_scatter_order_2 1 +# HELP node_zfs_abd_scatter_order_3 kstat.zfs.misc.abdstats.scatter_order_3 +# TYPE node_zfs_abd_scatter_order_3 untyped +node_zfs_abd_scatter_order_3 
0 +# HELP node_zfs_abd_scatter_order_4 kstat.zfs.misc.abdstats.scatter_order_4 +# TYPE node_zfs_abd_scatter_order_4 untyped +node_zfs_abd_scatter_order_4 0 +# HELP node_zfs_abd_scatter_order_5 kstat.zfs.misc.abdstats.scatter_order_5 +# TYPE node_zfs_abd_scatter_order_5 untyped +node_zfs_abd_scatter_order_5 0 +# HELP node_zfs_abd_scatter_order_6 kstat.zfs.misc.abdstats.scatter_order_6 +# TYPE node_zfs_abd_scatter_order_6 untyped +node_zfs_abd_scatter_order_6 0 +# HELP node_zfs_abd_scatter_order_7 kstat.zfs.misc.abdstats.scatter_order_7 +# TYPE node_zfs_abd_scatter_order_7 untyped +node_zfs_abd_scatter_order_7 0 +# HELP node_zfs_abd_scatter_order_8 kstat.zfs.misc.abdstats.scatter_order_8 +# TYPE node_zfs_abd_scatter_order_8 untyped +node_zfs_abd_scatter_order_8 0 +# HELP node_zfs_abd_scatter_order_9 kstat.zfs.misc.abdstats.scatter_order_9 +# TYPE node_zfs_abd_scatter_order_9 untyped +node_zfs_abd_scatter_order_9 0 +# HELP node_zfs_abd_scatter_page_alloc_retry kstat.zfs.misc.abdstats.scatter_page_alloc_retry +# TYPE node_zfs_abd_scatter_page_alloc_retry untyped +node_zfs_abd_scatter_page_alloc_retry 0 +# HELP node_zfs_abd_scatter_page_multi_chunk kstat.zfs.misc.abdstats.scatter_page_multi_chunk +# TYPE node_zfs_abd_scatter_page_multi_chunk untyped +node_zfs_abd_scatter_page_multi_chunk 0 +# HELP node_zfs_abd_scatter_page_multi_zone kstat.zfs.misc.abdstats.scatter_page_multi_zone +# TYPE node_zfs_abd_scatter_page_multi_zone untyped +node_zfs_abd_scatter_page_multi_zone 0 +# HELP node_zfs_abd_scatter_sg_table_retry kstat.zfs.misc.abdstats.scatter_sg_table_retry +# TYPE node_zfs_abd_scatter_sg_table_retry untyped +node_zfs_abd_scatter_sg_table_retry 0 +# HELP node_zfs_abd_struct_size kstat.zfs.misc.abdstats.struct_size +# TYPE node_zfs_abd_struct_size untyped +node_zfs_abd_struct_size 2520 +# HELP node_zfs_arc_anon_evictable_data kstat.zfs.misc.arcstats.anon_evictable_data +# TYPE node_zfs_arc_anon_evictable_data untyped +node_zfs_arc_anon_evictable_data 0 +# HELP 
node_zfs_arc_anon_evictable_metadata kstat.zfs.misc.arcstats.anon_evictable_metadata +# TYPE node_zfs_arc_anon_evictable_metadata untyped +node_zfs_arc_anon_evictable_metadata 0 +# HELP node_zfs_arc_anon_size kstat.zfs.misc.arcstats.anon_size +# TYPE node_zfs_arc_anon_size untyped +node_zfs_arc_anon_size 1.91744e+06 +# HELP node_zfs_arc_arc_loaned_bytes kstat.zfs.misc.arcstats.arc_loaned_bytes +# TYPE node_zfs_arc_arc_loaned_bytes untyped +node_zfs_arc_arc_loaned_bytes 0 +# HELP node_zfs_arc_arc_meta_limit kstat.zfs.misc.arcstats.arc_meta_limit +# TYPE node_zfs_arc_arc_meta_limit untyped +node_zfs_arc_arc_meta_limit 6.275982336e+09 +# HELP node_zfs_arc_arc_meta_max kstat.zfs.misc.arcstats.arc_meta_max +# TYPE node_zfs_arc_arc_meta_max untyped +node_zfs_arc_arc_meta_max 4.49286096e+08 +# HELP node_zfs_arc_arc_meta_min kstat.zfs.misc.arcstats.arc_meta_min +# TYPE node_zfs_arc_arc_meta_min untyped +node_zfs_arc_arc_meta_min 1.6777216e+07 +# HELP node_zfs_arc_arc_meta_used kstat.zfs.misc.arcstats.arc_meta_used +# TYPE node_zfs_arc_arc_meta_used untyped +node_zfs_arc_arc_meta_used 3.08103632e+08 +# HELP node_zfs_arc_arc_need_free kstat.zfs.misc.arcstats.arc_need_free +# TYPE node_zfs_arc_arc_need_free untyped +node_zfs_arc_arc_need_free 0 +# HELP node_zfs_arc_arc_no_grow kstat.zfs.misc.arcstats.arc_no_grow +# TYPE node_zfs_arc_arc_no_grow untyped +node_zfs_arc_arc_no_grow 0 +# HELP node_zfs_arc_arc_prune kstat.zfs.misc.arcstats.arc_prune +# TYPE node_zfs_arc_arc_prune untyped +node_zfs_arc_arc_prune 0 +# HELP node_zfs_arc_arc_sys_free kstat.zfs.misc.arcstats.arc_sys_free +# TYPE node_zfs_arc_arc_sys_free untyped +node_zfs_arc_arc_sys_free 2.61496832e+08 +# HELP node_zfs_arc_arc_tempreserve kstat.zfs.misc.arcstats.arc_tempreserve +# TYPE node_zfs_arc_arc_tempreserve untyped +node_zfs_arc_arc_tempreserve 0 +# HELP node_zfs_arc_c kstat.zfs.misc.arcstats.c +# TYPE node_zfs_arc_c untyped +node_zfs_arc_c 1.643208777e+09 +# HELP node_zfs_arc_c_max kstat.zfs.misc.arcstats.c_max 
+# TYPE node_zfs_arc_c_max untyped +node_zfs_arc_c_max 8.367976448e+09 +# HELP node_zfs_arc_c_min kstat.zfs.misc.arcstats.c_min +# TYPE node_zfs_arc_c_min untyped +node_zfs_arc_c_min 3.3554432e+07 +# HELP node_zfs_arc_data_size kstat.zfs.misc.arcstats.data_size +# TYPE node_zfs_arc_data_size untyped +node_zfs_arc_data_size 1.29583616e+09 +# HELP node_zfs_arc_deleted kstat.zfs.misc.arcstats.deleted +# TYPE node_zfs_arc_deleted untyped +node_zfs_arc_deleted 60403 +# HELP node_zfs_arc_demand_data_hits kstat.zfs.misc.arcstats.demand_data_hits +# TYPE node_zfs_arc_demand_data_hits untyped +node_zfs_arc_demand_data_hits 7.221032e+06 +# HELP node_zfs_arc_demand_data_misses kstat.zfs.misc.arcstats.demand_data_misses +# TYPE node_zfs_arc_demand_data_misses untyped +node_zfs_arc_demand_data_misses 73300 +# HELP node_zfs_arc_demand_metadata_hits kstat.zfs.misc.arcstats.demand_metadata_hits +# TYPE node_zfs_arc_demand_metadata_hits untyped +node_zfs_arc_demand_metadata_hits 1.464353e+06 +# HELP node_zfs_arc_demand_metadata_misses kstat.zfs.misc.arcstats.demand_metadata_misses +# TYPE node_zfs_arc_demand_metadata_misses untyped +node_zfs_arc_demand_metadata_misses 498170 +# HELP node_zfs_arc_duplicate_buffers kstat.zfs.misc.arcstats.duplicate_buffers +# TYPE node_zfs_arc_duplicate_buffers untyped +node_zfs_arc_duplicate_buffers 0 +# HELP node_zfs_arc_duplicate_buffers_size kstat.zfs.misc.arcstats.duplicate_buffers_size +# TYPE node_zfs_arc_duplicate_buffers_size untyped +node_zfs_arc_duplicate_buffers_size 0 +# HELP node_zfs_arc_duplicate_reads kstat.zfs.misc.arcstats.duplicate_reads +# TYPE node_zfs_arc_duplicate_reads untyped +node_zfs_arc_duplicate_reads 0 +# HELP node_zfs_arc_evict_l2_cached kstat.zfs.misc.arcstats.evict_l2_cached +# TYPE node_zfs_arc_evict_l2_cached untyped +node_zfs_arc_evict_l2_cached 0 +# HELP node_zfs_arc_evict_l2_eligible kstat.zfs.misc.arcstats.evict_l2_eligible +# TYPE node_zfs_arc_evict_l2_eligible untyped +node_zfs_arc_evict_l2_eligible 
8.99251456e+09 +# HELP node_zfs_arc_evict_l2_ineligible kstat.zfs.misc.arcstats.evict_l2_ineligible +# TYPE node_zfs_arc_evict_l2_ineligible untyped +node_zfs_arc_evict_l2_ineligible 9.92552448e+08 +# HELP node_zfs_arc_evict_l2_skip kstat.zfs.misc.arcstats.evict_l2_skip +# TYPE node_zfs_arc_evict_l2_skip untyped +node_zfs_arc_evict_l2_skip 0 +# HELP node_zfs_arc_evict_not_enough kstat.zfs.misc.arcstats.evict_not_enough +# TYPE node_zfs_arc_evict_not_enough untyped +node_zfs_arc_evict_not_enough 680 +# HELP node_zfs_arc_evict_skip kstat.zfs.misc.arcstats.evict_skip +# TYPE node_zfs_arc_evict_skip untyped +node_zfs_arc_evict_skip 2.265729e+06 +# HELP node_zfs_arc_hash_chain_max kstat.zfs.misc.arcstats.hash_chain_max +# TYPE node_zfs_arc_hash_chain_max untyped +node_zfs_arc_hash_chain_max 3 +# HELP node_zfs_arc_hash_chains kstat.zfs.misc.arcstats.hash_chains +# TYPE node_zfs_arc_hash_chains untyped +node_zfs_arc_hash_chains 412 +# HELP node_zfs_arc_hash_collisions kstat.zfs.misc.arcstats.hash_collisions +# TYPE node_zfs_arc_hash_collisions untyped +node_zfs_arc_hash_collisions 50564 +# HELP node_zfs_arc_hash_elements kstat.zfs.misc.arcstats.hash_elements +# TYPE node_zfs_arc_hash_elements untyped +node_zfs_arc_hash_elements 42359 +# HELP node_zfs_arc_hash_elements_max kstat.zfs.misc.arcstats.hash_elements_max +# TYPE node_zfs_arc_hash_elements_max untyped +node_zfs_arc_hash_elements_max 88245 +# HELP node_zfs_arc_hdr_size kstat.zfs.misc.arcstats.hdr_size +# TYPE node_zfs_arc_hdr_size untyped +node_zfs_arc_hdr_size 1.636108e+07 +# HELP node_zfs_arc_hits kstat.zfs.misc.arcstats.hits +# TYPE node_zfs_arc_hits untyped +node_zfs_arc_hits 8.772612e+06 +# HELP node_zfs_arc_l2_abort_lowmem kstat.zfs.misc.arcstats.l2_abort_lowmem +# TYPE node_zfs_arc_l2_abort_lowmem untyped +node_zfs_arc_l2_abort_lowmem 0 +# HELP node_zfs_arc_l2_asize kstat.zfs.misc.arcstats.l2_asize +# TYPE node_zfs_arc_l2_asize untyped +node_zfs_arc_l2_asize 0 +# HELP node_zfs_arc_l2_cdata_free_on_write 
kstat.zfs.misc.arcstats.l2_cdata_free_on_write +# TYPE node_zfs_arc_l2_cdata_free_on_write untyped +node_zfs_arc_l2_cdata_free_on_write 0 +# HELP node_zfs_arc_l2_cksum_bad kstat.zfs.misc.arcstats.l2_cksum_bad +# TYPE node_zfs_arc_l2_cksum_bad untyped +node_zfs_arc_l2_cksum_bad 0 +# HELP node_zfs_arc_l2_compress_failures kstat.zfs.misc.arcstats.l2_compress_failures +# TYPE node_zfs_arc_l2_compress_failures untyped +node_zfs_arc_l2_compress_failures 0 +# HELP node_zfs_arc_l2_compress_successes kstat.zfs.misc.arcstats.l2_compress_successes +# TYPE node_zfs_arc_l2_compress_successes untyped +node_zfs_arc_l2_compress_successes 0 +# HELP node_zfs_arc_l2_compress_zeros kstat.zfs.misc.arcstats.l2_compress_zeros +# TYPE node_zfs_arc_l2_compress_zeros untyped +node_zfs_arc_l2_compress_zeros 0 +# HELP node_zfs_arc_l2_evict_l1cached kstat.zfs.misc.arcstats.l2_evict_l1cached +# TYPE node_zfs_arc_l2_evict_l1cached untyped +node_zfs_arc_l2_evict_l1cached 0 +# HELP node_zfs_arc_l2_evict_lock_retry kstat.zfs.misc.arcstats.l2_evict_lock_retry +# TYPE node_zfs_arc_l2_evict_lock_retry untyped +node_zfs_arc_l2_evict_lock_retry 0 +# HELP node_zfs_arc_l2_evict_reading kstat.zfs.misc.arcstats.l2_evict_reading +# TYPE node_zfs_arc_l2_evict_reading untyped +node_zfs_arc_l2_evict_reading 0 +# HELP node_zfs_arc_l2_feeds kstat.zfs.misc.arcstats.l2_feeds +# TYPE node_zfs_arc_l2_feeds untyped +node_zfs_arc_l2_feeds 0 +# HELP node_zfs_arc_l2_free_on_write kstat.zfs.misc.arcstats.l2_free_on_write +# TYPE node_zfs_arc_l2_free_on_write untyped +node_zfs_arc_l2_free_on_write 0 +# HELP node_zfs_arc_l2_hdr_size kstat.zfs.misc.arcstats.l2_hdr_size +# TYPE node_zfs_arc_l2_hdr_size untyped +node_zfs_arc_l2_hdr_size 0 +# HELP node_zfs_arc_l2_hits kstat.zfs.misc.arcstats.l2_hits +# TYPE node_zfs_arc_l2_hits untyped +node_zfs_arc_l2_hits 0 +# HELP node_zfs_arc_l2_io_error kstat.zfs.misc.arcstats.l2_io_error +# TYPE node_zfs_arc_l2_io_error untyped +node_zfs_arc_l2_io_error 0 +# HELP node_zfs_arc_l2_misses 
kstat.zfs.misc.arcstats.l2_misses +# TYPE node_zfs_arc_l2_misses untyped +node_zfs_arc_l2_misses 0 +# HELP node_zfs_arc_l2_read_bytes kstat.zfs.misc.arcstats.l2_read_bytes +# TYPE node_zfs_arc_l2_read_bytes untyped +node_zfs_arc_l2_read_bytes 0 +# HELP node_zfs_arc_l2_rw_clash kstat.zfs.misc.arcstats.l2_rw_clash +# TYPE node_zfs_arc_l2_rw_clash untyped +node_zfs_arc_l2_rw_clash 0 +# HELP node_zfs_arc_l2_size kstat.zfs.misc.arcstats.l2_size +# TYPE node_zfs_arc_l2_size untyped +node_zfs_arc_l2_size 0 +# HELP node_zfs_arc_l2_write_bytes kstat.zfs.misc.arcstats.l2_write_bytes +# TYPE node_zfs_arc_l2_write_bytes untyped +node_zfs_arc_l2_write_bytes 0 +# HELP node_zfs_arc_l2_writes_done kstat.zfs.misc.arcstats.l2_writes_done +# TYPE node_zfs_arc_l2_writes_done untyped +node_zfs_arc_l2_writes_done 0 +# HELP node_zfs_arc_l2_writes_error kstat.zfs.misc.arcstats.l2_writes_error +# TYPE node_zfs_arc_l2_writes_error untyped +node_zfs_arc_l2_writes_error 0 +# HELP node_zfs_arc_l2_writes_lock_retry kstat.zfs.misc.arcstats.l2_writes_lock_retry +# TYPE node_zfs_arc_l2_writes_lock_retry untyped +node_zfs_arc_l2_writes_lock_retry 0 +# HELP node_zfs_arc_l2_writes_sent kstat.zfs.misc.arcstats.l2_writes_sent +# TYPE node_zfs_arc_l2_writes_sent untyped +node_zfs_arc_l2_writes_sent 0 +# HELP node_zfs_arc_memory_direct_count kstat.zfs.misc.arcstats.memory_direct_count +# TYPE node_zfs_arc_memory_direct_count untyped +node_zfs_arc_memory_direct_count 542 +# HELP node_zfs_arc_memory_indirect_count kstat.zfs.misc.arcstats.memory_indirect_count +# TYPE node_zfs_arc_memory_indirect_count untyped +node_zfs_arc_memory_indirect_count 3006 +# HELP node_zfs_arc_memory_throttle_count kstat.zfs.misc.arcstats.memory_throttle_count +# TYPE node_zfs_arc_memory_throttle_count untyped +node_zfs_arc_memory_throttle_count 0 +# HELP node_zfs_arc_metadata_size kstat.zfs.misc.arcstats.metadata_size +# TYPE node_zfs_arc_metadata_size untyped +node_zfs_arc_metadata_size 1.7529856e+08 +# HELP 
node_zfs_arc_mfu_evictable_data kstat.zfs.misc.arcstats.mfu_evictable_data +# TYPE node_zfs_arc_mfu_evictable_data untyped +node_zfs_arc_mfu_evictable_data 1.017613824e+09 +# HELP node_zfs_arc_mfu_evictable_metadata kstat.zfs.misc.arcstats.mfu_evictable_metadata +# TYPE node_zfs_arc_mfu_evictable_metadata untyped +node_zfs_arc_mfu_evictable_metadata 9.163776e+06 +# HELP node_zfs_arc_mfu_ghost_evictable_data kstat.zfs.misc.arcstats.mfu_ghost_evictable_data +# TYPE node_zfs_arc_mfu_ghost_evictable_data untyped +node_zfs_arc_mfu_ghost_evictable_data 9.6731136e+07 +# HELP node_zfs_arc_mfu_ghost_evictable_metadata kstat.zfs.misc.arcstats.mfu_ghost_evictable_metadata +# TYPE node_zfs_arc_mfu_ghost_evictable_metadata untyped +node_zfs_arc_mfu_ghost_evictable_metadata 8.205312e+06 +# HELP node_zfs_arc_mfu_ghost_hits kstat.zfs.misc.arcstats.mfu_ghost_hits +# TYPE node_zfs_arc_mfu_ghost_hits untyped +node_zfs_arc_mfu_ghost_hits 821 +# HELP node_zfs_arc_mfu_ghost_size kstat.zfs.misc.arcstats.mfu_ghost_size +# TYPE node_zfs_arc_mfu_ghost_size untyped +node_zfs_arc_mfu_ghost_size 1.04936448e+08 +# HELP node_zfs_arc_mfu_hits kstat.zfs.misc.arcstats.mfu_hits +# TYPE node_zfs_arc_mfu_hits untyped +node_zfs_arc_mfu_hits 7.829854e+06 +# HELP node_zfs_arc_mfu_size kstat.zfs.misc.arcstats.mfu_size +# TYPE node_zfs_arc_mfu_size untyped +node_zfs_arc_mfu_size 1.066623488e+09 +# HELP node_zfs_arc_misses kstat.zfs.misc.arcstats.misses +# TYPE node_zfs_arc_misses untyped +node_zfs_arc_misses 604635 +# HELP node_zfs_arc_mru_evictable_data kstat.zfs.misc.arcstats.mru_evictable_data +# TYPE node_zfs_arc_mru_evictable_data untyped +node_zfs_arc_mru_evictable_data 2.78091264e+08 +# HELP node_zfs_arc_mru_evictable_metadata kstat.zfs.misc.arcstats.mru_evictable_metadata +# TYPE node_zfs_arc_mru_evictable_metadata untyped +node_zfs_arc_mru_evictable_metadata 1.8606592e+07 +# HELP node_zfs_arc_mru_ghost_evictable_data kstat.zfs.misc.arcstats.mru_ghost_evictable_data +# TYPE 
node_zfs_arc_mru_ghost_evictable_data untyped +node_zfs_arc_mru_ghost_evictable_data 8.83765248e+08 +# HELP node_zfs_arc_mru_ghost_evictable_metadata kstat.zfs.misc.arcstats.mru_ghost_evictable_metadata +# TYPE node_zfs_arc_mru_ghost_evictable_metadata untyped +node_zfs_arc_mru_ghost_evictable_metadata 1.1596288e+08 +# HELP node_zfs_arc_mru_ghost_hits kstat.zfs.misc.arcstats.mru_ghost_hits +# TYPE node_zfs_arc_mru_ghost_hits untyped +node_zfs_arc_mru_ghost_hits 21100 +# HELP node_zfs_arc_mru_ghost_size kstat.zfs.misc.arcstats.mru_ghost_size +# TYPE node_zfs_arc_mru_ghost_size untyped +node_zfs_arc_mru_ghost_size 9.99728128e+08 +# HELP node_zfs_arc_mru_hits kstat.zfs.misc.arcstats.mru_hits +# TYPE node_zfs_arc_mru_hits untyped +node_zfs_arc_mru_hits 855535 +# HELP node_zfs_arc_mru_size kstat.zfs.misc.arcstats.mru_size +# TYPE node_zfs_arc_mru_size untyped +node_zfs_arc_mru_size 4.02593792e+08 +# HELP node_zfs_arc_mutex_miss kstat.zfs.misc.arcstats.mutex_miss +# TYPE node_zfs_arc_mutex_miss untyped +node_zfs_arc_mutex_miss 2 +# HELP node_zfs_arc_other_size kstat.zfs.misc.arcstats.other_size +# TYPE node_zfs_arc_other_size untyped +node_zfs_arc_other_size 1.16443992e+08 +# HELP node_zfs_arc_p kstat.zfs.misc.arcstats.p +# TYPE node_zfs_arc_p untyped +node_zfs_arc_p 5.16395305e+08 +# HELP node_zfs_arc_prefetch_data_hits kstat.zfs.misc.arcstats.prefetch_data_hits +# TYPE node_zfs_arc_prefetch_data_hits untyped +node_zfs_arc_prefetch_data_hits 3615 +# HELP node_zfs_arc_prefetch_data_misses kstat.zfs.misc.arcstats.prefetch_data_misses +# TYPE node_zfs_arc_prefetch_data_misses untyped +node_zfs_arc_prefetch_data_misses 17094 +# HELP node_zfs_arc_prefetch_metadata_hits kstat.zfs.misc.arcstats.prefetch_metadata_hits +# TYPE node_zfs_arc_prefetch_metadata_hits untyped +node_zfs_arc_prefetch_metadata_hits 83612 +# HELP node_zfs_arc_prefetch_metadata_misses kstat.zfs.misc.arcstats.prefetch_metadata_misses +# TYPE node_zfs_arc_prefetch_metadata_misses untyped 
+node_zfs_arc_prefetch_metadata_misses 16071 +# HELP node_zfs_arc_size kstat.zfs.misc.arcstats.size +# TYPE node_zfs_arc_size untyped +node_zfs_arc_size 1.603939792e+09 +# HELP node_zfs_dbuf_dbuf_cache_count kstat.zfs.misc.dbuf_stats.dbuf_cache_count +# TYPE node_zfs_dbuf_dbuf_cache_count untyped +node_zfs_dbuf_dbuf_cache_count 27 +# HELP node_zfs_dbuf_dbuf_cache_hiwater_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_hiwater_bytes +# TYPE node_zfs_dbuf_dbuf_cache_hiwater_bytes untyped +node_zfs_dbuf_dbuf_cache_hiwater_bytes 6.9117804e+07 +# HELP node_zfs_dbuf_dbuf_cache_level_0 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_0 +# TYPE node_zfs_dbuf_dbuf_cache_level_0 untyped +node_zfs_dbuf_dbuf_cache_level_0 27 +# HELP node_zfs_dbuf_dbuf_cache_level_0_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_0_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_0_bytes untyped +node_zfs_dbuf_dbuf_cache_level_0_bytes 302080 +# HELP node_zfs_dbuf_dbuf_cache_level_1 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_1 +# TYPE node_zfs_dbuf_dbuf_cache_level_1 untyped +node_zfs_dbuf_dbuf_cache_level_1 0 +# HELP node_zfs_dbuf_dbuf_cache_level_10 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_10 +# TYPE node_zfs_dbuf_dbuf_cache_level_10 untyped +node_zfs_dbuf_dbuf_cache_level_10 0 +# HELP node_zfs_dbuf_dbuf_cache_level_10_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_10_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_10_bytes untyped +node_zfs_dbuf_dbuf_cache_level_10_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_11 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_11 +# TYPE node_zfs_dbuf_dbuf_cache_level_11 untyped +node_zfs_dbuf_dbuf_cache_level_11 0 +# HELP node_zfs_dbuf_dbuf_cache_level_11_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_11_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_11_bytes untyped +node_zfs_dbuf_dbuf_cache_level_11_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_1_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_1_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_1_bytes untyped 
+node_zfs_dbuf_dbuf_cache_level_1_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_2 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_2 +# TYPE node_zfs_dbuf_dbuf_cache_level_2 untyped +node_zfs_dbuf_dbuf_cache_level_2 0 +# HELP node_zfs_dbuf_dbuf_cache_level_2_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_2_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_2_bytes untyped +node_zfs_dbuf_dbuf_cache_level_2_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_3 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_3 +# TYPE node_zfs_dbuf_dbuf_cache_level_3 untyped +node_zfs_dbuf_dbuf_cache_level_3 0 +# HELP node_zfs_dbuf_dbuf_cache_level_3_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_3_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_3_bytes untyped +node_zfs_dbuf_dbuf_cache_level_3_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_4 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_4 +# TYPE node_zfs_dbuf_dbuf_cache_level_4 untyped +node_zfs_dbuf_dbuf_cache_level_4 0 +# HELP node_zfs_dbuf_dbuf_cache_level_4_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_4_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_4_bytes untyped +node_zfs_dbuf_dbuf_cache_level_4_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_5 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_5 +# TYPE node_zfs_dbuf_dbuf_cache_level_5 untyped +node_zfs_dbuf_dbuf_cache_level_5 0 +# HELP node_zfs_dbuf_dbuf_cache_level_5_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_5_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_5_bytes untyped +node_zfs_dbuf_dbuf_cache_level_5_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_6 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_6 +# TYPE node_zfs_dbuf_dbuf_cache_level_6 untyped +node_zfs_dbuf_dbuf_cache_level_6 0 +# HELP node_zfs_dbuf_dbuf_cache_level_6_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_6_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_6_bytes untyped +node_zfs_dbuf_dbuf_cache_level_6_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_7 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_7 +# TYPE node_zfs_dbuf_dbuf_cache_level_7 
untyped +node_zfs_dbuf_dbuf_cache_level_7 0 +# HELP node_zfs_dbuf_dbuf_cache_level_7_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_7_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_7_bytes untyped +node_zfs_dbuf_dbuf_cache_level_7_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_8 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_8 +# TYPE node_zfs_dbuf_dbuf_cache_level_8 untyped +node_zfs_dbuf_dbuf_cache_level_8 0 +# HELP node_zfs_dbuf_dbuf_cache_level_8_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_8_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_8_bytes untyped +node_zfs_dbuf_dbuf_cache_level_8_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_9 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_9 +# TYPE node_zfs_dbuf_dbuf_cache_level_9 untyped +node_zfs_dbuf_dbuf_cache_level_9 0 +# HELP node_zfs_dbuf_dbuf_cache_level_9_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_9_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_9_bytes untyped +node_zfs_dbuf_dbuf_cache_level_9_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_lowater_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_lowater_bytes +# TYPE node_zfs_dbuf_dbuf_cache_lowater_bytes untyped +node_zfs_dbuf_dbuf_cache_lowater_bytes 5.6550932e+07 +# HELP node_zfs_dbuf_dbuf_cache_max_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_max_bytes +# TYPE node_zfs_dbuf_dbuf_cache_max_bytes untyped +node_zfs_dbuf_dbuf_cache_max_bytes 6.2834368e+07 +# HELP node_zfs_dbuf_dbuf_cache_size kstat.zfs.misc.dbuf_stats.dbuf_cache_size +# TYPE node_zfs_dbuf_dbuf_cache_size untyped +node_zfs_dbuf_dbuf_cache_size 302080 +# HELP node_zfs_dbuf_dbuf_cache_size_max kstat.zfs.misc.dbuf_stats.dbuf_cache_size_max +# TYPE node_zfs_dbuf_dbuf_cache_size_max untyped +node_zfs_dbuf_dbuf_cache_size_max 394240 +# HELP node_zfs_dbuf_dbuf_cache_total_evicts kstat.zfs.misc.dbuf_stats.dbuf_cache_total_evicts +# TYPE node_zfs_dbuf_dbuf_cache_total_evicts untyped +node_zfs_dbuf_dbuf_cache_total_evicts 0 +# HELP node_zfs_dbuf_hash_chain_max kstat.zfs.misc.dbuf_stats.hash_chain_max +# TYPE 
node_zfs_dbuf_hash_chain_max untyped +node_zfs_dbuf_hash_chain_max 0 +# HELP node_zfs_dbuf_hash_chains kstat.zfs.misc.dbuf_stats.hash_chains +# TYPE node_zfs_dbuf_hash_chains untyped +node_zfs_dbuf_hash_chains 0 +# HELP node_zfs_dbuf_hash_collisions kstat.zfs.misc.dbuf_stats.hash_collisions +# TYPE node_zfs_dbuf_hash_collisions untyped +node_zfs_dbuf_hash_collisions 0 +# HELP node_zfs_dbuf_hash_dbuf_level_0 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_0 +# TYPE node_zfs_dbuf_hash_dbuf_level_0 untyped +node_zfs_dbuf_hash_dbuf_level_0 37 +# HELP node_zfs_dbuf_hash_dbuf_level_0_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_0_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_0_bytes untyped +node_zfs_dbuf_hash_dbuf_level_0_bytes 465920 +# HELP node_zfs_dbuf_hash_dbuf_level_1 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_1 +# TYPE node_zfs_dbuf_hash_dbuf_level_1 untyped +node_zfs_dbuf_hash_dbuf_level_1 10 +# HELP node_zfs_dbuf_hash_dbuf_level_10 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_10 +# TYPE node_zfs_dbuf_hash_dbuf_level_10 untyped +node_zfs_dbuf_hash_dbuf_level_10 0 +# HELP node_zfs_dbuf_hash_dbuf_level_10_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_10_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_10_bytes untyped +node_zfs_dbuf_hash_dbuf_level_10_bytes 0 +# HELP node_zfs_dbuf_hash_dbuf_level_11 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_11 +# TYPE node_zfs_dbuf_hash_dbuf_level_11 untyped +node_zfs_dbuf_hash_dbuf_level_11 0 +# HELP node_zfs_dbuf_hash_dbuf_level_11_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_11_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_11_bytes untyped +node_zfs_dbuf_hash_dbuf_level_11_bytes 0 +# HELP node_zfs_dbuf_hash_dbuf_level_1_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_1_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_1_bytes untyped +node_zfs_dbuf_hash_dbuf_level_1_bytes 1.31072e+06 +# HELP node_zfs_dbuf_hash_dbuf_level_2 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_2 +# TYPE node_zfs_dbuf_hash_dbuf_level_2 untyped 
+node_zfs_dbuf_hash_dbuf_level_2 2 +# HELP node_zfs_dbuf_hash_dbuf_level_2_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_2_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_2_bytes untyped +node_zfs_dbuf_hash_dbuf_level_2_bytes 262144 +# HELP node_zfs_dbuf_hash_dbuf_level_3 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_3 +# TYPE node_zfs_dbuf_hash_dbuf_level_3 untyped +node_zfs_dbuf_hash_dbuf_level_3 2 +# HELP node_zfs_dbuf_hash_dbuf_level_3_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_3_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_3_bytes untyped +node_zfs_dbuf_hash_dbuf_level_3_bytes 262144 +# HELP node_zfs_dbuf_hash_dbuf_level_4 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_4 +# TYPE node_zfs_dbuf_hash_dbuf_level_4 untyped +node_zfs_dbuf_hash_dbuf_level_4 2 +# HELP node_zfs_dbuf_hash_dbuf_level_4_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_4_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_4_bytes untyped +node_zfs_dbuf_hash_dbuf_level_4_bytes 262144 +# HELP node_zfs_dbuf_hash_dbuf_level_5 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_5 +# TYPE node_zfs_dbuf_hash_dbuf_level_5 untyped +node_zfs_dbuf_hash_dbuf_level_5 2 +# HELP node_zfs_dbuf_hash_dbuf_level_5_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_5_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_5_bytes untyped +node_zfs_dbuf_hash_dbuf_level_5_bytes 262144 +# HELP node_zfs_dbuf_hash_dbuf_level_6 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_6 +# TYPE node_zfs_dbuf_hash_dbuf_level_6 untyped +node_zfs_dbuf_hash_dbuf_level_6 0 +# HELP node_zfs_dbuf_hash_dbuf_level_6_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_6_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_6_bytes untyped +node_zfs_dbuf_hash_dbuf_level_6_bytes 0 +# HELP node_zfs_dbuf_hash_dbuf_level_7 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_7 +# TYPE node_zfs_dbuf_hash_dbuf_level_7 untyped +node_zfs_dbuf_hash_dbuf_level_7 0 +# HELP node_zfs_dbuf_hash_dbuf_level_7_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_7_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_7_bytes untyped 
+node_zfs_dbuf_hash_dbuf_level_7_bytes 0 +# HELP node_zfs_dbuf_hash_dbuf_level_8 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_8 +# TYPE node_zfs_dbuf_hash_dbuf_level_8 untyped +node_zfs_dbuf_hash_dbuf_level_8 0 +# HELP node_zfs_dbuf_hash_dbuf_level_8_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_8_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_8_bytes untyped +node_zfs_dbuf_hash_dbuf_level_8_bytes 0 +# HELP node_zfs_dbuf_hash_dbuf_level_9 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_9 +# TYPE node_zfs_dbuf_hash_dbuf_level_9 untyped +node_zfs_dbuf_hash_dbuf_level_9 0 +# HELP node_zfs_dbuf_hash_dbuf_level_9_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_9_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_9_bytes untyped +node_zfs_dbuf_hash_dbuf_level_9_bytes 0 +# HELP node_zfs_dbuf_hash_elements kstat.zfs.misc.dbuf_stats.hash_elements +# TYPE node_zfs_dbuf_hash_elements untyped +node_zfs_dbuf_hash_elements 55 +# HELP node_zfs_dbuf_hash_elements_max kstat.zfs.misc.dbuf_stats.hash_elements_max +# TYPE node_zfs_dbuf_hash_elements_max untyped +node_zfs_dbuf_hash_elements_max 55 +# HELP node_zfs_dbuf_hash_hits kstat.zfs.misc.dbuf_stats.hash_hits +# TYPE node_zfs_dbuf_hash_hits untyped +node_zfs_dbuf_hash_hits 108807 +# HELP node_zfs_dbuf_hash_insert_race kstat.zfs.misc.dbuf_stats.hash_insert_race +# TYPE node_zfs_dbuf_hash_insert_race untyped +node_zfs_dbuf_hash_insert_race 0 +# HELP node_zfs_dbuf_hash_misses kstat.zfs.misc.dbuf_stats.hash_misses +# TYPE node_zfs_dbuf_hash_misses untyped +node_zfs_dbuf_hash_misses 1851 +# HELP node_zfs_dmu_tx_dmu_tx_assigned kstat.zfs.misc.dmu_tx.dmu_tx_assigned +# TYPE node_zfs_dmu_tx_dmu_tx_assigned untyped +node_zfs_dmu_tx_dmu_tx_assigned 3.532844e+06 +# HELP node_zfs_dmu_tx_dmu_tx_delay kstat.zfs.misc.dmu_tx.dmu_tx_delay +# TYPE node_zfs_dmu_tx_dmu_tx_delay untyped +node_zfs_dmu_tx_dmu_tx_delay 0 +# HELP node_zfs_dmu_tx_dmu_tx_dirty_delay kstat.zfs.misc.dmu_tx.dmu_tx_dirty_delay +# TYPE node_zfs_dmu_tx_dmu_tx_dirty_delay untyped 
+node_zfs_dmu_tx_dmu_tx_dirty_delay 0 +# HELP node_zfs_dmu_tx_dmu_tx_dirty_over_max kstat.zfs.misc.dmu_tx.dmu_tx_dirty_over_max +# TYPE node_zfs_dmu_tx_dmu_tx_dirty_over_max untyped +node_zfs_dmu_tx_dmu_tx_dirty_over_max 0 +# HELP node_zfs_dmu_tx_dmu_tx_dirty_throttle kstat.zfs.misc.dmu_tx.dmu_tx_dirty_throttle +# TYPE node_zfs_dmu_tx_dmu_tx_dirty_throttle untyped +node_zfs_dmu_tx_dmu_tx_dirty_throttle 0 +# HELP node_zfs_dmu_tx_dmu_tx_error kstat.zfs.misc.dmu_tx.dmu_tx_error +# TYPE node_zfs_dmu_tx_dmu_tx_error untyped +node_zfs_dmu_tx_dmu_tx_error 0 +# HELP node_zfs_dmu_tx_dmu_tx_group kstat.zfs.misc.dmu_tx.dmu_tx_group +# TYPE node_zfs_dmu_tx_dmu_tx_group untyped +node_zfs_dmu_tx_dmu_tx_group 0 +# HELP node_zfs_dmu_tx_dmu_tx_memory_reclaim kstat.zfs.misc.dmu_tx.dmu_tx_memory_reclaim +# TYPE node_zfs_dmu_tx_dmu_tx_memory_reclaim untyped +node_zfs_dmu_tx_dmu_tx_memory_reclaim 0 +# HELP node_zfs_dmu_tx_dmu_tx_memory_reserve kstat.zfs.misc.dmu_tx.dmu_tx_memory_reserve +# TYPE node_zfs_dmu_tx_dmu_tx_memory_reserve untyped +node_zfs_dmu_tx_dmu_tx_memory_reserve 0 +# HELP node_zfs_dmu_tx_dmu_tx_quota kstat.zfs.misc.dmu_tx.dmu_tx_quota +# TYPE node_zfs_dmu_tx_dmu_tx_quota untyped +node_zfs_dmu_tx_dmu_tx_quota 0 +# HELP node_zfs_dmu_tx_dmu_tx_suspended kstat.zfs.misc.dmu_tx.dmu_tx_suspended +# TYPE node_zfs_dmu_tx_dmu_tx_suspended untyped +node_zfs_dmu_tx_dmu_tx_suspended 0 +# HELP node_zfs_dnode_dnode_alloc_next_block kstat.zfs.misc.dnodestats.dnode_alloc_next_block +# TYPE node_zfs_dnode_dnode_alloc_next_block untyped +node_zfs_dnode_dnode_alloc_next_block 0 +# HELP node_zfs_dnode_dnode_alloc_next_chunk kstat.zfs.misc.dnodestats.dnode_alloc_next_chunk +# TYPE node_zfs_dnode_dnode_alloc_next_chunk untyped +node_zfs_dnode_dnode_alloc_next_chunk 0 +# HELP node_zfs_dnode_dnode_alloc_race kstat.zfs.misc.dnodestats.dnode_alloc_race +# TYPE node_zfs_dnode_dnode_alloc_race untyped +node_zfs_dnode_dnode_alloc_race 0 +# HELP node_zfs_dnode_dnode_allocate 
kstat.zfs.misc.dnodestats.dnode_allocate +# TYPE node_zfs_dnode_dnode_allocate untyped +node_zfs_dnode_dnode_allocate 0 +# HELP node_zfs_dnode_dnode_buf_evict kstat.zfs.misc.dnodestats.dnode_buf_evict +# TYPE node_zfs_dnode_dnode_buf_evict untyped +node_zfs_dnode_dnode_buf_evict 17 +# HELP node_zfs_dnode_dnode_hold_alloc_hits kstat.zfs.misc.dnodestats.dnode_hold_alloc_hits +# TYPE node_zfs_dnode_dnode_hold_alloc_hits untyped +node_zfs_dnode_dnode_hold_alloc_hits 37617 +# HELP node_zfs_dnode_dnode_hold_alloc_interior kstat.zfs.misc.dnodestats.dnode_hold_alloc_interior +# TYPE node_zfs_dnode_dnode_hold_alloc_interior untyped +node_zfs_dnode_dnode_hold_alloc_interior 0 +# HELP node_zfs_dnode_dnode_hold_alloc_lock_misses kstat.zfs.misc.dnodestats.dnode_hold_alloc_lock_misses +# TYPE node_zfs_dnode_dnode_hold_alloc_lock_misses untyped +node_zfs_dnode_dnode_hold_alloc_lock_misses 0 +# HELP node_zfs_dnode_dnode_hold_alloc_lock_retry kstat.zfs.misc.dnodestats.dnode_hold_alloc_lock_retry +# TYPE node_zfs_dnode_dnode_hold_alloc_lock_retry untyped +node_zfs_dnode_dnode_hold_alloc_lock_retry 0 +# HELP node_zfs_dnode_dnode_hold_alloc_misses kstat.zfs.misc.dnodestats.dnode_hold_alloc_misses +# TYPE node_zfs_dnode_dnode_hold_alloc_misses untyped +node_zfs_dnode_dnode_hold_alloc_misses 0 +# HELP node_zfs_dnode_dnode_hold_alloc_type_none kstat.zfs.misc.dnodestats.dnode_hold_alloc_type_none +# TYPE node_zfs_dnode_dnode_hold_alloc_type_none untyped +node_zfs_dnode_dnode_hold_alloc_type_none 0 +# HELP node_zfs_dnode_dnode_hold_dbuf_hold kstat.zfs.misc.dnodestats.dnode_hold_dbuf_hold +# TYPE node_zfs_dnode_dnode_hold_dbuf_hold untyped +node_zfs_dnode_dnode_hold_dbuf_hold 0 +# HELP node_zfs_dnode_dnode_hold_dbuf_read kstat.zfs.misc.dnodestats.dnode_hold_dbuf_read +# TYPE node_zfs_dnode_dnode_hold_dbuf_read untyped +node_zfs_dnode_dnode_hold_dbuf_read 0 +# HELP node_zfs_dnode_dnode_hold_free_hits kstat.zfs.misc.dnodestats.dnode_hold_free_hits +# TYPE node_zfs_dnode_dnode_hold_free_hits 
untyped +node_zfs_dnode_dnode_hold_free_hits 0 +# HELP node_zfs_dnode_dnode_hold_free_lock_misses kstat.zfs.misc.dnodestats.dnode_hold_free_lock_misses +# TYPE node_zfs_dnode_dnode_hold_free_lock_misses untyped +node_zfs_dnode_dnode_hold_free_lock_misses 0 +# HELP node_zfs_dnode_dnode_hold_free_lock_retry kstat.zfs.misc.dnodestats.dnode_hold_free_lock_retry +# TYPE node_zfs_dnode_dnode_hold_free_lock_retry untyped +node_zfs_dnode_dnode_hold_free_lock_retry 0 +# HELP node_zfs_dnode_dnode_hold_free_misses kstat.zfs.misc.dnodestats.dnode_hold_free_misses +# TYPE node_zfs_dnode_dnode_hold_free_misses untyped +node_zfs_dnode_dnode_hold_free_misses 0 +# HELP node_zfs_dnode_dnode_hold_free_overflow kstat.zfs.misc.dnodestats.dnode_hold_free_overflow +# TYPE node_zfs_dnode_dnode_hold_free_overflow untyped +node_zfs_dnode_dnode_hold_free_overflow 0 +# HELP node_zfs_dnode_dnode_hold_free_refcount kstat.zfs.misc.dnodestats.dnode_hold_free_refcount +# TYPE node_zfs_dnode_dnode_hold_free_refcount untyped +node_zfs_dnode_dnode_hold_free_refcount 0 +# HELP node_zfs_dnode_dnode_hold_free_txg kstat.zfs.misc.dnodestats.dnode_hold_free_txg +# TYPE node_zfs_dnode_dnode_hold_free_txg untyped +node_zfs_dnode_dnode_hold_free_txg 0 +# HELP node_zfs_dnode_dnode_move_active kstat.zfs.misc.dnodestats.dnode_move_active +# TYPE node_zfs_dnode_dnode_move_active untyped +node_zfs_dnode_dnode_move_active 0 +# HELP node_zfs_dnode_dnode_move_handle kstat.zfs.misc.dnodestats.dnode_move_handle +# TYPE node_zfs_dnode_dnode_move_handle untyped +node_zfs_dnode_dnode_move_handle 0 +# HELP node_zfs_dnode_dnode_move_invalid kstat.zfs.misc.dnodestats.dnode_move_invalid +# TYPE node_zfs_dnode_dnode_move_invalid untyped +node_zfs_dnode_dnode_move_invalid 0 +# HELP node_zfs_dnode_dnode_move_recheck1 kstat.zfs.misc.dnodestats.dnode_move_recheck1 +# TYPE node_zfs_dnode_dnode_move_recheck1 untyped +node_zfs_dnode_dnode_move_recheck1 0 +# HELP node_zfs_dnode_dnode_move_recheck2 
kstat.zfs.misc.dnodestats.dnode_move_recheck2 +# TYPE node_zfs_dnode_dnode_move_recheck2 untyped +node_zfs_dnode_dnode_move_recheck2 0 +# HELP node_zfs_dnode_dnode_move_rwlock kstat.zfs.misc.dnodestats.dnode_move_rwlock +# TYPE node_zfs_dnode_dnode_move_rwlock untyped +node_zfs_dnode_dnode_move_rwlock 0 +# HELP node_zfs_dnode_dnode_move_special kstat.zfs.misc.dnodestats.dnode_move_special +# TYPE node_zfs_dnode_dnode_move_special untyped +node_zfs_dnode_dnode_move_special 0 +# HELP node_zfs_dnode_dnode_reallocate kstat.zfs.misc.dnodestats.dnode_reallocate +# TYPE node_zfs_dnode_dnode_reallocate untyped +node_zfs_dnode_dnode_reallocate 0 +# HELP node_zfs_fm_erpt_dropped kstat.zfs.misc.fm.erpt-dropped +# TYPE node_zfs_fm_erpt_dropped untyped +node_zfs_fm_erpt_dropped 18 +# HELP node_zfs_fm_erpt_set_failed kstat.zfs.misc.fm.erpt-set-failed +# TYPE node_zfs_fm_erpt_set_failed untyped +node_zfs_fm_erpt_set_failed 0 +# HELP node_zfs_fm_fmri_set_failed kstat.zfs.misc.fm.fmri-set-failed +# TYPE node_zfs_fm_fmri_set_failed untyped +node_zfs_fm_fmri_set_failed 0 +# HELP node_zfs_fm_payload_set_failed kstat.zfs.misc.fm.payload-set-failed +# TYPE node_zfs_fm_payload_set_failed untyped +node_zfs_fm_payload_set_failed 0 +# HELP node_zfs_vdev_cache_delegations kstat.zfs.misc.vdev_cache_stats.delegations +# TYPE node_zfs_vdev_cache_delegations untyped +node_zfs_vdev_cache_delegations 40 +# HELP node_zfs_vdev_cache_hits kstat.zfs.misc.vdev_cache_stats.hits +# TYPE node_zfs_vdev_cache_hits untyped +node_zfs_vdev_cache_hits 0 +# HELP node_zfs_vdev_cache_misses kstat.zfs.misc.vdev_cache_stats.misses +# TYPE node_zfs_vdev_cache_misses untyped +node_zfs_vdev_cache_misses 0 +# HELP node_zfs_vdev_mirror_non_rotating_linear kstat.zfs.misc.vdev_mirror_stats.non_rotating_linear +# TYPE node_zfs_vdev_mirror_non_rotating_linear untyped +node_zfs_vdev_mirror_non_rotating_linear 0 +# HELP node_zfs_vdev_mirror_non_rotating_seek kstat.zfs.misc.vdev_mirror_stats.non_rotating_seek +# TYPE 
node_zfs_vdev_mirror_non_rotating_seek untyped +node_zfs_vdev_mirror_non_rotating_seek 0 +# HELP node_zfs_vdev_mirror_preferred_found kstat.zfs.misc.vdev_mirror_stats.preferred_found +# TYPE node_zfs_vdev_mirror_preferred_found untyped +node_zfs_vdev_mirror_preferred_found 0 +# HELP node_zfs_vdev_mirror_preferred_not_found kstat.zfs.misc.vdev_mirror_stats.preferred_not_found +# TYPE node_zfs_vdev_mirror_preferred_not_found untyped +node_zfs_vdev_mirror_preferred_not_found 94 +# HELP node_zfs_vdev_mirror_rotating_linear kstat.zfs.misc.vdev_mirror_stats.rotating_linear +# TYPE node_zfs_vdev_mirror_rotating_linear untyped +node_zfs_vdev_mirror_rotating_linear 0 +# HELP node_zfs_vdev_mirror_rotating_offset kstat.zfs.misc.vdev_mirror_stats.rotating_offset +# TYPE node_zfs_vdev_mirror_rotating_offset untyped +node_zfs_vdev_mirror_rotating_offset 0 +# HELP node_zfs_vdev_mirror_rotating_seek kstat.zfs.misc.vdev_mirror_stats.rotating_seek +# TYPE node_zfs_vdev_mirror_rotating_seek untyped +node_zfs_vdev_mirror_rotating_seek 0 +# HELP node_zfs_xuio_onloan_read_buf kstat.zfs.misc.xuio_stats.onloan_read_buf +# TYPE node_zfs_xuio_onloan_read_buf untyped +node_zfs_xuio_onloan_read_buf 32 +# HELP node_zfs_xuio_onloan_write_buf kstat.zfs.misc.xuio_stats.onloan_write_buf +# TYPE node_zfs_xuio_onloan_write_buf untyped +node_zfs_xuio_onloan_write_buf 0 +# HELP node_zfs_xuio_read_buf_copied kstat.zfs.misc.xuio_stats.read_buf_copied +# TYPE node_zfs_xuio_read_buf_copied untyped +node_zfs_xuio_read_buf_copied 0 +# HELP node_zfs_xuio_read_buf_nocopy kstat.zfs.misc.xuio_stats.read_buf_nocopy +# TYPE node_zfs_xuio_read_buf_nocopy untyped +node_zfs_xuio_read_buf_nocopy 0 +# HELP node_zfs_xuio_write_buf_copied kstat.zfs.misc.xuio_stats.write_buf_copied +# TYPE node_zfs_xuio_write_buf_copied untyped +node_zfs_xuio_write_buf_copied 0 +# HELP node_zfs_xuio_write_buf_nocopy kstat.zfs.misc.xuio_stats.write_buf_nocopy +# TYPE node_zfs_xuio_write_buf_nocopy untyped +node_zfs_xuio_write_buf_nocopy 0 
+# HELP node_zfs_zfetch_bogus_streams kstat.zfs.misc.zfetchstats.bogus_streams +# TYPE node_zfs_zfetch_bogus_streams untyped +node_zfs_zfetch_bogus_streams 0 +# HELP node_zfs_zfetch_colinear_hits kstat.zfs.misc.zfetchstats.colinear_hits +# TYPE node_zfs_zfetch_colinear_hits untyped +node_zfs_zfetch_colinear_hits 0 +# HELP node_zfs_zfetch_colinear_misses kstat.zfs.misc.zfetchstats.colinear_misses +# TYPE node_zfs_zfetch_colinear_misses untyped +node_zfs_zfetch_colinear_misses 11 +# HELP node_zfs_zfetch_hits kstat.zfs.misc.zfetchstats.hits +# TYPE node_zfs_zfetch_hits untyped +node_zfs_zfetch_hits 7.067992e+06 +# HELP node_zfs_zfetch_misses kstat.zfs.misc.zfetchstats.misses +# TYPE node_zfs_zfetch_misses untyped +node_zfs_zfetch_misses 11 +# HELP node_zfs_zfetch_reclaim_failures kstat.zfs.misc.zfetchstats.reclaim_failures +# TYPE node_zfs_zfetch_reclaim_failures untyped +node_zfs_zfetch_reclaim_failures 11 +# HELP node_zfs_zfetch_reclaim_successes kstat.zfs.misc.zfetchstats.reclaim_successes +# TYPE node_zfs_zfetch_reclaim_successes untyped +node_zfs_zfetch_reclaim_successes 0 +# HELP node_zfs_zfetch_streams_noresets kstat.zfs.misc.zfetchstats.streams_noresets +# TYPE node_zfs_zfetch_streams_noresets untyped +node_zfs_zfetch_streams_noresets 2 +# HELP node_zfs_zfetch_streams_resets kstat.zfs.misc.zfetchstats.streams_resets +# TYPE node_zfs_zfetch_streams_resets untyped +node_zfs_zfetch_streams_resets 0 +# HELP node_zfs_zfetch_stride_hits kstat.zfs.misc.zfetchstats.stride_hits +# TYPE node_zfs_zfetch_stride_hits untyped +node_zfs_zfetch_stride_hits 7.06799e+06 +# HELP node_zfs_zfetch_stride_misses kstat.zfs.misc.zfetchstats.stride_misses +# TYPE node_zfs_zfetch_stride_misses untyped +node_zfs_zfetch_stride_misses 0 +# HELP node_zfs_zil_zil_commit_count kstat.zfs.misc.zil.zil_commit_count +# TYPE node_zfs_zil_zil_commit_count untyped +node_zfs_zil_zil_commit_count 10 +# HELP node_zfs_zil_zil_commit_writer_count kstat.zfs.misc.zil.zil_commit_writer_count +# TYPE 
node_zfs_zil_zil_commit_writer_count untyped +node_zfs_zil_zil_commit_writer_count 0 +# HELP node_zfs_zil_zil_itx_copied_bytes kstat.zfs.misc.zil.zil_itx_copied_bytes +# TYPE node_zfs_zil_zil_itx_copied_bytes untyped +node_zfs_zil_zil_itx_copied_bytes 0 +# HELP node_zfs_zil_zil_itx_copied_count kstat.zfs.misc.zil.zil_itx_copied_count +# TYPE node_zfs_zil_zil_itx_copied_count untyped +node_zfs_zil_zil_itx_copied_count 0 +# HELP node_zfs_zil_zil_itx_count kstat.zfs.misc.zil.zil_itx_count +# TYPE node_zfs_zil_zil_itx_count untyped +node_zfs_zil_zil_itx_count 0 +# HELP node_zfs_zil_zil_itx_indirect_bytes kstat.zfs.misc.zil.zil_itx_indirect_bytes +# TYPE node_zfs_zil_zil_itx_indirect_bytes untyped +node_zfs_zil_zil_itx_indirect_bytes 0 +# HELP node_zfs_zil_zil_itx_indirect_count kstat.zfs.misc.zil.zil_itx_indirect_count +# TYPE node_zfs_zil_zil_itx_indirect_count untyped +node_zfs_zil_zil_itx_indirect_count 0 +# HELP node_zfs_zil_zil_itx_metaslab_normal_bytes kstat.zfs.misc.zil.zil_itx_metaslab_normal_bytes +# TYPE node_zfs_zil_zil_itx_metaslab_normal_bytes untyped +node_zfs_zil_zil_itx_metaslab_normal_bytes 0 +# HELP node_zfs_zil_zil_itx_metaslab_normal_count kstat.zfs.misc.zil.zil_itx_metaslab_normal_count +# TYPE node_zfs_zil_zil_itx_metaslab_normal_count untyped +node_zfs_zil_zil_itx_metaslab_normal_count 0 +# HELP node_zfs_zil_zil_itx_metaslab_slog_bytes kstat.zfs.misc.zil.zil_itx_metaslab_slog_bytes +# TYPE node_zfs_zil_zil_itx_metaslab_slog_bytes untyped +node_zfs_zil_zil_itx_metaslab_slog_bytes 0 +# HELP node_zfs_zil_zil_itx_metaslab_slog_count kstat.zfs.misc.zil.zil_itx_metaslab_slog_count +# TYPE node_zfs_zil_zil_itx_metaslab_slog_count untyped +node_zfs_zil_zil_itx_metaslab_slog_count 0 +# HELP node_zfs_zil_zil_itx_needcopy_bytes kstat.zfs.misc.zil.zil_itx_needcopy_bytes +# TYPE node_zfs_zil_zil_itx_needcopy_bytes untyped +node_zfs_zil_zil_itx_needcopy_bytes 1.8446744073709537e+19 +# HELP node_zfs_zil_zil_itx_needcopy_count 
kstat.zfs.misc.zil.zil_itx_needcopy_count +# TYPE node_zfs_zil_zil_itx_needcopy_count untyped +node_zfs_zil_zil_itx_needcopy_count 0 +# HELP node_zfs_zpool_dataset_nread kstat.zfs.misc.objset.nread +# TYPE node_zfs_zpool_dataset_nread untyped +node_zfs_zpool_dataset_nread{dataset="pool1",zpool="pool1"} 0 +node_zfs_zpool_dataset_nread{dataset="pool1/dataset1",zpool="pool1"} 28 +node_zfs_zpool_dataset_nread{dataset="poolz1",zpool="poolz1"} 0 +node_zfs_zpool_dataset_nread{dataset="poolz1/dataset1",zpool="poolz1"} 28 +# HELP node_zfs_zpool_dataset_nunlinked kstat.zfs.misc.objset.nunlinked +# TYPE node_zfs_zpool_dataset_nunlinked untyped +node_zfs_zpool_dataset_nunlinked{dataset="pool1",zpool="pool1"} 0 +node_zfs_zpool_dataset_nunlinked{dataset="pool1/dataset1",zpool="pool1"} 3 +node_zfs_zpool_dataset_nunlinked{dataset="poolz1",zpool="poolz1"} 0 +node_zfs_zpool_dataset_nunlinked{dataset="poolz1/dataset1",zpool="poolz1"} 14 +# HELP node_zfs_zpool_dataset_nunlinks kstat.zfs.misc.objset.nunlinks +# TYPE node_zfs_zpool_dataset_nunlinks untyped +node_zfs_zpool_dataset_nunlinks{dataset="pool1",zpool="pool1"} 0 +node_zfs_zpool_dataset_nunlinks{dataset="pool1/dataset1",zpool="pool1"} 3 +node_zfs_zpool_dataset_nunlinks{dataset="poolz1",zpool="poolz1"} 0 +node_zfs_zpool_dataset_nunlinks{dataset="poolz1/dataset1",zpool="poolz1"} 14 +# HELP node_zfs_zpool_dataset_nwritten kstat.zfs.misc.objset.nwritten +# TYPE node_zfs_zpool_dataset_nwritten untyped +node_zfs_zpool_dataset_nwritten{dataset="pool1",zpool="pool1"} 0 +node_zfs_zpool_dataset_nwritten{dataset="pool1/dataset1",zpool="pool1"} 12302 +node_zfs_zpool_dataset_nwritten{dataset="poolz1",zpool="poolz1"} 0 +node_zfs_zpool_dataset_nwritten{dataset="poolz1/dataset1",zpool="poolz1"} 32806 +# HELP node_zfs_zpool_dataset_reads kstat.zfs.misc.objset.reads +# TYPE node_zfs_zpool_dataset_reads untyped +node_zfs_zpool_dataset_reads{dataset="pool1",zpool="pool1"} 0 +node_zfs_zpool_dataset_reads{dataset="pool1/dataset1",zpool="pool1"} 2 
+node_zfs_zpool_dataset_reads{dataset="poolz1",zpool="poolz1"} 0 +node_zfs_zpool_dataset_reads{dataset="poolz1/dataset1",zpool="poolz1"} 2 +# HELP node_zfs_zpool_dataset_writes kstat.zfs.misc.objset.writes +# TYPE node_zfs_zpool_dataset_writes untyped +node_zfs_zpool_dataset_writes{dataset="pool1",zpool="pool1"} 0 +node_zfs_zpool_dataset_writes{dataset="pool1/dataset1",zpool="pool1"} 4 +node_zfs_zpool_dataset_writes{dataset="poolz1",zpool="poolz1"} 0 +node_zfs_zpool_dataset_writes{dataset="poolz1/dataset1",zpool="poolz1"} 10 +# HELP node_zfs_zpool_nread kstat.zfs.misc.io.nread +# TYPE node_zfs_zpool_nread untyped +node_zfs_zpool_nread{zpool="pool1"} 1.88416e+06 +node_zfs_zpool_nread{zpool="poolz1"} 2.82624e+06 +# HELP node_zfs_zpool_nwritten kstat.zfs.misc.io.nwritten +# TYPE node_zfs_zpool_nwritten untyped +node_zfs_zpool_nwritten{zpool="pool1"} 3.206144e+06 +node_zfs_zpool_nwritten{zpool="poolz1"} 2.680501248e+09 +# HELP node_zfs_zpool_rcnt kstat.zfs.misc.io.rcnt +# TYPE node_zfs_zpool_rcnt untyped +node_zfs_zpool_rcnt{zpool="pool1"} 0 +node_zfs_zpool_rcnt{zpool="poolz1"} 0 +# HELP node_zfs_zpool_reads kstat.zfs.misc.io.reads +# TYPE node_zfs_zpool_reads untyped +node_zfs_zpool_reads{zpool="pool1"} 22 +node_zfs_zpool_reads{zpool="poolz1"} 33 +# HELP node_zfs_zpool_rlentime kstat.zfs.misc.io.rlentime +# TYPE node_zfs_zpool_rlentime untyped +node_zfs_zpool_rlentime{zpool="pool1"} 1.04112268e+08 +node_zfs_zpool_rlentime{zpool="poolz1"} 6.472105124093e+12 +# HELP node_zfs_zpool_rtime kstat.zfs.misc.io.rtime +# TYPE node_zfs_zpool_rtime untyped +node_zfs_zpool_rtime{zpool="pool1"} 2.4168078e+07 +node_zfs_zpool_rtime{zpool="poolz1"} 9.82909164e+09 +# HELP node_zfs_zpool_rupdate kstat.zfs.misc.io.rupdate +# TYPE node_zfs_zpool_rupdate untyped +node_zfs_zpool_rupdate{zpool="pool1"} 7.921048984922e+13 +node_zfs_zpool_rupdate{zpool="poolz1"} 1.10734831944501e+14 +# HELP node_zfs_zpool_wcnt kstat.zfs.misc.io.wcnt +# TYPE node_zfs_zpool_wcnt untyped 
+node_zfs_zpool_wcnt{zpool="pool1"} 0 +node_zfs_zpool_wcnt{zpool="poolz1"} 0 +# HELP node_zfs_zpool_wlentime kstat.zfs.misc.io.wlentime +# TYPE node_zfs_zpool_wlentime untyped +node_zfs_zpool_wlentime{zpool="pool1"} 1.04112268e+08 +node_zfs_zpool_wlentime{zpool="poolz1"} 6.472105124093e+12 +# HELP node_zfs_zpool_writes kstat.zfs.misc.io.writes +# TYPE node_zfs_zpool_writes untyped +node_zfs_zpool_writes{zpool="pool1"} 132 +node_zfs_zpool_writes{zpool="poolz1"} 25294 +# HELP node_zfs_zpool_wtime kstat.zfs.misc.io.wtime +# TYPE node_zfs_zpool_wtime untyped +node_zfs_zpool_wtime{zpool="pool1"} 7.155162e+06 +node_zfs_zpool_wtime{zpool="poolz1"} 9.673715628e+09 +# HELP node_zfs_zpool_wupdate kstat.zfs.misc.io.wupdate +# TYPE node_zfs_zpool_wupdate untyped +node_zfs_zpool_wupdate{zpool="pool1"} 7.9210489694949e+13 +node_zfs_zpool_wupdate{zpool="poolz1"} 1.10734831833266e+14 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. +# TYPE process_virtual_memory_max_bytes gauge +# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler. 
+# TYPE promhttp_metric_handler_errors_total counter +promhttp_metric_handler_errors_total{cause="encoding"} 0 +promhttp_metric_handler_errors_total{cause="gathering"} 0 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. +# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight 1 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. +# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{code="200"} 0 +promhttp_metric_handler_requests_total{code="500"} 0 +promhttp_metric_handler_requests_total{code="503"} 0 +# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_1 untyped +testmetric1_1{foo="bar"} 10 +# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_2 untyped +testmetric1_2{foo="baz"} 20 +# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_1 untyped +testmetric2_1{foo="bar"} 30 +# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_2 untyped +testmetric2_2{foo="baz"} 40 diff --git a/collector/fixtures/e2e-output.txt b/collector/fixtures/e2e-output.txt index 3e65396398..779eb3b60c 100644 --- a/collector/fixtures/e2e-output.txt +++ b/collector/fixtures/e2e-output.txt @@ -1,4 +1,4 @@ -# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. # TYPE go_gc_duration_seconds summary # HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge @@ -54,27 +54,6 @@ # TYPE go_memstats_sys_bytes gauge # HELP go_threads Number of OS threads created. 
# TYPE go_threads gauge -# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. -# TYPE http_request_duration_microseconds summary -http_request_duration_microseconds{handler="prometheus",quantile="0.5"} NaN -http_request_duration_microseconds{handler="prometheus",quantile="0.9"} NaN -http_request_duration_microseconds{handler="prometheus",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="prometheus"} 0 -http_request_duration_microseconds_count{handler="prometheus"} 0 -# HELP http_request_size_bytes The HTTP request sizes in bytes. -# TYPE http_request_size_bytes summary -http_request_size_bytes{handler="prometheus",quantile="0.5"} NaN -http_request_size_bytes{handler="prometheus",quantile="0.9"} NaN -http_request_size_bytes{handler="prometheus",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="prometheus"} 0 -http_request_size_bytes_count{handler="prometheus"} 0 -# HELP http_response_size_bytes The HTTP response sizes in bytes. -# TYPE http_response_size_bytes summary -http_response_size_bytes{handler="prometheus",quantile="0.5"} NaN -http_response_size_bytes{handler="prometheus",quantile="0.9"} NaN -http_response_size_bytes{handler="prometheus",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="prometheus"} 0 -http_response_size_bytes_count{handler="prometheus"} 0 # HELP node_arp_entries ARP entries by device # TYPE node_arp_entries gauge node_arp_entries{device="eth0"} 3 @@ -97,7 +76,7 @@ node_bcache_btree_read_average_duration_seconds{uuid="deaddd54-c735-46d5-868e-f3 # HELP node_bcache_bypassed_bytes_total Amount of IO (both reads and writes) that has bypassed the cache. # TYPE node_bcache_bypassed_bytes_total counter node_bcache_bypassed_bytes_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 -# HELP node_bcache_cache_available_percent Percentage of cache device without dirty data, useable for writeback (may contain clean cached data). 
+# HELP node_bcache_cache_available_percent Percentage of cache device without dirty data, usable for writeback (may contain clean cached data). # TYPE node_bcache_cache_available_percent gauge node_bcache_cache_available_percent{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 100 # HELP node_bcache_cache_bypass_hits_total Hits for IO intended to skip the cache. @@ -115,9 +94,9 @@ node_bcache_cache_miss_collisions_total{backing_device="bdev0",uuid="deaddd54-c7 # HELP node_bcache_cache_misses_total Misses counted per individual IO as bcache sees them. # TYPE node_bcache_cache_misses_total counter node_bcache_cache_misses_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 -# HELP node_bcache_cache_read_races Counts instances where while data was being read from the cache, the bucket was reused and invalidated - i.e. where the pointer was stale after the read completed. -# TYPE node_bcache_cache_read_races counter -node_bcache_cache_read_races{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_cache_read_races_total Counts instances where while data was being read from the cache, the bucket was reused and invalidated - i.e. where the pointer was stale after the read completed. +# TYPE node_bcache_cache_read_races_total counter +node_bcache_cache_read_races_total{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_readaheads_total Count of times readahead occurred. # TYPE node_bcache_cache_readaheads_total counter node_bcache_cache_readaheads_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 @@ -127,6 +106,9 @@ node_bcache_congested{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_dirty_data_bytes Amount of dirty data for this backing device in the cache. 
# TYPE node_bcache_dirty_data_bytes gauge node_bcache_dirty_data_bytes{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_dirty_target_bytes Current dirty data target threshold for this backing device in bytes. +# TYPE node_bcache_dirty_target_bytes gauge +node_bcache_dirty_target_bytes{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 2.189426688e+10 # HELP node_bcache_io_errors Number of errors that have occurred, decayed by io_error_halflife. # TYPE node_bcache_io_errors gauge node_bcache_io_errors{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 @@ -145,6 +127,18 @@ node_bcache_root_usage_percent{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_tree_depth Depth of the btree. # TYPE node_bcache_tree_depth gauge node_bcache_tree_depth{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 +# HELP node_bcache_writeback_change Last writeback rate change step for this backing device. +# TYPE node_bcache_writeback_change gauge +node_bcache_writeback_change{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 329204 +# HELP node_bcache_writeback_rate Current writeback rate for this backing device in bytes. 
+# TYPE node_bcache_writeback_rate gauge +node_bcache_writeback_rate{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 1.150976e+06 +# HELP node_bcache_writeback_rate_integral_term Current result of integral controller, part of writeback rate +# TYPE node_bcache_writeback_rate_integral_term gauge +node_bcache_writeback_rate_integral_term{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 808960 +# HELP node_bcache_writeback_rate_proportional_term Current result of proportional controller, part of writeback rate +# TYPE node_bcache_writeback_rate_proportional_term gauge +node_bcache_writeback_rate_proportional_term{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 437748 # HELP node_bcache_written_bytes_total Sum of all data that has been written to the cache. # TYPE node_bcache_written_bytes_total counter node_bcache_written_bytes_total{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 @@ -158,182 +152,274 @@ node_bonding_active{master="int"} 1 node_bonding_slaves{master="bond0"} 0 node_bonding_slaves{master="dmz"} 2 node_bonding_slaves{master="int"} 2 -# HELP node_boot_time Node boot time, in unixtime. -# TYPE node_boot_time gauge -node_boot_time 1.418183276e+09 -# HELP node_buddyinfo_count Count of free blocks according to size. 
-# TYPE node_buddyinfo_count gauge -node_buddyinfo_count{node="0",size="0",zone="DMA"} 1 -node_buddyinfo_count{node="0",size="0",zone="DMA32"} 759 -node_buddyinfo_count{node="0",size="0",zone="Normal"} 4381 -node_buddyinfo_count{node="0",size="1",zone="DMA"} 0 -node_buddyinfo_count{node="0",size="1",zone="DMA32"} 572 -node_buddyinfo_count{node="0",size="1",zone="Normal"} 1093 -node_buddyinfo_count{node="0",size="10",zone="DMA"} 3 -node_buddyinfo_count{node="0",size="10",zone="DMA32"} 0 -node_buddyinfo_count{node="0",size="10",zone="Normal"} 0 -node_buddyinfo_count{node="0",size="2",zone="DMA"} 1 -node_buddyinfo_count{node="0",size="2",zone="DMA32"} 791 -node_buddyinfo_count{node="0",size="2",zone="Normal"} 185 -node_buddyinfo_count{node="0",size="3",zone="DMA"} 0 -node_buddyinfo_count{node="0",size="3",zone="DMA32"} 475 -node_buddyinfo_count{node="0",size="3",zone="Normal"} 1530 -node_buddyinfo_count{node="0",size="4",zone="DMA"} 2 -node_buddyinfo_count{node="0",size="4",zone="DMA32"} 194 -node_buddyinfo_count{node="0",size="4",zone="Normal"} 567 -node_buddyinfo_count{node="0",size="5",zone="DMA"} 1 -node_buddyinfo_count{node="0",size="5",zone="DMA32"} 45 -node_buddyinfo_count{node="0",size="5",zone="Normal"} 102 -node_buddyinfo_count{node="0",size="6",zone="DMA"} 1 -node_buddyinfo_count{node="0",size="6",zone="DMA32"} 12 -node_buddyinfo_count{node="0",size="6",zone="Normal"} 4 -node_buddyinfo_count{node="0",size="7",zone="DMA"} 0 -node_buddyinfo_count{node="0",size="7",zone="DMA32"} 0 -node_buddyinfo_count{node="0",size="7",zone="Normal"} 0 -node_buddyinfo_count{node="0",size="8",zone="DMA"} 1 -node_buddyinfo_count{node="0",size="8",zone="DMA32"} 0 -node_buddyinfo_count{node="0",size="8",zone="Normal"} 0 -node_buddyinfo_count{node="0",size="9",zone="DMA"} 1 -node_buddyinfo_count{node="0",size="9",zone="DMA32"} 0 -node_buddyinfo_count{node="0",size="9",zone="Normal"} 0 -# HELP node_context_switches Total number of context switches. 
-# TYPE node_context_switches counter -node_context_switches 3.8014093e+07 -# HELP node_cpu Seconds the cpus spent in each mode. -# TYPE node_cpu counter -node_cpu{cpu="cpu0",mode="guest"} 0 -node_cpu{cpu="cpu0",mode="guest_nice"} 0 -node_cpu{cpu="cpu0",mode="idle"} 10870.69 -node_cpu{cpu="cpu0",mode="iowait"} 2.2 -node_cpu{cpu="cpu0",mode="irq"} 0.01 -node_cpu{cpu="cpu0",mode="nice"} 0.19 -node_cpu{cpu="cpu0",mode="softirq"} 34.1 -node_cpu{cpu="cpu0",mode="steal"} 0 -node_cpu{cpu="cpu0",mode="system"} 210.45 -node_cpu{cpu="cpu0",mode="user"} 444.9 -node_cpu{cpu="cpu1",mode="guest"} 0 -node_cpu{cpu="cpu1",mode="guest_nice"} 0 -node_cpu{cpu="cpu1",mode="idle"} 11107.87 -node_cpu{cpu="cpu1",mode="iowait"} 5.91 -node_cpu{cpu="cpu1",mode="irq"} 0 -node_cpu{cpu="cpu1",mode="nice"} 0.23 -node_cpu{cpu="cpu1",mode="softirq"} 0.46 -node_cpu{cpu="cpu1",mode="steal"} 0 -node_cpu{cpu="cpu1",mode="system"} 164.74 -node_cpu{cpu="cpu1",mode="user"} 478.69 -node_cpu{cpu="cpu2",mode="guest"} 0 -node_cpu{cpu="cpu2",mode="guest_nice"} 0 -node_cpu{cpu="cpu2",mode="idle"} 11123.21 -node_cpu{cpu="cpu2",mode="iowait"} 4.41 -node_cpu{cpu="cpu2",mode="irq"} 0 -node_cpu{cpu="cpu2",mode="nice"} 0.36 -node_cpu{cpu="cpu2",mode="softirq"} 3.26 -node_cpu{cpu="cpu2",mode="steal"} 0 -node_cpu{cpu="cpu2",mode="system"} 159.16 -node_cpu{cpu="cpu2",mode="user"} 465.04 -node_cpu{cpu="cpu3",mode="guest"} 0 -node_cpu{cpu="cpu3",mode="guest_nice"} 0 -node_cpu{cpu="cpu3",mode="idle"} 11132.3 -node_cpu{cpu="cpu3",mode="iowait"} 5.33 -node_cpu{cpu="cpu3",mode="irq"} 0 -node_cpu{cpu="cpu3",mode="nice"} 1.02 -node_cpu{cpu="cpu3",mode="softirq"} 0.6 -node_cpu{cpu="cpu3",mode="steal"} 0 -node_cpu{cpu="cpu3",mode="system"} 156.83 -node_cpu{cpu="cpu3",mode="user"} 470.54 -node_cpu{cpu="cpu4",mode="guest"} 0 -node_cpu{cpu="cpu4",mode="guest_nice"} 0 -node_cpu{cpu="cpu4",mode="idle"} 11403.21 -node_cpu{cpu="cpu4",mode="iowait"} 2.17 -node_cpu{cpu="cpu4",mode="irq"} 0 -node_cpu{cpu="cpu4",mode="nice"} 0.25 
-node_cpu{cpu="cpu4",mode="softirq"} 0.08 -node_cpu{cpu="cpu4",mode="steal"} 0 -node_cpu{cpu="cpu4",mode="system"} 107.76 -node_cpu{cpu="cpu4",mode="user"} 284.13 -node_cpu{cpu="cpu5",mode="guest"} 0 -node_cpu{cpu="cpu5",mode="guest_nice"} 0 -node_cpu{cpu="cpu5",mode="idle"} 11362.7 -node_cpu{cpu="cpu5",mode="iowait"} 6.72 -node_cpu{cpu="cpu5",mode="irq"} 0 -node_cpu{cpu="cpu5",mode="nice"} 1.01 -node_cpu{cpu="cpu5",mode="softirq"} 0.3 -node_cpu{cpu="cpu5",mode="steal"} 0 -node_cpu{cpu="cpu5",mode="system"} 115.86 -node_cpu{cpu="cpu5",mode="user"} 292.71 -node_cpu{cpu="cpu6",mode="guest"} 0 -node_cpu{cpu="cpu6",mode="guest_nice"} 0 -node_cpu{cpu="cpu6",mode="idle"} 11397.21 -node_cpu{cpu="cpu6",mode="iowait"} 3.19 -node_cpu{cpu="cpu6",mode="irq"} 0 -node_cpu{cpu="cpu6",mode="nice"} 0.36 -node_cpu{cpu="cpu6",mode="softirq"} 0.29 -node_cpu{cpu="cpu6",mode="steal"} 0 -node_cpu{cpu="cpu6",mode="system"} 102.76 -node_cpu{cpu="cpu6",mode="user"} 291.52 -node_cpu{cpu="cpu7",mode="guest"} 0 -node_cpu{cpu="cpu7",mode="guest_nice"} 0 -node_cpu{cpu="cpu7",mode="idle"} 11392.82 -node_cpu{cpu="cpu7",mode="iowait"} 5.55 -node_cpu{cpu="cpu7",mode="irq"} 0 -node_cpu{cpu="cpu7",mode="nice"} 2.68 -node_cpu{cpu="cpu7",mode="softirq"} 0.31 -node_cpu{cpu="cpu7",mode="steal"} 0 -node_cpu{cpu="cpu7",mode="system"} 101.64 -node_cpu{cpu="cpu7",mode="user"} 290.98 -# HELP node_cpu_core_throttles_total Number of times this cpu core has been throttled. +# HELP node_boot_time_seconds Node boot time, in unixtime. 
+# TYPE node_boot_time_seconds gauge +node_boot_time_seconds 1.418183276e+09 +# HELP node_btrfs_allocation_ratio Data allocation ratio for a layout/data type +# TYPE node_btrfs_allocation_ratio gauge +node_btrfs_allocation_ratio{block_group_type="data",mode="raid0",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1 +node_btrfs_allocation_ratio{block_group_type="data",mode="raid5",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.3333333333333333 +node_btrfs_allocation_ratio{block_group_type="metadata",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2 +node_btrfs_allocation_ratio{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 2 +node_btrfs_allocation_ratio{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2 +node_btrfs_allocation_ratio{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 2 +# HELP node_btrfs_device_size_bytes Size of a device that is part of the filesystem. +# TYPE node_btrfs_device_size_bytes gauge +node_btrfs_device_size_bytes{device="loop22",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 +node_btrfs_device_size_bytes{device="loop23",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 +node_btrfs_device_size_bytes{device="loop24",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 +node_btrfs_device_size_bytes{device="loop25",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.073741824e+10 +node_btrfs_device_size_bytes{device="loop25",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 +node_btrfs_device_size_bytes{device="loop26",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.073741824e+10 +# HELP node_btrfs_global_rsv_size_bytes Size of global reserve. 
+# TYPE node_btrfs_global_rsv_size_bytes gauge +node_btrfs_global_rsv_size_bytes{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.6777216e+07 +node_btrfs_global_rsv_size_bytes{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.6777216e+07 +# HELP node_btrfs_info Filesystem information +# TYPE node_btrfs_info gauge +node_btrfs_info{label="",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1 +node_btrfs_info{label="fixture",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1 +# HELP node_btrfs_reserved_bytes Amount of space reserved for a data type +# TYPE node_btrfs_reserved_bytes gauge +node_btrfs_reserved_bytes{block_group_type="data",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0 +node_btrfs_reserved_bytes{block_group_type="data",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 +node_btrfs_reserved_bytes{block_group_type="metadata",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0 +node_btrfs_reserved_bytes{block_group_type="metadata",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 +node_btrfs_reserved_bytes{block_group_type="system",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0 +node_btrfs_reserved_bytes{block_group_type="system",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 +# HELP node_btrfs_size_bytes Amount of space allocated for a layout/data type +# TYPE node_btrfs_size_bytes gauge +node_btrfs_size_bytes{block_group_type="data",mode="raid0",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2.147483648e+09 +node_btrfs_size_bytes{block_group_type="data",mode="raid5",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 6.44087808e+08 +node_btrfs_size_bytes{block_group_type="metadata",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.073741824e+09 +node_btrfs_size_bytes{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 4.29391872e+08 +node_btrfs_size_bytes{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 8.388608e+06 
+node_btrfs_size_bytes{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.6777216e+07 +# HELP node_btrfs_used_bytes Amount of used space by a layout/data type +# TYPE node_btrfs_used_bytes gauge +node_btrfs_used_bytes{block_group_type="data",mode="raid0",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 8.08189952e+08 +node_btrfs_used_bytes{block_group_type="data",mode="raid5",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 +node_btrfs_used_bytes{block_group_type="metadata",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 933888 +node_btrfs_used_bytes{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 114688 +node_btrfs_used_bytes{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 16384 +node_btrfs_used_bytes{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 16384 +# HELP node_buddyinfo_blocks Count of free blocks according to size. +# TYPE node_buddyinfo_blocks gauge +node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759 +node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381 +node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572 +node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093 +node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3 +node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791 +node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185 +node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475 +node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530 +node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2 
+node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194 +node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567 +node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45 +node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102 +node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12 +node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4 +node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0 +node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0 +node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1 +node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0 +node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0 +# HELP node_context_switches_total Total number of context switches. +# TYPE node_context_switches_total counter +node_context_switches_total 3.8014093e+07 +# HELP node_cooling_device_cur_state Current throttle state of the cooling device +# TYPE node_cooling_device_cur_state gauge +node_cooling_device_cur_state{name="0",type="Processor"} 0 +# HELP node_cooling_device_max_state Maximum throttle state of the cooling device +# TYPE node_cooling_device_max_state gauge +node_cooling_device_max_state{name="0",type="Processor"} 3 +# HELP node_cpu_bug_info The `bugs` field of CPU information from /proc/cpuinfo taken from the first core. +# TYPE node_cpu_bug_info gauge +node_cpu_bug_info{bug="cpu_meltdown"} 1 +node_cpu_bug_info{bug="mds"} 1 +node_cpu_bug_info{bug="spectre_v1"} 1 +node_cpu_bug_info{bug="spectre_v2"} 1 +# HELP node_cpu_core_throttles_total Number of times this CPU core has been throttled. 
# TYPE node_cpu_core_throttles_total counter -node_cpu_core_throttles_total{cpu="cpu0"} 5 -node_cpu_core_throttles_total{cpu="cpu1"} 0 -node_cpu_core_throttles_total{cpu="cpu2"} 40 -# HELP node_cpu_frequency_hertz Current cpu thread frequency in hertz. -# TYPE node_cpu_frequency_hertz gauge -node_cpu_frequency_hertz{cpu="cpu0"} 1.699981e+09 -node_cpu_frequency_hertz{cpu="cpu1"} 1.699981e+09 -node_cpu_frequency_hertz{cpu="cpu3"} 8e+06 -# HELP node_cpu_frequency_max_hertz Maximum cpu thread frequency in hertz. -# TYPE node_cpu_frequency_max_hertz gauge -node_cpu_frequency_max_hertz{cpu="cpu0"} 3.7e+09 -node_cpu_frequency_max_hertz{cpu="cpu1"} 3.7e+09 -node_cpu_frequency_max_hertz{cpu="cpu3"} 4.2e+09 -# HELP node_cpu_frequency_min_hertz Minimum cpu thread frequency in hertz. -# TYPE node_cpu_frequency_min_hertz gauge -node_cpu_frequency_min_hertz{cpu="cpu0"} 8e+08 -node_cpu_frequency_min_hertz{cpu="cpu1"} 8e+08 -node_cpu_frequency_min_hertz{cpu="cpu3"} 1e+06 -# HELP node_cpu_package_throttles_total Number of times this cpu package has been throttled. +node_cpu_core_throttles_total{core="0",package="0"} 5 +node_cpu_core_throttles_total{core="0",package="1"} 0 +node_cpu_core_throttles_total{core="1",package="0"} 0 +node_cpu_core_throttles_total{core="1",package="1"} 9 +# HELP node_cpu_flag_info The `flags` field of CPU information from /proc/cpuinfo taken from the first core. +# TYPE node_cpu_flag_info gauge +node_cpu_flag_info{flag="aes"} 1 +node_cpu_flag_info{flag="avx"} 1 +node_cpu_flag_info{flag="avx2"} 1 +node_cpu_flag_info{flag="constant_tsc"} 1 +# HELP node_cpu_guest_seconds_total Seconds the CPUs spent in guests (VMs) for each mode. 
+# TYPE node_cpu_guest_seconds_total counter +node_cpu_guest_seconds_total{cpu="0",mode="nice"} 0.01 +node_cpu_guest_seconds_total{cpu="0",mode="user"} 0.02 +node_cpu_guest_seconds_total{cpu="1",mode="nice"} 0.02 +node_cpu_guest_seconds_total{cpu="1",mode="user"} 0.03 +node_cpu_guest_seconds_total{cpu="2",mode="nice"} 0.03 +node_cpu_guest_seconds_total{cpu="2",mode="user"} 0.04 +node_cpu_guest_seconds_total{cpu="3",mode="nice"} 0.04 +node_cpu_guest_seconds_total{cpu="3",mode="user"} 0.05 +node_cpu_guest_seconds_total{cpu="4",mode="nice"} 0.05 +node_cpu_guest_seconds_total{cpu="4",mode="user"} 0.06 +node_cpu_guest_seconds_total{cpu="5",mode="nice"} 0.06 +node_cpu_guest_seconds_total{cpu="5",mode="user"} 0.07 +node_cpu_guest_seconds_total{cpu="6",mode="nice"} 0.07 +node_cpu_guest_seconds_total{cpu="6",mode="user"} 0.08 +node_cpu_guest_seconds_total{cpu="7",mode="nice"} 0.08 +node_cpu_guest_seconds_total{cpu="7",mode="user"} 0.09 +# HELP node_cpu_info CPU information from /proc/cpuinfo. +# TYPE node_cpu_info gauge +node_cpu_info{cachesize="8192 KB",core="0",cpu="0",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +node_cpu_info{cachesize="8192 KB",core="0",cpu="4",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +node_cpu_info{cachesize="8192 KB",core="1",cpu="1",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +node_cpu_info{cachesize="8192 KB",core="1",cpu="5",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +node_cpu_info{cachesize="8192 KB",core="2",cpu="2",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 
1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +node_cpu_info{cachesize="8192 KB",core="2",cpu="6",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +node_cpu_info{cachesize="8192 KB",core="3",cpu="3",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +node_cpu_info{cachesize="8192 KB",core="3",cpu="7",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 +# HELP node_cpu_package_throttles_total Number of times this CPU package has been throttled. # TYPE node_cpu_package_throttles_total counter -node_cpu_package_throttles_total{node="0"} 30 -# HELP node_disk_bytes_read The total number of bytes read successfully. -# TYPE node_disk_bytes_read counter -node_disk_bytes_read{device="dm-0"} 5.13708655616e+11 -node_disk_bytes_read{device="dm-1"} 1.589248e+06 -node_disk_bytes_read{device="dm-2"} 1.578752e+08 -node_disk_bytes_read{device="dm-3"} 1.98144e+06 -node_disk_bytes_read{device="dm-4"} 529408 -node_disk_bytes_read{device="dm-5"} 4.3150848e+07 -node_disk_bytes_read{device="mmcblk0"} 798720 -node_disk_bytes_read{device="mmcblk0p1"} 81920 -node_disk_bytes_read{device="mmcblk0p2"} 389120 -node_disk_bytes_read{device="nvme0n1"} 2.377714176e+09 -node_disk_bytes_read{device="sda"} 5.13713216512e+11 -node_disk_bytes_read{device="sr0"} 0 -node_disk_bytes_read{device="vda"} 1.6727491584e+10 -# HELP node_disk_bytes_written The total number of bytes written successfully. 
-# TYPE node_disk_bytes_written counter -node_disk_bytes_written{device="dm-0"} 2.5891680256e+11 -node_disk_bytes_written{device="dm-1"} 303104 -node_disk_bytes_written{device="dm-2"} 2.607828992e+09 -node_disk_bytes_written{device="dm-3"} 0 -node_disk_bytes_written{device="dm-4"} 70144 -node_disk_bytes_written{device="dm-5"} 5.89664256e+08 -node_disk_bytes_written{device="mmcblk0"} 0 -node_disk_bytes_written{device="mmcblk0p1"} 0 -node_disk_bytes_written{device="mmcblk0p2"} 0 -node_disk_bytes_written{device="nvme0n1"} 2.0199236096e+10 -node_disk_bytes_written{device="sda"} 2.58916880384e+11 -node_disk_bytes_written{device="sr0"} 0 -node_disk_bytes_written{device="vda"} 1.0938236928e+11 +node_cpu_package_throttles_total{package="0"} 30 +node_cpu_package_throttles_total{package="1"} 6 +# HELP node_cpu_scaling_frequency_hertz Current scaled CPU thread frequency in hertz. +# TYPE node_cpu_scaling_frequency_hertz gauge +node_cpu_scaling_frequency_hertz{cpu="0"} 1.699981e+09 +node_cpu_scaling_frequency_hertz{cpu="1"} 1.699981e+09 +node_cpu_scaling_frequency_hertz{cpu="2"} 8e+06 +node_cpu_scaling_frequency_hertz{cpu="3"} 8e+06 +# HELP node_cpu_scaling_frequency_max_hertz Maximum scaled CPU thread frequency in hertz. +# TYPE node_cpu_scaling_frequency_max_hertz gauge +node_cpu_scaling_frequency_max_hertz{cpu="0"} 3.7e+09 +node_cpu_scaling_frequency_max_hertz{cpu="1"} 3.7e+09 +node_cpu_scaling_frequency_max_hertz{cpu="2"} 4.2e+09 +node_cpu_scaling_frequency_max_hertz{cpu="3"} 4.2e+09 +# HELP node_cpu_scaling_frequency_min_hertz Minimum scaled CPU thread frequency in hertz. +# TYPE node_cpu_scaling_frequency_min_hertz gauge +node_cpu_scaling_frequency_min_hertz{cpu="0"} 8e+08 +node_cpu_scaling_frequency_min_hertz{cpu="1"} 8e+08 +node_cpu_scaling_frequency_min_hertz{cpu="2"} 1e+06 +node_cpu_scaling_frequency_min_hertz{cpu="3"} 1e+06 +# HELP node_cpu_seconds_total Seconds the CPUs spent in each mode. 
+# TYPE node_cpu_seconds_total counter +node_cpu_seconds_total{cpu="0",mode="idle"} 10870.69 +node_cpu_seconds_total{cpu="0",mode="iowait"} 2.2 +node_cpu_seconds_total{cpu="0",mode="irq"} 0.01 +node_cpu_seconds_total{cpu="0",mode="nice"} 0.19 +node_cpu_seconds_total{cpu="0",mode="softirq"} 34.1 +node_cpu_seconds_total{cpu="0",mode="steal"} 0 +node_cpu_seconds_total{cpu="0",mode="system"} 210.45 +node_cpu_seconds_total{cpu="0",mode="user"} 444.9 +node_cpu_seconds_total{cpu="1",mode="idle"} 11107.87 +node_cpu_seconds_total{cpu="1",mode="iowait"} 5.91 +node_cpu_seconds_total{cpu="1",mode="irq"} 0 +node_cpu_seconds_total{cpu="1",mode="nice"} 0.23 +node_cpu_seconds_total{cpu="1",mode="softirq"} 0.46 +node_cpu_seconds_total{cpu="1",mode="steal"} 0 +node_cpu_seconds_total{cpu="1",mode="system"} 164.74 +node_cpu_seconds_total{cpu="1",mode="user"} 478.69 +node_cpu_seconds_total{cpu="2",mode="idle"} 11123.21 +node_cpu_seconds_total{cpu="2",mode="iowait"} 4.41 +node_cpu_seconds_total{cpu="2",mode="irq"} 0 +node_cpu_seconds_total{cpu="2",mode="nice"} 0.36 +node_cpu_seconds_total{cpu="2",mode="softirq"} 3.26 +node_cpu_seconds_total{cpu="2",mode="steal"} 0 +node_cpu_seconds_total{cpu="2",mode="system"} 159.16 +node_cpu_seconds_total{cpu="2",mode="user"} 465.04 +node_cpu_seconds_total{cpu="3",mode="idle"} 11132.3 +node_cpu_seconds_total{cpu="3",mode="iowait"} 5.33 +node_cpu_seconds_total{cpu="3",mode="irq"} 0 +node_cpu_seconds_total{cpu="3",mode="nice"} 1.02 +node_cpu_seconds_total{cpu="3",mode="softirq"} 0.6 +node_cpu_seconds_total{cpu="3",mode="steal"} 0 +node_cpu_seconds_total{cpu="3",mode="system"} 156.83 +node_cpu_seconds_total{cpu="3",mode="user"} 470.54 +node_cpu_seconds_total{cpu="4",mode="idle"} 11403.21 +node_cpu_seconds_total{cpu="4",mode="iowait"} 2.17 +node_cpu_seconds_total{cpu="4",mode="irq"} 0 +node_cpu_seconds_total{cpu="4",mode="nice"} 0.25 +node_cpu_seconds_total{cpu="4",mode="softirq"} 0.08 +node_cpu_seconds_total{cpu="4",mode="steal"} 0 
+node_cpu_seconds_total{cpu="4",mode="system"} 107.76 +node_cpu_seconds_total{cpu="4",mode="user"} 284.13 +node_cpu_seconds_total{cpu="5",mode="idle"} 11362.7 +node_cpu_seconds_total{cpu="5",mode="iowait"} 6.72 +node_cpu_seconds_total{cpu="5",mode="irq"} 0 +node_cpu_seconds_total{cpu="5",mode="nice"} 1.01 +node_cpu_seconds_total{cpu="5",mode="softirq"} 0.3 +node_cpu_seconds_total{cpu="5",mode="steal"} 0 +node_cpu_seconds_total{cpu="5",mode="system"} 115.86 +node_cpu_seconds_total{cpu="5",mode="user"} 292.71 +node_cpu_seconds_total{cpu="6",mode="idle"} 11397.21 +node_cpu_seconds_total{cpu="6",mode="iowait"} 3.19 +node_cpu_seconds_total{cpu="6",mode="irq"} 0 +node_cpu_seconds_total{cpu="6",mode="nice"} 0.36 +node_cpu_seconds_total{cpu="6",mode="softirq"} 0.29 +node_cpu_seconds_total{cpu="6",mode="steal"} 0 +node_cpu_seconds_total{cpu="6",mode="system"} 102.76 +node_cpu_seconds_total{cpu="6",mode="user"} 291.52 +node_cpu_seconds_total{cpu="7",mode="idle"} 11392.82 +node_cpu_seconds_total{cpu="7",mode="iowait"} 5.55 +node_cpu_seconds_total{cpu="7",mode="irq"} 0 +node_cpu_seconds_total{cpu="7",mode="nice"} 2.68 +node_cpu_seconds_total{cpu="7",mode="softirq"} 0.31 +node_cpu_seconds_total{cpu="7",mode="steal"} 0 +node_cpu_seconds_total{cpu="7",mode="system"} 101.64 +node_cpu_seconds_total{cpu="7",mode="user"} 290.98 +# HELP node_disk_discard_time_seconds_total This is the total number of seconds spent by all discards. +# TYPE node_disk_discard_time_seconds_total counter +node_disk_discard_time_seconds_total{device="sdb"} 11.13 +node_disk_discard_time_seconds_total{device="sdc"} 11.13 +# HELP node_disk_discarded_sectors_total The total number of sectors discarded successfully. +# TYPE node_disk_discarded_sectors_total counter +node_disk_discarded_sectors_total{device="sdb"} 1.925173784e+09 +node_disk_discarded_sectors_total{device="sdc"} 1.25173784e+08 +# HELP node_disk_discards_completed_total The total number of discards completed successfully. 
+# TYPE node_disk_discards_completed_total counter +node_disk_discards_completed_total{device="sdb"} 68851 +node_disk_discards_completed_total{device="sdc"} 18851 +# HELP node_disk_discards_merged_total The total number of discards merged. +# TYPE node_disk_discards_merged_total counter +node_disk_discards_merged_total{device="sdb"} 0 +node_disk_discards_merged_total{device="sdc"} 0 +# HELP node_disk_flush_requests_time_seconds_total This is the total number of seconds spent by all flush requests. +# TYPE node_disk_flush_requests_time_seconds_total counter +node_disk_flush_requests_time_seconds_total{device="sdc"} 1.944 +# HELP node_disk_flush_requests_total The total number of flush requests completed successfully +# TYPE node_disk_flush_requests_total counter +node_disk_flush_requests_total{device="sdc"} 1555 +# HELP node_disk_info Info of /sys/block/. +# TYPE node_disk_info gauge +node_disk_info{device="dm-0",major="252",minor="0"} 1 +node_disk_info{device="dm-1",major="252",minor="1"} 1 +node_disk_info{device="dm-2",major="252",minor="2"} 1 +node_disk_info{device="dm-3",major="252",minor="3"} 1 +node_disk_info{device="dm-4",major="252",minor="4"} 1 +node_disk_info{device="dm-5",major="252",minor="5"} 1 +node_disk_info{device="mmcblk0",major="179",minor="0"} 1 +node_disk_info{device="mmcblk0p1",major="179",minor="1"} 1 +node_disk_info{device="mmcblk0p2",major="179",minor="2"} 1 +node_disk_info{device="nvme0n1",major="259",minor="0"} 1 +node_disk_info{device="sda",major="8",minor="0"} 1 +node_disk_info{device="sdb",major="8",minor="0"} 1 +node_disk_info{device="sdc",major="8",minor="0"} 1 +node_disk_info{device="sr0",major="11",minor="0"} 1 +node_disk_info{device="vda",major="254",minor="0"} 1 # HELP node_disk_io_now The number of I/Os currently in progress. 
# TYPE node_disk_io_now gauge node_disk_io_now{device="dm-0"} 0 @@ -347,158 +433,183 @@ node_disk_io_now{device="mmcblk0p1"} 0 node_disk_io_now{device="mmcblk0p2"} 0 node_disk_io_now{device="nvme0n1"} 0 node_disk_io_now{device="sda"} 0 +node_disk_io_now{device="sdb"} 0 +node_disk_io_now{device="sdc"} 0 node_disk_io_now{device="sr0"} 0 node_disk_io_now{device="vda"} 0 -# HELP node_disk_io_time_ms Total Milliseconds spent doing I/Os. -# TYPE node_disk_io_time_ms counter -node_disk_io_time_ms{device="dm-0"} 1.1325968e+07 -node_disk_io_time_ms{device="dm-1"} 76 -node_disk_io_time_ms{device="dm-2"} 65400 -node_disk_io_time_ms{device="dm-3"} 16 -node_disk_io_time_ms{device="dm-4"} 24 -node_disk_io_time_ms{device="dm-5"} 58848 -node_disk_io_time_ms{device="mmcblk0"} 136 -node_disk_io_time_ms{device="mmcblk0p1"} 24 -node_disk_io_time_ms{device="mmcblk0p2"} 68 -node_disk_io_time_ms{device="nvme0n1"} 222766 -node_disk_io_time_ms{device="sda"} 9.65388e+06 -node_disk_io_time_ms{device="sr0"} 0 -node_disk_io_time_ms{device="vda"} 4.1614592e+07 -# HELP node_disk_io_time_weighted The weighted # of milliseconds spent doing I/Os. See https://www.kernel.org/doc/Documentation/iostats.txt. -# TYPE node_disk_io_time_weighted counter -node_disk_io_time_weighted{device="dm-0"} 1.206301256e+09 -node_disk_io_time_weighted{device="dm-1"} 84 -node_disk_io_time_weighted{device="dm-2"} 129416 -node_disk_io_time_weighted{device="dm-3"} 104 -node_disk_io_time_weighted{device="dm-4"} 44 -node_disk_io_time_weighted{device="dm-5"} 105632 -node_disk_io_time_weighted{device="mmcblk0"} 156 -node_disk_io_time_weighted{device="mmcblk0p1"} 24 -node_disk_io_time_weighted{device="mmcblk0p2"} 68 -node_disk_io_time_weighted{device="nvme0n1"} 1.032546e+06 -node_disk_io_time_weighted{device="sda"} 8.2621804e+07 -node_disk_io_time_weighted{device="sr0"} 0 -node_disk_io_time_weighted{device="vda"} 2.077872228e+09 -# HELP node_disk_read_time_ms The total number of milliseconds spent by all reads. 
-# TYPE node_disk_read_time_ms counter -node_disk_read_time_ms{device="dm-0"} 4.6229572e+07 -node_disk_read_time_ms{device="dm-1"} 84 -node_disk_read_time_ms{device="dm-2"} 6536 -node_disk_read_time_ms{device="dm-3"} 104 -node_disk_read_time_ms{device="dm-4"} 28 -node_disk_read_time_ms{device="dm-5"} 924 -node_disk_read_time_ms{device="mmcblk0"} 156 -node_disk_read_time_ms{device="mmcblk0p1"} 24 -node_disk_read_time_ms{device="mmcblk0p2"} 68 -node_disk_read_time_ms{device="nvme0n1"} 21650 -node_disk_read_time_ms{device="sda"} 1.8492372e+07 -node_disk_read_time_ms{device="sr0"} 0 -node_disk_read_time_ms{device="vda"} 8.655768e+06 -# HELP node_disk_reads_completed The total number of reads completed successfully. -# TYPE node_disk_reads_completed counter -node_disk_reads_completed{device="dm-0"} 5.9910002e+07 -node_disk_reads_completed{device="dm-1"} 388 -node_disk_reads_completed{device="dm-2"} 11571 -node_disk_reads_completed{device="dm-3"} 3870 -node_disk_reads_completed{device="dm-4"} 392 -node_disk_reads_completed{device="dm-5"} 3729 -node_disk_reads_completed{device="mmcblk0"} 192 -node_disk_reads_completed{device="mmcblk0p1"} 17 -node_disk_reads_completed{device="mmcblk0p2"} 95 -node_disk_reads_completed{device="nvme0n1"} 47114 -node_disk_reads_completed{device="sda"} 2.5354637e+07 -node_disk_reads_completed{device="sr0"} 0 -node_disk_reads_completed{device="vda"} 1.775784e+06 -# HELP node_disk_reads_merged The total number of reads merged. See https://www.kernel.org/doc/Documentation/iostats.txt. 
-# TYPE node_disk_reads_merged counter -node_disk_reads_merged{device="dm-0"} 0 -node_disk_reads_merged{device="dm-1"} 0 -node_disk_reads_merged{device="dm-2"} 0 -node_disk_reads_merged{device="dm-3"} 0 -node_disk_reads_merged{device="dm-4"} 0 -node_disk_reads_merged{device="dm-5"} 0 -node_disk_reads_merged{device="mmcblk0"} 3 -node_disk_reads_merged{device="mmcblk0p1"} 3 -node_disk_reads_merged{device="mmcblk0p2"} 0 -node_disk_reads_merged{device="nvme0n1"} 4 -node_disk_reads_merged{device="sda"} 3.4367663e+07 -node_disk_reads_merged{device="sr0"} 0 -node_disk_reads_merged{device="vda"} 15386 -# HELP node_disk_sectors_read The total number of sectors read successfully. -# TYPE node_disk_sectors_read counter -node_disk_sectors_read{device="dm-0"} 1.003337218e+09 -node_disk_sectors_read{device="dm-1"} 3104 -node_disk_sectors_read{device="dm-2"} 308350 -node_disk_sectors_read{device="dm-3"} 3870 -node_disk_sectors_read{device="dm-4"} 1034 -node_disk_sectors_read{device="dm-5"} 84279 -node_disk_sectors_read{device="mmcblk0"} 1560 -node_disk_sectors_read{device="mmcblk0p1"} 160 -node_disk_sectors_read{device="mmcblk0p2"} 760 -node_disk_sectors_read{device="nvme0n1"} 4.643973e+06 -node_disk_sectors_read{device="sda"} 1.003346126e+09 -node_disk_sectors_read{device="sr0"} 0 -node_disk_sectors_read{device="vda"} 3.2670882e+07 -# HELP node_disk_sectors_written The total number of sectors written successfully. 
-# TYPE node_disk_sectors_written counter -node_disk_sectors_written{device="dm-0"} 5.0569688e+08 -node_disk_sectors_written{device="dm-1"} 592 -node_disk_sectors_written{device="dm-2"} 5.093416e+06 -node_disk_sectors_written{device="dm-3"} 0 -node_disk_sectors_written{device="dm-4"} 137 -node_disk_sectors_written{device="dm-5"} 1.151688e+06 -node_disk_sectors_written{device="mmcblk0"} 0 -node_disk_sectors_written{device="mmcblk0p1"} 0 -node_disk_sectors_written{device="mmcblk0p2"} 0 -node_disk_sectors_written{device="nvme0n1"} 3.9451633e+07 -node_disk_sectors_written{device="sda"} 5.05697032e+08 -node_disk_sectors_written{device="sr0"} 0 -node_disk_sectors_written{device="vda"} 2.1363744e+08 -# HELP node_disk_write_time_ms This is the total number of milliseconds spent by all writes. -# TYPE node_disk_write_time_ms counter -node_disk_write_time_ms{device="dm-0"} 1.1585578e+09 -node_disk_write_time_ms{device="dm-1"} 0 -node_disk_write_time_ms{device="dm-2"} 122884 -node_disk_write_time_ms{device="dm-3"} 0 -node_disk_write_time_ms{device="dm-4"} 16 -node_disk_write_time_ms{device="dm-5"} 104684 -node_disk_write_time_ms{device="mmcblk0"} 0 -node_disk_write_time_ms{device="mmcblk0p1"} 0 -node_disk_write_time_ms{device="mmcblk0p2"} 0 -node_disk_write_time_ms{device="nvme0n1"} 1.011053e+06 -node_disk_write_time_ms{device="sda"} 6.387796e+07 -node_disk_write_time_ms{device="sr0"} 0 -node_disk_write_time_ms{device="vda"} 2.069221364e+09 -# HELP node_disk_writes_completed The total number of writes completed successfully. 
-# TYPE node_disk_writes_completed counter -node_disk_writes_completed{device="dm-0"} 3.9231014e+07 -node_disk_writes_completed{device="dm-1"} 74 -node_disk_writes_completed{device="dm-2"} 153522 -node_disk_writes_completed{device="dm-3"} 0 -node_disk_writes_completed{device="dm-4"} 38 -node_disk_writes_completed{device="dm-5"} 98918 -node_disk_writes_completed{device="mmcblk0"} 0 -node_disk_writes_completed{device="mmcblk0p1"} 0 -node_disk_writes_completed{device="mmcblk0p2"} 0 -node_disk_writes_completed{device="nvme0n1"} 1.07832e+06 -node_disk_writes_completed{device="sda"} 2.8444756e+07 -node_disk_writes_completed{device="sr0"} 0 -node_disk_writes_completed{device="vda"} 6.038856e+06 -# HELP node_disk_writes_merged The number of writes merged. See https://www.kernel.org/doc/Documentation/iostats.txt. -# TYPE node_disk_writes_merged counter -node_disk_writes_merged{device="dm-0"} 0 -node_disk_writes_merged{device="dm-1"} 0 -node_disk_writes_merged{device="dm-2"} 0 -node_disk_writes_merged{device="dm-3"} 0 -node_disk_writes_merged{device="dm-4"} 0 -node_disk_writes_merged{device="dm-5"} 0 -node_disk_writes_merged{device="mmcblk0"} 0 -node_disk_writes_merged{device="mmcblk0p1"} 0 -node_disk_writes_merged{device="mmcblk0p2"} 0 -node_disk_writes_merged{device="nvme0n1"} 43950 -node_disk_writes_merged{device="sda"} 1.1134226e+07 -node_disk_writes_merged{device="sr0"} 0 -node_disk_writes_merged{device="vda"} 2.0711856e+07 +# HELP node_disk_io_time_seconds_total Total seconds spent doing I/Os. 
+# TYPE node_disk_io_time_seconds_total counter +node_disk_io_time_seconds_total{device="dm-0"} 11325.968 +node_disk_io_time_seconds_total{device="dm-1"} 0.076 +node_disk_io_time_seconds_total{device="dm-2"} 65.4 +node_disk_io_time_seconds_total{device="dm-3"} 0.016 +node_disk_io_time_seconds_total{device="dm-4"} 0.024 +node_disk_io_time_seconds_total{device="dm-5"} 58.848 +node_disk_io_time_seconds_total{device="mmcblk0"} 0.136 +node_disk_io_time_seconds_total{device="mmcblk0p1"} 0.024 +node_disk_io_time_seconds_total{device="mmcblk0p2"} 0.068 +node_disk_io_time_seconds_total{device="nvme0n1"} 222.766 +node_disk_io_time_seconds_total{device="sda"} 9653.880000000001 +node_disk_io_time_seconds_total{device="sdb"} 60.730000000000004 +node_disk_io_time_seconds_total{device="sdc"} 10.73 +node_disk_io_time_seconds_total{device="sr0"} 0 +node_disk_io_time_seconds_total{device="vda"} 41614.592000000004 +# HELP node_disk_io_time_weighted_seconds_total The weighted # of seconds spent doing I/Os. 
+# TYPE node_disk_io_time_weighted_seconds_total counter +node_disk_io_time_weighted_seconds_total{device="dm-0"} 1.206301256e+06 +node_disk_io_time_weighted_seconds_total{device="dm-1"} 0.084 +node_disk_io_time_weighted_seconds_total{device="dm-2"} 129.416 +node_disk_io_time_weighted_seconds_total{device="dm-3"} 0.10400000000000001 +node_disk_io_time_weighted_seconds_total{device="dm-4"} 0.044 +node_disk_io_time_weighted_seconds_total{device="dm-5"} 105.632 +node_disk_io_time_weighted_seconds_total{device="mmcblk0"} 0.156 +node_disk_io_time_weighted_seconds_total{device="mmcblk0p1"} 0.024 +node_disk_io_time_weighted_seconds_total{device="mmcblk0p2"} 0.068 +node_disk_io_time_weighted_seconds_total{device="nvme0n1"} 1032.546 +node_disk_io_time_weighted_seconds_total{device="sda"} 82621.804 +node_disk_io_time_weighted_seconds_total{device="sdb"} 67.07000000000001 +node_disk_io_time_weighted_seconds_total{device="sdc"} 17.07 +node_disk_io_time_weighted_seconds_total{device="sr0"} 0 +node_disk_io_time_weighted_seconds_total{device="vda"} 2.0778722280000001e+06 +# HELP node_disk_read_bytes_total The total number of bytes read successfully. 
+# TYPE node_disk_read_bytes_total counter +node_disk_read_bytes_total{device="dm-0"} 5.13708655616e+11 +node_disk_read_bytes_total{device="dm-1"} 1.589248e+06 +node_disk_read_bytes_total{device="dm-2"} 1.578752e+08 +node_disk_read_bytes_total{device="dm-3"} 1.98144e+06 +node_disk_read_bytes_total{device="dm-4"} 529408 +node_disk_read_bytes_total{device="dm-5"} 4.3150848e+07 +node_disk_read_bytes_total{device="mmcblk0"} 798720 +node_disk_read_bytes_total{device="mmcblk0p1"} 81920 +node_disk_read_bytes_total{device="mmcblk0p2"} 389120 +node_disk_read_bytes_total{device="nvme0n1"} 2.377714176e+09 +node_disk_read_bytes_total{device="sda"} 5.13713216512e+11 +node_disk_read_bytes_total{device="sdb"} 4.944782848e+09 +node_disk_read_bytes_total{device="sdc"} 8.48782848e+08 +node_disk_read_bytes_total{device="sr0"} 0 +node_disk_read_bytes_total{device="vda"} 1.6727491584e+10 +# HELP node_disk_read_time_seconds_total The total number of seconds spent by all reads. +# TYPE node_disk_read_time_seconds_total counter +node_disk_read_time_seconds_total{device="dm-0"} 46229.572 +node_disk_read_time_seconds_total{device="dm-1"} 0.084 +node_disk_read_time_seconds_total{device="dm-2"} 6.5360000000000005 +node_disk_read_time_seconds_total{device="dm-3"} 0.10400000000000001 +node_disk_read_time_seconds_total{device="dm-4"} 0.028 +node_disk_read_time_seconds_total{device="dm-5"} 0.924 +node_disk_read_time_seconds_total{device="mmcblk0"} 0.156 +node_disk_read_time_seconds_total{device="mmcblk0p1"} 0.024 +node_disk_read_time_seconds_total{device="mmcblk0p2"} 0.068 +node_disk_read_time_seconds_total{device="nvme0n1"} 21.650000000000002 +node_disk_read_time_seconds_total{device="sda"} 18492.372 +node_disk_read_time_seconds_total{device="sdb"} 0.084 +node_disk_read_time_seconds_total{device="sdc"} 0.014 +node_disk_read_time_seconds_total{device="sr0"} 0 +node_disk_read_time_seconds_total{device="vda"} 8655.768 +# HELP node_disk_reads_completed_total The total number of reads completed 
successfully. +# TYPE node_disk_reads_completed_total counter +node_disk_reads_completed_total{device="dm-0"} 5.9910002e+07 +node_disk_reads_completed_total{device="dm-1"} 388 +node_disk_reads_completed_total{device="dm-2"} 11571 +node_disk_reads_completed_total{device="dm-3"} 3870 +node_disk_reads_completed_total{device="dm-4"} 392 +node_disk_reads_completed_total{device="dm-5"} 3729 +node_disk_reads_completed_total{device="mmcblk0"} 192 +node_disk_reads_completed_total{device="mmcblk0p1"} 17 +node_disk_reads_completed_total{device="mmcblk0p2"} 95 +node_disk_reads_completed_total{device="nvme0n1"} 47114 +node_disk_reads_completed_total{device="sda"} 2.5354637e+07 +node_disk_reads_completed_total{device="sdb"} 326552 +node_disk_reads_completed_total{device="sdc"} 126552 +node_disk_reads_completed_total{device="sr0"} 0 +node_disk_reads_completed_total{device="vda"} 1.775784e+06 +# HELP node_disk_reads_merged_total The total number of reads merged. +# TYPE node_disk_reads_merged_total counter +node_disk_reads_merged_total{device="dm-0"} 0 +node_disk_reads_merged_total{device="dm-1"} 0 +node_disk_reads_merged_total{device="dm-2"} 0 +node_disk_reads_merged_total{device="dm-3"} 0 +node_disk_reads_merged_total{device="dm-4"} 0 +node_disk_reads_merged_total{device="dm-5"} 0 +node_disk_reads_merged_total{device="mmcblk0"} 3 +node_disk_reads_merged_total{device="mmcblk0p1"} 3 +node_disk_reads_merged_total{device="mmcblk0p2"} 0 +node_disk_reads_merged_total{device="nvme0n1"} 4 +node_disk_reads_merged_total{device="sda"} 3.4367663e+07 +node_disk_reads_merged_total{device="sdb"} 841 +node_disk_reads_merged_total{device="sdc"} 141 +node_disk_reads_merged_total{device="sr0"} 0 +node_disk_reads_merged_total{device="vda"} 15386 +# HELP node_disk_write_time_seconds_total This is the total number of seconds spent by all writes. 
+# TYPE node_disk_write_time_seconds_total counter +node_disk_write_time_seconds_total{device="dm-0"} 1.1585578e+06 +node_disk_write_time_seconds_total{device="dm-1"} 0 +node_disk_write_time_seconds_total{device="dm-2"} 122.884 +node_disk_write_time_seconds_total{device="dm-3"} 0 +node_disk_write_time_seconds_total{device="dm-4"} 0.016 +node_disk_write_time_seconds_total{device="dm-5"} 104.684 +node_disk_write_time_seconds_total{device="mmcblk0"} 0 +node_disk_write_time_seconds_total{device="mmcblk0p1"} 0 +node_disk_write_time_seconds_total{device="mmcblk0p2"} 0 +node_disk_write_time_seconds_total{device="nvme0n1"} 1011.053 +node_disk_write_time_seconds_total{device="sda"} 63877.96 +node_disk_write_time_seconds_total{device="sdb"} 5.007 +node_disk_write_time_seconds_total{device="sdc"} 1.0070000000000001 +node_disk_write_time_seconds_total{device="sr0"} 0 +node_disk_write_time_seconds_total{device="vda"} 2.069221364e+06 +# HELP node_disk_writes_completed_total The total number of writes completed successfully. +# TYPE node_disk_writes_completed_total counter +node_disk_writes_completed_total{device="dm-0"} 3.9231014e+07 +node_disk_writes_completed_total{device="dm-1"} 74 +node_disk_writes_completed_total{device="dm-2"} 153522 +node_disk_writes_completed_total{device="dm-3"} 0 +node_disk_writes_completed_total{device="dm-4"} 38 +node_disk_writes_completed_total{device="dm-5"} 98918 +node_disk_writes_completed_total{device="mmcblk0"} 0 +node_disk_writes_completed_total{device="mmcblk0p1"} 0 +node_disk_writes_completed_total{device="mmcblk0p2"} 0 +node_disk_writes_completed_total{device="nvme0n1"} 1.07832e+06 +node_disk_writes_completed_total{device="sda"} 2.8444756e+07 +node_disk_writes_completed_total{device="sdb"} 41822 +node_disk_writes_completed_total{device="sdc"} 11822 +node_disk_writes_completed_total{device="sr0"} 0 +node_disk_writes_completed_total{device="vda"} 6.038856e+06 +# HELP node_disk_writes_merged_total The number of writes merged. 
+# TYPE node_disk_writes_merged_total counter +node_disk_writes_merged_total{device="dm-0"} 0 +node_disk_writes_merged_total{device="dm-1"} 0 +node_disk_writes_merged_total{device="dm-2"} 0 +node_disk_writes_merged_total{device="dm-3"} 0 +node_disk_writes_merged_total{device="dm-4"} 0 +node_disk_writes_merged_total{device="dm-5"} 0 +node_disk_writes_merged_total{device="mmcblk0"} 0 +node_disk_writes_merged_total{device="mmcblk0p1"} 0 +node_disk_writes_merged_total{device="mmcblk0p2"} 0 +node_disk_writes_merged_total{device="nvme0n1"} 43950 +node_disk_writes_merged_total{device="sda"} 1.1134226e+07 +node_disk_writes_merged_total{device="sdb"} 2895 +node_disk_writes_merged_total{device="sdc"} 1895 +node_disk_writes_merged_total{device="sr0"} 0 +node_disk_writes_merged_total{device="vda"} 2.0711856e+07 +# HELP node_disk_written_bytes_total The total number of bytes written successfully. +# TYPE node_disk_written_bytes_total counter +node_disk_written_bytes_total{device="dm-0"} 2.5891680256e+11 +node_disk_written_bytes_total{device="dm-1"} 303104 +node_disk_written_bytes_total{device="dm-2"} 2.607828992e+09 +node_disk_written_bytes_total{device="dm-3"} 0 +node_disk_written_bytes_total{device="dm-4"} 70144 +node_disk_written_bytes_total{device="dm-5"} 5.89664256e+08 +node_disk_written_bytes_total{device="mmcblk0"} 0 +node_disk_written_bytes_total{device="mmcblk0p1"} 0 +node_disk_written_bytes_total{device="mmcblk0p2"} 0 +node_disk_written_bytes_total{device="nvme0n1"} 2.0199236096e+10 +node_disk_written_bytes_total{device="sda"} 2.58916880384e+11 +node_disk_written_bytes_total{device="sdb"} 1.01012736e+09 +node_disk_written_bytes_total{device="sdc"} 8.852736e+07 +node_disk_written_bytes_total{device="sr0"} 0 +node_disk_written_bytes_total{device="vda"} 1.0938236928e+11 +# HELP node_dmi_info A metric with a constant '1' value labeled by bios_date, bios_release, bios_vendor, bios_version, board_asset_tag, board_name, board_serial, board_vendor, board_version, 
chassis_asset_tag, chassis_serial, chassis_vendor, chassis_version, product_family, product_name, product_serial, product_sku, product_uuid, product_version, system_vendor if provided by DMI. +# TYPE node_dmi_info gauge +node_dmi_info{bios_date="04/12/2021",bios_release="2.2",bios_vendor="Dell Inc.",bios_version="2.2.4",board_name="07PXPY",board_serial=".7N62AI2.GRTCL6944100GP.",board_vendor="Dell Inc.",board_version="A01",chassis_asset_tag="",chassis_serial="7N62AI2",chassis_vendor="Dell Inc.",chassis_version="",product_family="PowerEdge",product_name="PowerEdge R6515",product_serial="7N62AI2",product_sku="SKU=NotProvided;ModelName=PowerEdge R6515",product_uuid="83340ca8-cb49-4474-8c29-d2088ca84dd9",product_version="",system_vendor="Dell Inc."} 1 # HELP node_drbd_activitylog_writes_total Number of updates of the activity log area of the meta data. # TYPE node_drbd_activitylog_writes_total counter node_drbd_activitylog_writes_total{device="drbd1"} 1100 @@ -563,17 +674,62 @@ node_edac_uncorrectable_errors_total{controller="0"} 5 # HELP node_entropy_available_bits Bits of available entropy. # TYPE node_entropy_available_bits gauge node_entropy_available_bits 1337 +# HELP node_entropy_pool_size_bits Bits of entropy pool. +# TYPE node_entropy_pool_size_bits gauge +node_entropy_pool_size_bits 4096 # HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which node_exporter was built. # TYPE node_exporter_build_info gauge +# HELP node_fibrechannel_error_frames_total Number of errors in frames +# TYPE node_fibrechannel_error_frames_total counter +node_fibrechannel_error_frames_total{fc_host="host0"} 0 +# HELP node_fibrechannel_fcp_packet_aborts_total Number of aborted packets +# TYPE node_fibrechannel_fcp_packet_aborts_total counter +node_fibrechannel_fcp_packet_aborts_total{fc_host="host0"} 19 +# HELP node_fibrechannel_info Non-numeric data from /sys/class/fc_host/, value is always 1. 
+# TYPE node_fibrechannel_info gauge +node_fibrechannel_info{dev_loss_tmo="30",fabric_name="0",fc_host="host0",port_id="000002",port_name="1000e0071bce95f2",port_state="Online",port_type="Point-To-Point (direct nport connection)",speed="16 Gbit",supported_classes="Class 3",supported_speeds="4 Gbit, 8 Gbit, 16 Gbit",symbolic_name="Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux"} 1 +# HELP node_fibrechannel_invalid_crc_total Invalid Cyclic Redundancy Check count +# TYPE node_fibrechannel_invalid_crc_total counter +node_fibrechannel_invalid_crc_total{fc_host="host0"} 2 +# HELP node_fibrechannel_invalid_tx_words_total Number of invalid words transmitted by host port +# TYPE node_fibrechannel_invalid_tx_words_total counter +node_fibrechannel_invalid_tx_words_total{fc_host="host0"} 8 +# HELP node_fibrechannel_link_failure_total Number of times the host port link has failed +# TYPE node_fibrechannel_link_failure_total counter +node_fibrechannel_link_failure_total{fc_host="host0"} 9 +# HELP node_fibrechannel_loss_of_signal_total Number of times signal has been lost +# TYPE node_fibrechannel_loss_of_signal_total counter +node_fibrechannel_loss_of_signal_total{fc_host="host0"} 17 +# HELP node_fibrechannel_loss_of_sync_total Number of failures on either bit or transmission word boundaries +# TYPE node_fibrechannel_loss_of_sync_total counter +node_fibrechannel_loss_of_sync_total{fc_host="host0"} 16 +# HELP node_fibrechannel_nos_total Number Not_Operational Primitive Sequence received by host port +# TYPE node_fibrechannel_nos_total counter +node_fibrechannel_nos_total{fc_host="host0"} 18 +# HELP node_fibrechannel_rx_frames_total Number of frames received +# TYPE node_fibrechannel_rx_frames_total counter +node_fibrechannel_rx_frames_total{fc_host="host0"} 3 +# HELP node_fibrechannel_rx_words_total Number of words received by host port +# TYPE node_fibrechannel_rx_words_total counter +node_fibrechannel_rx_words_total{fc_host="host0"} 4 +# HELP 
node_fibrechannel_seconds_since_last_reset_total Number of seconds since last host port reset +# TYPE node_fibrechannel_seconds_since_last_reset_total counter +node_fibrechannel_seconds_since_last_reset_total{fc_host="host0"} 7 +# HELP node_fibrechannel_tx_frames_total Number of frames transmitted by host port +# TYPE node_fibrechannel_tx_frames_total counter +node_fibrechannel_tx_frames_total{fc_host="host0"} 5 +# HELP node_fibrechannel_tx_words_total Number of words transmitted by host port +# TYPE node_fibrechannel_tx_words_total counter +node_fibrechannel_tx_words_total{fc_host="host0"} 6 # HELP node_filefd_allocated File descriptor statistics: allocated. # TYPE node_filefd_allocated gauge node_filefd_allocated 1024 # HELP node_filefd_maximum File descriptor statistics: maximum. # TYPE node_filefd_maximum gauge node_filefd_maximum 1.631329e+06 -# HELP node_forks Total number of forks. -# TYPE node_forks counter -node_forks 26442 +# HELP node_forks_total Total number of forks. +# TYPE node_forks_total counter +node_forks_total 26442 # HELP node_hwmon_chip_names Annotation metric for human-readable chip names # TYPE node_hwmon_chip_names gauge node_hwmon_chip_names{chip="nct6779",chip_name="nct6779"} 1 @@ -797,6 +953,10 @@ node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp2"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp3"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp4"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp5"} 84 +# HELP node_infiniband_info Non-numeric data from /sys/class/infiniband/, value is always 1. 
+# TYPE node_infiniband_info gauge +node_infiniband_info{board_id="I40IW Board ID",device="i40iw0",firmware_version="0.2",hca_type="I40IW"} 1 +node_infiniband_info{board_id="SM_1141000001000",device="mlx4_0",firmware_version="2.31.5050",hca_type="MT4099"} 1 # HELP node_infiniband_legacy_data_received_bytes_total Number of data octets received on all links # TYPE node_infiniband_legacy_data_received_bytes_total counter node_infiniband_legacy_data_received_bytes_total{device="mlx4_0",port="1"} 1.8527668e+07 @@ -845,14 +1005,53 @@ node_infiniband_multicast_packets_received_total{device="mlx4_0",port="2"} 0 # TYPE node_infiniband_multicast_packets_transmitted_total counter node_infiniband_multicast_packets_transmitted_total{device="mlx4_0",port="1"} 16 node_infiniband_multicast_packets_transmitted_total{device="mlx4_0",port="2"} 0 -# HELP node_infiniband_port_data_received_bytes Number of data octets received on all links -# TYPE node_infiniband_port_data_received_bytes counter -node_infiniband_port_data_received_bytes{device="mlx4_0",port="1"} 1.8527668e+07 -node_infiniband_port_data_received_bytes{device="mlx4_0",port="2"} 0 -# HELP node_infiniband_port_data_transmitted_bytes Number of data octets transmitted on all links -# TYPE node_infiniband_port_data_transmitted_bytes counter -node_infiniband_port_data_transmitted_bytes{device="mlx4_0",port="1"} 1.493376e+07 -node_infiniband_port_data_transmitted_bytes{device="mlx4_0",port="2"} 0 +# HELP node_infiniband_physical_state_id Physical state of the InfiniBand port (0: no change, 1: sleep, 2: polling, 3: disable, 4: shift, 5: link up, 6: link error recover, 7: phytest) +# TYPE node_infiniband_physical_state_id gauge +node_infiniband_physical_state_id{device="i40iw0",port="1"} 5 +node_infiniband_physical_state_id{device="mlx4_0",port="1"} 5 +node_infiniband_physical_state_id{device="mlx4_0",port="2"} 5 +# HELP node_infiniband_port_constraint_errors_received_total Number of packets received on the switch physical port 
that are discarded +# TYPE node_infiniband_port_constraint_errors_received_total counter +node_infiniband_port_constraint_errors_received_total{device="mlx4_0",port="1"} 0 +# HELP node_infiniband_port_constraint_errors_transmitted_total Number of packets not transmitted from the switch physical port +# TYPE node_infiniband_port_constraint_errors_transmitted_total counter +node_infiniband_port_constraint_errors_transmitted_total{device="mlx4_0",port="1"} 0 +# HELP node_infiniband_port_data_received_bytes_total Number of data octets received on all links +# TYPE node_infiniband_port_data_received_bytes_total counter +node_infiniband_port_data_received_bytes_total{device="mlx4_0",port="1"} 1.8527668e+07 +node_infiniband_port_data_received_bytes_total{device="mlx4_0",port="2"} 0 +# HELP node_infiniband_port_data_transmitted_bytes_total Number of data octets transmitted on all links +# TYPE node_infiniband_port_data_transmitted_bytes_total counter +node_infiniband_port_data_transmitted_bytes_total{device="mlx4_0",port="1"} 1.493376e+07 +node_infiniband_port_data_transmitted_bytes_total{device="mlx4_0",port="2"} 0 +# HELP node_infiniband_port_discards_received_total Number of inbound packets discarded by the port because the port is down or congested +# TYPE node_infiniband_port_discards_received_total counter +node_infiniband_port_discards_received_total{device="mlx4_0",port="1"} 0 +# HELP node_infiniband_port_discards_transmitted_total Number of outbound packets discarded by the port because the port is down or congested +# TYPE node_infiniband_port_discards_transmitted_total counter +node_infiniband_port_discards_transmitted_total{device="mlx4_0",port="1"} 5 +# HELP node_infiniband_port_errors_received_total Number of packets containing an error that were received on this port +# TYPE node_infiniband_port_errors_received_total counter +node_infiniband_port_errors_received_total{device="mlx4_0",port="1"} 0 +# HELP node_infiniband_port_packets_received_total Number of 
packets received on all VLs by this port (including errors) +# TYPE node_infiniband_port_packets_received_total counter +node_infiniband_port_packets_received_total{device="mlx4_0",port="1"} 6.825908347e+09 +# HELP node_infiniband_port_packets_transmitted_total Number of packets transmitted on all VLs from this port (including errors) +# TYPE node_infiniband_port_packets_transmitted_total counter +node_infiniband_port_packets_transmitted_total{device="mlx4_0",port="1"} 6.235865e+06 +# HELP node_infiniband_port_transmit_wait_total Number of ticks during which the port had data to transmit but no data was sent during the entire tick +# TYPE node_infiniband_port_transmit_wait_total counter +node_infiniband_port_transmit_wait_total{device="mlx4_0",port="1"} 4.294967295e+09 +# HELP node_infiniband_rate_bytes_per_second Maximum signal transfer rate +# TYPE node_infiniband_rate_bytes_per_second gauge +node_infiniband_rate_bytes_per_second{device="i40iw0",port="1"} 1.25e+09 +node_infiniband_rate_bytes_per_second{device="mlx4_0",port="1"} 5e+09 +node_infiniband_rate_bytes_per_second{device="mlx4_0",port="2"} 5e+09 +# HELP node_infiniband_state_id State of the InfiniBand port (0: no change, 1: down, 2: init, 3: armed, 4: active, 5: act defer) +# TYPE node_infiniband_state_id gauge +node_infiniband_state_id{device="i40iw0",port="1"} 4 +node_infiniband_state_id{device="mlx4_0",port="1"} 4 +node_infiniband_state_id{device="mlx4_0",port="2"} 4 # HELP node_infiniband_unicast_packets_received_total Number of unicast packets received (including errors) # TYPE node_infiniband_unicast_packets_received_total counter node_infiniband_unicast_packets_received_total{device="mlx4_0",port="1"} 61148 @@ -861,39 +1060,159 @@ node_infiniband_unicast_packets_received_total{device="mlx4_0",port="2"} 0 # TYPE node_infiniband_unicast_packets_transmitted_total counter node_infiniband_unicast_packets_transmitted_total{device="mlx4_0",port="1"} 61239 
node_infiniband_unicast_packets_transmitted_total{device="mlx4_0",port="2"} 0 -# HELP node_intr Total number of interrupts serviced. -# TYPE node_intr counter -node_intr 8.885917e+06 +# HELP node_interrupts_total Interrupt details. +# TYPE node_interrupts_total counter +node_interrupts_total{cpu="0",devices="",info="APIC ICR read retries",type="RTR"} 0 +node_interrupts_total{cpu="0",devices="",info="Function call interrupts",type="CAL"} 148554 +node_interrupts_total{cpu="0",devices="",info="IRQ work interrupts",type="IWI"} 1.509379e+06 +node_interrupts_total{cpu="0",devices="",info="Local timer interrupts",type="LOC"} 1.74326351e+08 +node_interrupts_total{cpu="0",devices="",info="Machine check exceptions",type="MCE"} 0 +node_interrupts_total{cpu="0",devices="",info="Machine check polls",type="MCP"} 2406 +node_interrupts_total{cpu="0",devices="",info="Non-maskable interrupts",type="NMI"} 47 +node_interrupts_total{cpu="0",devices="",info="Performance monitoring interrupts",type="PMI"} 47 +node_interrupts_total{cpu="0",devices="",info="Rescheduling interrupts",type="RES"} 1.0847134e+07 +node_interrupts_total{cpu="0",devices="",info="Spurious interrupts",type="SPU"} 0 +node_interrupts_total{cpu="0",devices="",info="TLB shootdowns",type="TLB"} 1.0460334e+07 +node_interrupts_total{cpu="0",devices="",info="Thermal event interrupts",type="TRM"} 0 +node_interrupts_total{cpu="0",devices="",info="Threshold APIC interrupts",type="THR"} 0 +node_interrupts_total{cpu="0",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 398553 +node_interrupts_total{cpu="0",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 7.434032e+06 +node_interrupts_total{cpu="0",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 +node_interrupts_total{cpu="0",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 +node_interrupts_total{cpu="0",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 328511 +node_interrupts_total{cpu="0",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 
1.451445e+06 +node_interrupts_total{cpu="0",devices="i8042",info="IR-IO-APIC-edge",type="1"} 17960 +node_interrupts_total{cpu="0",devices="i8042",info="IR-IO-APIC-edge",type="12"} 380847 +node_interrupts_total{cpu="0",devices="i915",info="IR-PCI-MSI-edge",type="44"} 140636 +node_interrupts_total{cpu="0",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 4.3078464e+07 +node_interrupts_total{cpu="0",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 4 +node_interrupts_total{cpu="0",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 1 +node_interrupts_total{cpu="0",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 350 +node_interrupts_total{cpu="0",devices="timer",info="IR-IO-APIC-edge",type="0"} 18 +node_interrupts_total{cpu="0",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 378324 +node_interrupts_total{cpu="1",devices="",info="APIC ICR read retries",type="RTR"} 0 +node_interrupts_total{cpu="1",devices="",info="Function call interrupts",type="CAL"} 157441 +node_interrupts_total{cpu="1",devices="",info="IRQ work interrupts",type="IWI"} 2.411776e+06 +node_interrupts_total{cpu="1",devices="",info="Local timer interrupts",type="LOC"} 1.35776678e+08 +node_interrupts_total{cpu="1",devices="",info="Machine check exceptions",type="MCE"} 0 +node_interrupts_total{cpu="1",devices="",info="Machine check polls",type="MCP"} 2399 +node_interrupts_total{cpu="1",devices="",info="Non-maskable interrupts",type="NMI"} 5031 +node_interrupts_total{cpu="1",devices="",info="Performance monitoring interrupts",type="PMI"} 5031 +node_interrupts_total{cpu="1",devices="",info="Rescheduling interrupts",type="RES"} 9.111507e+06 +node_interrupts_total{cpu="1",devices="",info="Spurious interrupts",type="SPU"} 0 +node_interrupts_total{cpu="1",devices="",info="TLB shootdowns",type="TLB"} 9.918429e+06 +node_interrupts_total{cpu="1",devices="",info="Thermal event interrupts",type="TRM"} 0 +node_interrupts_total{cpu="1",devices="",info="Threshold APIC interrupts",type="THR"} 0 
+node_interrupts_total{cpu="1",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 2320 +node_interrupts_total{cpu="1",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 8.092205e+06 +node_interrupts_total{cpu="1",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 +node_interrupts_total{cpu="1",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 +node_interrupts_total{cpu="1",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 322879 +node_interrupts_total{cpu="1",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 3.333499e+06 +node_interrupts_total{cpu="1",devices="i8042",info="IR-IO-APIC-edge",type="1"} 105 +node_interrupts_total{cpu="1",devices="i8042",info="IR-IO-APIC-edge",type="12"} 1021 +node_interrupts_total{cpu="1",devices="i915",info="IR-PCI-MSI-edge",type="44"} 226313 +node_interrupts_total{cpu="1",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 130 +node_interrupts_total{cpu="1",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 22 +node_interrupts_total{cpu="1",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 0 +node_interrupts_total{cpu="1",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 224 +node_interrupts_total{cpu="1",devices="timer",info="IR-IO-APIC-edge",type="0"} 0 +node_interrupts_total{cpu="1",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 1.734637e+06 +node_interrupts_total{cpu="2",devices="",info="APIC ICR read retries",type="RTR"} 0 +node_interrupts_total{cpu="2",devices="",info="Function call interrupts",type="CAL"} 142912 +node_interrupts_total{cpu="2",devices="",info="IRQ work interrupts",type="IWI"} 1.512975e+06 +node_interrupts_total{cpu="2",devices="",info="Local timer interrupts",type="LOC"} 1.68393257e+08 +node_interrupts_total{cpu="2",devices="",info="Machine check exceptions",type="MCE"} 0 +node_interrupts_total{cpu="2",devices="",info="Machine check polls",type="MCP"} 2399 +node_interrupts_total{cpu="2",devices="",info="Non-maskable interrupts",type="NMI"} 6211 
+node_interrupts_total{cpu="2",devices="",info="Performance monitoring interrupts",type="PMI"} 6211 +node_interrupts_total{cpu="2",devices="",info="Rescheduling interrupts",type="RES"} 1.5999335e+07 +node_interrupts_total{cpu="2",devices="",info="Spurious interrupts",type="SPU"} 0 +node_interrupts_total{cpu="2",devices="",info="TLB shootdowns",type="TLB"} 1.0494258e+07 +node_interrupts_total{cpu="2",devices="",info="Thermal event interrupts",type="TRM"} 0 +node_interrupts_total{cpu="2",devices="",info="Threshold APIC interrupts",type="THR"} 0 +node_interrupts_total{cpu="2",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 824 +node_interrupts_total{cpu="2",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 6.478877e+06 +node_interrupts_total{cpu="2",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 +node_interrupts_total{cpu="2",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 +node_interrupts_total{cpu="2",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 293782 +node_interrupts_total{cpu="2",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 1.092032e+06 +node_interrupts_total{cpu="2",devices="i8042",info="IR-IO-APIC-edge",type="1"} 28 +node_interrupts_total{cpu="2",devices="i8042",info="IR-IO-APIC-edge",type="12"} 240 +node_interrupts_total{cpu="2",devices="i915",info="IR-PCI-MSI-edge",type="44"} 347 +node_interrupts_total{cpu="2",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 460171 +node_interrupts_total{cpu="2",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 0 +node_interrupts_total{cpu="2",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 0 +node_interrupts_total{cpu="2",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 0 +node_interrupts_total{cpu="2",devices="timer",info="IR-IO-APIC-edge",type="0"} 0 +node_interrupts_total{cpu="2",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 440240 +node_interrupts_total{cpu="3",devices="",info="APIC ICR read retries",type="RTR"} 0 
+node_interrupts_total{cpu="3",devices="",info="Function call interrupts",type="CAL"} 155528 +node_interrupts_total{cpu="3",devices="",info="IRQ work interrupts",type="IWI"} 2.428828e+06 +node_interrupts_total{cpu="3",devices="",info="Local timer interrupts",type="LOC"} 1.30980079e+08 +node_interrupts_total{cpu="3",devices="",info="Machine check exceptions",type="MCE"} 0 +node_interrupts_total{cpu="3",devices="",info="Machine check polls",type="MCP"} 2399 +node_interrupts_total{cpu="3",devices="",info="Non-maskable interrupts",type="NMI"} 4968 +node_interrupts_total{cpu="3",devices="",info="Performance monitoring interrupts",type="PMI"} 4968 +node_interrupts_total{cpu="3",devices="",info="Rescheduling interrupts",type="RES"} 7.45726e+06 +node_interrupts_total{cpu="3",devices="",info="Spurious interrupts",type="SPU"} 0 +node_interrupts_total{cpu="3",devices="",info="TLB shootdowns",type="TLB"} 1.0345022e+07 +node_interrupts_total{cpu="3",devices="",info="Thermal event interrupts",type="TRM"} 0 +node_interrupts_total{cpu="3",devices="",info="Threshold APIC interrupts",type="THR"} 0 +node_interrupts_total{cpu="3",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 863 +node_interrupts_total{cpu="3",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 7.492252e+06 +node_interrupts_total{cpu="3",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 +node_interrupts_total{cpu="3",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 +node_interrupts_total{cpu="3",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 351412 +node_interrupts_total{cpu="3",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 2.644609e+06 +node_interrupts_total{cpu="3",devices="i8042",info="IR-IO-APIC-edge",type="1"} 28 +node_interrupts_total{cpu="3",devices="i8042",info="IR-IO-APIC-edge",type="12"} 198 +node_interrupts_total{cpu="3",devices="i915",info="IR-PCI-MSI-edge",type="44"} 633 +node_interrupts_total{cpu="3",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 290 
+node_interrupts_total{cpu="3",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 0 +node_interrupts_total{cpu="3",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 0 +node_interrupts_total{cpu="3",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 0 +node_interrupts_total{cpu="3",devices="timer",info="IR-IO-APIC-edge",type="0"} 0 +node_interrupts_total{cpu="3",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 2.434308e+06 +# HELP node_intr_total Total number of interrupts serviced. +# TYPE node_intr_total counter +node_intr_total 8.885917e+06 # HELP node_ipvs_backend_connections_active The current active connections by local and remote address. # TYPE node_ipvs_backend_connections_active gauge -node_ipvs_backend_connections_active{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 248 -node_ipvs_backend_connections_active{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 248 -node_ipvs_backend_connections_active{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 248 -node_ipvs_backend_connections_active{local_address="192.168.0.55",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 -node_ipvs_backend_connections_active{local_address="192.168.0.55",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 -node_ipvs_backend_connections_active{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 1498 -node_ipvs_backend_connections_active{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 1499 -node_ipvs_backend_connections_active{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 
+node_ipvs_backend_connections_active{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 321 +node_ipvs_backend_connections_active{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 64 +node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 248 +node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 248 +node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 248 +node_ipvs_backend_connections_active{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 +node_ipvs_backend_connections_active{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 +node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 1498 +node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 1499 +node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_backend_connections_inactive The current inactive connections by local and remote address. 
# TYPE node_ipvs_backend_connections_inactive gauge -node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 2 -node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 1 -node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 2 -node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 -node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 -node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 0 -node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 0 -node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 5 +node_ipvs_backend_connections_inactive{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 1 +node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 2 +node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 1 
+node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 2 +node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_backend_weight The current backend weight by local and remote address. 
# TYPE node_ipvs_backend_weight gauge -node_ipvs_backend_weight{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 100 -node_ipvs_backend_weight{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 100 -node_ipvs_backend_weight{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 100 -node_ipvs_backend_weight{local_address="192.168.0.55",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 100 -node_ipvs_backend_weight{local_address="192.168.0.55",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 -node_ipvs_backend_weight{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 100 -node_ipvs_backend_weight{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 100 -node_ipvs_backend_weight{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 +node_ipvs_backend_weight{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 20 +node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 100 
+node_ipvs_backend_weight{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 +node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_connections_total The total number of connections made. # TYPE node_ipvs_connections_total counter node_ipvs_connections_total 2.3765872e+07 @@ -936,6 +1255,184 @@ node_ksmd_run 1 # HELP node_ksmd_sleep_seconds ksmd 'sleep_millisecs' file. 
# TYPE node_ksmd_sleep_seconds gauge node_ksmd_sleep_seconds 0.02 +# HELP node_lnstat_allocs_total linux network cache stats +# TYPE node_lnstat_allocs_total counter +node_lnstat_allocs_total{cpu="0",subsystem="arp_cache"} 1 +node_lnstat_allocs_total{cpu="0",subsystem="ndisc_cache"} 240 +node_lnstat_allocs_total{cpu="1",subsystem="arp_cache"} 13 +node_lnstat_allocs_total{cpu="1",subsystem="ndisc_cache"} 252 +# HELP node_lnstat_delete_list_total linux network cache stats +# TYPE node_lnstat_delete_list_total counter +node_lnstat_delete_list_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_delete_list_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_delete_list_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_delete_list_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_delete_total linux network cache stats +# TYPE node_lnstat_delete_total counter +node_lnstat_delete_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_delete_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_delete_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_delete_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_destroys_total linux network cache stats +# TYPE node_lnstat_destroys_total counter +node_lnstat_destroys_total{cpu="0",subsystem="arp_cache"} 2 +node_lnstat_destroys_total{cpu="0",subsystem="ndisc_cache"} 241 +node_lnstat_destroys_total{cpu="1",subsystem="arp_cache"} 14 +node_lnstat_destroys_total{cpu="1",subsystem="ndisc_cache"} 253 +# HELP node_lnstat_drop_total linux network cache stats +# TYPE node_lnstat_drop_total counter +node_lnstat_drop_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_drop_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_drop_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_drop_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_early_drop_total linux network cache stats +# TYPE node_lnstat_early_drop_total counter 
+node_lnstat_early_drop_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_early_drop_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_early_drop_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_early_drop_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_entries_total linux network cache stats +# TYPE node_lnstat_entries_total counter +node_lnstat_entries_total{cpu="0",subsystem="arp_cache"} 20 +node_lnstat_entries_total{cpu="0",subsystem="ndisc_cache"} 36 +node_lnstat_entries_total{cpu="0",subsystem="nf_conntrack"} 33 +node_lnstat_entries_total{cpu="1",subsystem="arp_cache"} 20 +node_lnstat_entries_total{cpu="1",subsystem="ndisc_cache"} 36 +node_lnstat_entries_total{cpu="1",subsystem="nf_conntrack"} 33 +node_lnstat_entries_total{cpu="2",subsystem="nf_conntrack"} 33 +node_lnstat_entries_total{cpu="3",subsystem="nf_conntrack"} 33 +# HELP node_lnstat_expect_create_total linux network cache stats +# TYPE node_lnstat_expect_create_total counter +node_lnstat_expect_create_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_expect_create_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_expect_create_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_expect_create_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_expect_delete_total linux network cache stats +# TYPE node_lnstat_expect_delete_total counter +node_lnstat_expect_delete_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_expect_delete_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_expect_delete_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_expect_delete_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_expect_new_total linux network cache stats +# TYPE node_lnstat_expect_new_total counter +node_lnstat_expect_new_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_expect_new_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_expect_new_total{cpu="2",subsystem="nf_conntrack"} 0 
+node_lnstat_expect_new_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_forced_gc_runs_total linux network cache stats +# TYPE node_lnstat_forced_gc_runs_total counter +node_lnstat_forced_gc_runs_total{cpu="0",subsystem="arp_cache"} 10 +node_lnstat_forced_gc_runs_total{cpu="0",subsystem="ndisc_cache"} 249 +node_lnstat_forced_gc_runs_total{cpu="1",subsystem="arp_cache"} 22 +node_lnstat_forced_gc_runs_total{cpu="1",subsystem="ndisc_cache"} 261 +# HELP node_lnstat_found_total linux network cache stats +# TYPE node_lnstat_found_total counter +node_lnstat_found_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_found_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_found_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_found_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_hash_grows_total linux network cache stats +# TYPE node_lnstat_hash_grows_total counter +node_lnstat_hash_grows_total{cpu="0",subsystem="arp_cache"} 3 +node_lnstat_hash_grows_total{cpu="0",subsystem="ndisc_cache"} 242 +node_lnstat_hash_grows_total{cpu="1",subsystem="arp_cache"} 15 +node_lnstat_hash_grows_total{cpu="1",subsystem="ndisc_cache"} 254 +# HELP node_lnstat_hits_total linux network cache stats +# TYPE node_lnstat_hits_total counter +node_lnstat_hits_total{cpu="0",subsystem="arp_cache"} 5 +node_lnstat_hits_total{cpu="0",subsystem="ndisc_cache"} 244 +node_lnstat_hits_total{cpu="1",subsystem="arp_cache"} 17 +node_lnstat_hits_total{cpu="1",subsystem="ndisc_cache"} 256 +# HELP node_lnstat_icmp_error_total linux network cache stats +# TYPE node_lnstat_icmp_error_total counter +node_lnstat_icmp_error_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_icmp_error_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_icmp_error_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_icmp_error_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_ignore_total linux network cache stats +# TYPE node_lnstat_ignore_total counter 
+node_lnstat_ignore_total{cpu="0",subsystem="nf_conntrack"} 22666 +node_lnstat_ignore_total{cpu="1",subsystem="nf_conntrack"} 22180 +node_lnstat_ignore_total{cpu="2",subsystem="nf_conntrack"} 22740 +node_lnstat_ignore_total{cpu="3",subsystem="nf_conntrack"} 22152 +# HELP node_lnstat_insert_failed_total linux network cache stats +# TYPE node_lnstat_insert_failed_total counter +node_lnstat_insert_failed_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_insert_failed_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_insert_failed_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_insert_failed_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_insert_total linux network cache stats +# TYPE node_lnstat_insert_total counter +node_lnstat_insert_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_insert_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_insert_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_insert_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_invalid_total linux network cache stats +# TYPE node_lnstat_invalid_total counter +node_lnstat_invalid_total{cpu="0",subsystem="nf_conntrack"} 3 +node_lnstat_invalid_total{cpu="1",subsystem="nf_conntrack"} 2 +node_lnstat_invalid_total{cpu="2",subsystem="nf_conntrack"} 1 +node_lnstat_invalid_total{cpu="3",subsystem="nf_conntrack"} 47 +# HELP node_lnstat_lookups_total linux network cache stats +# TYPE node_lnstat_lookups_total counter +node_lnstat_lookups_total{cpu="0",subsystem="arp_cache"} 4 +node_lnstat_lookups_total{cpu="0",subsystem="ndisc_cache"} 243 +node_lnstat_lookups_total{cpu="1",subsystem="arp_cache"} 16 +node_lnstat_lookups_total{cpu="1",subsystem="ndisc_cache"} 255 +# HELP node_lnstat_new_total linux network cache stats +# TYPE node_lnstat_new_total counter +node_lnstat_new_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_new_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_new_total{cpu="2",subsystem="nf_conntrack"} 0 
+node_lnstat_new_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_periodic_gc_runs_total linux network cache stats +# TYPE node_lnstat_periodic_gc_runs_total counter +node_lnstat_periodic_gc_runs_total{cpu="0",subsystem="arp_cache"} 9 +node_lnstat_periodic_gc_runs_total{cpu="0",subsystem="ndisc_cache"} 248 +node_lnstat_periodic_gc_runs_total{cpu="1",subsystem="arp_cache"} 21 +node_lnstat_periodic_gc_runs_total{cpu="1",subsystem="ndisc_cache"} 260 +# HELP node_lnstat_rcv_probes_mcast_total linux network cache stats +# TYPE node_lnstat_rcv_probes_mcast_total counter +node_lnstat_rcv_probes_mcast_total{cpu="0",subsystem="arp_cache"} 7 +node_lnstat_rcv_probes_mcast_total{cpu="0",subsystem="ndisc_cache"} 246 +node_lnstat_rcv_probes_mcast_total{cpu="1",subsystem="arp_cache"} 19 +node_lnstat_rcv_probes_mcast_total{cpu="1",subsystem="ndisc_cache"} 258 +# HELP node_lnstat_rcv_probes_ucast_total linux network cache stats +# TYPE node_lnstat_rcv_probes_ucast_total counter +node_lnstat_rcv_probes_ucast_total{cpu="0",subsystem="arp_cache"} 8 +node_lnstat_rcv_probes_ucast_total{cpu="0",subsystem="ndisc_cache"} 247 +node_lnstat_rcv_probes_ucast_total{cpu="1",subsystem="arp_cache"} 20 +node_lnstat_rcv_probes_ucast_total{cpu="1",subsystem="ndisc_cache"} 259 +# HELP node_lnstat_res_failed_total linux network cache stats +# TYPE node_lnstat_res_failed_total counter +node_lnstat_res_failed_total{cpu="0",subsystem="arp_cache"} 6 +node_lnstat_res_failed_total{cpu="0",subsystem="ndisc_cache"} 245 +node_lnstat_res_failed_total{cpu="1",subsystem="arp_cache"} 18 +node_lnstat_res_failed_total{cpu="1",subsystem="ndisc_cache"} 257 +# HELP node_lnstat_search_restart_total linux network cache stats +# TYPE node_lnstat_search_restart_total counter +node_lnstat_search_restart_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_search_restart_total{cpu="1",subsystem="nf_conntrack"} 2 +node_lnstat_search_restart_total{cpu="2",subsystem="nf_conntrack"} 1 
+node_lnstat_search_restart_total{cpu="3",subsystem="nf_conntrack"} 4 +# HELP node_lnstat_searched_total linux network cache stats +# TYPE node_lnstat_searched_total counter +node_lnstat_searched_total{cpu="0",subsystem="nf_conntrack"} 0 +node_lnstat_searched_total{cpu="1",subsystem="nf_conntrack"} 0 +node_lnstat_searched_total{cpu="2",subsystem="nf_conntrack"} 0 +node_lnstat_searched_total{cpu="3",subsystem="nf_conntrack"} 0 +# HELP node_lnstat_table_fulls_total linux network cache stats +# TYPE node_lnstat_table_fulls_total counter +node_lnstat_table_fulls_total{cpu="0",subsystem="arp_cache"} 12 +node_lnstat_table_fulls_total{cpu="0",subsystem="ndisc_cache"} 251 +node_lnstat_table_fulls_total{cpu="1",subsystem="arp_cache"} 24 +node_lnstat_table_fulls_total{cpu="1",subsystem="ndisc_cache"} 263 +# HELP node_lnstat_unresolved_discards_total linux network cache stats +# TYPE node_lnstat_unresolved_discards_total counter +node_lnstat_unresolved_discards_total{cpu="0",subsystem="arp_cache"} 11 +node_lnstat_unresolved_discards_total{cpu="0",subsystem="ndisc_cache"} 250 +node_lnstat_unresolved_discards_total{cpu="1",subsystem="arp_cache"} 23 +node_lnstat_unresolved_discards_total{cpu="1",subsystem="ndisc_cache"} 262 # HELP node_load1 1m load average. 
# TYPE node_load1 gauge node_load1 0.21 @@ -950,10 +1447,13 @@ node_load5 0.37 node_md_blocks{device="md0"} 248896 node_md_blocks{device="md00"} 4.186624e+06 node_md_blocks{device="md10"} 3.14159265e+08 +node_md_blocks{device="md101"} 322560 node_md_blocks{device="md11"} 4.190208e+06 node_md_blocks{device="md12"} 3.886394368e+09 +node_md_blocks{device="md120"} 2.095104e+06 node_md_blocks{device="md126"} 1.855870976e+09 node_md_blocks{device="md127"} 3.12319552e+08 +node_md_blocks{device="md201"} 1.993728e+06 node_md_blocks{device="md219"} 7932 node_md_blocks{device="md3"} 5.853468288e+09 node_md_blocks{device="md4"} 4.883648e+06 @@ -966,126 +1466,221 @@ node_md_blocks{device="md9"} 523968 node_md_blocks_synced{device="md0"} 248896 node_md_blocks_synced{device="md00"} 4.186624e+06 node_md_blocks_synced{device="md10"} 3.14159265e+08 -node_md_blocks_synced{device="md11"} 4.190208e+06 +node_md_blocks_synced{device="md101"} 322560 +node_md_blocks_synced{device="md11"} 0 node_md_blocks_synced{device="md12"} 3.886394368e+09 +node_md_blocks_synced{device="md120"} 2.095104e+06 node_md_blocks_synced{device="md126"} 1.855870976e+09 node_md_blocks_synced{device="md127"} 3.12319552e+08 +node_md_blocks_synced{device="md201"} 114176 node_md_blocks_synced{device="md219"} 7932 node_md_blocks_synced{device="md3"} 5.853468288e+09 node_md_blocks_synced{device="md4"} 4.883648e+06 node_md_blocks_synced{device="md6"} 1.6775552e+07 node_md_blocks_synced{device="md7"} 7.813735424e+09 node_md_blocks_synced{device="md8"} 1.6775552e+07 -node_md_blocks_synced{device="md9"} 523968 -# HELP node_md_disks Total number of disks of device. +node_md_blocks_synced{device="md9"} 0 +# HELP node_md_disks Number of active/failed/spare disks of device. 
# TYPE node_md_disks gauge -node_md_disks{device="md0"} 2 -node_md_disks{device="md00"} 1 -node_md_disks{device="md10"} 2 -node_md_disks{device="md11"} 2 -node_md_disks{device="md12"} 2 -node_md_disks{device="md126"} 2 -node_md_disks{device="md127"} 2 -node_md_disks{device="md219"} 0 -node_md_disks{device="md3"} 8 -node_md_disks{device="md4"} 2 -node_md_disks{device="md6"} 2 -node_md_disks{device="md7"} 4 -node_md_disks{device="md8"} 2 -node_md_disks{device="md9"} 4 -# HELP node_md_disks_active Number of active disks of device. -# TYPE node_md_disks_active gauge -node_md_disks_active{device="md0"} 2 -node_md_disks_active{device="md00"} 1 -node_md_disks_active{device="md10"} 2 -node_md_disks_active{device="md11"} 2 -node_md_disks_active{device="md12"} 2 -node_md_disks_active{device="md126"} 2 -node_md_disks_active{device="md127"} 2 -node_md_disks_active{device="md219"} 0 -node_md_disks_active{device="md3"} 8 -node_md_disks_active{device="md4"} 2 -node_md_disks_active{device="md6"} 1 -node_md_disks_active{device="md7"} 3 -node_md_disks_active{device="md8"} 2 -node_md_disks_active{device="md9"} 4 -# HELP node_md_is_active Indicator whether the md-device is active or not. 
-# TYPE node_md_is_active gauge -node_md_is_active{device="md0"} 1 -node_md_is_active{device="md00"} 1 -node_md_is_active{device="md10"} 1 -node_md_is_active{device="md11"} 1 -node_md_is_active{device="md12"} 1 -node_md_is_active{device="md126"} 1 -node_md_is_active{device="md127"} 1 -node_md_is_active{device="md219"} 0 -node_md_is_active{device="md3"} 1 -node_md_is_active{device="md4"} 0 -node_md_is_active{device="md6"} 1 -node_md_is_active{device="md7"} 1 -node_md_is_active{device="md8"} 1 -node_md_is_active{device="md9"} 1 -# HELP node_megacli_drive_count megacli: drive error and event counters -# TYPE node_megacli_drive_count gauge -node_megacli_drive_count{enclosure="32",slot="0",type="Media Error Count"} 0 -node_megacli_drive_count{enclosure="32",slot="0",type="Other Error Count"} 0 -node_megacli_drive_count{enclosure="32",slot="0",type="Predictive Failure Count"} 0 -node_megacli_drive_count{enclosure="32",slot="1",type="Media Error Count"} 0 -node_megacli_drive_count{enclosure="32",slot="1",type="Other Error Count"} 0 -node_megacli_drive_count{enclosure="32",slot="1",type="Predictive Failure Count"} 0 -node_megacli_drive_count{enclosure="32",slot="2",type="Media Error Count"} 0 -node_megacli_drive_count{enclosure="32",slot="2",type="Other Error Count"} 0 -node_megacli_drive_count{enclosure="32",slot="2",type="Predictive Failure Count"} 0 -node_megacli_drive_count{enclosure="32",slot="3",type="Media Error Count"} 0 -node_megacli_drive_count{enclosure="32",slot="3",type="Other Error Count"} 0 -node_megacli_drive_count{enclosure="32",slot="3",type="Predictive Failure Count"} 23 -# HELP node_megacli_drive_temperature_celsius megacli: drive temperature -# TYPE node_megacli_drive_temperature_celsius gauge -node_megacli_drive_temperature_celsius{enclosure="32",slot="0"} 37 -node_megacli_drive_temperature_celsius{enclosure="32",slot="2"} 39 -node_megacli_drive_temperature_celsius{enclosure="32",slot="3"} 38 -# HELP node_memory_Active Memory information field Active. 
-# TYPE node_memory_Active gauge -node_memory_Active 2.287017984e+09 -# HELP node_memory_Active_anon Memory information field Active_anon. -# TYPE node_memory_Active_anon gauge -node_memory_Active_anon 2.068484096e+09 -# HELP node_memory_Active_file Memory information field Active_file. -# TYPE node_memory_Active_file gauge -node_memory_Active_file 2.18533888e+08 -# HELP node_memory_AnonHugePages Memory information field AnonHugePages. -# TYPE node_memory_AnonHugePages gauge -node_memory_AnonHugePages 0 -# HELP node_memory_AnonPages Memory information field AnonPages. -# TYPE node_memory_AnonPages gauge -node_memory_AnonPages 2.298032128e+09 -# HELP node_memory_Bounce Memory information field Bounce. -# TYPE node_memory_Bounce gauge -node_memory_Bounce 0 -# HELP node_memory_Buffers Memory information field Buffers. -# TYPE node_memory_Buffers gauge -node_memory_Buffers 2.256896e+07 -# HELP node_memory_Cached Memory information field Cached. -# TYPE node_memory_Cached gauge -node_memory_Cached 9.53229312e+08 -# HELP node_memory_CommitLimit Memory information field CommitLimit. -# TYPE node_memory_CommitLimit gauge -node_memory_CommitLimit 6.210940928e+09 -# HELP node_memory_Committed_AS Memory information field Committed_AS. -# TYPE node_memory_Committed_AS gauge -node_memory_Committed_AS 8.023486464e+09 -# HELP node_memory_DirectMap2M Memory information field DirectMap2M. -# TYPE node_memory_DirectMap2M gauge -node_memory_DirectMap2M 3.787456512e+09 -# HELP node_memory_DirectMap4k Memory information field DirectMap4k. -# TYPE node_memory_DirectMap4k gauge -node_memory_DirectMap4k 1.9011584e+08 -# HELP node_memory_Dirty Memory information field Dirty. -# TYPE node_memory_Dirty gauge -node_memory_Dirty 1.077248e+06 -# HELP node_memory_HardwareCorrupted Memory information field HardwareCorrupted. 
-# TYPE node_memory_HardwareCorrupted gauge -node_memory_HardwareCorrupted 0 +node_md_disks{device="md0",state="active"} 2 +node_md_disks{device="md0",state="failed"} 0 +node_md_disks{device="md0",state="spare"} 0 +node_md_disks{device="md00",state="active"} 1 +node_md_disks{device="md00",state="failed"} 0 +node_md_disks{device="md00",state="spare"} 0 +node_md_disks{device="md10",state="active"} 2 +node_md_disks{device="md10",state="failed"} 0 +node_md_disks{device="md10",state="spare"} 0 +node_md_disks{device="md101",state="active"} 3 +node_md_disks{device="md101",state="failed"} 0 +node_md_disks{device="md101",state="spare"} 0 +node_md_disks{device="md11",state="active"} 2 +node_md_disks{device="md11",state="failed"} 1 +node_md_disks{device="md11",state="spare"} 2 +node_md_disks{device="md12",state="active"} 2 +node_md_disks{device="md12",state="failed"} 0 +node_md_disks{device="md12",state="spare"} 0 +node_md_disks{device="md120",state="active"} 2 +node_md_disks{device="md120",state="failed"} 0 +node_md_disks{device="md120",state="spare"} 0 +node_md_disks{device="md126",state="active"} 2 +node_md_disks{device="md126",state="failed"} 0 +node_md_disks{device="md126",state="spare"} 0 +node_md_disks{device="md127",state="active"} 2 +node_md_disks{device="md127",state="failed"} 0 +node_md_disks{device="md127",state="spare"} 0 +node_md_disks{device="md201",state="active"} 2 +node_md_disks{device="md201",state="failed"} 0 +node_md_disks{device="md201",state="spare"} 0 +node_md_disks{device="md219",state="active"} 0 +node_md_disks{device="md219",state="failed"} 0 +node_md_disks{device="md219",state="spare"} 3 +node_md_disks{device="md3",state="active"} 8 +node_md_disks{device="md3",state="failed"} 0 +node_md_disks{device="md3",state="spare"} 2 +node_md_disks{device="md4",state="active"} 0 +node_md_disks{device="md4",state="failed"} 1 +node_md_disks{device="md4",state="spare"} 1 +node_md_disks{device="md6",state="active"} 1 +node_md_disks{device="md6",state="failed"} 1 
+node_md_disks{device="md6",state="spare"} 1 +node_md_disks{device="md7",state="active"} 3 +node_md_disks{device="md7",state="failed"} 1 +node_md_disks{device="md7",state="spare"} 0 +node_md_disks{device="md8",state="active"} 2 +node_md_disks{device="md8",state="failed"} 0 +node_md_disks{device="md8",state="spare"} 2 +node_md_disks{device="md9",state="active"} 4 +node_md_disks{device="md9",state="failed"} 2 +node_md_disks{device="md9",state="spare"} 1 +# HELP node_md_disks_required Total number of disks of device. +# TYPE node_md_disks_required gauge +node_md_disks_required{device="md0"} 2 +node_md_disks_required{device="md00"} 1 +node_md_disks_required{device="md10"} 2 +node_md_disks_required{device="md101"} 3 +node_md_disks_required{device="md11"} 2 +node_md_disks_required{device="md12"} 2 +node_md_disks_required{device="md120"} 2 +node_md_disks_required{device="md126"} 2 +node_md_disks_required{device="md127"} 2 +node_md_disks_required{device="md201"} 2 +node_md_disks_required{device="md219"} 0 +node_md_disks_required{device="md3"} 8 +node_md_disks_required{device="md4"} 0 +node_md_disks_required{device="md6"} 2 +node_md_disks_required{device="md7"} 4 +node_md_disks_required{device="md8"} 2 +node_md_disks_required{device="md9"} 4 +# HELP node_md_state Indicates the state of md-device. 
+# TYPE node_md_state gauge +node_md_state{device="md0",state="active"} 1 +node_md_state{device="md0",state="check"} 0 +node_md_state{device="md0",state="inactive"} 0 +node_md_state{device="md0",state="recovering"} 0 +node_md_state{device="md0",state="resync"} 0 +node_md_state{device="md00",state="active"} 1 +node_md_state{device="md00",state="check"} 0 +node_md_state{device="md00",state="inactive"} 0 +node_md_state{device="md00",state="recovering"} 0 +node_md_state{device="md00",state="resync"} 0 +node_md_state{device="md10",state="active"} 1 +node_md_state{device="md10",state="check"} 0 +node_md_state{device="md10",state="inactive"} 0 +node_md_state{device="md10",state="recovering"} 0 +node_md_state{device="md10",state="resync"} 0 +node_md_state{device="md101",state="active"} 1 +node_md_state{device="md101",state="check"} 0 +node_md_state{device="md101",state="inactive"} 0 +node_md_state{device="md101",state="recovering"} 0 +node_md_state{device="md101",state="resync"} 0 +node_md_state{device="md11",state="active"} 0 +node_md_state{device="md11",state="check"} 0 +node_md_state{device="md11",state="inactive"} 0 +node_md_state{device="md11",state="recovering"} 0 +node_md_state{device="md11",state="resync"} 1 +node_md_state{device="md12",state="active"} 1 +node_md_state{device="md12",state="check"} 0 +node_md_state{device="md12",state="inactive"} 0 +node_md_state{device="md12",state="recovering"} 0 +node_md_state{device="md12",state="resync"} 0 +node_md_state{device="md120",state="active"} 1 +node_md_state{device="md120",state="check"} 0 +node_md_state{device="md120",state="inactive"} 0 +node_md_state{device="md120",state="recovering"} 0 +node_md_state{device="md120",state="resync"} 0 +node_md_state{device="md126",state="active"} 1 +node_md_state{device="md126",state="check"} 0 +node_md_state{device="md126",state="inactive"} 0 +node_md_state{device="md126",state="recovering"} 0 +node_md_state{device="md126",state="resync"} 0 
+node_md_state{device="md127",state="active"} 1 +node_md_state{device="md127",state="check"} 0 +node_md_state{device="md127",state="inactive"} 0 +node_md_state{device="md127",state="recovering"} 0 +node_md_state{device="md127",state="resync"} 0 +node_md_state{device="md201",state="active"} 0 +node_md_state{device="md201",state="check"} 1 +node_md_state{device="md201",state="inactive"} 0 +node_md_state{device="md201",state="recovering"} 0 +node_md_state{device="md201",state="resync"} 0 +node_md_state{device="md219",state="active"} 0 +node_md_state{device="md219",state="check"} 0 +node_md_state{device="md219",state="inactive"} 1 +node_md_state{device="md219",state="recovering"} 0 +node_md_state{device="md219",state="resync"} 0 +node_md_state{device="md3",state="active"} 1 +node_md_state{device="md3",state="check"} 0 +node_md_state{device="md3",state="inactive"} 0 +node_md_state{device="md3",state="recovering"} 0 +node_md_state{device="md3",state="resync"} 0 +node_md_state{device="md4",state="active"} 0 +node_md_state{device="md4",state="check"} 0 +node_md_state{device="md4",state="inactive"} 1 +node_md_state{device="md4",state="recovering"} 0 +node_md_state{device="md4",state="resync"} 0 +node_md_state{device="md6",state="active"} 0 +node_md_state{device="md6",state="check"} 0 +node_md_state{device="md6",state="inactive"} 0 +node_md_state{device="md6",state="recovering"} 1 +node_md_state{device="md6",state="resync"} 0 +node_md_state{device="md7",state="active"} 1 +node_md_state{device="md7",state="check"} 0 +node_md_state{device="md7",state="inactive"} 0 +node_md_state{device="md7",state="recovering"} 0 +node_md_state{device="md7",state="resync"} 0 +node_md_state{device="md8",state="active"} 0 +node_md_state{device="md8",state="check"} 0 +node_md_state{device="md8",state="inactive"} 0 +node_md_state{device="md8",state="recovering"} 0 +node_md_state{device="md8",state="resync"} 1 +node_md_state{device="md9",state="active"} 0 +node_md_state{device="md9",state="check"} 
0 +node_md_state{device="md9",state="inactive"} 0 +node_md_state{device="md9",state="recovering"} 0 +node_md_state{device="md9",state="resync"} 1 +# HELP node_memory_Active_anon_bytes Memory information field Active_anon_bytes. +# TYPE node_memory_Active_anon_bytes gauge +node_memory_Active_anon_bytes 2.068484096e+09 +# HELP node_memory_Active_bytes Memory information field Active_bytes. +# TYPE node_memory_Active_bytes gauge +node_memory_Active_bytes 2.287017984e+09 +# HELP node_memory_Active_file_bytes Memory information field Active_file_bytes. +# TYPE node_memory_Active_file_bytes gauge +node_memory_Active_file_bytes 2.18533888e+08 +# HELP node_memory_AnonHugePages_bytes Memory information field AnonHugePages_bytes. +# TYPE node_memory_AnonHugePages_bytes gauge +node_memory_AnonHugePages_bytes 0 +# HELP node_memory_AnonPages_bytes Memory information field AnonPages_bytes. +# TYPE node_memory_AnonPages_bytes gauge +node_memory_AnonPages_bytes 2.298032128e+09 +# HELP node_memory_Bounce_bytes Memory information field Bounce_bytes. +# TYPE node_memory_Bounce_bytes gauge +node_memory_Bounce_bytes 0 +# HELP node_memory_Buffers_bytes Memory information field Buffers_bytes. +# TYPE node_memory_Buffers_bytes gauge +node_memory_Buffers_bytes 2.256896e+07 +# HELP node_memory_Cached_bytes Memory information field Cached_bytes. +# TYPE node_memory_Cached_bytes gauge +node_memory_Cached_bytes 9.53229312e+08 +# HELP node_memory_CommitLimit_bytes Memory information field CommitLimit_bytes. +# TYPE node_memory_CommitLimit_bytes gauge +node_memory_CommitLimit_bytes 6.210940928e+09 +# HELP node_memory_Committed_AS_bytes Memory information field Committed_AS_bytes. +# TYPE node_memory_Committed_AS_bytes gauge +node_memory_Committed_AS_bytes 8.023486464e+09 +# HELP node_memory_DirectMap2M_bytes Memory information field DirectMap2M_bytes. 
+# TYPE node_memory_DirectMap2M_bytes gauge +node_memory_DirectMap2M_bytes 3.787456512e+09 +# HELP node_memory_DirectMap4k_bytes Memory information field DirectMap4k_bytes. +# TYPE node_memory_DirectMap4k_bytes gauge +node_memory_DirectMap4k_bytes 1.9011584e+08 +# HELP node_memory_Dirty_bytes Memory information field Dirty_bytes. +# TYPE node_memory_Dirty_bytes gauge +node_memory_Dirty_bytes 1.077248e+06 +# HELP node_memory_HardwareCorrupted_bytes Memory information field HardwareCorrupted_bytes. +# TYPE node_memory_HardwareCorrupted_bytes gauge +node_memory_HardwareCorrupted_bytes 0 # HELP node_memory_HugePages_Free Memory information field HugePages_Free. # TYPE node_memory_HugePages_Free gauge node_memory_HugePages_Free 0 @@ -1098,831 +1693,544 @@ node_memory_HugePages_Surp 0 # HELP node_memory_HugePages_Total Memory information field HugePages_Total. # TYPE node_memory_HugePages_Total gauge node_memory_HugePages_Total 0 -# HELP node_memory_Hugepagesize Memory information field Hugepagesize. -# TYPE node_memory_Hugepagesize gauge -node_memory_Hugepagesize 2.097152e+06 -# HELP node_memory_Inactive Memory information field Inactive. -# TYPE node_memory_Inactive gauge -node_memory_Inactive 1.053417472e+09 -# HELP node_memory_Inactive_anon Memory information field Inactive_anon. -# TYPE node_memory_Inactive_anon gauge -node_memory_Inactive_anon 9.04245248e+08 -# HELP node_memory_Inactive_file Memory information field Inactive_file. -# TYPE node_memory_Inactive_file gauge -node_memory_Inactive_file 1.49172224e+08 -# HELP node_memory_KernelStack Memory information field KernelStack. -# TYPE node_memory_KernelStack gauge -node_memory_KernelStack 5.9392e+06 -# HELP node_memory_Mapped Memory information field Mapped. -# TYPE node_memory_Mapped gauge -node_memory_Mapped 2.4496128e+08 -# HELP node_memory_MemFree Memory information field MemFree. 
-# TYPE node_memory_MemFree gauge -node_memory_MemFree 2.30883328e+08 -# HELP node_memory_MemTotal Memory information field MemTotal. -# TYPE node_memory_MemTotal gauge -node_memory_MemTotal 3.831959552e+09 -# HELP node_memory_Mlocked Memory information field Mlocked. -# TYPE node_memory_Mlocked gauge -node_memory_Mlocked 32768 -# HELP node_memory_NFS_Unstable Memory information field NFS_Unstable. -# TYPE node_memory_NFS_Unstable gauge -node_memory_NFS_Unstable 0 -# HELP node_memory_PageTables Memory information field PageTables. -# TYPE node_memory_PageTables gauge -node_memory_PageTables 7.7017088e+07 -# HELP node_memory_SReclaimable Memory information field SReclaimable. -# TYPE node_memory_SReclaimable gauge -node_memory_SReclaimable 4.5846528e+07 -# HELP node_memory_SUnreclaim Memory information field SUnreclaim. -# TYPE node_memory_SUnreclaim gauge -node_memory_SUnreclaim 5.545984e+07 -# HELP node_memory_Shmem Memory information field Shmem. -# TYPE node_memory_Shmem gauge -node_memory_Shmem 6.0809216e+08 -# HELP node_memory_Slab Memory information field Slab. -# TYPE node_memory_Slab gauge -node_memory_Slab 1.01306368e+08 -# HELP node_memory_SwapCached Memory information field SwapCached. -# TYPE node_memory_SwapCached gauge -node_memory_SwapCached 1.97124096e+08 -# HELP node_memory_SwapFree Memory information field SwapFree. -# TYPE node_memory_SwapFree gauge -node_memory_SwapFree 3.23108864e+09 -# HELP node_memory_SwapTotal Memory information field SwapTotal. -# TYPE node_memory_SwapTotal gauge -node_memory_SwapTotal 4.2949632e+09 -# HELP node_memory_Unevictable Memory information field Unevictable. -# TYPE node_memory_Unevictable gauge -node_memory_Unevictable 32768 -# HELP node_memory_VmallocChunk Memory information field VmallocChunk. -# TYPE node_memory_VmallocChunk gauge -node_memory_VmallocChunk 3.5183963009024e+13 -# HELP node_memory_VmallocTotal Memory information field VmallocTotal. 
-# TYPE node_memory_VmallocTotal gauge -node_memory_VmallocTotal 3.5184372087808e+13 -# HELP node_memory_VmallocUsed Memory information field VmallocUsed. -# TYPE node_memory_VmallocUsed gauge -node_memory_VmallocUsed 3.6130816e+08 -# HELP node_memory_Writeback Memory information field Writeback. -# TYPE node_memory_Writeback gauge -node_memory_Writeback 0 -# HELP node_memory_WritebackTmp Memory information field WritebackTmp. -# TYPE node_memory_WritebackTmp gauge -node_memory_WritebackTmp 0 +# HELP node_memory_Hugepagesize_bytes Memory information field Hugepagesize_bytes. +# TYPE node_memory_Hugepagesize_bytes gauge +node_memory_Hugepagesize_bytes 2.097152e+06 +# HELP node_memory_Inactive_anon_bytes Memory information field Inactive_anon_bytes. +# TYPE node_memory_Inactive_anon_bytes gauge +node_memory_Inactive_anon_bytes 9.04245248e+08 +# HELP node_memory_Inactive_bytes Memory information field Inactive_bytes. +# TYPE node_memory_Inactive_bytes gauge +node_memory_Inactive_bytes 1.053417472e+09 +# HELP node_memory_Inactive_file_bytes Memory information field Inactive_file_bytes. +# TYPE node_memory_Inactive_file_bytes gauge +node_memory_Inactive_file_bytes 1.49172224e+08 +# HELP node_memory_KernelStack_bytes Memory information field KernelStack_bytes. +# TYPE node_memory_KernelStack_bytes gauge +node_memory_KernelStack_bytes 5.9392e+06 +# HELP node_memory_Mapped_bytes Memory information field Mapped_bytes. +# TYPE node_memory_Mapped_bytes gauge +node_memory_Mapped_bytes 2.4496128e+08 +# HELP node_memory_MemFree_bytes Memory information field MemFree_bytes. +# TYPE node_memory_MemFree_bytes gauge +node_memory_MemFree_bytes 2.30883328e+08 +# HELP node_memory_MemTotal_bytes Memory information field MemTotal_bytes. +# TYPE node_memory_MemTotal_bytes gauge +node_memory_MemTotal_bytes 3.831959552e+09 +# HELP node_memory_Mlocked_bytes Memory information field Mlocked_bytes. 
+# TYPE node_memory_Mlocked_bytes gauge +node_memory_Mlocked_bytes 32768 +# HELP node_memory_NFS_Unstable_bytes Memory information field NFS_Unstable_bytes. +# TYPE node_memory_NFS_Unstable_bytes gauge +node_memory_NFS_Unstable_bytes 0 +# HELP node_memory_PageTables_bytes Memory information field PageTables_bytes. +# TYPE node_memory_PageTables_bytes gauge +node_memory_PageTables_bytes 7.7017088e+07 +# HELP node_memory_SReclaimable_bytes Memory information field SReclaimable_bytes. +# TYPE node_memory_SReclaimable_bytes gauge +node_memory_SReclaimable_bytes 4.5846528e+07 +# HELP node_memory_SUnreclaim_bytes Memory information field SUnreclaim_bytes. +# TYPE node_memory_SUnreclaim_bytes gauge +node_memory_SUnreclaim_bytes 5.545984e+07 +# HELP node_memory_Shmem_bytes Memory information field Shmem_bytes. +# TYPE node_memory_Shmem_bytes gauge +node_memory_Shmem_bytes 6.0809216e+08 +# HELP node_memory_Slab_bytes Memory information field Slab_bytes. +# TYPE node_memory_Slab_bytes gauge +node_memory_Slab_bytes 1.01306368e+08 +# HELP node_memory_SwapCached_bytes Memory information field SwapCached_bytes. +# TYPE node_memory_SwapCached_bytes gauge +node_memory_SwapCached_bytes 1.97124096e+08 +# HELP node_memory_SwapFree_bytes Memory information field SwapFree_bytes. +# TYPE node_memory_SwapFree_bytes gauge +node_memory_SwapFree_bytes 3.23108864e+09 +# HELP node_memory_SwapTotal_bytes Memory information field SwapTotal_bytes. +# TYPE node_memory_SwapTotal_bytes gauge +node_memory_SwapTotal_bytes 4.2949632e+09 +# HELP node_memory_Unevictable_bytes Memory information field Unevictable_bytes. +# TYPE node_memory_Unevictable_bytes gauge +node_memory_Unevictable_bytes 32768 +# HELP node_memory_VmallocChunk_bytes Memory information field VmallocChunk_bytes. +# TYPE node_memory_VmallocChunk_bytes gauge +node_memory_VmallocChunk_bytes 3.5183963009024e+13 +# HELP node_memory_VmallocTotal_bytes Memory information field VmallocTotal_bytes. 
+# TYPE node_memory_VmallocTotal_bytes gauge +node_memory_VmallocTotal_bytes 3.5184372087808e+13 +# HELP node_memory_VmallocUsed_bytes Memory information field VmallocUsed_bytes. +# TYPE node_memory_VmallocUsed_bytes gauge +node_memory_VmallocUsed_bytes 3.6130816e+08 +# HELP node_memory_WritebackTmp_bytes Memory information field WritebackTmp_bytes. +# TYPE node_memory_WritebackTmp_bytes gauge +node_memory_WritebackTmp_bytes 0 +# HELP node_memory_Writeback_bytes Memory information field Writeback_bytes. +# TYPE node_memory_Writeback_bytes gauge +node_memory_Writeback_bytes 0 # HELP node_memory_numa_Active Memory information field Active. # TYPE node_memory_numa_Active gauge node_memory_numa_Active{node="0"} 5.58733312e+09 node_memory_numa_Active{node="1"} 5.739003904e+09 +node_memory_numa_Active{node="2"} 5.739003904e+09 # HELP node_memory_numa_Active_anon Memory information field Active_anon. # TYPE node_memory_numa_Active_anon gauge node_memory_numa_Active_anon{node="0"} 7.07915776e+08 node_memory_numa_Active_anon{node="1"} 6.04635136e+08 +node_memory_numa_Active_anon{node="2"} 6.04635136e+08 # HELP node_memory_numa_Active_file Memory information field Active_file. # TYPE node_memory_numa_Active_file gauge node_memory_numa_Active_file{node="0"} 4.879417344e+09 node_memory_numa_Active_file{node="1"} 5.134368768e+09 +node_memory_numa_Active_file{node="2"} 5.134368768e+09 # HELP node_memory_numa_AnonHugePages Memory information field AnonHugePages. # TYPE node_memory_numa_AnonHugePages gauge node_memory_numa_AnonHugePages{node="0"} 1.50994944e+08 node_memory_numa_AnonHugePages{node="1"} 9.2274688e+07 +node_memory_numa_AnonHugePages{node="2"} 9.2274688e+07 # HELP node_memory_numa_AnonPages Memory information field AnonPages. 
# TYPE node_memory_numa_AnonPages gauge node_memory_numa_AnonPages{node="0"} 8.07112704e+08 node_memory_numa_AnonPages{node="1"} 6.88058368e+08 +node_memory_numa_AnonPages{node="2"} 6.88058368e+08 # HELP node_memory_numa_Bounce Memory information field Bounce. # TYPE node_memory_numa_Bounce gauge node_memory_numa_Bounce{node="0"} 0 node_memory_numa_Bounce{node="1"} 0 +node_memory_numa_Bounce{node="2"} 0 # HELP node_memory_numa_Dirty Memory information field Dirty. # TYPE node_memory_numa_Dirty gauge node_memory_numa_Dirty{node="0"} 20480 node_memory_numa_Dirty{node="1"} 122880 +node_memory_numa_Dirty{node="2"} 122880 # HELP node_memory_numa_FilePages Memory information field FilePages. # TYPE node_memory_numa_FilePages gauge node_memory_numa_FilePages{node="0"} 7.1855017984e+10 node_memory_numa_FilePages{node="1"} 8.5585088512e+10 +node_memory_numa_FilePages{node="2"} 8.5585088512e+10 # HELP node_memory_numa_HugePages_Free Memory information field HugePages_Free. # TYPE node_memory_numa_HugePages_Free gauge node_memory_numa_HugePages_Free{node="0"} 0 node_memory_numa_HugePages_Free{node="1"} 0 +node_memory_numa_HugePages_Free{node="2"} 0 # HELP node_memory_numa_HugePages_Surp Memory information field HugePages_Surp. # TYPE node_memory_numa_HugePages_Surp gauge node_memory_numa_HugePages_Surp{node="0"} 0 node_memory_numa_HugePages_Surp{node="1"} 0 +node_memory_numa_HugePages_Surp{node="2"} 0 # HELP node_memory_numa_HugePages_Total Memory information field HugePages_Total. # TYPE node_memory_numa_HugePages_Total gauge node_memory_numa_HugePages_Total{node="0"} 0 node_memory_numa_HugePages_Total{node="1"} 0 +node_memory_numa_HugePages_Total{node="2"} 0 # HELP node_memory_numa_Inactive Memory information field Inactive. 
# TYPE node_memory_numa_Inactive gauge node_memory_numa_Inactive{node="0"} 6.0569788416e+10 node_memory_numa_Inactive{node="1"} 7.3165406208e+10 +node_memory_numa_Inactive{node="2"} 7.3165406208e+10 # HELP node_memory_numa_Inactive_anon Memory information field Inactive_anon. # TYPE node_memory_numa_Inactive_anon gauge node_memory_numa_Inactive_anon{node="0"} 3.48626944e+08 node_memory_numa_Inactive_anon{node="1"} 2.91930112e+08 +node_memory_numa_Inactive_anon{node="2"} 2.91930112e+08 # HELP node_memory_numa_Inactive_file Memory information field Inactive_file. # TYPE node_memory_numa_Inactive_file gauge node_memory_numa_Inactive_file{node="0"} 6.0221161472e+10 node_memory_numa_Inactive_file{node="1"} 7.2873476096e+10 +node_memory_numa_Inactive_file{node="2"} 7.2873476096e+10 # HELP node_memory_numa_KernelStack Memory information field KernelStack. # TYPE node_memory_numa_KernelStack gauge node_memory_numa_KernelStack{node="0"} 3.4832384e+07 node_memory_numa_KernelStack{node="1"} 3.1850496e+07 +node_memory_numa_KernelStack{node="2"} 3.1850496e+07 # HELP node_memory_numa_Mapped Memory information field Mapped. # TYPE node_memory_numa_Mapped gauge node_memory_numa_Mapped{node="0"} 9.1570176e+08 node_memory_numa_Mapped{node="1"} 8.84850688e+08 +node_memory_numa_Mapped{node="2"} 8.84850688e+08 # HELP node_memory_numa_MemFree Memory information field MemFree. # TYPE node_memory_numa_MemFree gauge node_memory_numa_MemFree{node="0"} 5.4303100928e+10 node_memory_numa_MemFree{node="1"} 4.0586022912e+10 +node_memory_numa_MemFree{node="2"} 4.0586022912e+10 # HELP node_memory_numa_MemTotal Memory information field MemTotal. # TYPE node_memory_numa_MemTotal gauge node_memory_numa_MemTotal{node="0"} 1.3740271616e+11 node_memory_numa_MemTotal{node="1"} 1.37438953472e+11 +node_memory_numa_MemTotal{node="2"} 1.37438953472e+11 # HELP node_memory_numa_MemUsed Memory information field MemUsed. 
# TYPE node_memory_numa_MemUsed gauge node_memory_numa_MemUsed{node="0"} 8.3099615232e+10 node_memory_numa_MemUsed{node="1"} 9.685293056e+10 +node_memory_numa_MemUsed{node="2"} 9.685293056e+10 # HELP node_memory_numa_Mlocked Memory information field Mlocked. # TYPE node_memory_numa_Mlocked gauge node_memory_numa_Mlocked{node="0"} 0 node_memory_numa_Mlocked{node="1"} 0 +node_memory_numa_Mlocked{node="2"} 0 # HELP node_memory_numa_NFS_Unstable Memory information field NFS_Unstable. # TYPE node_memory_numa_NFS_Unstable gauge node_memory_numa_NFS_Unstable{node="0"} 0 node_memory_numa_NFS_Unstable{node="1"} 0 +node_memory_numa_NFS_Unstable{node="2"} 0 # HELP node_memory_numa_PageTables Memory information field PageTables. # TYPE node_memory_numa_PageTables gauge node_memory_numa_PageTables{node="0"} 1.46743296e+08 node_memory_numa_PageTables{node="1"} 1.27254528e+08 +node_memory_numa_PageTables{node="2"} 1.27254528e+08 # HELP node_memory_numa_SReclaimable Memory information field SReclaimable. # TYPE node_memory_numa_SReclaimable gauge node_memory_numa_SReclaimable{node="0"} 4.580478976e+09 node_memory_numa_SReclaimable{node="1"} 4.724822016e+09 +node_memory_numa_SReclaimable{node="2"} 4.724822016e+09 # HELP node_memory_numa_SUnreclaim Memory information field SUnreclaim. # TYPE node_memory_numa_SUnreclaim gauge node_memory_numa_SUnreclaim{node="0"} 2.23352832e+09 node_memory_numa_SUnreclaim{node="1"} 2.464391168e+09 +node_memory_numa_SUnreclaim{node="2"} 2.464391168e+09 # HELP node_memory_numa_Shmem Memory information field Shmem. # TYPE node_memory_numa_Shmem gauge node_memory_numa_Shmem{node="0"} 4.900864e+07 node_memory_numa_Shmem{node="1"} 8.968192e+07 +node_memory_numa_Shmem{node="2"} 8.968192e+07 # HELP node_memory_numa_Slab Memory information field Slab. 
# TYPE node_memory_numa_Slab gauge node_memory_numa_Slab{node="0"} 6.814007296e+09 node_memory_numa_Slab{node="1"} 7.189213184e+09 +node_memory_numa_Slab{node="2"} 7.189213184e+09 # HELP node_memory_numa_Unevictable Memory information field Unevictable. # TYPE node_memory_numa_Unevictable gauge node_memory_numa_Unevictable{node="0"} 0 node_memory_numa_Unevictable{node="1"} 0 +node_memory_numa_Unevictable{node="2"} 0 # HELP node_memory_numa_Writeback Memory information field Writeback. # TYPE node_memory_numa_Writeback gauge node_memory_numa_Writeback{node="0"} 0 node_memory_numa_Writeback{node="1"} 0 +node_memory_numa_Writeback{node="2"} 0 # HELP node_memory_numa_WritebackTmp Memory information field WritebackTmp. # TYPE node_memory_numa_WritebackTmp gauge node_memory_numa_WritebackTmp{node="0"} 0 node_memory_numa_WritebackTmp{node="1"} 0 +node_memory_numa_WritebackTmp{node="2"} 0 # HELP node_memory_numa_interleave_hit_total Memory information field interleave_hit_total. # TYPE node_memory_numa_interleave_hit_total counter node_memory_numa_interleave_hit_total{node="0"} 57146 node_memory_numa_interleave_hit_total{node="1"} 57286 +node_memory_numa_interleave_hit_total{node="2"} 7286 # HELP node_memory_numa_local_node_total Memory information field local_node_total. # TYPE node_memory_numa_local_node_total counter node_memory_numa_local_node_total{node="0"} 1.93454780853e+11 node_memory_numa_local_node_total{node="1"} 3.2671904655e+11 +node_memory_numa_local_node_total{node="2"} 2.671904655e+10 # HELP node_memory_numa_numa_foreign_total Memory information field numa_foreign_total. # TYPE node_memory_numa_numa_foreign_total counter node_memory_numa_numa_foreign_total{node="0"} 5.98586233e+10 node_memory_numa_numa_foreign_total{node="1"} 1.2624528e+07 +node_memory_numa_numa_foreign_total{node="2"} 2.624528e+06 # HELP node_memory_numa_numa_hit_total Memory information field numa_hit_total. 
# TYPE node_memory_numa_numa_hit_total counter node_memory_numa_numa_hit_total{node="0"} 1.93460335812e+11 node_memory_numa_numa_hit_total{node="1"} 3.26720946761e+11 +node_memory_numa_numa_hit_total{node="2"} 2.6720946761e+10 # HELP node_memory_numa_numa_miss_total Memory information field numa_miss_total. # TYPE node_memory_numa_numa_miss_total counter node_memory_numa_numa_miss_total{node="0"} 1.2624528e+07 node_memory_numa_numa_miss_total{node="1"} 5.9858626709e+10 +node_memory_numa_numa_miss_total{node="2"} 9.858626709e+09 # HELP node_memory_numa_other_node_total Memory information field other_node_total. # TYPE node_memory_numa_other_node_total counter node_memory_numa_other_node_total{node="0"} 1.8179487e+07 node_memory_numa_other_node_total{node="1"} 5.986052692e+10 +node_memory_numa_other_node_total{node="2"} 9.86052692e+09 # HELP node_mountstats_nfs_age_seconds_total The age of the NFS mount in seconds. # TYPE node_mountstats_nfs_age_seconds_total counter -node_mountstats_nfs_age_seconds_total{export="192.168.1.1:/srv/test"} 13968 +node_mountstats_nfs_age_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 13968 +node_mountstats_nfs_age_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 13968 # HELP node_mountstats_nfs_direct_read_bytes_total Number of bytes read using the read() syscall in O_DIRECT mode. # TYPE node_mountstats_nfs_direct_read_bytes_total counter -node_mountstats_nfs_direct_read_bytes_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_direct_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_direct_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_direct_write_bytes_total Number of bytes written using the write() syscall in O_DIRECT mode. 
# TYPE node_mountstats_nfs_direct_write_bytes_total counter -node_mountstats_nfs_direct_write_bytes_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_direct_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_direct_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_attribute_invalidate_total Number of times cached inode attributes are invalidated. # TYPE node_mountstats_nfs_event_attribute_invalidate_total counter -node_mountstats_nfs_event_attribute_invalidate_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_attribute_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_attribute_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_data_invalidate_total Number of times an inode cache is cleared. # TYPE node_mountstats_nfs_event_data_invalidate_total counter -node_mountstats_nfs_event_data_invalidate_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_data_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_data_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_dnode_revalidate_total Number of times cached dentry nodes are re-validated from the server. 
# TYPE node_mountstats_nfs_event_dnode_revalidate_total counter -node_mountstats_nfs_event_dnode_revalidate_total{export="192.168.1.1:/srv/test"} 226 +node_mountstats_nfs_event_dnode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 226 +node_mountstats_nfs_event_dnode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 226 # HELP node_mountstats_nfs_event_inode_revalidate_total Number of times cached inode attributes are re-validated from the server. # TYPE node_mountstats_nfs_event_inode_revalidate_total counter -node_mountstats_nfs_event_inode_revalidate_total{export="192.168.1.1:/srv/test"} 52 +node_mountstats_nfs_event_inode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 52 +node_mountstats_nfs_event_inode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 52 # HELP node_mountstats_nfs_event_jukebox_delay_total Number of times the NFS server indicated EJUKEBOX; retrieving data from offline storage. # TYPE node_mountstats_nfs_event_jukebox_delay_total counter -node_mountstats_nfs_event_jukebox_delay_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_jukebox_delay_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_jukebox_delay_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_pnfs_read_total Number of NFS v4.1+ pNFS reads. # TYPE node_mountstats_nfs_event_pnfs_read_total counter -node_mountstats_nfs_event_pnfs_read_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_pnfs_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_pnfs_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_pnfs_write_total Number of NFS v4.1+ pNFS writes. 
# TYPE node_mountstats_nfs_event_pnfs_write_total counter -node_mountstats_nfs_event_pnfs_write_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_pnfs_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_pnfs_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_short_read_total Number of times the NFS server gave less data than expected while reading. # TYPE node_mountstats_nfs_event_short_read_total counter -node_mountstats_nfs_event_short_read_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_short_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_short_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_short_write_total Number of times the NFS server wrote less data than expected while writing. # TYPE node_mountstats_nfs_event_short_write_total counter -node_mountstats_nfs_event_short_write_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_short_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_short_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_silly_rename_total Number of times a file was removed while still open by another process. # TYPE node_mountstats_nfs_event_silly_rename_total counter -node_mountstats_nfs_event_silly_rename_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_silly_rename_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_silly_rename_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_truncation_total Number of times files have been truncated. 
# TYPE node_mountstats_nfs_event_truncation_total counter -node_mountstats_nfs_event_truncation_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_truncation_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_truncation_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_access_total Number of times permissions have been checked. # TYPE node_mountstats_nfs_event_vfs_access_total counter -node_mountstats_nfs_event_vfs_access_total{export="192.168.1.1:/srv/test"} 398 +node_mountstats_nfs_event_vfs_access_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 398 +node_mountstats_nfs_event_vfs_access_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 398 # HELP node_mountstats_nfs_event_vfs_file_release_total Number of times files have been closed and released. # TYPE node_mountstats_nfs_event_vfs_file_release_total counter -node_mountstats_nfs_event_vfs_file_release_total{export="192.168.1.1:/srv/test"} 77 +node_mountstats_nfs_event_vfs_file_release_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 77 +node_mountstats_nfs_event_vfs_file_release_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 77 # HELP node_mountstats_nfs_event_vfs_flush_total Number of pending writes that have been forcefully flushed to the server. # TYPE node_mountstats_nfs_event_vfs_flush_total counter -node_mountstats_nfs_event_vfs_flush_total{export="192.168.1.1:/srv/test"} 77 +node_mountstats_nfs_event_vfs_flush_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 77 +node_mountstats_nfs_event_vfs_flush_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 77 # HELP node_mountstats_nfs_event_vfs_fsync_total Number of times fsync() has been called on directories and files. 
# TYPE node_mountstats_nfs_event_vfs_fsync_total counter -node_mountstats_nfs_event_vfs_fsync_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_vfs_fsync_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_vfs_fsync_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_getdents_total Number of times directory entries have been read with getdents(). # TYPE node_mountstats_nfs_event_vfs_getdents_total counter -node_mountstats_nfs_event_vfs_getdents_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_vfs_getdents_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_vfs_getdents_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_lock_total Number of times locking has been attempted on a file. # TYPE node_mountstats_nfs_event_vfs_lock_total counter -node_mountstats_nfs_event_vfs_lock_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_vfs_lock_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_vfs_lock_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_lookup_total Number of times a directory lookup has occurred. # TYPE node_mountstats_nfs_event_vfs_lookup_total counter -node_mountstats_nfs_event_vfs_lookup_total{export="192.168.1.1:/srv/test"} 13 +node_mountstats_nfs_event_vfs_lookup_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 13 +node_mountstats_nfs_event_vfs_lookup_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 13 # HELP node_mountstats_nfs_event_vfs_open_total Number of times cached inode attributes are invalidated. 
# TYPE node_mountstats_nfs_event_vfs_open_total counter -node_mountstats_nfs_event_vfs_open_total{export="192.168.1.1:/srv/test"} 1 +node_mountstats_nfs_event_vfs_open_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1 +node_mountstats_nfs_event_vfs_open_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 1 # HELP node_mountstats_nfs_event_vfs_read_page_total Number of pages read directly via mmap()'d files. # TYPE node_mountstats_nfs_event_vfs_read_page_total counter -node_mountstats_nfs_event_vfs_read_page_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_vfs_read_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_vfs_read_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_read_pages_total Number of times a group of pages have been read. # TYPE node_mountstats_nfs_event_vfs_read_pages_total counter -node_mountstats_nfs_event_vfs_read_pages_total{export="192.168.1.1:/srv/test"} 331 +node_mountstats_nfs_event_vfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 331 +node_mountstats_nfs_event_vfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 331 # HELP node_mountstats_nfs_event_vfs_setattr_total Number of times directory entries have been read with getdents(). # TYPE node_mountstats_nfs_event_vfs_setattr_total counter -node_mountstats_nfs_event_vfs_setattr_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_vfs_setattr_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_vfs_setattr_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_update_page_total Number of updates (and potential writes) to pages. 
# TYPE node_mountstats_nfs_event_vfs_update_page_total counter -node_mountstats_nfs_event_vfs_update_page_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_vfs_update_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_vfs_update_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_write_page_total Number of pages written directly via mmap()'d files. # TYPE node_mountstats_nfs_event_vfs_write_page_total counter -node_mountstats_nfs_event_vfs_write_page_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_vfs_write_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_vfs_write_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_write_pages_total Number of times a group of pages have been written. # TYPE node_mountstats_nfs_event_vfs_write_pages_total counter -node_mountstats_nfs_event_vfs_write_pages_total{export="192.168.1.1:/srv/test"} 47 +node_mountstats_nfs_event_vfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 47 +node_mountstats_nfs_event_vfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 47 # HELP node_mountstats_nfs_event_write_extension_total Number of times a file has been grown due to writes beyond its existing end. 
# TYPE node_mountstats_nfs_event_write_extension_total counter -node_mountstats_nfs_event_write_extension_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_event_write_extension_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_event_write_extension_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_major_timeouts_total Number of times a request has had a major timeout for a given operation. # TYPE node_mountstats_nfs_operations_major_timeouts_total counter -node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",operation="NULL"} 0 -node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",operation="READ"} 0 -node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",operation="WRITE"} 0 +node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 0 +node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 0 +node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 0 +node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_queue_time_seconds_total Duration all requests spent 
queued for transmission for a given operation before they were sent, in seconds. # TYPE node_mountstats_nfs_operations_queue_time_seconds_total counter -node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",operation="NULL"} 0 -node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",operation="READ"} 0.006 -node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",operation="WRITE"} 0 +node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 9.007044786793922e+12 +node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 0.006 +node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 0.006 +node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_received_bytes_total Number of bytes received for a given operation, including RPC headers and payload. 
# TYPE node_mountstats_nfs_operations_received_bytes_total counter -node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",operation="NULL"} 0 -node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",operation="READ"} 1.210292152e+09 -node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",operation="WRITE"} 0 +node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 3.62996810236e+11 +node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 1.210292152e+09 +node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 1.210292152e+09 +node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_request_time_seconds_total Duration all requests took from when a request was enqueued to when it was completely handled for a given operation, in seconds. 
# TYPE node_mountstats_nfs_operations_request_time_seconds_total counter -node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",operation="NULL"} 0 -node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",operation="READ"} 79.407 -node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",operation="WRITE"} 0 +node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 1.953587717e+06 +node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 79.407 +node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 79.407 +node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_requests_total Number of requests performed for a given operation. 
# TYPE node_mountstats_nfs_operations_requests_total counter -node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",operation="NULL"} 0 -node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",operation="READ"} 1298 -node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",operation="WRITE"} 0 +node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 2.927395007e+09 +node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 1298 +node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 1298 +node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_response_time_seconds_total Duration all requests took to get a reply back after a request for a given operation was transmitted, in seconds. 
# TYPE node_mountstats_nfs_operations_response_time_seconds_total counter -node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",operation="NULL"} 0 -node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",operation="READ"} 79.386 -node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",operation="WRITE"} 0 +node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 1.667369447e+06 +node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 79.386 +node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 79.386 +node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_sent_bytes_total Number of bytes sent for a given operation, including RPC headers and payload. 
# TYPE node_mountstats_nfs_operations_sent_bytes_total counter -node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",operation="NULL"} 0 -node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",operation="READ"} 207680 -node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",operation="WRITE"} 0 +node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 5.26931094212e+11 +node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 207680 +node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 207680 +node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_transmissions_total Number of times an actual RPC request has been transmitted for a given operation. 
# TYPE node_mountstats_nfs_operations_transmissions_total counter -node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",operation="NULL"} 0 -node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",operation="READ"} 1298 -node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",operation="WRITE"} 0 +node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 2.927394995e+09 +node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 +node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 +node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 1298 +node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 1298 +node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 +node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_read_bytes_total Number of bytes read using the read() syscall. # TYPE node_mountstats_nfs_read_bytes_total counter -node_mountstats_nfs_read_bytes_total{export="192.168.1.1:/srv/test"} 1.20764023e+09 +node_mountstats_nfs_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1.20764023e+09 +node_mountstats_nfs_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 1.20764023e+09 # HELP node_mountstats_nfs_read_pages_total Number of pages read directly via mmap()'d files. 
# TYPE node_mountstats_nfs_read_pages_total counter -node_mountstats_nfs_read_pages_total{export="192.168.1.1:/srv/test"} 295483 +node_mountstats_nfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 295483 +node_mountstats_nfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 295483 # HELP node_mountstats_nfs_total_read_bytes_total Number of bytes read from the NFS server, in total. # TYPE node_mountstats_nfs_total_read_bytes_total counter -node_mountstats_nfs_total_read_bytes_total{export="192.168.1.1:/srv/test"} 1.210214218e+09 +node_mountstats_nfs_total_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1.210214218e+09 +node_mountstats_nfs_total_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 1.210214218e+09 # HELP node_mountstats_nfs_total_write_bytes_total Number of bytes written to the NFS server, in total. # TYPE node_mountstats_nfs_total_write_bytes_total counter -node_mountstats_nfs_total_write_bytes_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_total_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_total_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_backlog_queue_total Total number of items added to the RPC backlog queue. 
# TYPE node_mountstats_nfs_transport_backlog_queue_total counter -node_mountstats_nfs_transport_backlog_queue_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_transport_backlog_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_transport_backlog_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_bad_transaction_ids_total Number of times the NFS server sent a response with a transaction ID unknown to this client. # TYPE node_mountstats_nfs_transport_bad_transaction_ids_total counter -node_mountstats_nfs_transport_bad_transaction_ids_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_transport_bad_transaction_ids_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_transport_bad_transaction_ids_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_bind_total Number of times the client has had to establish a connection from scratch to the NFS server. # TYPE node_mountstats_nfs_transport_bind_total counter -node_mountstats_nfs_transport_bind_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_transport_bind_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_transport_bind_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_connect_total Number of times the client has made a TCP connection to the NFS server. 
# TYPE node_mountstats_nfs_transport_connect_total counter -node_mountstats_nfs_transport_connect_total{export="192.168.1.1:/srv/test"} 1 +node_mountstats_nfs_transport_connect_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1 +node_mountstats_nfs_transport_connect_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_idle_time_seconds Duration since the NFS mount last saw any RPC traffic, in seconds. # TYPE node_mountstats_nfs_transport_idle_time_seconds gauge -node_mountstats_nfs_transport_idle_time_seconds{export="192.168.1.1:/srv/test"} 11 +node_mountstats_nfs_transport_idle_time_seconds{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 11 +node_mountstats_nfs_transport_idle_time_seconds{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_maximum_rpc_slots Maximum number of simultaneously active RPC requests ever used. # TYPE node_mountstats_nfs_transport_maximum_rpc_slots gauge -node_mountstats_nfs_transport_maximum_rpc_slots{export="192.168.1.1:/srv/test"} 24 +node_mountstats_nfs_transport_maximum_rpc_slots{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 24 +node_mountstats_nfs_transport_maximum_rpc_slots{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 24 # HELP node_mountstats_nfs_transport_pending_queue_total Total number of items added to the RPC transmission pending queue. 
# TYPE node_mountstats_nfs_transport_pending_queue_total counter -node_mountstats_nfs_transport_pending_queue_total{export="192.168.1.1:/srv/test"} 5726 +node_mountstats_nfs_transport_pending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 5726 +node_mountstats_nfs_transport_pending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 5726 # HELP node_mountstats_nfs_transport_receives_total Number of RPC responses for this mount received from the NFS server. # TYPE node_mountstats_nfs_transport_receives_total counter -node_mountstats_nfs_transport_receives_total{export="192.168.1.1:/srv/test"} 6428 +node_mountstats_nfs_transport_receives_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 6428 +node_mountstats_nfs_transport_receives_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 6428 # HELP node_mountstats_nfs_transport_sending_queue_total Total number of items added to the RPC transmission sending queue. # TYPE node_mountstats_nfs_transport_sending_queue_total counter -node_mountstats_nfs_transport_sending_queue_total{export="192.168.1.1:/srv/test"} 26 +node_mountstats_nfs_transport_sending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 26 +node_mountstats_nfs_transport_sending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 26 # HELP node_mountstats_nfs_transport_sends_total Number of RPC requests for this mount sent to the NFS server. 
# TYPE node_mountstats_nfs_transport_sends_total counter -node_mountstats_nfs_transport_sends_total{export="192.168.1.1:/srv/test"} 6428 +node_mountstats_nfs_transport_sends_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 6428 +node_mountstats_nfs_transport_sends_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 6428 # HELP node_mountstats_nfs_write_bytes_total Number of bytes written using the write() syscall. # TYPE node_mountstats_nfs_write_bytes_total counter -node_mountstats_nfs_write_bytes_total{export="192.168.1.1:/srv/test"} 0 +node_mountstats_nfs_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_write_pages_total Number of pages written directly via mmap()'d files. # TYPE node_mountstats_nfs_write_pages_total counter -node_mountstats_nfs_write_pages_total{export="192.168.1.1:/srv/test"} 0 -# HELP node_netstat_Icmp6_InCsumErrors Statistic Icmp6InCsumErrors. -# TYPE node_netstat_Icmp6_InCsumErrors untyped -node_netstat_Icmp6_InCsumErrors 0 -# HELP node_netstat_Icmp6_InDestUnreachs Statistic Icmp6InDestUnreachs. -# TYPE node_netstat_Icmp6_InDestUnreachs untyped -node_netstat_Icmp6_InDestUnreachs 0 -# HELP node_netstat_Icmp6_InEchoReplies Statistic Icmp6InEchoReplies. -# TYPE node_netstat_Icmp6_InEchoReplies untyped -node_netstat_Icmp6_InEchoReplies 0 -# HELP node_netstat_Icmp6_InEchos Statistic Icmp6InEchos. -# TYPE node_netstat_Icmp6_InEchos untyped -node_netstat_Icmp6_InEchos 0 +node_mountstats_nfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 +node_mountstats_nfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_netstat_Icmp6_InErrors Statistic Icmp6InErrors. 
# TYPE node_netstat_Icmp6_InErrors untyped node_netstat_Icmp6_InErrors 0 -# HELP node_netstat_Icmp6_InGroupMembQueries Statistic Icmp6InGroupMembQueries. -# TYPE node_netstat_Icmp6_InGroupMembQueries untyped -node_netstat_Icmp6_InGroupMembQueries 0 -# HELP node_netstat_Icmp6_InGroupMembReductions Statistic Icmp6InGroupMembReductions. -# TYPE node_netstat_Icmp6_InGroupMembReductions untyped -node_netstat_Icmp6_InGroupMembReductions 0 -# HELP node_netstat_Icmp6_InGroupMembResponses Statistic Icmp6InGroupMembResponses. -# TYPE node_netstat_Icmp6_InGroupMembResponses untyped -node_netstat_Icmp6_InGroupMembResponses 0 -# HELP node_netstat_Icmp6_InMLDv2Reports Statistic Icmp6InMLDv2Reports. -# TYPE node_netstat_Icmp6_InMLDv2Reports untyped -node_netstat_Icmp6_InMLDv2Reports 0 # HELP node_netstat_Icmp6_InMsgs Statistic Icmp6InMsgs. # TYPE node_netstat_Icmp6_InMsgs untyped node_netstat_Icmp6_InMsgs 0 -# HELP node_netstat_Icmp6_InNeighborAdvertisements Statistic Icmp6InNeighborAdvertisements. -# TYPE node_netstat_Icmp6_InNeighborAdvertisements untyped -node_netstat_Icmp6_InNeighborAdvertisements 0 -# HELP node_netstat_Icmp6_InNeighborSolicits Statistic Icmp6InNeighborSolicits. -# TYPE node_netstat_Icmp6_InNeighborSolicits untyped -node_netstat_Icmp6_InNeighborSolicits 0 -# HELP node_netstat_Icmp6_InParmProblems Statistic Icmp6InParmProblems. -# TYPE node_netstat_Icmp6_InParmProblems untyped -node_netstat_Icmp6_InParmProblems 0 -# HELP node_netstat_Icmp6_InPktTooBigs Statistic Icmp6InPktTooBigs. -# TYPE node_netstat_Icmp6_InPktTooBigs untyped -node_netstat_Icmp6_InPktTooBigs 0 -# HELP node_netstat_Icmp6_InRedirects Statistic Icmp6InRedirects. -# TYPE node_netstat_Icmp6_InRedirects untyped -node_netstat_Icmp6_InRedirects 0 -# HELP node_netstat_Icmp6_InRouterAdvertisements Statistic Icmp6InRouterAdvertisements. 
-# TYPE node_netstat_Icmp6_InRouterAdvertisements untyped -node_netstat_Icmp6_InRouterAdvertisements 0 -# HELP node_netstat_Icmp6_InRouterSolicits Statistic Icmp6InRouterSolicits. -# TYPE node_netstat_Icmp6_InRouterSolicits untyped -node_netstat_Icmp6_InRouterSolicits 0 -# HELP node_netstat_Icmp6_InTimeExcds Statistic Icmp6InTimeExcds. -# TYPE node_netstat_Icmp6_InTimeExcds untyped -node_netstat_Icmp6_InTimeExcds 0 -# HELP node_netstat_Icmp6_OutDestUnreachs Statistic Icmp6OutDestUnreachs. -# TYPE node_netstat_Icmp6_OutDestUnreachs untyped -node_netstat_Icmp6_OutDestUnreachs 0 -# HELP node_netstat_Icmp6_OutEchoReplies Statistic Icmp6OutEchoReplies. -# TYPE node_netstat_Icmp6_OutEchoReplies untyped -node_netstat_Icmp6_OutEchoReplies 0 -# HELP node_netstat_Icmp6_OutEchos Statistic Icmp6OutEchos. -# TYPE node_netstat_Icmp6_OutEchos untyped -node_netstat_Icmp6_OutEchos 0 -# HELP node_netstat_Icmp6_OutErrors Statistic Icmp6OutErrors. -# TYPE node_netstat_Icmp6_OutErrors untyped -node_netstat_Icmp6_OutErrors 0 -# HELP node_netstat_Icmp6_OutGroupMembQueries Statistic Icmp6OutGroupMembQueries. -# TYPE node_netstat_Icmp6_OutGroupMembQueries untyped -node_netstat_Icmp6_OutGroupMembQueries 0 -# HELP node_netstat_Icmp6_OutGroupMembReductions Statistic Icmp6OutGroupMembReductions. -# TYPE node_netstat_Icmp6_OutGroupMembReductions untyped -node_netstat_Icmp6_OutGroupMembReductions 0 -# HELP node_netstat_Icmp6_OutGroupMembResponses Statistic Icmp6OutGroupMembResponses. -# TYPE node_netstat_Icmp6_OutGroupMembResponses untyped -node_netstat_Icmp6_OutGroupMembResponses 0 -# HELP node_netstat_Icmp6_OutMLDv2Reports Statistic Icmp6OutMLDv2Reports. -# TYPE node_netstat_Icmp6_OutMLDv2Reports untyped -node_netstat_Icmp6_OutMLDv2Reports 4 # HELP node_netstat_Icmp6_OutMsgs Statistic Icmp6OutMsgs. # TYPE node_netstat_Icmp6_OutMsgs untyped node_netstat_Icmp6_OutMsgs 8 -# HELP node_netstat_Icmp6_OutNeighborAdvertisements Statistic Icmp6OutNeighborAdvertisements. 
-# TYPE node_netstat_Icmp6_OutNeighborAdvertisements untyped -node_netstat_Icmp6_OutNeighborAdvertisements 0 -# HELP node_netstat_Icmp6_OutNeighborSolicits Statistic Icmp6OutNeighborSolicits. -# TYPE node_netstat_Icmp6_OutNeighborSolicits untyped -node_netstat_Icmp6_OutNeighborSolicits 1 -# HELP node_netstat_Icmp6_OutParmProblems Statistic Icmp6OutParmProblems. -# TYPE node_netstat_Icmp6_OutParmProblems untyped -node_netstat_Icmp6_OutParmProblems 0 -# HELP node_netstat_Icmp6_OutPktTooBigs Statistic Icmp6OutPktTooBigs. -# TYPE node_netstat_Icmp6_OutPktTooBigs untyped -node_netstat_Icmp6_OutPktTooBigs 0 -# HELP node_netstat_Icmp6_OutRedirects Statistic Icmp6OutRedirects. -# TYPE node_netstat_Icmp6_OutRedirects untyped -node_netstat_Icmp6_OutRedirects 0 -# HELP node_netstat_Icmp6_OutRouterAdvertisements Statistic Icmp6OutRouterAdvertisements. -# TYPE node_netstat_Icmp6_OutRouterAdvertisements untyped -node_netstat_Icmp6_OutRouterAdvertisements 0 -# HELP node_netstat_Icmp6_OutRouterSolicits Statistic Icmp6OutRouterSolicits. -# TYPE node_netstat_Icmp6_OutRouterSolicits untyped -node_netstat_Icmp6_OutRouterSolicits 3 -# HELP node_netstat_Icmp6_OutTimeExcds Statistic Icmp6OutTimeExcds. -# TYPE node_netstat_Icmp6_OutTimeExcds untyped -node_netstat_Icmp6_OutTimeExcds 0 -# HELP node_netstat_Icmp6_OutType133 Statistic Icmp6OutType133. -# TYPE node_netstat_Icmp6_OutType133 untyped -node_netstat_Icmp6_OutType133 3 -# HELP node_netstat_Icmp6_OutType135 Statistic Icmp6OutType135. -# TYPE node_netstat_Icmp6_OutType135 untyped -node_netstat_Icmp6_OutType135 1 -# HELP node_netstat_Icmp6_OutType143 Statistic Icmp6OutType143. -# TYPE node_netstat_Icmp6_OutType143 untyped -node_netstat_Icmp6_OutType143 4 -# HELP node_netstat_IcmpMsg_InType3 Statistic IcmpMsgInType3. -# TYPE node_netstat_IcmpMsg_InType3 untyped -node_netstat_IcmpMsg_InType3 104 -# HELP node_netstat_IcmpMsg_OutType3 Statistic IcmpMsgOutType3. 
-# TYPE node_netstat_IcmpMsg_OutType3 untyped -node_netstat_IcmpMsg_OutType3 120 -# HELP node_netstat_Icmp_InAddrMaskReps Statistic IcmpInAddrMaskReps. -# TYPE node_netstat_Icmp_InAddrMaskReps untyped -node_netstat_Icmp_InAddrMaskReps 0 -# HELP node_netstat_Icmp_InAddrMasks Statistic IcmpInAddrMasks. -# TYPE node_netstat_Icmp_InAddrMasks untyped -node_netstat_Icmp_InAddrMasks 0 -# HELP node_netstat_Icmp_InCsumErrors Statistic IcmpInCsumErrors. -# TYPE node_netstat_Icmp_InCsumErrors untyped -node_netstat_Icmp_InCsumErrors 0 -# HELP node_netstat_Icmp_InDestUnreachs Statistic IcmpInDestUnreachs. -# TYPE node_netstat_Icmp_InDestUnreachs untyped -node_netstat_Icmp_InDestUnreachs 104 -# HELP node_netstat_Icmp_InEchoReps Statistic IcmpInEchoReps. -# TYPE node_netstat_Icmp_InEchoReps untyped -node_netstat_Icmp_InEchoReps 0 -# HELP node_netstat_Icmp_InEchos Statistic IcmpInEchos. -# TYPE node_netstat_Icmp_InEchos untyped -node_netstat_Icmp_InEchos 0 # HELP node_netstat_Icmp_InErrors Statistic IcmpInErrors. # TYPE node_netstat_Icmp_InErrors untyped node_netstat_Icmp_InErrors 0 # HELP node_netstat_Icmp_InMsgs Statistic IcmpInMsgs. # TYPE node_netstat_Icmp_InMsgs untyped node_netstat_Icmp_InMsgs 104 -# HELP node_netstat_Icmp_InParmProbs Statistic IcmpInParmProbs. -# TYPE node_netstat_Icmp_InParmProbs untyped -node_netstat_Icmp_InParmProbs 0 -# HELP node_netstat_Icmp_InRedirects Statistic IcmpInRedirects. -# TYPE node_netstat_Icmp_InRedirects untyped -node_netstat_Icmp_InRedirects 0 -# HELP node_netstat_Icmp_InSrcQuenchs Statistic IcmpInSrcQuenchs. -# TYPE node_netstat_Icmp_InSrcQuenchs untyped -node_netstat_Icmp_InSrcQuenchs 0 -# HELP node_netstat_Icmp_InTimeExcds Statistic IcmpInTimeExcds. -# TYPE node_netstat_Icmp_InTimeExcds untyped -node_netstat_Icmp_InTimeExcds 0 -# HELP node_netstat_Icmp_InTimestampReps Statistic IcmpInTimestampReps. 
-# TYPE node_netstat_Icmp_InTimestampReps untyped -node_netstat_Icmp_InTimestampReps 0 -# HELP node_netstat_Icmp_InTimestamps Statistic IcmpInTimestamps. -# TYPE node_netstat_Icmp_InTimestamps untyped -node_netstat_Icmp_InTimestamps 0 -# HELP node_netstat_Icmp_OutAddrMaskReps Statistic IcmpOutAddrMaskReps. -# TYPE node_netstat_Icmp_OutAddrMaskReps untyped -node_netstat_Icmp_OutAddrMaskReps 0 -# HELP node_netstat_Icmp_OutAddrMasks Statistic IcmpOutAddrMasks. -# TYPE node_netstat_Icmp_OutAddrMasks untyped -node_netstat_Icmp_OutAddrMasks 0 -# HELP node_netstat_Icmp_OutDestUnreachs Statistic IcmpOutDestUnreachs. -# TYPE node_netstat_Icmp_OutDestUnreachs untyped -node_netstat_Icmp_OutDestUnreachs 120 -# HELP node_netstat_Icmp_OutEchoReps Statistic IcmpOutEchoReps. -# TYPE node_netstat_Icmp_OutEchoReps untyped -node_netstat_Icmp_OutEchoReps 0 -# HELP node_netstat_Icmp_OutEchos Statistic IcmpOutEchos. -# TYPE node_netstat_Icmp_OutEchos untyped -node_netstat_Icmp_OutEchos 0 -# HELP node_netstat_Icmp_OutErrors Statistic IcmpOutErrors. -# TYPE node_netstat_Icmp_OutErrors untyped -node_netstat_Icmp_OutErrors 0 # HELP node_netstat_Icmp_OutMsgs Statistic IcmpOutMsgs. # TYPE node_netstat_Icmp_OutMsgs untyped node_netstat_Icmp_OutMsgs 120 -# HELP node_netstat_Icmp_OutParmProbs Statistic IcmpOutParmProbs. -# TYPE node_netstat_Icmp_OutParmProbs untyped -node_netstat_Icmp_OutParmProbs 0 -# HELP node_netstat_Icmp_OutRedirects Statistic IcmpOutRedirects. -# TYPE node_netstat_Icmp_OutRedirects untyped -node_netstat_Icmp_OutRedirects 0 -# HELP node_netstat_Icmp_OutSrcQuenchs Statistic IcmpOutSrcQuenchs. -# TYPE node_netstat_Icmp_OutSrcQuenchs untyped -node_netstat_Icmp_OutSrcQuenchs 0 -# HELP node_netstat_Icmp_OutTimeExcds Statistic IcmpOutTimeExcds. -# TYPE node_netstat_Icmp_OutTimeExcds untyped -node_netstat_Icmp_OutTimeExcds 0 -# HELP node_netstat_Icmp_OutTimestampReps Statistic IcmpOutTimestampReps. 
-# TYPE node_netstat_Icmp_OutTimestampReps untyped -node_netstat_Icmp_OutTimestampReps 0 -# HELP node_netstat_Icmp_OutTimestamps Statistic IcmpOutTimestamps. -# TYPE node_netstat_Icmp_OutTimestamps untyped -node_netstat_Icmp_OutTimestamps 0 -# HELP node_netstat_Ip6_FragCreates Statistic Ip6FragCreates. -# TYPE node_netstat_Ip6_FragCreates untyped -node_netstat_Ip6_FragCreates 0 -# HELP node_netstat_Ip6_FragFails Statistic Ip6FragFails. -# TYPE node_netstat_Ip6_FragFails untyped -node_netstat_Ip6_FragFails 0 -# HELP node_netstat_Ip6_FragOKs Statistic Ip6FragOKs. -# TYPE node_netstat_Ip6_FragOKs untyped -node_netstat_Ip6_FragOKs 0 -# HELP node_netstat_Ip6_InAddrErrors Statistic Ip6InAddrErrors. -# TYPE node_netstat_Ip6_InAddrErrors untyped -node_netstat_Ip6_InAddrErrors 0 -# HELP node_netstat_Ip6_InBcastOctets Statistic Ip6InBcastOctets. -# TYPE node_netstat_Ip6_InBcastOctets untyped -node_netstat_Ip6_InBcastOctets 0 -# HELP node_netstat_Ip6_InCEPkts Statistic Ip6InCEPkts. -# TYPE node_netstat_Ip6_InCEPkts untyped -node_netstat_Ip6_InCEPkts 0 -# HELP node_netstat_Ip6_InDelivers Statistic Ip6InDelivers. -# TYPE node_netstat_Ip6_InDelivers untyped -node_netstat_Ip6_InDelivers 0 -# HELP node_netstat_Ip6_InDiscards Statistic Ip6InDiscards. -# TYPE node_netstat_Ip6_InDiscards untyped -node_netstat_Ip6_InDiscards 0 -# HELP node_netstat_Ip6_InECT0Pkts Statistic Ip6InECT0Pkts. -# TYPE node_netstat_Ip6_InECT0Pkts untyped -node_netstat_Ip6_InECT0Pkts 0 -# HELP node_netstat_Ip6_InECT1Pkts Statistic Ip6InECT1Pkts. -# TYPE node_netstat_Ip6_InECT1Pkts untyped -node_netstat_Ip6_InECT1Pkts 0 -# HELP node_netstat_Ip6_InHdrErrors Statistic Ip6InHdrErrors. -# TYPE node_netstat_Ip6_InHdrErrors untyped -node_netstat_Ip6_InHdrErrors 0 -# HELP node_netstat_Ip6_InMcastOctets Statistic Ip6InMcastOctets. -# TYPE node_netstat_Ip6_InMcastOctets untyped -node_netstat_Ip6_InMcastOctets 112 -# HELP node_netstat_Ip6_InMcastPkts Statistic Ip6InMcastPkts. 
-# TYPE node_netstat_Ip6_InMcastPkts untyped -node_netstat_Ip6_InMcastPkts 2 -# HELP node_netstat_Ip6_InNoECTPkts Statistic Ip6InNoECTPkts. -# TYPE node_netstat_Ip6_InNoECTPkts untyped -node_netstat_Ip6_InNoECTPkts 7 -# HELP node_netstat_Ip6_InNoRoutes Statistic Ip6InNoRoutes. -# TYPE node_netstat_Ip6_InNoRoutes untyped -node_netstat_Ip6_InNoRoutes 5 # HELP node_netstat_Ip6_InOctets Statistic Ip6InOctets. # TYPE node_netstat_Ip6_InOctets untyped node_netstat_Ip6_InOctets 460 -# HELP node_netstat_Ip6_InReceives Statistic Ip6InReceives. -# TYPE node_netstat_Ip6_InReceives untyped -node_netstat_Ip6_InReceives 7 -# HELP node_netstat_Ip6_InTooBigErrors Statistic Ip6InTooBigErrors. -# TYPE node_netstat_Ip6_InTooBigErrors untyped -node_netstat_Ip6_InTooBigErrors 0 -# HELP node_netstat_Ip6_InTruncatedPkts Statistic Ip6InTruncatedPkts. -# TYPE node_netstat_Ip6_InTruncatedPkts untyped -node_netstat_Ip6_InTruncatedPkts 0 -# HELP node_netstat_Ip6_InUnknownProtos Statistic Ip6InUnknownProtos. -# TYPE node_netstat_Ip6_InUnknownProtos untyped -node_netstat_Ip6_InUnknownProtos 0 -# HELP node_netstat_Ip6_OutBcastOctets Statistic Ip6OutBcastOctets. -# TYPE node_netstat_Ip6_OutBcastOctets untyped -node_netstat_Ip6_OutBcastOctets 0 -# HELP node_netstat_Ip6_OutDiscards Statistic Ip6OutDiscards. -# TYPE node_netstat_Ip6_OutDiscards untyped -node_netstat_Ip6_OutDiscards 0 -# HELP node_netstat_Ip6_OutForwDatagrams Statistic Ip6OutForwDatagrams. -# TYPE node_netstat_Ip6_OutForwDatagrams untyped -node_netstat_Ip6_OutForwDatagrams 0 -# HELP node_netstat_Ip6_OutMcastOctets Statistic Ip6OutMcastOctets. -# TYPE node_netstat_Ip6_OutMcastOctets untyped -node_netstat_Ip6_OutMcastOctets 840 -# HELP node_netstat_Ip6_OutMcastPkts Statistic Ip6OutMcastPkts. -# TYPE node_netstat_Ip6_OutMcastPkts untyped -node_netstat_Ip6_OutMcastPkts 12 -# HELP node_netstat_Ip6_OutNoRoutes Statistic Ip6OutNoRoutes. 
-# TYPE node_netstat_Ip6_OutNoRoutes untyped -node_netstat_Ip6_OutNoRoutes 3003 # HELP node_netstat_Ip6_OutOctets Statistic Ip6OutOctets. # TYPE node_netstat_Ip6_OutOctets untyped node_netstat_Ip6_OutOctets 536 -# HELP node_netstat_Ip6_OutRequests Statistic Ip6OutRequests. -# TYPE node_netstat_Ip6_OutRequests untyped -node_netstat_Ip6_OutRequests 8 -# HELP node_netstat_Ip6_ReasmFails Statistic Ip6ReasmFails. -# TYPE node_netstat_Ip6_ReasmFails untyped -node_netstat_Ip6_ReasmFails 0 -# HELP node_netstat_Ip6_ReasmOKs Statistic Ip6ReasmOKs. -# TYPE node_netstat_Ip6_ReasmOKs untyped -node_netstat_Ip6_ReasmOKs 0 -# HELP node_netstat_Ip6_ReasmReqds Statistic Ip6ReasmReqds. -# TYPE node_netstat_Ip6_ReasmReqds untyped -node_netstat_Ip6_ReasmReqds 0 -# HELP node_netstat_Ip6_ReasmTimeout Statistic Ip6ReasmTimeout. -# TYPE node_netstat_Ip6_ReasmTimeout untyped -node_netstat_Ip6_ReasmTimeout 0 -# HELP node_netstat_IpExt_InBcastOctets Statistic IpExtInBcastOctets. -# TYPE node_netstat_IpExt_InBcastOctets untyped -node_netstat_IpExt_InBcastOctets 0 -# HELP node_netstat_IpExt_InBcastPkts Statistic IpExtInBcastPkts. -# TYPE node_netstat_IpExt_InBcastPkts untyped -node_netstat_IpExt_InBcastPkts 0 -# HELP node_netstat_IpExt_InMcastOctets Statistic IpExtInMcastOctets. -# TYPE node_netstat_IpExt_InMcastOctets untyped -node_netstat_IpExt_InMcastOctets 0 -# HELP node_netstat_IpExt_InMcastPkts Statistic IpExtInMcastPkts. -# TYPE node_netstat_IpExt_InMcastPkts untyped -node_netstat_IpExt_InMcastPkts 0 -# HELP node_netstat_IpExt_InNoRoutes Statistic IpExtInNoRoutes. -# TYPE node_netstat_IpExt_InNoRoutes untyped -node_netstat_IpExt_InNoRoutes 0 # HELP node_netstat_IpExt_InOctets Statistic IpExtInOctets. # TYPE node_netstat_IpExt_InOctets untyped node_netstat_IpExt_InOctets 6.28639697e+09 -# HELP node_netstat_IpExt_InTruncatedPkts Statistic IpExtInTruncatedPkts. 
-# TYPE node_netstat_IpExt_InTruncatedPkts untyped -node_netstat_IpExt_InTruncatedPkts 0 -# HELP node_netstat_IpExt_OutBcastOctets Statistic IpExtOutBcastOctets. -# TYPE node_netstat_IpExt_OutBcastOctets untyped -node_netstat_IpExt_OutBcastOctets 0 -# HELP node_netstat_IpExt_OutBcastPkts Statistic IpExtOutBcastPkts. -# TYPE node_netstat_IpExt_OutBcastPkts untyped -node_netstat_IpExt_OutBcastPkts 0 -# HELP node_netstat_IpExt_OutMcastOctets Statistic IpExtOutMcastOctets. -# TYPE node_netstat_IpExt_OutMcastOctets untyped -node_netstat_IpExt_OutMcastOctets 0 -# HELP node_netstat_IpExt_OutMcastPkts Statistic IpExtOutMcastPkts. -# TYPE node_netstat_IpExt_OutMcastPkts untyped -node_netstat_IpExt_OutMcastPkts 0 # HELP node_netstat_IpExt_OutOctets Statistic IpExtOutOctets. # TYPE node_netstat_IpExt_OutOctets untyped node_netstat_IpExt_OutOctets 2.786264347e+09 -# HELP node_netstat_Ip_DefaultTTL Statistic IpDefaultTTL. -# TYPE node_netstat_Ip_DefaultTTL untyped -node_netstat_Ip_DefaultTTL 64 -# HELP node_netstat_Ip_ForwDatagrams Statistic IpForwDatagrams. -# TYPE node_netstat_Ip_ForwDatagrams untyped -node_netstat_Ip_ForwDatagrams 397750 # HELP node_netstat_Ip_Forwarding Statistic IpForwarding. # TYPE node_netstat_Ip_Forwarding untyped node_netstat_Ip_Forwarding 1 -# HELP node_netstat_Ip_FragCreates Statistic IpFragCreates. -# TYPE node_netstat_Ip_FragCreates untyped -node_netstat_Ip_FragCreates 0 -# HELP node_netstat_Ip_FragFails Statistic IpFragFails. -# TYPE node_netstat_Ip_FragFails untyped -node_netstat_Ip_FragFails 0 -# HELP node_netstat_Ip_FragOKs Statistic IpFragOKs. -# TYPE node_netstat_Ip_FragOKs untyped -node_netstat_Ip_FragOKs 0 -# HELP node_netstat_Ip_InAddrErrors Statistic IpInAddrErrors. -# TYPE node_netstat_Ip_InAddrErrors untyped -node_netstat_Ip_InAddrErrors 25 -# HELP node_netstat_Ip_InDelivers Statistic IpInDelivers. 
-# TYPE node_netstat_Ip_InDelivers untyped -node_netstat_Ip_InDelivers 5.7340175e+07 -# HELP node_netstat_Ip_InDiscards Statistic IpInDiscards. -# TYPE node_netstat_Ip_InDiscards untyped -node_netstat_Ip_InDiscards 0 -# HELP node_netstat_Ip_InHdrErrors Statistic IpInHdrErrors. -# TYPE node_netstat_Ip_InHdrErrors untyped -node_netstat_Ip_InHdrErrors 0 -# HELP node_netstat_Ip_InReceives Statistic IpInReceives. -# TYPE node_netstat_Ip_InReceives untyped -node_netstat_Ip_InReceives 5.7740232e+07 -# HELP node_netstat_Ip_InUnknownProtos Statistic IpInUnknownProtos. -# TYPE node_netstat_Ip_InUnknownProtos untyped -node_netstat_Ip_InUnknownProtos 0 -# HELP node_netstat_Ip_OutDiscards Statistic IpOutDiscards. -# TYPE node_netstat_Ip_OutDiscards untyped -node_netstat_Ip_OutDiscards 0 -# HELP node_netstat_Ip_OutNoRoutes Statistic IpOutNoRoutes. -# TYPE node_netstat_Ip_OutNoRoutes untyped -node_netstat_Ip_OutNoRoutes 54 -# HELP node_netstat_Ip_OutRequests Statistic IpOutRequests. -# TYPE node_netstat_Ip_OutRequests untyped -node_netstat_Ip_OutRequests 5.5365537e+07 -# HELP node_netstat_Ip_ReasmFails Statistic IpReasmFails. -# TYPE node_netstat_Ip_ReasmFails untyped -node_netstat_Ip_ReasmFails 0 -# HELP node_netstat_Ip_ReasmOKs Statistic IpReasmOKs. -# TYPE node_netstat_Ip_ReasmOKs untyped -node_netstat_Ip_ReasmOKs 0 -# HELP node_netstat_Ip_ReasmReqds Statistic IpReasmReqds. -# TYPE node_netstat_Ip_ReasmReqds untyped -node_netstat_Ip_ReasmReqds 0 -# HELP node_netstat_Ip_ReasmTimeout Statistic IpReasmTimeout. -# TYPE node_netstat_Ip_ReasmTimeout untyped -node_netstat_Ip_ReasmTimeout 0 -# HELP node_netstat_TcpExt_ArpFilter Statistic TcpExtArpFilter. -# TYPE node_netstat_TcpExt_ArpFilter untyped -node_netstat_TcpExt_ArpFilter 0 -# HELP node_netstat_TcpExt_DelayedACKLocked Statistic TcpExtDelayedACKLocked. -# TYPE node_netstat_TcpExt_DelayedACKLocked untyped -node_netstat_TcpExt_DelayedACKLocked 17 -# HELP node_netstat_TcpExt_DelayedACKLost Statistic TcpExtDelayedACKLost. 
-# TYPE node_netstat_TcpExt_DelayedACKLost untyped -node_netstat_TcpExt_DelayedACKLost 9 -# HELP node_netstat_TcpExt_DelayedACKs Statistic TcpExtDelayedACKs. -# TYPE node_netstat_TcpExt_DelayedACKs untyped -node_netstat_TcpExt_DelayedACKs 102471 -# HELP node_netstat_TcpExt_EmbryonicRsts Statistic TcpExtEmbryonicRsts. -# TYPE node_netstat_TcpExt_EmbryonicRsts untyped -node_netstat_TcpExt_EmbryonicRsts 0 -# HELP node_netstat_TcpExt_IPReversePathFilter Statistic TcpExtIPReversePathFilter. -# TYPE node_netstat_TcpExt_IPReversePathFilter untyped -node_netstat_TcpExt_IPReversePathFilter 0 # HELP node_netstat_TcpExt_ListenDrops Statistic TcpExtListenDrops. # TYPE node_netstat_TcpExt_ListenDrops untyped node_netstat_TcpExt_ListenDrops 0 # HELP node_netstat_TcpExt_ListenOverflows Statistic TcpExtListenOverflows. # TYPE node_netstat_TcpExt_ListenOverflows untyped node_netstat_TcpExt_ListenOverflows 0 -# HELP node_netstat_TcpExt_LockDroppedIcmps Statistic TcpExtLockDroppedIcmps. -# TYPE node_netstat_TcpExt_LockDroppedIcmps untyped -node_netstat_TcpExt_LockDroppedIcmps 0 -# HELP node_netstat_TcpExt_OfoPruned Statistic TcpExtOfoPruned. -# TYPE node_netstat_TcpExt_OfoPruned untyped -node_netstat_TcpExt_OfoPruned 0 -# HELP node_netstat_TcpExt_OutOfWindowIcmps Statistic TcpExtOutOfWindowIcmps. -# TYPE node_netstat_TcpExt_OutOfWindowIcmps untyped -node_netstat_TcpExt_OutOfWindowIcmps 0 -# HELP node_netstat_TcpExt_PAWSActive Statistic TcpExtPAWSActive. -# TYPE node_netstat_TcpExt_PAWSActive untyped -node_netstat_TcpExt_PAWSActive 0 -# HELP node_netstat_TcpExt_PAWSEstab Statistic TcpExtPAWSEstab. -# TYPE node_netstat_TcpExt_PAWSEstab untyped -node_netstat_TcpExt_PAWSEstab 6 -# HELP node_netstat_TcpExt_PAWSPassive Statistic TcpExtPAWSPassive. -# TYPE node_netstat_TcpExt_PAWSPassive untyped -node_netstat_TcpExt_PAWSPassive 0 -# HELP node_netstat_TcpExt_PruneCalled Statistic TcpExtPruneCalled. 
-# TYPE node_netstat_TcpExt_PruneCalled untyped -node_netstat_TcpExt_PruneCalled 0 -# HELP node_netstat_TcpExt_RcvPruned Statistic TcpExtRcvPruned. -# TYPE node_netstat_TcpExt_RcvPruned untyped -node_netstat_TcpExt_RcvPruned 0 # HELP node_netstat_TcpExt_SyncookiesFailed Statistic TcpExtSyncookiesFailed. # TYPE node_netstat_TcpExt_SyncookiesFailed untyped node_netstat_TcpExt_SyncookiesFailed 2 @@ -1932,219 +2240,21 @@ node_netstat_TcpExt_SyncookiesRecv 0 # HELP node_netstat_TcpExt_SyncookiesSent Statistic TcpExtSyncookiesSent. # TYPE node_netstat_TcpExt_SyncookiesSent untyped node_netstat_TcpExt_SyncookiesSent 0 -# HELP node_netstat_TcpExt_TCPAbortFailed Statistic TcpExtTCPAbortFailed. -# TYPE node_netstat_TcpExt_TCPAbortFailed untyped -node_netstat_TcpExt_TCPAbortFailed 0 -# HELP node_netstat_TcpExt_TCPAbortOnClose Statistic TcpExtTCPAbortOnClose. -# TYPE node_netstat_TcpExt_TCPAbortOnClose untyped -node_netstat_TcpExt_TCPAbortOnClose 4 -# HELP node_netstat_TcpExt_TCPAbortOnData Statistic TcpExtTCPAbortOnData. -# TYPE node_netstat_TcpExt_TCPAbortOnData untyped -node_netstat_TcpExt_TCPAbortOnData 41 -# HELP node_netstat_TcpExt_TCPAbortOnLinger Statistic TcpExtTCPAbortOnLinger. -# TYPE node_netstat_TcpExt_TCPAbortOnLinger untyped -node_netstat_TcpExt_TCPAbortOnLinger 0 -# HELP node_netstat_TcpExt_TCPAbortOnMemory Statistic TcpExtTCPAbortOnMemory. -# TYPE node_netstat_TcpExt_TCPAbortOnMemory untyped -node_netstat_TcpExt_TCPAbortOnMemory 0 -# HELP node_netstat_TcpExt_TCPAbortOnTimeout Statistic TcpExtTCPAbortOnTimeout. -# TYPE node_netstat_TcpExt_TCPAbortOnTimeout untyped -node_netstat_TcpExt_TCPAbortOnTimeout 0 -# HELP node_netstat_TcpExt_TCPBacklogDrop Statistic TcpExtTCPBacklogDrop. -# TYPE node_netstat_TcpExt_TCPBacklogDrop untyped -node_netstat_TcpExt_TCPBacklogDrop 0 -# HELP node_netstat_TcpExt_TCPChallengeACK Statistic TcpExtTCPChallengeACK. 
-# TYPE node_netstat_TcpExt_TCPChallengeACK untyped -node_netstat_TcpExt_TCPChallengeACK 2 -# HELP node_netstat_TcpExt_TCPDSACKIgnoredNoUndo Statistic TcpExtTCPDSACKIgnoredNoUndo. -# TYPE node_netstat_TcpExt_TCPDSACKIgnoredNoUndo untyped -node_netstat_TcpExt_TCPDSACKIgnoredNoUndo 1 -# HELP node_netstat_TcpExt_TCPDSACKIgnoredOld Statistic TcpExtTCPDSACKIgnoredOld. -# TYPE node_netstat_TcpExt_TCPDSACKIgnoredOld untyped -node_netstat_TcpExt_TCPDSACKIgnoredOld 0 -# HELP node_netstat_TcpExt_TCPDSACKOfoRecv Statistic TcpExtTCPDSACKOfoRecv. -# TYPE node_netstat_TcpExt_TCPDSACKOfoRecv untyped -node_netstat_TcpExt_TCPDSACKOfoRecv 0 -# HELP node_netstat_TcpExt_TCPDSACKOfoSent Statistic TcpExtTCPDSACKOfoSent. -# TYPE node_netstat_TcpExt_TCPDSACKOfoSent untyped -node_netstat_TcpExt_TCPDSACKOfoSent 0 -# HELP node_netstat_TcpExt_TCPDSACKOldSent Statistic TcpExtTCPDSACKOldSent. -# TYPE node_netstat_TcpExt_TCPDSACKOldSent untyped -node_netstat_TcpExt_TCPDSACKOldSent 9 -# HELP node_netstat_TcpExt_TCPDSACKRecv Statistic TcpExtTCPDSACKRecv. -# TYPE node_netstat_TcpExt_TCPDSACKRecv untyped -node_netstat_TcpExt_TCPDSACKRecv 5 -# HELP node_netstat_TcpExt_TCPDSACKUndo Statistic TcpExtTCPDSACKUndo. -# TYPE node_netstat_TcpExt_TCPDSACKUndo untyped -node_netstat_TcpExt_TCPDSACKUndo 0 -# HELP node_netstat_TcpExt_TCPDeferAcceptDrop Statistic TcpExtTCPDeferAcceptDrop. -# TYPE node_netstat_TcpExt_TCPDeferAcceptDrop untyped -node_netstat_TcpExt_TCPDeferAcceptDrop 0 -# HELP node_netstat_TcpExt_TCPDirectCopyFromBacklog Statistic TcpExtTCPDirectCopyFromBacklog. -# TYPE node_netstat_TcpExt_TCPDirectCopyFromBacklog untyped -node_netstat_TcpExt_TCPDirectCopyFromBacklog 0 -# HELP node_netstat_TcpExt_TCPDirectCopyFromPrequeue Statistic TcpExtTCPDirectCopyFromPrequeue. -# TYPE node_netstat_TcpExt_TCPDirectCopyFromPrequeue untyped -node_netstat_TcpExt_TCPDirectCopyFromPrequeue 168808 -# HELP node_netstat_TcpExt_TCPFACKReorder Statistic TcpExtTCPFACKReorder. 
-# TYPE node_netstat_TcpExt_TCPFACKReorder untyped -node_netstat_TcpExt_TCPFACKReorder 0 -# HELP node_netstat_TcpExt_TCPFastRetrans Statistic TcpExtTCPFastRetrans. -# TYPE node_netstat_TcpExt_TCPFastRetrans untyped -node_netstat_TcpExt_TCPFastRetrans 1 -# HELP node_netstat_TcpExt_TCPForwardRetrans Statistic TcpExtTCPForwardRetrans. -# TYPE node_netstat_TcpExt_TCPForwardRetrans untyped -node_netstat_TcpExt_TCPForwardRetrans 0 -# HELP node_netstat_TcpExt_TCPFullUndo Statistic TcpExtTCPFullUndo. -# TYPE node_netstat_TcpExt_TCPFullUndo untyped -node_netstat_TcpExt_TCPFullUndo 0 -# HELP node_netstat_TcpExt_TCPHPAcks Statistic TcpExtTCPHPAcks. -# TYPE node_netstat_TcpExt_TCPHPAcks untyped -node_netstat_TcpExt_TCPHPAcks 3.744565e+06 -# HELP node_netstat_TcpExt_TCPHPHits Statistic TcpExtTCPHPHits. -# TYPE node_netstat_TcpExt_TCPHPHits untyped -node_netstat_TcpExt_TCPHPHits 4.471289e+06 -# HELP node_netstat_TcpExt_TCPHPHitsToUser Statistic TcpExtTCPHPHitsToUser. -# TYPE node_netstat_TcpExt_TCPHPHitsToUser untyped -node_netstat_TcpExt_TCPHPHitsToUser 26 -# HELP node_netstat_TcpExt_TCPLoss Statistic TcpExtTCPLoss. -# TYPE node_netstat_TcpExt_TCPLoss untyped -node_netstat_TcpExt_TCPLoss 0 -# HELP node_netstat_TcpExt_TCPLossFailures Statistic TcpExtTCPLossFailures. -# TYPE node_netstat_TcpExt_TCPLossFailures untyped -node_netstat_TcpExt_TCPLossFailures 0 -# HELP node_netstat_TcpExt_TCPLossUndo Statistic TcpExtTCPLossUndo. -# TYPE node_netstat_TcpExt_TCPLossUndo untyped -node_netstat_TcpExt_TCPLossUndo 48 -# HELP node_netstat_TcpExt_TCPLostRetransmit Statistic TcpExtTCPLostRetransmit. -# TYPE node_netstat_TcpExt_TCPLostRetransmit untyped -node_netstat_TcpExt_TCPLostRetransmit 0 -# HELP node_netstat_TcpExt_TCPMD5NotFound Statistic TcpExtTCPMD5NotFound. -# TYPE node_netstat_TcpExt_TCPMD5NotFound untyped -node_netstat_TcpExt_TCPMD5NotFound 0 -# HELP node_netstat_TcpExt_TCPMD5Unexpected Statistic TcpExtTCPMD5Unexpected. 
-# TYPE node_netstat_TcpExt_TCPMD5Unexpected untyped -node_netstat_TcpExt_TCPMD5Unexpected 0 -# HELP node_netstat_TcpExt_TCPMemoryPressures Statistic TcpExtTCPMemoryPressures. -# TYPE node_netstat_TcpExt_TCPMemoryPressures untyped -node_netstat_TcpExt_TCPMemoryPressures 0 -# HELP node_netstat_TcpExt_TCPMinTTLDrop Statistic TcpExtTCPMinTTLDrop. -# TYPE node_netstat_TcpExt_TCPMinTTLDrop untyped -node_netstat_TcpExt_TCPMinTTLDrop 0 -# HELP node_netstat_TcpExt_TCPPartialUndo Statistic TcpExtTCPPartialUndo. -# TYPE node_netstat_TcpExt_TCPPartialUndo untyped -node_netstat_TcpExt_TCPPartialUndo 0 -# HELP node_netstat_TcpExt_TCPPrequeueDropped Statistic TcpExtTCPPrequeueDropped. -# TYPE node_netstat_TcpExt_TCPPrequeueDropped untyped -node_netstat_TcpExt_TCPPrequeueDropped 0 -# HELP node_netstat_TcpExt_TCPPrequeued Statistic TcpExtTCPPrequeued. -# TYPE node_netstat_TcpExt_TCPPrequeued untyped -node_netstat_TcpExt_TCPPrequeued 80568 -# HELP node_netstat_TcpExt_TCPPureAcks Statistic TcpExtTCPPureAcks. -# TYPE node_netstat_TcpExt_TCPPureAcks untyped -node_netstat_TcpExt_TCPPureAcks 1.43394e+06 -# HELP node_netstat_TcpExt_TCPRcvCollapsed Statistic TcpExtTCPRcvCollapsed. -# TYPE node_netstat_TcpExt_TCPRcvCollapsed untyped -node_netstat_TcpExt_TCPRcvCollapsed 0 -# HELP node_netstat_TcpExt_TCPRenoFailures Statistic TcpExtTCPRenoFailures. -# TYPE node_netstat_TcpExt_TCPRenoFailures untyped -node_netstat_TcpExt_TCPRenoFailures 0 -# HELP node_netstat_TcpExt_TCPRenoRecovery Statistic TcpExtTCPRenoRecovery. -# TYPE node_netstat_TcpExt_TCPRenoRecovery untyped -node_netstat_TcpExt_TCPRenoRecovery 0 -# HELP node_netstat_TcpExt_TCPRenoRecoveryFail Statistic TcpExtTCPRenoRecoveryFail. -# TYPE node_netstat_TcpExt_TCPRenoRecoveryFail untyped -node_netstat_TcpExt_TCPRenoRecoveryFail 0 -# HELP node_netstat_TcpExt_TCPRenoReorder Statistic TcpExtTCPRenoReorder. 
-# TYPE node_netstat_TcpExt_TCPRenoReorder untyped -node_netstat_TcpExt_TCPRenoReorder 0 -# HELP node_netstat_TcpExt_TCPReqQFullDoCookies Statistic TcpExtTCPReqQFullDoCookies. -# TYPE node_netstat_TcpExt_TCPReqQFullDoCookies untyped -node_netstat_TcpExt_TCPReqQFullDoCookies 0 -# HELP node_netstat_TcpExt_TCPReqQFullDrop Statistic TcpExtTCPReqQFullDrop. -# TYPE node_netstat_TcpExt_TCPReqQFullDrop untyped -node_netstat_TcpExt_TCPReqQFullDrop 0 -# HELP node_netstat_TcpExt_TCPSACKDiscard Statistic TcpExtTCPSACKDiscard. -# TYPE node_netstat_TcpExt_TCPSACKDiscard untyped -node_netstat_TcpExt_TCPSACKDiscard 0 -# HELP node_netstat_TcpExt_TCPSACKReneging Statistic TcpExtTCPSACKReneging. -# TYPE node_netstat_TcpExt_TCPSACKReneging untyped -node_netstat_TcpExt_TCPSACKReneging 0 -# HELP node_netstat_TcpExt_TCPSACKReorder Statistic TcpExtTCPSACKReorder. -# TYPE node_netstat_TcpExt_TCPSACKReorder untyped -node_netstat_TcpExt_TCPSACKReorder 0 -# HELP node_netstat_TcpExt_TCPSYNChallenge Statistic TcpExtTCPSYNChallenge. -# TYPE node_netstat_TcpExt_TCPSYNChallenge untyped -node_netstat_TcpExt_TCPSYNChallenge 2 -# HELP node_netstat_TcpExt_TCPSackFailures Statistic TcpExtTCPSackFailures. -# TYPE node_netstat_TcpExt_TCPSackFailures untyped -node_netstat_TcpExt_TCPSackFailures 1 -# HELP node_netstat_TcpExt_TCPSackMerged Statistic TcpExtTCPSackMerged. -# TYPE node_netstat_TcpExt_TCPSackMerged untyped -node_netstat_TcpExt_TCPSackMerged 2 -# HELP node_netstat_TcpExt_TCPSackRecovery Statistic TcpExtTCPSackRecovery. -# TYPE node_netstat_TcpExt_TCPSackRecovery untyped -node_netstat_TcpExt_TCPSackRecovery 1 -# HELP node_netstat_TcpExt_TCPSackRecoveryFail Statistic TcpExtTCPSackRecoveryFail. -# TYPE node_netstat_TcpExt_TCPSackRecoveryFail untyped -node_netstat_TcpExt_TCPSackRecoveryFail 0 -# HELP node_netstat_TcpExt_TCPSackShiftFallback Statistic TcpExtTCPSackShiftFallback. 
-# TYPE node_netstat_TcpExt_TCPSackShiftFallback untyped -node_netstat_TcpExt_TCPSackShiftFallback 5 -# HELP node_netstat_TcpExt_TCPSackShifted Statistic TcpExtTCPSackShifted. -# TYPE node_netstat_TcpExt_TCPSackShifted untyped -node_netstat_TcpExt_TCPSackShifted 0 -# HELP node_netstat_TcpExt_TCPSchedulerFailed Statistic TcpExtTCPSchedulerFailed. -# TYPE node_netstat_TcpExt_TCPSchedulerFailed untyped -node_netstat_TcpExt_TCPSchedulerFailed 0 -# HELP node_netstat_TcpExt_TCPSlowStartRetrans Statistic TcpExtTCPSlowStartRetrans. -# TYPE node_netstat_TcpExt_TCPSlowStartRetrans untyped -node_netstat_TcpExt_TCPSlowStartRetrans 1 -# HELP node_netstat_TcpExt_TCPSpuriousRTOs Statistic TcpExtTCPSpuriousRTOs. -# TYPE node_netstat_TcpExt_TCPSpuriousRTOs untyped -node_netstat_TcpExt_TCPSpuriousRTOs 0 -# HELP node_netstat_TcpExt_TCPTSReorder Statistic TcpExtTCPTSReorder. -# TYPE node_netstat_TcpExt_TCPTSReorder untyped -node_netstat_TcpExt_TCPTSReorder 0 -# HELP node_netstat_TcpExt_TCPTimeWaitOverflow Statistic TcpExtTCPTimeWaitOverflow. -# TYPE node_netstat_TcpExt_TCPTimeWaitOverflow untyped -node_netstat_TcpExt_TCPTimeWaitOverflow 0 # HELP node_netstat_TcpExt_TCPTimeouts Statistic TcpExtTCPTimeouts. # TYPE node_netstat_TcpExt_TCPTimeouts untyped node_netstat_TcpExt_TCPTimeouts 115 -# HELP node_netstat_TcpExt_TW Statistic TcpExtTW. -# TYPE node_netstat_TcpExt_TW untyped -node_netstat_TcpExt_TW 388812 -# HELP node_netstat_TcpExt_TWKilled Statistic TcpExtTWKilled. -# TYPE node_netstat_TcpExt_TWKilled untyped -node_netstat_TcpExt_TWKilled 0 -# HELP node_netstat_TcpExt_TWRecycled Statistic TcpExtTWRecycled. -# TYPE node_netstat_TcpExt_TWRecycled untyped -node_netstat_TcpExt_TWRecycled 0 # HELP node_netstat_Tcp_ActiveOpens Statistic TcpActiveOpens. # TYPE node_netstat_Tcp_ActiveOpens untyped node_netstat_Tcp_ActiveOpens 3556 -# HELP node_netstat_Tcp_AttemptFails Statistic TcpAttemptFails. 
-# TYPE node_netstat_Tcp_AttemptFails untyped -node_netstat_Tcp_AttemptFails 341 # HELP node_netstat_Tcp_CurrEstab Statistic TcpCurrEstab. # TYPE node_netstat_Tcp_CurrEstab untyped node_netstat_Tcp_CurrEstab 0 -# HELP node_netstat_Tcp_EstabResets Statistic TcpEstabResets. -# TYPE node_netstat_Tcp_EstabResets untyped -node_netstat_Tcp_EstabResets 161 -# HELP node_netstat_Tcp_InCsumErrors Statistic TcpInCsumErrors. -# TYPE node_netstat_Tcp_InCsumErrors untyped -node_netstat_Tcp_InCsumErrors 0 # HELP node_netstat_Tcp_InErrs Statistic TcpInErrs. # TYPE node_netstat_Tcp_InErrs untyped node_netstat_Tcp_InErrs 5 # HELP node_netstat_Tcp_InSegs Statistic TcpInSegs. # TYPE node_netstat_Tcp_InSegs untyped node_netstat_Tcp_InSegs 5.7252008e+07 -# HELP node_netstat_Tcp_MaxConn Statistic TcpMaxConn. -# TYPE node_netstat_Tcp_MaxConn untyped -node_netstat_Tcp_MaxConn -1 # HELP node_netstat_Tcp_OutRsts Statistic TcpOutRsts. # TYPE node_netstat_Tcp_OutRsts untyped node_netstat_Tcp_OutRsts 1003 @@ -2157,21 +2267,6 @@ node_netstat_Tcp_PassiveOpens 230 # HELP node_netstat_Tcp_RetransSegs Statistic TcpRetransSegs. # TYPE node_netstat_Tcp_RetransSegs untyped node_netstat_Tcp_RetransSegs 227 -# HELP node_netstat_Tcp_RtoAlgorithm Statistic TcpRtoAlgorithm. -# TYPE node_netstat_Tcp_RtoAlgorithm untyped -node_netstat_Tcp_RtoAlgorithm 1 -# HELP node_netstat_Tcp_RtoMax Statistic TcpRtoMax. -# TYPE node_netstat_Tcp_RtoMax untyped -node_netstat_Tcp_RtoMax 120000 -# HELP node_netstat_Tcp_RtoMin Statistic TcpRtoMin. -# TYPE node_netstat_Tcp_RtoMin untyped -node_netstat_Tcp_RtoMin 200 -# HELP node_netstat_Udp6_IgnoredMulti Statistic Udp6IgnoredMulti. -# TYPE node_netstat_Udp6_IgnoredMulti untyped -node_netstat_Udp6_IgnoredMulti 0 -# HELP node_netstat_Udp6_InCsumErrors Statistic Udp6InCsumErrors. -# TYPE node_netstat_Udp6_InCsumErrors untyped -node_netstat_Udp6_InCsumErrors 0 # HELP node_netstat_Udp6_InDatagrams Statistic Udp6InDatagrams. 
# TYPE node_netstat_Udp6_InDatagrams untyped node_netstat_Udp6_InDatagrams 0 @@ -2186,55 +2281,16 @@ node_netstat_Udp6_NoPorts 0 node_netstat_Udp6_OutDatagrams 0 # HELP node_netstat_Udp6_RcvbufErrors Statistic Udp6RcvbufErrors. # TYPE node_netstat_Udp6_RcvbufErrors untyped -node_netstat_Udp6_RcvbufErrors 0 +node_netstat_Udp6_RcvbufErrors 9 # HELP node_netstat_Udp6_SndbufErrors Statistic Udp6SndbufErrors. # TYPE node_netstat_Udp6_SndbufErrors untyped -node_netstat_Udp6_SndbufErrors 0 -# HELP node_netstat_UdpLite6_InCsumErrors Statistic UdpLite6InCsumErrors. -# TYPE node_netstat_UdpLite6_InCsumErrors untyped -node_netstat_UdpLite6_InCsumErrors 0 -# HELP node_netstat_UdpLite6_InDatagrams Statistic UdpLite6InDatagrams. -# TYPE node_netstat_UdpLite6_InDatagrams untyped -node_netstat_UdpLite6_InDatagrams 0 +node_netstat_Udp6_SndbufErrors 8 # HELP node_netstat_UdpLite6_InErrors Statistic UdpLite6InErrors. # TYPE node_netstat_UdpLite6_InErrors untyped node_netstat_UdpLite6_InErrors 0 -# HELP node_netstat_UdpLite6_NoPorts Statistic UdpLite6NoPorts. -# TYPE node_netstat_UdpLite6_NoPorts untyped -node_netstat_UdpLite6_NoPorts 0 -# HELP node_netstat_UdpLite6_OutDatagrams Statistic UdpLite6OutDatagrams. -# TYPE node_netstat_UdpLite6_OutDatagrams untyped -node_netstat_UdpLite6_OutDatagrams 0 -# HELP node_netstat_UdpLite6_RcvbufErrors Statistic UdpLite6RcvbufErrors. -# TYPE node_netstat_UdpLite6_RcvbufErrors untyped -node_netstat_UdpLite6_RcvbufErrors 0 -# HELP node_netstat_UdpLite6_SndbufErrors Statistic UdpLite6SndbufErrors. -# TYPE node_netstat_UdpLite6_SndbufErrors untyped -node_netstat_UdpLite6_SndbufErrors 0 -# HELP node_netstat_UdpLite_InCsumErrors Statistic UdpLiteInCsumErrors. -# TYPE node_netstat_UdpLite_InCsumErrors untyped -node_netstat_UdpLite_InCsumErrors 0 -# HELP node_netstat_UdpLite_InDatagrams Statistic UdpLiteInDatagrams. 
-# TYPE node_netstat_UdpLite_InDatagrams untyped -node_netstat_UdpLite_InDatagrams 0 # HELP node_netstat_UdpLite_InErrors Statistic UdpLiteInErrors. # TYPE node_netstat_UdpLite_InErrors untyped node_netstat_UdpLite_InErrors 0 -# HELP node_netstat_UdpLite_NoPorts Statistic UdpLiteNoPorts. -# TYPE node_netstat_UdpLite_NoPorts untyped -node_netstat_UdpLite_NoPorts 0 -# HELP node_netstat_UdpLite_OutDatagrams Statistic UdpLiteOutDatagrams. -# TYPE node_netstat_UdpLite_OutDatagrams untyped -node_netstat_UdpLite_OutDatagrams 0 -# HELP node_netstat_UdpLite_RcvbufErrors Statistic UdpLiteRcvbufErrors. -# TYPE node_netstat_UdpLite_RcvbufErrors untyped -node_netstat_UdpLite_RcvbufErrors 0 -# HELP node_netstat_UdpLite_SndbufErrors Statistic UdpLiteSndbufErrors. -# TYPE node_netstat_UdpLite_SndbufErrors untyped -node_netstat_UdpLite_SndbufErrors 0 -# HELP node_netstat_Udp_InCsumErrors Statistic UdpInCsumErrors. -# TYPE node_netstat_Udp_InCsumErrors untyped -node_netstat_Udp_InCsumErrors 0 # HELP node_netstat_Udp_InDatagrams Statistic UdpInDatagrams. # TYPE node_netstat_Udp_InDatagrams untyped node_netstat_Udp_InDatagrams 88542 @@ -2249,276 +2305,631 @@ node_netstat_Udp_NoPorts 120 node_netstat_Udp_OutDatagrams 53028 # HELP node_netstat_Udp_RcvbufErrors Statistic UdpRcvbufErrors. # TYPE node_netstat_Udp_RcvbufErrors untyped -node_netstat_Udp_RcvbufErrors 0 +node_netstat_Udp_RcvbufErrors 9 # HELP node_netstat_Udp_SndbufErrors Statistic UdpSndbufErrors. # TYPE node_netstat_Udp_SndbufErrors untyped -node_netstat_Udp_SndbufErrors 0 -# HELP node_network_receive_bytes Network device statistic receive_bytes. 
-# TYPE node_network_receive_bytes gauge -node_network_receive_bytes{device="docker0"} 6.4910168e+07 -node_network_receive_bytes{device="eth0"} 6.8210035552e+10 -node_network_receive_bytes{device="lo"} 4.35303245e+08 -node_network_receive_bytes{device="lxcbr0"} 0 -node_network_receive_bytes{device="tun0"} 1888 -node_network_receive_bytes{device="veth4B09XN"} 648 -node_network_receive_bytes{device="wlan0"} 1.0437182923e+10 -# HELP node_network_receive_compressed Network device statistic receive_compressed. -# TYPE node_network_receive_compressed gauge -node_network_receive_compressed{device="docker0"} 0 -node_network_receive_compressed{device="eth0"} 0 -node_network_receive_compressed{device="lo"} 0 -node_network_receive_compressed{device="lxcbr0"} 0 -node_network_receive_compressed{device="tun0"} 0 -node_network_receive_compressed{device="veth4B09XN"} 0 -node_network_receive_compressed{device="wlan0"} 0 -# HELP node_network_receive_drop Network device statistic receive_drop. -# TYPE node_network_receive_drop gauge -node_network_receive_drop{device="docker0"} 0 -node_network_receive_drop{device="eth0"} 0 -node_network_receive_drop{device="lo"} 0 -node_network_receive_drop{device="lxcbr0"} 0 -node_network_receive_drop{device="tun0"} 0 -node_network_receive_drop{device="veth4B09XN"} 0 -node_network_receive_drop{device="wlan0"} 0 -# HELP node_network_receive_errs Network device statistic receive_errs. -# TYPE node_network_receive_errs gauge -node_network_receive_errs{device="docker0"} 0 -node_network_receive_errs{device="eth0"} 0 -node_network_receive_errs{device="lo"} 0 -node_network_receive_errs{device="lxcbr0"} 0 -node_network_receive_errs{device="tun0"} 0 -node_network_receive_errs{device="veth4B09XN"} 0 -node_network_receive_errs{device="wlan0"} 0 -# HELP node_network_receive_fifo Network device statistic receive_fifo. 
-# TYPE node_network_receive_fifo gauge -node_network_receive_fifo{device="docker0"} 0 -node_network_receive_fifo{device="eth0"} 0 -node_network_receive_fifo{device="lo"} 0 -node_network_receive_fifo{device="lxcbr0"} 0 -node_network_receive_fifo{device="tun0"} 0 -node_network_receive_fifo{device="veth4B09XN"} 0 -node_network_receive_fifo{device="wlan0"} 0 -# HELP node_network_receive_frame Network device statistic receive_frame. -# TYPE node_network_receive_frame gauge -node_network_receive_frame{device="docker0"} 0 -node_network_receive_frame{device="eth0"} 0 -node_network_receive_frame{device="lo"} 0 -node_network_receive_frame{device="lxcbr0"} 0 -node_network_receive_frame{device="tun0"} 0 -node_network_receive_frame{device="veth4B09XN"} 0 -node_network_receive_frame{device="wlan0"} 0 -# HELP node_network_receive_multicast Network device statistic receive_multicast. -# TYPE node_network_receive_multicast gauge -node_network_receive_multicast{device="docker0"} 0 -node_network_receive_multicast{device="eth0"} 0 -node_network_receive_multicast{device="lo"} 0 -node_network_receive_multicast{device="lxcbr0"} 0 -node_network_receive_multicast{device="tun0"} 0 -node_network_receive_multicast{device="veth4B09XN"} 0 -node_network_receive_multicast{device="wlan0"} 0 -# HELP node_network_receive_packets Network device statistic receive_packets. -# TYPE node_network_receive_packets gauge -node_network_receive_packets{device="docker0"} 1.065585e+06 -node_network_receive_packets{device="eth0"} 5.20993275e+08 -node_network_receive_packets{device="lo"} 1.832522e+06 -node_network_receive_packets{device="lxcbr0"} 0 -node_network_receive_packets{device="tun0"} 24 -node_network_receive_packets{device="veth4B09XN"} 8 -node_network_receive_packets{device="wlan0"} 1.3899359e+07 -# HELP node_network_transmit_bytes Network device statistic transmit_bytes. 
-# TYPE node_network_transmit_bytes gauge -node_network_transmit_bytes{device="docker0"} 2.681662018e+09 -node_network_transmit_bytes{device="eth0"} 9.315587528e+09 -node_network_transmit_bytes{device="lo"} 4.35303245e+08 -node_network_transmit_bytes{device="lxcbr0"} 2.630299e+06 -node_network_transmit_bytes{device="tun0"} 67120 -node_network_transmit_bytes{device="veth4B09XN"} 1.943284e+06 -node_network_transmit_bytes{device="wlan0"} 2.85164936e+09 -# HELP node_network_transmit_compressed Network device statistic transmit_compressed. -# TYPE node_network_transmit_compressed gauge -node_network_transmit_compressed{device="docker0"} 0 -node_network_transmit_compressed{device="eth0"} 0 -node_network_transmit_compressed{device="lo"} 0 -node_network_transmit_compressed{device="lxcbr0"} 0 -node_network_transmit_compressed{device="tun0"} 0 -node_network_transmit_compressed{device="veth4B09XN"} 0 -node_network_transmit_compressed{device="wlan0"} 0 -# HELP node_network_transmit_drop Network device statistic transmit_drop. -# TYPE node_network_transmit_drop gauge -node_network_transmit_drop{device="docker0"} 0 -node_network_transmit_drop{device="eth0"} 0 -node_network_transmit_drop{device="lo"} 0 -node_network_transmit_drop{device="lxcbr0"} 0 -node_network_transmit_drop{device="tun0"} 0 -node_network_transmit_drop{device="veth4B09XN"} 0 -node_network_transmit_drop{device="wlan0"} 0 -# HELP node_network_transmit_errs Network device statistic transmit_errs. -# TYPE node_network_transmit_errs gauge -node_network_transmit_errs{device="docker0"} 0 -node_network_transmit_errs{device="eth0"} 0 -node_network_transmit_errs{device="lo"} 0 -node_network_transmit_errs{device="lxcbr0"} 0 -node_network_transmit_errs{device="tun0"} 0 -node_network_transmit_errs{device="veth4B09XN"} 0 -node_network_transmit_errs{device="wlan0"} 0 -# HELP node_network_transmit_fifo Network device statistic transmit_fifo. 
-# TYPE node_network_transmit_fifo gauge -node_network_transmit_fifo{device="docker0"} 0 -node_network_transmit_fifo{device="eth0"} 0 -node_network_transmit_fifo{device="lo"} 0 -node_network_transmit_fifo{device="lxcbr0"} 0 -node_network_transmit_fifo{device="tun0"} 0 -node_network_transmit_fifo{device="veth4B09XN"} 0 -node_network_transmit_fifo{device="wlan0"} 0 -# HELP node_network_transmit_frame Network device statistic transmit_frame. -# TYPE node_network_transmit_frame gauge -node_network_transmit_frame{device="docker0"} 0 -node_network_transmit_frame{device="eth0"} 0 -node_network_transmit_frame{device="lo"} 0 -node_network_transmit_frame{device="lxcbr0"} 0 -node_network_transmit_frame{device="tun0"} 0 -node_network_transmit_frame{device="veth4B09XN"} 0 -node_network_transmit_frame{device="wlan0"} 0 -# HELP node_network_transmit_multicast Network device statistic transmit_multicast. -# TYPE node_network_transmit_multicast gauge -node_network_transmit_multicast{device="docker0"} 0 -node_network_transmit_multicast{device="eth0"} 0 -node_network_transmit_multicast{device="lo"} 0 -node_network_transmit_multicast{device="lxcbr0"} 0 -node_network_transmit_multicast{device="tun0"} 0 -node_network_transmit_multicast{device="veth4B09XN"} 0 -node_network_transmit_multicast{device="wlan0"} 0 -# HELP node_network_transmit_packets Network device statistic transmit_packets. -# TYPE node_network_transmit_packets gauge -node_network_transmit_packets{device="docker0"} 1.929779e+06 -node_network_transmit_packets{device="eth0"} 4.3451486e+07 -node_network_transmit_packets{device="lo"} 1.832522e+06 -node_network_transmit_packets{device="lxcbr0"} 28339 -node_network_transmit_packets{device="tun0"} 934 -node_network_transmit_packets{device="veth4B09XN"} 10640 -node_network_transmit_packets{device="wlan0"} 1.17262e+07 +node_netstat_Udp_SndbufErrors 8 +# HELP node_network_address_assign_type address_assign_type value of /sys/class/net/. 
+# TYPE node_network_address_assign_type gauge +node_network_address_assign_type{device="bond0"} 3 +node_network_address_assign_type{device="eth0"} 3 +# HELP node_network_carrier carrier value of /sys/class/net/. +# TYPE node_network_carrier gauge +node_network_carrier{device="bond0"} 1 +node_network_carrier{device="eth0"} 1 +# HELP node_network_carrier_changes_total carrier_changes_total value of /sys/class/net/. +# TYPE node_network_carrier_changes_total counter +node_network_carrier_changes_total{device="bond0"} 2 +node_network_carrier_changes_total{device="eth0"} 2 +# HELP node_network_carrier_down_changes_total carrier_down_changes_total value of /sys/class/net/. +# TYPE node_network_carrier_down_changes_total counter +node_network_carrier_down_changes_total{device="bond0"} 1 +node_network_carrier_down_changes_total{device="eth0"} 1 +# HELP node_network_carrier_up_changes_total carrier_up_changes_total value of /sys/class/net/. +# TYPE node_network_carrier_up_changes_total counter +node_network_carrier_up_changes_total{device="bond0"} 1 +node_network_carrier_up_changes_total{device="eth0"} 1 +# HELP node_network_device_id device_id value of /sys/class/net/. +# TYPE node_network_device_id gauge +node_network_device_id{device="bond0"} 32 +node_network_device_id{device="eth0"} 32 +# HELP node_network_dormant dormant value of /sys/class/net/. +# TYPE node_network_dormant gauge +node_network_dormant{device="bond0"} 1 +node_network_dormant{device="eth0"} 1 +# HELP node_network_flags flags value of /sys/class/net/. +# TYPE node_network_flags gauge +node_network_flags{device="bond0"} 4867 +node_network_flags{device="eth0"} 4867 +# HELP node_network_iface_id iface_id value of /sys/class/net/. +# TYPE node_network_iface_id gauge +node_network_iface_id{device="bond0"} 2 +node_network_iface_id{device="eth0"} 2 +# HELP node_network_iface_link iface_link value of /sys/class/net/. 
+# TYPE node_network_iface_link gauge +node_network_iface_link{device="bond0"} 2 +node_network_iface_link{device="eth0"} 2 +# HELP node_network_iface_link_mode iface_link_mode value of /sys/class/net/. +# TYPE node_network_iface_link_mode gauge +node_network_iface_link_mode{device="bond0"} 1 +node_network_iface_link_mode{device="eth0"} 1 +# HELP node_network_info Non-numeric data from /sys/class/net/, value is always 1. +# TYPE node_network_info gauge +node_network_info{address="01:01:01:01:01:01",broadcast="ff:ff:ff:ff:ff:ff",device="bond0",duplex="full",ifalias="",operstate="up"} 1 +node_network_info{address="01:01:01:01:01:01",broadcast="ff:ff:ff:ff:ff:ff",device="eth0",duplex="full",ifalias="",operstate="up"} 1 +# HELP node_network_mtu_bytes mtu_bytes value of /sys/class/net/. +# TYPE node_network_mtu_bytes gauge +node_network_mtu_bytes{device="bond0"} 1500 +node_network_mtu_bytes{device="eth0"} 1500 +# HELP node_network_name_assign_type name_assign_type value of /sys/class/net/. +# TYPE node_network_name_assign_type gauge +node_network_name_assign_type{device="bond0"} 2 +node_network_name_assign_type{device="eth0"} 2 +# HELP node_network_net_dev_group net_dev_group value of /sys/class/net/. +# TYPE node_network_net_dev_group gauge +node_network_net_dev_group{device="bond0"} 0 +node_network_net_dev_group{device="eth0"} 0 +# HELP node_network_protocol_type protocol_type value of /sys/class/net/. +# TYPE node_network_protocol_type gauge +node_network_protocol_type{device="bond0"} 1 +node_network_protocol_type{device="eth0"} 1 +# HELP node_network_receive_bytes_total Network device statistic receive_bytes. 
+# TYPE node_network_receive_bytes_total counter +node_network_receive_bytes_total{device="docker0"} 6.4910168e+07 +node_network_receive_bytes_total{device="eth0"} 6.8210035552e+10 +node_network_receive_bytes_total{device="flannel.1"} 1.8144009813e+10 +node_network_receive_bytes_total{device="ibr10:30"} 0 +node_network_receive_bytes_total{device="lo"} 4.35303245e+08 +node_network_receive_bytes_total{device="lxcbr0"} 0 +node_network_receive_bytes_total{device="tun0"} 1888 +node_network_receive_bytes_total{device="veth4B09XN"} 648 +node_network_receive_bytes_total{device="wlan0"} 1.0437182923e+10 +node_network_receive_bytes_total{device="💩0"} 5.7750104e+07 +# HELP node_network_receive_compressed_total Network device statistic receive_compressed. +# TYPE node_network_receive_compressed_total counter +node_network_receive_compressed_total{device="docker0"} 0 +node_network_receive_compressed_total{device="eth0"} 0 +node_network_receive_compressed_total{device="flannel.1"} 0 +node_network_receive_compressed_total{device="ibr10:30"} 0 +node_network_receive_compressed_total{device="lo"} 0 +node_network_receive_compressed_total{device="lxcbr0"} 0 +node_network_receive_compressed_total{device="tun0"} 0 +node_network_receive_compressed_total{device="veth4B09XN"} 0 +node_network_receive_compressed_total{device="wlan0"} 0 +node_network_receive_compressed_total{device="💩0"} 0 +# HELP node_network_receive_drop_total Network device statistic receive_drop. 
+# TYPE node_network_receive_drop_total counter +node_network_receive_drop_total{device="docker0"} 0 +node_network_receive_drop_total{device="eth0"} 0 +node_network_receive_drop_total{device="flannel.1"} 0 +node_network_receive_drop_total{device="ibr10:30"} 0 +node_network_receive_drop_total{device="lo"} 0 +node_network_receive_drop_total{device="lxcbr0"} 0 +node_network_receive_drop_total{device="tun0"} 0 +node_network_receive_drop_total{device="veth4B09XN"} 0 +node_network_receive_drop_total{device="wlan0"} 0 +node_network_receive_drop_total{device="💩0"} 0 +# HELP node_network_receive_errs_total Network device statistic receive_errs. +# TYPE node_network_receive_errs_total counter +node_network_receive_errs_total{device="docker0"} 0 +node_network_receive_errs_total{device="eth0"} 0 +node_network_receive_errs_total{device="flannel.1"} 0 +node_network_receive_errs_total{device="ibr10:30"} 0 +node_network_receive_errs_total{device="lo"} 0 +node_network_receive_errs_total{device="lxcbr0"} 0 +node_network_receive_errs_total{device="tun0"} 0 +node_network_receive_errs_total{device="veth4B09XN"} 0 +node_network_receive_errs_total{device="wlan0"} 0 +node_network_receive_errs_total{device="💩0"} 0 +# HELP node_network_receive_fifo_total Network device statistic receive_fifo. +# TYPE node_network_receive_fifo_total counter +node_network_receive_fifo_total{device="docker0"} 0 +node_network_receive_fifo_total{device="eth0"} 0 +node_network_receive_fifo_total{device="flannel.1"} 0 +node_network_receive_fifo_total{device="ibr10:30"} 0 +node_network_receive_fifo_total{device="lo"} 0 +node_network_receive_fifo_total{device="lxcbr0"} 0 +node_network_receive_fifo_total{device="tun0"} 0 +node_network_receive_fifo_total{device="veth4B09XN"} 0 +node_network_receive_fifo_total{device="wlan0"} 0 +node_network_receive_fifo_total{device="💩0"} 0 +# HELP node_network_receive_frame_total Network device statistic receive_frame. 
+# TYPE node_network_receive_frame_total counter +node_network_receive_frame_total{device="docker0"} 0 +node_network_receive_frame_total{device="eth0"} 0 +node_network_receive_frame_total{device="flannel.1"} 0 +node_network_receive_frame_total{device="ibr10:30"} 0 +node_network_receive_frame_total{device="lo"} 0 +node_network_receive_frame_total{device="lxcbr0"} 0 +node_network_receive_frame_total{device="tun0"} 0 +node_network_receive_frame_total{device="veth4B09XN"} 0 +node_network_receive_frame_total{device="wlan0"} 0 +node_network_receive_frame_total{device="💩0"} 0 +# HELP node_network_receive_multicast_total Network device statistic receive_multicast. +# TYPE node_network_receive_multicast_total counter +node_network_receive_multicast_total{device="docker0"} 0 +node_network_receive_multicast_total{device="eth0"} 0 +node_network_receive_multicast_total{device="flannel.1"} 0 +node_network_receive_multicast_total{device="ibr10:30"} 0 +node_network_receive_multicast_total{device="lo"} 0 +node_network_receive_multicast_total{device="lxcbr0"} 0 +node_network_receive_multicast_total{device="tun0"} 0 +node_network_receive_multicast_total{device="veth4B09XN"} 0 +node_network_receive_multicast_total{device="wlan0"} 0 +node_network_receive_multicast_total{device="💩0"} 72 +# HELP node_network_receive_packets_total Network device statistic receive_packets. 
+# TYPE node_network_receive_packets_total counter +node_network_receive_packets_total{device="docker0"} 1.065585e+06 +node_network_receive_packets_total{device="eth0"} 5.20993275e+08 +node_network_receive_packets_total{device="flannel.1"} 2.28499337e+08 +node_network_receive_packets_total{device="ibr10:30"} 0 +node_network_receive_packets_total{device="lo"} 1.832522e+06 +node_network_receive_packets_total{device="lxcbr0"} 0 +node_network_receive_packets_total{device="tun0"} 24 +node_network_receive_packets_total{device="veth4B09XN"} 8 +node_network_receive_packets_total{device="wlan0"} 1.3899359e+07 +node_network_receive_packets_total{device="💩0"} 105557 +# HELP node_network_speed_bytes speed_bytes value of /sys/class/net/. +# TYPE node_network_speed_bytes gauge +node_network_speed_bytes{device="eth0"} 1.25e+08 +# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes. +# TYPE node_network_transmit_bytes_total counter +node_network_transmit_bytes_total{device="docker0"} 2.681662018e+09 +node_network_transmit_bytes_total{device="eth0"} 9.315587528e+09 +node_network_transmit_bytes_total{device="flannel.1"} 2.0758990068e+10 +node_network_transmit_bytes_total{device="ibr10:30"} 0 +node_network_transmit_bytes_total{device="lo"} 4.35303245e+08 +node_network_transmit_bytes_total{device="lxcbr0"} 2.630299e+06 +node_network_transmit_bytes_total{device="tun0"} 67120 +node_network_transmit_bytes_total{device="veth4B09XN"} 1.943284e+06 +node_network_transmit_bytes_total{device="wlan0"} 2.85164936e+09 +node_network_transmit_bytes_total{device="💩0"} 4.04570255e+08 +# HELP node_network_transmit_carrier_total Network device statistic transmit_carrier. 
+# TYPE node_network_transmit_carrier_total counter +node_network_transmit_carrier_total{device="docker0"} 0 +node_network_transmit_carrier_total{device="eth0"} 0 +node_network_transmit_carrier_total{device="flannel.1"} 0 +node_network_transmit_carrier_total{device="ibr10:30"} 0 +node_network_transmit_carrier_total{device="lo"} 0 +node_network_transmit_carrier_total{device="lxcbr0"} 0 +node_network_transmit_carrier_total{device="tun0"} 0 +node_network_transmit_carrier_total{device="veth4B09XN"} 0 +node_network_transmit_carrier_total{device="wlan0"} 0 +node_network_transmit_carrier_total{device="💩0"} 0 +# HELP node_network_transmit_colls_total Network device statistic transmit_colls. +# TYPE node_network_transmit_colls_total counter +node_network_transmit_colls_total{device="docker0"} 0 +node_network_transmit_colls_total{device="eth0"} 0 +node_network_transmit_colls_total{device="flannel.1"} 0 +node_network_transmit_colls_total{device="ibr10:30"} 0 +node_network_transmit_colls_total{device="lo"} 0 +node_network_transmit_colls_total{device="lxcbr0"} 0 +node_network_transmit_colls_total{device="tun0"} 0 +node_network_transmit_colls_total{device="veth4B09XN"} 0 +node_network_transmit_colls_total{device="wlan0"} 0 +node_network_transmit_colls_total{device="💩0"} 0 +# HELP node_network_transmit_compressed_total Network device statistic transmit_compressed. 
+# TYPE node_network_transmit_compressed_total counter +node_network_transmit_compressed_total{device="docker0"} 0 +node_network_transmit_compressed_total{device="eth0"} 0 +node_network_transmit_compressed_total{device="flannel.1"} 0 +node_network_transmit_compressed_total{device="ibr10:30"} 0 +node_network_transmit_compressed_total{device="lo"} 0 +node_network_transmit_compressed_total{device="lxcbr0"} 0 +node_network_transmit_compressed_total{device="tun0"} 0 +node_network_transmit_compressed_total{device="veth4B09XN"} 0 +node_network_transmit_compressed_total{device="wlan0"} 0 +node_network_transmit_compressed_total{device="💩0"} 0 +# HELP node_network_transmit_drop_total Network device statistic transmit_drop. +# TYPE node_network_transmit_drop_total counter +node_network_transmit_drop_total{device="docker0"} 0 +node_network_transmit_drop_total{device="eth0"} 0 +node_network_transmit_drop_total{device="flannel.1"} 64 +node_network_transmit_drop_total{device="ibr10:30"} 0 +node_network_transmit_drop_total{device="lo"} 0 +node_network_transmit_drop_total{device="lxcbr0"} 0 +node_network_transmit_drop_total{device="tun0"} 0 +node_network_transmit_drop_total{device="veth4B09XN"} 0 +node_network_transmit_drop_total{device="wlan0"} 0 +node_network_transmit_drop_total{device="💩0"} 0 +# HELP node_network_transmit_errs_total Network device statistic transmit_errs. 
+# TYPE node_network_transmit_errs_total counter +node_network_transmit_errs_total{device="docker0"} 0 +node_network_transmit_errs_total{device="eth0"} 0 +node_network_transmit_errs_total{device="flannel.1"} 0 +node_network_transmit_errs_total{device="ibr10:30"} 0 +node_network_transmit_errs_total{device="lo"} 0 +node_network_transmit_errs_total{device="lxcbr0"} 0 +node_network_transmit_errs_total{device="tun0"} 0 +node_network_transmit_errs_total{device="veth4B09XN"} 0 +node_network_transmit_errs_total{device="wlan0"} 0 +node_network_transmit_errs_total{device="💩0"} 0 +# HELP node_network_transmit_fifo_total Network device statistic transmit_fifo. +# TYPE node_network_transmit_fifo_total counter +node_network_transmit_fifo_total{device="docker0"} 0 +node_network_transmit_fifo_total{device="eth0"} 0 +node_network_transmit_fifo_total{device="flannel.1"} 0 +node_network_transmit_fifo_total{device="ibr10:30"} 0 +node_network_transmit_fifo_total{device="lo"} 0 +node_network_transmit_fifo_total{device="lxcbr0"} 0 +node_network_transmit_fifo_total{device="tun0"} 0 +node_network_transmit_fifo_total{device="veth4B09XN"} 0 +node_network_transmit_fifo_total{device="wlan0"} 0 +node_network_transmit_fifo_total{device="💩0"} 0 +# HELP node_network_transmit_packets_total Network device statistic transmit_packets. 
+# TYPE node_network_transmit_packets_total counter +node_network_transmit_packets_total{device="docker0"} 1.929779e+06 +node_network_transmit_packets_total{device="eth0"} 4.3451486e+07 +node_network_transmit_packets_total{device="flannel.1"} 2.58369223e+08 +node_network_transmit_packets_total{device="ibr10:30"} 0 +node_network_transmit_packets_total{device="lo"} 1.832522e+06 +node_network_transmit_packets_total{device="lxcbr0"} 28339 +node_network_transmit_packets_total{device="tun0"} 934 +node_network_transmit_packets_total{device="veth4B09XN"} 10640 +node_network_transmit_packets_total{device="wlan0"} 1.17262e+07 +node_network_transmit_packets_total{device="💩0"} 304261 +# HELP node_network_transmit_queue_length transmit_queue_length value of /sys/class/net/. +# TYPE node_network_transmit_queue_length gauge +node_network_transmit_queue_length{device="bond0"} 1000 +node_network_transmit_queue_length{device="eth0"} 1000 +# HELP node_network_up Value is 1 if operstate is 'up', 0 otherwise. +# TYPE node_network_up gauge +node_network_up{device="bond0"} 1 +node_network_up{device="eth0"} 1 # HELP node_nf_conntrack_entries Number of currently allocated flow entries for connection tracking. # TYPE node_nf_conntrack_entries gauge node_nf_conntrack_entries 123 # HELP node_nf_conntrack_entries_limit Maximum size of connection tracking table. # TYPE node_nf_conntrack_entries_limit gauge node_nf_conntrack_entries_limit 65536 -# HELP node_nfs_net_connections Number of connections at the network layer. -# TYPE node_nfs_net_connections counter -node_nfs_net_connections{protocol="tcp"} 45 -# HELP node_nfs_net_reads Number of reads at the network layer. -# TYPE node_nfs_net_reads counter -node_nfs_net_reads{protocol="tcp"} 69 -node_nfs_net_reads{protocol="udp"} 70 -# HELP node_nfs_procedures Number of NFS procedures invoked. 
-# TYPE node_nfs_procedures counter -node_nfs_procedures{procedure="access",version="3"} 1.17661341e+08 -node_nfs_procedures{procedure="access",version="4"} 58 -node_nfs_procedures{procedure="close",version="4"} 28 -node_nfs_procedures{procedure="commit",version="3"} 23729 -node_nfs_procedures{procedure="commit",version="4"} 83 -node_nfs_procedures{procedure="create",version="2"} 52 -node_nfs_procedures{procedure="create",version="3"} 2.993289e+06 -node_nfs_procedures{procedure="create",version="4"} 15 -node_nfs_procedures{procedure="create_session",version="4"} 32 -node_nfs_procedures{procedure="delegreturn",version="4"} 97 -node_nfs_procedures{procedure="destroy_session",version="4"} 67 -node_nfs_procedures{procedure="exchange_id",version="4"} 58 -node_nfs_procedures{procedure="fs_locations",version="4"} 32 -node_nfs_procedures{procedure="fsid_present",version="4"} 11 -node_nfs_procedures{procedure="fsinfo",version="3"} 2 -node_nfs_procedures{procedure="fsinfo",version="4"} 68 -node_nfs_procedures{procedure="fsstat",version="3"} 13332 -node_nfs_procedures{procedure="get_lease_time",version="4"} 28 -node_nfs_procedures{procedure="getacl",version="4"} 36 -node_nfs_procedures{procedure="getattr",version="2"} 57 -node_nfs_procedures{procedure="getattr",version="3"} 1.061909262e+09 -node_nfs_procedures{procedure="getattr",version="4"} 88 -node_nfs_procedures{procedure="getdeviceinfo",version="4"} 1 -node_nfs_procedures{procedure="layoutcommit",version="4"} 26 -node_nfs_procedures{procedure="layoutget",version="4"} 90 -node_nfs_procedures{procedure="layoutreturn",version="4"} 0 -node_nfs_procedures{procedure="link",version="2"} 17 -node_nfs_procedures{procedure="link",version="3"} 0 -node_nfs_procedures{procedure="link",version="4"} 21 -node_nfs_procedures{procedure="lock",version="4"} 39 -node_nfs_procedures{procedure="lockt",version="4"} 68 -node_nfs_procedures{procedure="locku",version="4"} 59 -node_nfs_procedures{procedure="lookup",version="2"} 71 
-node_nfs_procedures{procedure="lookup",version="3"} 4.077635e+06 -node_nfs_procedures{procedure="lookup",version="4"} 29 -node_nfs_procedures{procedure="lookup_root",version="4"} 74 -node_nfs_procedures{procedure="mkdir",version="2"} 50 -node_nfs_procedures{procedure="mkdir",version="3"} 590 -node_nfs_procedures{procedure="mknod",version="3"} 0 -node_nfs_procedures{procedure="null",version="2"} 16 -node_nfs_procedures{procedure="null",version="3"} 0 -node_nfs_procedures{procedure="null",version="4"} 98 -node_nfs_procedures{procedure="open",version="4"} 85 -node_nfs_procedures{procedure="open_confirm",version="4"} 23 -node_nfs_procedures{procedure="open_downgrade",version="4"} 1 -node_nfs_procedures{procedure="open_noattr",version="4"} 24 -node_nfs_procedures{procedure="pathconf",version="3"} 1 -node_nfs_procedures{procedure="pathconf",version="4"} 53 -node_nfs_procedures{procedure="read",version="2"} 45 -node_nfs_procedures{procedure="read",version="3"} 2.9391916e+07 -node_nfs_procedures{procedure="read",version="4"} 51 -node_nfs_procedures{procedure="readdir",version="2"} 70 -node_nfs_procedures{procedure="readdir",version="3"} 3983 -node_nfs_procedures{procedure="readdir",version="4"} 66 -node_nfs_procedures{procedure="readdirplus",version="3"} 92385 -node_nfs_procedures{procedure="readlink",version="2"} 73 -node_nfs_procedures{procedure="readlink",version="3"} 5 -node_nfs_procedures{procedure="readlink",version="4"} 54 -node_nfs_procedures{procedure="reclaim_complete",version="4"} 35 -node_nfs_procedures{procedure="release_lockowner",version="4"} 85 -node_nfs_procedures{procedure="remove",version="2"} 83 -node_nfs_procedures{procedure="remove",version="3"} 7815 -node_nfs_procedures{procedure="remove",version="4"} 69 -node_nfs_procedures{procedure="rename",version="2"} 61 -node_nfs_procedures{procedure="rename",version="3"} 1130 -node_nfs_procedures{procedure="rename",version="4"} 96 -node_nfs_procedures{procedure="renew",version="4"} 83 
-node_nfs_procedures{procedure="rmdir",version="2"} 23 -node_nfs_procedures{procedure="rmdir",version="3"} 15 -node_nfs_procedures{procedure="root",version="2"} 52 -node_nfs_procedures{procedure="secinfo",version="4"} 81 -node_nfs_procedures{procedure="sequence",version="4"} 13 -node_nfs_procedures{procedure="server_caps",version="4"} 56 -node_nfs_procedures{procedure="setacl",version="4"} 49 -node_nfs_procedures{procedure="setattr",version="2"} 74 -node_nfs_procedures{procedure="setattr",version="3"} 48906 -node_nfs_procedures{procedure="setattr",version="4"} 73 -node_nfs_procedures{procedure="setclientid",version="4"} 12 -node_nfs_procedures{procedure="setclientid_confirm",version="4"} 84 -node_nfs_procedures{procedure="statfs",version="2"} 82 -node_nfs_procedures{procedure="statfs",version="4"} 86 -node_nfs_procedures{procedure="symlink",version="2"} 53 -node_nfs_procedures{procedure="symlink",version="3"} 0 -node_nfs_procedures{procedure="symlink",version="4"} 84 -node_nfs_procedures{procedure="write",version="2"} 0 -node_nfs_procedures{procedure="write",version="3"} 2.570425e+06 -node_nfs_procedures{procedure="write",version="4"} 54 -node_nfs_procedures{procedure="writecache",version="2"} 86 -# HELP node_nfs_rpc_authentication_refreshes Number of RPC authentication refreshes performed. -# TYPE node_nfs_rpc_authentication_refreshes counter -node_nfs_rpc_authentication_refreshes 1.218815394e+09 -# HELP node_nfs_rpc_operations Number of RPCs performed. -# TYPE node_nfs_rpc_operations counter -node_nfs_rpc_operations 1.218785755e+09 -# HELP node_nfs_rpc_retransmissions Number of RPC transmissions performed. -# TYPE node_nfs_rpc_retransmissions counter -node_nfs_rpc_retransmissions 374636 +# HELP node_nf_conntrack_stat_drop Number of packets dropped due to conntrack failure. 
+# TYPE node_nf_conntrack_stat_drop gauge +node_nf_conntrack_stat_drop 0 +# HELP node_nf_conntrack_stat_early_drop Number of dropped conntrack entries to make room for new ones, if maximum table size was reached. +# TYPE node_nf_conntrack_stat_early_drop gauge +node_nf_conntrack_stat_early_drop 0 +# HELP node_nf_conntrack_stat_found Number of searched entries which were successful. +# TYPE node_nf_conntrack_stat_found gauge +node_nf_conntrack_stat_found 0 +# HELP node_nf_conntrack_stat_ignore Number of packets seen which are already connected to a conntrack entry. +# TYPE node_nf_conntrack_stat_ignore gauge +node_nf_conntrack_stat_ignore 89738 +# HELP node_nf_conntrack_stat_insert Number of entries inserted into the list. +# TYPE node_nf_conntrack_stat_insert gauge +node_nf_conntrack_stat_insert 0 +# HELP node_nf_conntrack_stat_insert_failed Number of entries for which list insertion was attempted but failed. +# TYPE node_nf_conntrack_stat_insert_failed gauge +node_nf_conntrack_stat_insert_failed 0 +# HELP node_nf_conntrack_stat_invalid Number of packets seen which can not be tracked. +# TYPE node_nf_conntrack_stat_invalid gauge +node_nf_conntrack_stat_invalid 53 +# HELP node_nf_conntrack_stat_search_restart Number of conntrack table lookups which had to be restarted due to hashtable resizes. +# TYPE node_nf_conntrack_stat_search_restart gauge +node_nf_conntrack_stat_search_restart 7 +# HELP node_nfs_connections_total Total number of NFSd TCP connections. +# TYPE node_nfs_connections_total counter +node_nfs_connections_total 45 +# HELP node_nfs_packets_total Total NFSd network packets (sent+received) by protocol type. +# TYPE node_nfs_packets_total counter +node_nfs_packets_total{protocol="tcp"} 69 +node_nfs_packets_total{protocol="udp"} 70 +# HELP node_nfs_requests_total Number of NFS procedures invoked. 
+# TYPE node_nfs_requests_total counter +node_nfs_requests_total{method="Access",proto="3"} 1.17661341e+08 +node_nfs_requests_total{method="Access",proto="4"} 58 +node_nfs_requests_total{method="Allocate",proto="4"} 0 +node_nfs_requests_total{method="BindConnToSession",proto="4"} 0 +node_nfs_requests_total{method="Clone",proto="4"} 0 +node_nfs_requests_total{method="Close",proto="4"} 28 +node_nfs_requests_total{method="Commit",proto="3"} 23729 +node_nfs_requests_total{method="Commit",proto="4"} 83 +node_nfs_requests_total{method="Create",proto="2"} 52 +node_nfs_requests_total{method="Create",proto="3"} 2.993289e+06 +node_nfs_requests_total{method="Create",proto="4"} 15 +node_nfs_requests_total{method="CreateSession",proto="4"} 32 +node_nfs_requests_total{method="DeAllocate",proto="4"} 0 +node_nfs_requests_total{method="DelegReturn",proto="4"} 97 +node_nfs_requests_total{method="DestroyClientID",proto="4"} 0 +node_nfs_requests_total{method="DestroySession",proto="4"} 67 +node_nfs_requests_total{method="ExchangeID",proto="4"} 58 +node_nfs_requests_total{method="FreeStateID",proto="4"} 0 +node_nfs_requests_total{method="FsInfo",proto="3"} 2 +node_nfs_requests_total{method="FsInfo",proto="4"} 68 +node_nfs_requests_total{method="FsLocations",proto="4"} 32 +node_nfs_requests_total{method="FsStat",proto="2"} 82 +node_nfs_requests_total{method="FsStat",proto="3"} 13332 +node_nfs_requests_total{method="FsidPresent",proto="4"} 11 +node_nfs_requests_total{method="GetACL",proto="4"} 36 +node_nfs_requests_total{method="GetAttr",proto="2"} 57 +node_nfs_requests_total{method="GetAttr",proto="3"} 1.061909262e+09 +node_nfs_requests_total{method="GetDeviceInfo",proto="4"} 1 +node_nfs_requests_total{method="GetDeviceList",proto="4"} 0 +node_nfs_requests_total{method="GetLeaseTime",proto="4"} 28 +node_nfs_requests_total{method="Getattr",proto="4"} 88 +node_nfs_requests_total{method="LayoutCommit",proto="4"} 26 +node_nfs_requests_total{method="LayoutGet",proto="4"} 90 
+node_nfs_requests_total{method="LayoutReturn",proto="4"} 0 +node_nfs_requests_total{method="LayoutStats",proto="4"} 0 +node_nfs_requests_total{method="Link",proto="2"} 17 +node_nfs_requests_total{method="Link",proto="3"} 0 +node_nfs_requests_total{method="Link",proto="4"} 21 +node_nfs_requests_total{method="Lock",proto="4"} 39 +node_nfs_requests_total{method="Lockt",proto="4"} 68 +node_nfs_requests_total{method="Locku",proto="4"} 59 +node_nfs_requests_total{method="Lookup",proto="2"} 71 +node_nfs_requests_total{method="Lookup",proto="3"} 4.077635e+06 +node_nfs_requests_total{method="Lookup",proto="4"} 29 +node_nfs_requests_total{method="LookupRoot",proto="4"} 74 +node_nfs_requests_total{method="MkDir",proto="2"} 50 +node_nfs_requests_total{method="MkDir",proto="3"} 590 +node_nfs_requests_total{method="MkNod",proto="3"} 0 +node_nfs_requests_total{method="Null",proto="2"} 16 +node_nfs_requests_total{method="Null",proto="3"} 0 +node_nfs_requests_total{method="Null",proto="4"} 98 +node_nfs_requests_total{method="Open",proto="4"} 85 +node_nfs_requests_total{method="OpenConfirm",proto="4"} 23 +node_nfs_requests_total{method="OpenDowngrade",proto="4"} 1 +node_nfs_requests_total{method="OpenNoattr",proto="4"} 24 +node_nfs_requests_total{method="PathConf",proto="3"} 1 +node_nfs_requests_total{method="Pathconf",proto="4"} 53 +node_nfs_requests_total{method="Read",proto="2"} 45 +node_nfs_requests_total{method="Read",proto="3"} 2.9391916e+07 +node_nfs_requests_total{method="Read",proto="4"} 51 +node_nfs_requests_total{method="ReadDir",proto="2"} 70 +node_nfs_requests_total{method="ReadDir",proto="3"} 3983 +node_nfs_requests_total{method="ReadDir",proto="4"} 66 +node_nfs_requests_total{method="ReadDirPlus",proto="3"} 92385 +node_nfs_requests_total{method="ReadLink",proto="2"} 73 +node_nfs_requests_total{method="ReadLink",proto="3"} 5 +node_nfs_requests_total{method="ReadLink",proto="4"} 54 +node_nfs_requests_total{method="ReclaimComplete",proto="4"} 35 
+node_nfs_requests_total{method="ReleaseLockowner",proto="4"} 85 +node_nfs_requests_total{method="Remove",proto="2"} 83 +node_nfs_requests_total{method="Remove",proto="3"} 7815 +node_nfs_requests_total{method="Remove",proto="4"} 69 +node_nfs_requests_total{method="Rename",proto="2"} 61 +node_nfs_requests_total{method="Rename",proto="3"} 1130 +node_nfs_requests_total{method="Rename",proto="4"} 96 +node_nfs_requests_total{method="Renew",proto="4"} 83 +node_nfs_requests_total{method="RmDir",proto="2"} 23 +node_nfs_requests_total{method="RmDir",proto="3"} 15 +node_nfs_requests_total{method="Root",proto="2"} 52 +node_nfs_requests_total{method="Secinfo",proto="4"} 81 +node_nfs_requests_total{method="SecinfoNoName",proto="4"} 0 +node_nfs_requests_total{method="Seek",proto="4"} 0 +node_nfs_requests_total{method="Sequence",proto="4"} 13 +node_nfs_requests_total{method="ServerCaps",proto="4"} 56 +node_nfs_requests_total{method="SetACL",proto="4"} 49 +node_nfs_requests_total{method="SetAttr",proto="2"} 74 +node_nfs_requests_total{method="SetAttr",proto="3"} 48906 +node_nfs_requests_total{method="SetClientID",proto="4"} 12 +node_nfs_requests_total{method="SetClientIDConfirm",proto="4"} 84 +node_nfs_requests_total{method="Setattr",proto="4"} 73 +node_nfs_requests_total{method="StatFs",proto="4"} 86 +node_nfs_requests_total{method="SymLink",proto="2"} 53 +node_nfs_requests_total{method="SymLink",proto="3"} 0 +node_nfs_requests_total{method="Symlink",proto="4"} 84 +node_nfs_requests_total{method="TestStateID",proto="4"} 0 +node_nfs_requests_total{method="WrCache",proto="2"} 86 +node_nfs_requests_total{method="Write",proto="2"} 0 +node_nfs_requests_total{method="Write",proto="3"} 2.570425e+06 +node_nfs_requests_total{method="Write",proto="4"} 54 +# HELP node_nfs_rpc_authentication_refreshes_total Number of RPC authentication refreshes performed. 
+# TYPE node_nfs_rpc_authentication_refreshes_total counter +node_nfs_rpc_authentication_refreshes_total 1.218815394e+09 +# HELP node_nfs_rpc_retransmissions_total Number of RPC transmissions performed. +# TYPE node_nfs_rpc_retransmissions_total counter +node_nfs_rpc_retransmissions_total 374636 +# HELP node_nfs_rpcs_total Total number of RPCs performed. +# TYPE node_nfs_rpcs_total counter +node_nfs_rpcs_total 1.218785755e+09 +# HELP node_nfsd_connections_total Total number of NFSd TCP connections. +# TYPE node_nfsd_connections_total counter +node_nfsd_connections_total 1 +# HELP node_nfsd_disk_bytes_read_total Total NFSd bytes read. +# TYPE node_nfsd_disk_bytes_read_total counter +node_nfsd_disk_bytes_read_total 1.572864e+08 +# HELP node_nfsd_disk_bytes_written_total Total NFSd bytes written. +# TYPE node_nfsd_disk_bytes_written_total counter +node_nfsd_disk_bytes_written_total 72864 +# HELP node_nfsd_file_handles_stale_total Total number of NFSd stale file handles +# TYPE node_nfsd_file_handles_stale_total counter +node_nfsd_file_handles_stale_total 0 +# HELP node_nfsd_packets_total Total NFSd network packets (sent+received) by protocol type. +# TYPE node_nfsd_packets_total counter +node_nfsd_packets_total{proto="tcp"} 917 +node_nfsd_packets_total{proto="udp"} 55 +# HELP node_nfsd_read_ahead_cache_not_found_total Total number of NFSd read ahead cache not found. +# TYPE node_nfsd_read_ahead_cache_not_found_total counter +node_nfsd_read_ahead_cache_not_found_total 0 +# HELP node_nfsd_read_ahead_cache_size_blocks How large the read ahead cache is in blocks. +# TYPE node_nfsd_read_ahead_cache_size_blocks gauge +node_nfsd_read_ahead_cache_size_blocks 32 +# HELP node_nfsd_reply_cache_hits_total Total number of NFSd Reply Cache hits (client lost server response). +# TYPE node_nfsd_reply_cache_hits_total counter +node_nfsd_reply_cache_hits_total 0 +# HELP node_nfsd_reply_cache_misses_total Total number of NFSd Reply Cache an operation that requires caching (idempotent). 
+# TYPE node_nfsd_reply_cache_misses_total counter +node_nfsd_reply_cache_misses_total 6 +# HELP node_nfsd_reply_cache_nocache_total Total number of NFSd Reply Cache non-idempotent operations (rename/delete/…). +# TYPE node_nfsd_reply_cache_nocache_total counter +node_nfsd_reply_cache_nocache_total 18622 +# HELP node_nfsd_requests_total Total number NFSd Requests by method and protocol. +# TYPE node_nfsd_requests_total counter +node_nfsd_requests_total{method="Access",proto="3"} 111 +node_nfsd_requests_total{method="Access",proto="4"} 1098 +node_nfsd_requests_total{method="Close",proto="4"} 2 +node_nfsd_requests_total{method="Commit",proto="3"} 0 +node_nfsd_requests_total{method="Commit",proto="4"} 0 +node_nfsd_requests_total{method="Create",proto="2"} 0 +node_nfsd_requests_total{method="Create",proto="3"} 0 +node_nfsd_requests_total{method="Create",proto="4"} 0 +node_nfsd_requests_total{method="DelegPurge",proto="4"} 0 +node_nfsd_requests_total{method="DelegReturn",proto="4"} 0 +node_nfsd_requests_total{method="FsInfo",proto="3"} 2 +node_nfsd_requests_total{method="FsStat",proto="2"} 2 +node_nfsd_requests_total{method="FsStat",proto="3"} 0 +node_nfsd_requests_total{method="GetAttr",proto="2"} 69 +node_nfsd_requests_total{method="GetAttr",proto="3"} 112 +node_nfsd_requests_total{method="GetAttr",proto="4"} 8179 +node_nfsd_requests_total{method="GetFH",proto="4"} 5896 +node_nfsd_requests_total{method="Link",proto="2"} 0 +node_nfsd_requests_total{method="Link",proto="3"} 0 +node_nfsd_requests_total{method="Link",proto="4"} 0 +node_nfsd_requests_total{method="Lock",proto="4"} 0 +node_nfsd_requests_total{method="Lockt",proto="4"} 0 +node_nfsd_requests_total{method="Locku",proto="4"} 0 +node_nfsd_requests_total{method="Lookup",proto="2"} 4410 +node_nfsd_requests_total{method="Lookup",proto="3"} 2719 +node_nfsd_requests_total{method="Lookup",proto="4"} 5900 +node_nfsd_requests_total{method="LookupRoot",proto="4"} 0 +node_nfsd_requests_total{method="MkDir",proto="2"} 0 
+node_nfsd_requests_total{method="MkDir",proto="3"} 0 +node_nfsd_requests_total{method="MkNod",proto="3"} 0 +node_nfsd_requests_total{method="Nverify",proto="4"} 0 +node_nfsd_requests_total{method="Open",proto="4"} 2 +node_nfsd_requests_total{method="OpenAttr",proto="4"} 0 +node_nfsd_requests_total{method="OpenConfirm",proto="4"} 2 +node_nfsd_requests_total{method="OpenDgrd",proto="4"} 0 +node_nfsd_requests_total{method="PathConf",proto="3"} 1 +node_nfsd_requests_total{method="PutFH",proto="4"} 9609 +node_nfsd_requests_total{method="Read",proto="2"} 0 +node_nfsd_requests_total{method="Read",proto="3"} 0 +node_nfsd_requests_total{method="Read",proto="4"} 150 +node_nfsd_requests_total{method="ReadDir",proto="2"} 99 +node_nfsd_requests_total{method="ReadDir",proto="3"} 27 +node_nfsd_requests_total{method="ReadDir",proto="4"} 1272 +node_nfsd_requests_total{method="ReadDirPlus",proto="3"} 216 +node_nfsd_requests_total{method="ReadLink",proto="2"} 0 +node_nfsd_requests_total{method="ReadLink",proto="3"} 0 +node_nfsd_requests_total{method="ReadLink",proto="4"} 0 +node_nfsd_requests_total{method="RelLockOwner",proto="4"} 0 +node_nfsd_requests_total{method="Remove",proto="2"} 0 +node_nfsd_requests_total{method="Remove",proto="3"} 0 +node_nfsd_requests_total{method="Remove",proto="4"} 0 +node_nfsd_requests_total{method="Rename",proto="2"} 0 +node_nfsd_requests_total{method="Rename",proto="3"} 0 +node_nfsd_requests_total{method="Rename",proto="4"} 0 +node_nfsd_requests_total{method="Renew",proto="4"} 1236 +node_nfsd_requests_total{method="RestoreFH",proto="4"} 0 +node_nfsd_requests_total{method="RmDir",proto="2"} 0 +node_nfsd_requests_total{method="RmDir",proto="3"} 0 +node_nfsd_requests_total{method="Root",proto="2"} 0 +node_nfsd_requests_total{method="SaveFH",proto="4"} 0 +node_nfsd_requests_total{method="SecInfo",proto="4"} 0 +node_nfsd_requests_total{method="SetAttr",proto="2"} 0 +node_nfsd_requests_total{method="SetAttr",proto="3"} 0 
+node_nfsd_requests_total{method="SetAttr",proto="4"} 0 +node_nfsd_requests_total{method="SymLink",proto="2"} 0 +node_nfsd_requests_total{method="SymLink",proto="3"} 0 +node_nfsd_requests_total{method="Verify",proto="4"} 3 +node_nfsd_requests_total{method="WrCache",proto="2"} 0 +node_nfsd_requests_total{method="Write",proto="2"} 0 +node_nfsd_requests_total{method="Write",proto="3"} 0 +node_nfsd_requests_total{method="Write",proto="4"} 3 +# HELP node_nfsd_rpc_errors_total Total number of NFSd RPC errors by error type. +# TYPE node_nfsd_rpc_errors_total counter +node_nfsd_rpc_errors_total{error="auth"} 2 +node_nfsd_rpc_errors_total{error="cInt"} 0 +node_nfsd_rpc_errors_total{error="fmt"} 1 +# HELP node_nfsd_server_rpcs_total Total number of NFSd RPCs. +# TYPE node_nfsd_server_rpcs_total counter +node_nfsd_server_rpcs_total 18628 +# HELP node_nfsd_server_threads Total number of NFSd kernel threads that are running. +# TYPE node_nfsd_server_threads gauge +node_nfsd_server_threads 8 +# HELP node_nvme_info Non-numeric data from /sys/class/nvme/, value is always 1. +# TYPE node_nvme_info gauge +node_nvme_info{device="nvme0",firmware_revision="1B2QEXP7",model="Samsung SSD 970 PRO 512GB",serial="S680HF8N190894I",state="live"} 1 +# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id. +# TYPE node_os_info gauge +node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1 +# HELP node_os_version Metric containing the major.minor part of the OS version. +# TYPE node_os_version gauge +node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04 +# HELP node_power_supply_capacity capacity value of /sys/class/power_supply/. 
+# TYPE node_power_supply_capacity gauge +node_power_supply_capacity{power_supply="BAT0"} 81 +# HELP node_power_supply_cyclecount cyclecount value of /sys/class/power_supply/. +# TYPE node_power_supply_cyclecount gauge +node_power_supply_cyclecount{power_supply="BAT0"} 0 +# HELP node_power_supply_energy_full energy_full value of /sys/class/power_supply/. +# TYPE node_power_supply_energy_full gauge +node_power_supply_energy_full{power_supply="BAT0"} 45.07 +# HELP node_power_supply_energy_full_design energy_full_design value of /sys/class/power_supply/. +# TYPE node_power_supply_energy_full_design gauge +node_power_supply_energy_full_design{power_supply="BAT0"} 47.52 +# HELP node_power_supply_energy_watthour energy_watthour value of /sys/class/power_supply/. +# TYPE node_power_supply_energy_watthour gauge +node_power_supply_energy_watthour{power_supply="BAT0"} 36.58 +# HELP node_power_supply_info info of /sys/class/power_supply/. +# TYPE node_power_supply_info gauge +node_power_supply_info{power_supply="AC",type="Mains"} 1 +node_power_supply_info{capacity_level="Normal",manufacturer="LGC",model_name="LNV-45N1�",power_supply="BAT0",serial_number="38109",status="Discharging",technology="Li-ion",type="Battery"} 1 +# HELP node_power_supply_online online value of /sys/class/power_supply/. +# TYPE node_power_supply_online gauge +node_power_supply_online{power_supply="AC"} 0 +# HELP node_power_supply_power_watt power_watt value of /sys/class/power_supply/. +# TYPE node_power_supply_power_watt gauge +node_power_supply_power_watt{power_supply="BAT0"} 5.002 +# HELP node_power_supply_present present value of /sys/class/power_supply/. +# TYPE node_power_supply_present gauge +node_power_supply_present{power_supply="BAT0"} 1 +# HELP node_power_supply_voltage_min_design voltage_min_design value of /sys/class/power_supply/. 
+# TYPE node_power_supply_voltage_min_design gauge +node_power_supply_voltage_min_design{power_supply="BAT0"} 10.8 +# HELP node_power_supply_voltage_volt voltage_volt value of /sys/class/power_supply/. +# TYPE node_power_supply_voltage_volt gauge +node_power_supply_voltage_volt{power_supply="BAT0"} 11.66 +# HELP node_pressure_cpu_waiting_seconds_total Total time in seconds that processes have waited for CPU time +# TYPE node_pressure_cpu_waiting_seconds_total counter +node_pressure_cpu_waiting_seconds_total 14.036781000000001 +# HELP node_pressure_io_stalled_seconds_total Total time in seconds no process could make progress due to IO congestion +# TYPE node_pressure_io_stalled_seconds_total counter +node_pressure_io_stalled_seconds_total 159.229614 +# HELP node_pressure_io_waiting_seconds_total Total time in seconds that processes have waited due to IO congestion +# TYPE node_pressure_io_waiting_seconds_total counter +node_pressure_io_waiting_seconds_total 159.886802 +# HELP node_pressure_memory_stalled_seconds_total Total time in seconds no process could make progress due to memory congestion +# TYPE node_pressure_memory_stalled_seconds_total counter +node_pressure_memory_stalled_seconds_total 0 +# HELP node_pressure_memory_waiting_seconds_total Total time in seconds that processes have waited for memory +# TYPE node_pressure_memory_waiting_seconds_total counter +node_pressure_memory_waiting_seconds_total 0 +# HELP node_processes_max_processes Number of max PIDs limit +# TYPE node_processes_max_processes gauge +node_processes_max_processes 123 +# HELP node_processes_max_threads Limit of threads in the system +# TYPE node_processes_max_threads gauge +node_processes_max_threads 7801 +# HELP node_processes_pids Number of PIDs +# TYPE node_processes_pids gauge +node_processes_pids 3 +# HELP node_processes_state Number of processes in each state. 
+# TYPE node_processes_state gauge +node_processes_state{state="I"} 1 +node_processes_state{state="S"} 2 +# HELP node_processes_threads Allocated threads in system +# TYPE node_processes_threads gauge +node_processes_threads 3 # HELP node_procs_blocked Number of processes blocked waiting for I/O to complete. # TYPE node_procs_blocked gauge node_procs_blocked 0 # HELP node_procs_running Number of processes in runnable state. # TYPE node_procs_running gauge node_procs_running 2 +# HELP node_qdisc_backlog Number of bytes currently in queue to be sent. +# TYPE node_qdisc_backlog gauge +node_qdisc_backlog{device="eth0",kind="pfifo_fast"} 0 +node_qdisc_backlog{device="wlan0",kind="fq"} 0 # HELP node_qdisc_bytes_total Number of bytes sent. # TYPE node_qdisc_bytes_total counter node_qdisc_bytes_total{device="eth0",kind="pfifo_fast"} 83 node_qdisc_bytes_total{device="wlan0",kind="fq"} 42 +# HELP node_qdisc_current_queue_length Number of packets currently in queue to be sent. +# TYPE node_qdisc_current_queue_length gauge +node_qdisc_current_queue_length{device="eth0",kind="pfifo_fast"} 0 +node_qdisc_current_queue_length{device="wlan0",kind="fq"} 0 # HELP node_qdisc_drops_total Number of packets dropped. 
# TYPE node_qdisc_drops_total counter node_qdisc_drops_total{device="eth0",kind="pfifo_fast"} 0 @@ -2535,6 +2946,24 @@ node_qdisc_packets_total{device="wlan0",kind="fq"} 42 # TYPE node_qdisc_requeues_total counter node_qdisc_requeues_total{device="eth0",kind="pfifo_fast"} 2 node_qdisc_requeues_total{device="wlan0",kind="fq"} 1 +# HELP node_rapl_core_joules_total Current RAPL core value in joules +# TYPE node_rapl_core_joules_total counter +node_rapl_core_joules_total{index="0",path="collector/fixtures/sys/class/powercap/intel-rapl:0:0"} 118821.284256 +# HELP node_rapl_package_joules_total Current RAPL package value in joules +# TYPE node_rapl_package_joules_total counter +node_rapl_package_joules_total{index="0",path="collector/fixtures/sys/class/powercap/intel-rapl:0"} 240422.366267 +# HELP node_schedstat_running_seconds_total Number of seconds CPU spent running a process. +# TYPE node_schedstat_running_seconds_total counter +node_schedstat_running_seconds_total{cpu="0"} 2.045936778163039e+06 +node_schedstat_running_seconds_total{cpu="1"} 1.904686152592476e+06 +# HELP node_schedstat_timeslices_total Number of timeslices executed by CPU. +# TYPE node_schedstat_timeslices_total counter +node_schedstat_timeslices_total{cpu="0"} 4.767485306e+09 +node_schedstat_timeslices_total{cpu="1"} 5.145567945e+09 +# HELP node_schedstat_waiting_seconds_total Number of seconds spent by processing waiting for this CPU. +# TYPE node_schedstat_waiting_seconds_total counter +node_schedstat_waiting_seconds_total{cpu="0"} 343796.328169361 +node_schedstat_waiting_seconds_total{cpu="1"} 364107.263788241 # HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape. # TYPE node_scrape_collector_duration_seconds gauge # HELP node_scrape_collector_success node_exporter: Whether a collector succeeded. 
@@ -2542,43 +2971,76 @@ node_qdisc_requeues_total{device="wlan0",kind="fq"} 1 node_scrape_collector_success{collector="arp"} 1 node_scrape_collector_success{collector="bcache"} 1 node_scrape_collector_success{collector="bonding"} 1 +node_scrape_collector_success{collector="btrfs"} 1 node_scrape_collector_success{collector="buddyinfo"} 1 node_scrape_collector_success{collector="conntrack"} 1 node_scrape_collector_success{collector="cpu"} 1 +node_scrape_collector_success{collector="cpufreq"} 1 node_scrape_collector_success{collector="diskstats"} 1 +node_scrape_collector_success{collector="dmi"} 1 node_scrape_collector_success{collector="drbd"} 1 node_scrape_collector_success{collector="edac"} 1 node_scrape_collector_success{collector="entropy"} 1 +node_scrape_collector_success{collector="fibrechannel"} 1 node_scrape_collector_success{collector="filefd"} 1 node_scrape_collector_success{collector="hwmon"} 1 node_scrape_collector_success{collector="infiniband"} 1 +node_scrape_collector_success{collector="interrupts"} 1 node_scrape_collector_success{collector="ipvs"} 1 node_scrape_collector_success{collector="ksmd"} 1 +node_scrape_collector_success{collector="lnstat"} 1 node_scrape_collector_success{collector="loadavg"} 1 node_scrape_collector_success{collector="mdadm"} 1 -node_scrape_collector_success{collector="megacli"} 1 node_scrape_collector_success{collector="meminfo"} 1 node_scrape_collector_success{collector="meminfo_numa"} 1 node_scrape_collector_success{collector="mountstats"} 1 +node_scrape_collector_success{collector="netclass"} 1 node_scrape_collector_success{collector="netdev"} 1 node_scrape_collector_success{collector="netstat"} 1 node_scrape_collector_success{collector="nfs"} 1 +node_scrape_collector_success{collector="nfsd"} 1 +node_scrape_collector_success{collector="nvme"} 1 +node_scrape_collector_success{collector="os"} 1 +node_scrape_collector_success{collector="powersupplyclass"} 1 +node_scrape_collector_success{collector="pressure"} 1 
+node_scrape_collector_success{collector="processes"} 1 node_scrape_collector_success{collector="qdisc"} 1 +node_scrape_collector_success{collector="rapl"} 1 +node_scrape_collector_success{collector="schedstat"} 1 node_scrape_collector_success{collector="sockstat"} 1 +node_scrape_collector_success{collector="softnet"} 1 node_scrape_collector_success{collector="stat"} 1 +node_scrape_collector_success{collector="tapestats"} 1 node_scrape_collector_success{collector="textfile"} 1 +node_scrape_collector_success{collector="thermal_zone"} 1 +node_scrape_collector_success{collector="time"} 1 +node_scrape_collector_success{collector="udp_queues"} 1 +node_scrape_collector_success{collector="vmstat"} 1 node_scrape_collector_success{collector="wifi"} 1 node_scrape_collector_success{collector="xfs"} 1 node_scrape_collector_success{collector="zfs"} 1 +node_scrape_collector_success{collector="zoneinfo"} 1 +# HELP node_sockstat_FRAG6_inuse Number of FRAG6 sockets in state inuse. +# TYPE node_sockstat_FRAG6_inuse gauge +node_sockstat_FRAG6_inuse 0 +# HELP node_sockstat_FRAG6_memory Number of FRAG6 sockets in state memory. +# TYPE node_sockstat_FRAG6_memory gauge +node_sockstat_FRAG6_memory 0 # HELP node_sockstat_FRAG_inuse Number of FRAG sockets in state inuse. # TYPE node_sockstat_FRAG_inuse gauge node_sockstat_FRAG_inuse 0 # HELP node_sockstat_FRAG_memory Number of FRAG sockets in state memory. # TYPE node_sockstat_FRAG_memory gauge node_sockstat_FRAG_memory 0 +# HELP node_sockstat_RAW6_inuse Number of RAW6 sockets in state inuse. +# TYPE node_sockstat_RAW6_inuse gauge +node_sockstat_RAW6_inuse 1 # HELP node_sockstat_RAW_inuse Number of RAW sockets in state inuse. # TYPE node_sockstat_RAW_inuse gauge node_sockstat_RAW_inuse 0 +# HELP node_sockstat_TCP6_inuse Number of TCP6 sockets in state inuse. +# TYPE node_sockstat_TCP6_inuse gauge +node_sockstat_TCP6_inuse 17 # HELP node_sockstat_TCP_alloc Number of TCP sockets in state alloc. 
# TYPE node_sockstat_TCP_alloc gauge node_sockstat_TCP_alloc 17 @@ -2597,6 +3059,12 @@ node_sockstat_TCP_orphan 0 # HELP node_sockstat_TCP_tw Number of TCP sockets in state tw. # TYPE node_sockstat_TCP_tw gauge node_sockstat_TCP_tw 4 +# HELP node_sockstat_UDP6_inuse Number of UDP6 sockets in state inuse. +# TYPE node_sockstat_UDP6_inuse gauge +node_sockstat_UDP6_inuse 9 +# HELP node_sockstat_UDPLITE6_inuse Number of UDPLITE6 sockets in state inuse. +# TYPE node_sockstat_UDPLITE6_inuse gauge +node_sockstat_UDPLITE6_inuse 0 # HELP node_sockstat_UDPLITE_inuse Number of UDPLITE sockets in state inuse. # TYPE node_sockstat_UDPLITE_inuse gauge node_sockstat_UDPLITE_inuse 0 @@ -2609,45 +3077,149 @@ node_sockstat_UDP_mem 0 # HELP node_sockstat_UDP_mem_bytes Number of UDP sockets in state mem_bytes. # TYPE node_sockstat_UDP_mem_bytes gauge node_sockstat_UDP_mem_bytes 0 -# HELP node_sockstat_sockets_used Number of sockets sockets in state used. +# HELP node_sockstat_sockets_used Number of IPv4 sockets in use. # TYPE node_sockstat_sockets_used gauge node_sockstat_sockets_used 229 -# HELP node_textfile_mtime Unixtime mtime of textfiles successfully read. 
-# TYPE node_textfile_mtime gauge +# HELP node_softnet_dropped_total Number of dropped packets +# TYPE node_softnet_dropped_total counter +node_softnet_dropped_total{cpu="0"} 0 +node_softnet_dropped_total{cpu="1"} 41 +node_softnet_dropped_total{cpu="2"} 0 +node_softnet_dropped_total{cpu="3"} 0 +# HELP node_softnet_processed_total Number of processed packets +# TYPE node_softnet_processed_total counter +node_softnet_processed_total{cpu="0"} 299641 +node_softnet_processed_total{cpu="1"} 916354 +node_softnet_processed_total{cpu="2"} 5.577791e+06 +node_softnet_processed_total{cpu="3"} 3.113785e+06 +# HELP node_softnet_times_squeezed_total Number of times processing packets ran out of quota +# TYPE node_softnet_times_squeezed_total counter +node_softnet_times_squeezed_total{cpu="0"} 1 +node_softnet_times_squeezed_total{cpu="1"} 10 +node_softnet_times_squeezed_total{cpu="2"} 85 +node_softnet_times_squeezed_total{cpu="3"} 50 +# HELP node_tape_io_now The number of I/Os currently outstanding to this device. +# TYPE node_tape_io_now gauge +node_tape_io_now{device="st0"} 1 +# HELP node_tape_io_others_total The number of I/Os issued to the tape drive other than read or write commands. The time taken to complete these commands uses the following calculation io_time_seconds_total-read_time_seconds_total-write_time_seconds_total +# TYPE node_tape_io_others_total counter +node_tape_io_others_total{device="st0"} 1409 +# HELP node_tape_io_time_seconds_total The amount of time spent waiting for all I/O to complete (including read and write). This includes tape movement commands such as seeking between file or set marks and implicit tape movement such as when rewind on close tape devices are used. +# TYPE node_tape_io_time_seconds_total counter +node_tape_io_time_seconds_total{device="st0"} 9247.01108772 +# HELP node_tape_read_bytes_total The number of bytes read from the tape drive. 
+# TYPE node_tape_read_bytes_total counter +node_tape_read_bytes_total{device="st0"} 9.79383912e+08 +# HELP node_tape_read_time_seconds_total The amount of time spent waiting for read requests to complete. +# TYPE node_tape_read_time_seconds_total counter +node_tape_read_time_seconds_total{device="st0"} 33.788355744 +# HELP node_tape_reads_completed_total The number of read requests issued to the tape drive. +# TYPE node_tape_reads_completed_total counter +node_tape_reads_completed_total{device="st0"} 3741 +# HELP node_tape_residual_total The number of times during a read or write we found the residual amount to be non-zero. This should mean that a program is issuing a read larger thean the block size on tape. For write not all data made it to tape. +# TYPE node_tape_residual_total counter +node_tape_residual_total{device="st0"} 19 +# HELP node_tape_write_time_seconds_total The amount of time spent waiting for write requests to complete. +# TYPE node_tape_write_time_seconds_total counter +node_tape_write_time_seconds_total{device="st0"} 5233.597394395 +# HELP node_tape_writes_completed_total The number of write requests issued to the tape drive. +# TYPE node_tape_writes_completed_total counter +node_tape_writes_completed_total{device="st0"} 5.3772916e+07 +# HELP node_tape_written_bytes_total The number of bytes written to the tape drive. +# TYPE node_tape_written_bytes_total counter +node_tape_written_bytes_total{device="st0"} 1.496246784e+12 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
+# TYPE node_textfile_mtime_seconds gauge # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 +# HELP node_thermal_zone_temp Zone temperature in Celsius +# TYPE node_thermal_zone_temp gauge +node_thermal_zone_temp{type="cpu-thermal",zone="0"} 12.376 +# HELP node_time_clocksource_available_info Available clocksources read from '/sys/devices/system/clocksource'. +# TYPE node_time_clocksource_available_info gauge +node_time_clocksource_available_info{clocksource="acpi_pm",device="0"} 1 +node_time_clocksource_available_info{clocksource="hpet",device="0"} 1 +node_time_clocksource_available_info{clocksource="tsc",device="0"} 1 +# HELP node_time_clocksource_current_info Current clocksource read from '/sys/devices/system/clocksource'. +# TYPE node_time_clocksource_current_info gauge +node_time_clocksource_current_info{clocksource="tsc",device="0"} 1 +# HELP node_time_seconds System time in seconds since epoch (1970). +# TYPE node_time_seconds gauge +# HELP node_time_zone_offset_seconds System time zone offset in seconds. +# TYPE node_time_zone_offset_seconds gauge +# HELP node_udp_queues Number of allocated memory in the kernel for UDP datagrams in bytes. +# TYPE node_udp_queues gauge +node_udp_queues{ip="v4",queue="rx"} 0 +node_udp_queues{ip="v4",queue="tx"} 21 +# HELP node_vmstat_oom_kill /proc/vmstat information field oom_kill. +# TYPE node_vmstat_oom_kill untyped +node_vmstat_oom_kill 0 +# HELP node_vmstat_pgfault /proc/vmstat information field pgfault. +# TYPE node_vmstat_pgfault untyped +node_vmstat_pgfault 2.320168809e+09 +# HELP node_vmstat_pgmajfault /proc/vmstat information field pgmajfault. +# TYPE node_vmstat_pgmajfault untyped +node_vmstat_pgmajfault 507162 +# HELP node_vmstat_pgpgin /proc/vmstat information field pgpgin. 
+# TYPE node_vmstat_pgpgin untyped +node_vmstat_pgpgin 7.344136e+06 +# HELP node_vmstat_pgpgout /proc/vmstat information field pgpgout. +# TYPE node_vmstat_pgpgout untyped +node_vmstat_pgpgout 1.541180581e+09 +# HELP node_vmstat_pswpin /proc/vmstat information field pswpin. +# TYPE node_vmstat_pswpin untyped +node_vmstat_pswpin 1476 +# HELP node_vmstat_pswpout /proc/vmstat information field pswpout. +# TYPE node_vmstat_pswpout untyped +node_vmstat_pswpout 35045 # HELP node_wifi_interface_frequency_hertz The current frequency a WiFi interface is operating at, in hertz. # TYPE node_wifi_interface_frequency_hertz gauge node_wifi_interface_frequency_hertz{device="wlan0"} 2.412e+09 node_wifi_interface_frequency_hertz{device="wlan1"} 2.412e+09 # HELP node_wifi_station_beacon_loss_total The total number of times a station has detected a beacon loss. # TYPE node_wifi_station_beacon_loss_total counter -node_wifi_station_beacon_loss_total{device="wlan0"} 1 +node_wifi_station_beacon_loss_total{device="wlan0",mac_address="01:02:03:04:05:06"} 2 +node_wifi_station_beacon_loss_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 1 # HELP node_wifi_station_connected_seconds_total The total number of seconds a station has been connected to an access point. # TYPE node_wifi_station_connected_seconds_total counter -node_wifi_station_connected_seconds_total{device="wlan0"} 30 +node_wifi_station_connected_seconds_total{device="wlan0",mac_address="01:02:03:04:05:06"} 60 +node_wifi_station_connected_seconds_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 30 # HELP node_wifi_station_inactive_seconds The number of seconds since any wireless activity has occurred on a station. 
# TYPE node_wifi_station_inactive_seconds gauge -node_wifi_station_inactive_seconds{device="wlan0"} 0.4 +node_wifi_station_inactive_seconds{device="wlan0",mac_address="01:02:03:04:05:06"} 0.8 +node_wifi_station_inactive_seconds{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0.4 # HELP node_wifi_station_info Labeled WiFi interface station information as provided by the operating system. # TYPE node_wifi_station_info gauge node_wifi_station_info{bssid="00:11:22:33:44:55",device="wlan0",mode="client",ssid="Example"} 1 # HELP node_wifi_station_receive_bits_per_second The current WiFi receive bitrate of a station, in bits per second. # TYPE node_wifi_station_receive_bits_per_second gauge -node_wifi_station_receive_bits_per_second{device="wlan0"} 1.28e+08 +node_wifi_station_receive_bits_per_second{device="wlan0",mac_address="01:02:03:04:05:06"} 2.56e+08 +node_wifi_station_receive_bits_per_second{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 1.28e+08 +# HELP node_wifi_station_receive_bytes_total The total number of bytes received by a WiFi station. +# TYPE node_wifi_station_receive_bytes_total counter +node_wifi_station_receive_bytes_total{device="wlan0",mac_address="01:02:03:04:05:06"} 0 +node_wifi_station_receive_bytes_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0 # HELP node_wifi_station_signal_dbm The current WiFi signal strength, in decibel-milliwatts (dBm). # TYPE node_wifi_station_signal_dbm gauge -node_wifi_station_signal_dbm{device="wlan0"} -52 +node_wifi_station_signal_dbm{device="wlan0",mac_address="01:02:03:04:05:06"} -26 +node_wifi_station_signal_dbm{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} -52 # HELP node_wifi_station_transmit_bits_per_second The current WiFi transmit bitrate of a station, in bits per second. 
# TYPE node_wifi_station_transmit_bits_per_second gauge -node_wifi_station_transmit_bits_per_second{device="wlan0"} 1.64e+08 +node_wifi_station_transmit_bits_per_second{device="wlan0",mac_address="01:02:03:04:05:06"} 3.28e+08 +node_wifi_station_transmit_bits_per_second{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 1.64e+08 +# HELP node_wifi_station_transmit_bytes_total The total number of bytes transmitted by a WiFi station. +# TYPE node_wifi_station_transmit_bytes_total counter +node_wifi_station_transmit_bytes_total{device="wlan0",mac_address="01:02:03:04:05:06"} 0 +node_wifi_station_transmit_bytes_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0 # HELP node_wifi_station_transmit_failed_total The total number of times a station has failed to send a packet. # TYPE node_wifi_station_transmit_failed_total counter -node_wifi_station_transmit_failed_total{device="wlan0"} 2 +node_wifi_station_transmit_failed_total{device="wlan0",mac_address="01:02:03:04:05:06"} 4 +node_wifi_station_transmit_failed_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 2 # HELP node_wifi_station_transmit_retries_total The total number of times a station has had to retry while sending a packet. # TYPE node_wifi_station_transmit_retries_total counter -node_wifi_station_transmit_retries_total{device="wlan0"} 10 +node_wifi_station_transmit_retries_total{device="wlan0",mac_address="01:02:03:04:05:06"} 20 +node_wifi_station_transmit_retries_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 10 # HELP node_xfs_allocation_btree_compares_total Number of allocation B-tree compares for a filesystem. # TYPE node_xfs_allocation_btree_compares_total counter node_xfs_allocation_btree_compares_total{device="sda1"} 0 @@ -2660,18 +3232,18 @@ node_xfs_allocation_btree_records_deleted_total{device="sda1"} 0 # HELP node_xfs_allocation_btree_records_inserted_total Number of allocation B-tree records inserted for a filesystem. 
# TYPE node_xfs_allocation_btree_records_inserted_total counter node_xfs_allocation_btree_records_inserted_total{device="sda1"} 0 -# HELP node_xfs_block_mapping_btree_compares_total Number of block map B-tree compares for a filesystem. -# TYPE node_xfs_block_mapping_btree_compares_total counter -node_xfs_block_mapping_btree_compares_total{device="sda1"} 0 -# HELP node_xfs_block_mapping_btree_lookups_total Number of block map B-tree lookups for a filesystem. -# TYPE node_xfs_block_mapping_btree_lookups_total counter -node_xfs_block_mapping_btree_lookups_total{device="sda1"} 0 -# HELP node_xfs_block_mapping_btree_records_deleted_total Number of block map B-tree records deleted for a filesystem. -# TYPE node_xfs_block_mapping_btree_records_deleted_total counter -node_xfs_block_mapping_btree_records_deleted_total{device="sda1"} 0 -# HELP node_xfs_block_mapping_btree_records_inserted_total Number of block map B-tree records inserted for a filesystem. -# TYPE node_xfs_block_mapping_btree_records_inserted_total counter -node_xfs_block_mapping_btree_records_inserted_total{device="sda1"} 0 +# HELP node_xfs_block_map_btree_compares_total Number of block map B-tree compares for a filesystem. +# TYPE node_xfs_block_map_btree_compares_total counter +node_xfs_block_map_btree_compares_total{device="sda1"} 0 +# HELP node_xfs_block_map_btree_lookups_total Number of block map B-tree lookups for a filesystem. +# TYPE node_xfs_block_map_btree_lookups_total counter +node_xfs_block_map_btree_lookups_total{device="sda1"} 0 +# HELP node_xfs_block_map_btree_records_deleted_total Number of block map B-tree records deleted for a filesystem. +# TYPE node_xfs_block_map_btree_records_deleted_total counter +node_xfs_block_map_btree_records_deleted_total{device="sda1"} 0 +# HELP node_xfs_block_map_btree_records_inserted_total Number of block map B-tree records inserted for a filesystem. 
+# TYPE node_xfs_block_map_btree_records_inserted_total counter +node_xfs_block_map_btree_records_inserted_total{device="sda1"} 0 # HELP node_xfs_block_mapping_extent_list_compares_total Number of extent list compares for a filesystem. # TYPE node_xfs_block_mapping_extent_list_compares_total counter node_xfs_block_mapping_extent_list_compares_total{device="sda1"} 0 @@ -2693,6 +3265,18 @@ node_xfs_block_mapping_unmaps_total{device="sda1"} 1 # HELP node_xfs_block_mapping_writes_total Number of block map for write operations for a filesystem. # TYPE node_xfs_block_mapping_writes_total counter node_xfs_block_mapping_writes_total{device="sda1"} 29 +# HELP node_xfs_directory_operation_create_total Number of times a new directory entry was created for a filesystem. +# TYPE node_xfs_directory_operation_create_total counter +node_xfs_directory_operation_create_total{device="sda1"} 2 +# HELP node_xfs_directory_operation_getdents_total Number of times the directory getdents operation was performed for a filesystem. +# TYPE node_xfs_directory_operation_getdents_total counter +node_xfs_directory_operation_getdents_total{device="sda1"} 52 +# HELP node_xfs_directory_operation_lookup_total Number of file name directory lookups which miss the operating systems directory name lookup cache. +# TYPE node_xfs_directory_operation_lookup_total counter +node_xfs_directory_operation_lookup_total{device="sda1"} 3 +# HELP node_xfs_directory_operation_remove_total Number of times an existing directory entry was created for a filesystem. +# TYPE node_xfs_directory_operation_remove_total counter +node_xfs_directory_operation_remove_total{device="sda1"} 1 # HELP node_xfs_extent_allocation_blocks_allocated_total Number of blocks allocated for a filesystem. 
# TYPE node_xfs_extent_allocation_blocks_allocated_total counter node_xfs_extent_allocation_blocks_allocated_total{device="sda1"} 872 @@ -2705,6 +3289,117 @@ node_xfs_extent_allocation_extents_allocated_total{device="sda1"} 1 # HELP node_xfs_extent_allocation_extents_freed_total Number of extents freed for a filesystem. # TYPE node_xfs_extent_allocation_extents_freed_total counter node_xfs_extent_allocation_extents_freed_total{device="sda1"} 0 +# HELP node_xfs_inode_operation_attempts_total Number of times the OS looked for an XFS inode in the inode cache. +# TYPE node_xfs_inode_operation_attempts_total counter +node_xfs_inode_operation_attempts_total{device="sda1"} 5 +# HELP node_xfs_inode_operation_attribute_changes_total Number of times the OS explicitly changed the attributes of an XFS inode. +# TYPE node_xfs_inode_operation_attribute_changes_total counter +node_xfs_inode_operation_attribute_changes_total{device="sda1"} 1 +# HELP node_xfs_inode_operation_duplicates_total Number of times the OS tried to add a missing XFS inode to the inode cache, but found it had already been added by another process. +# TYPE node_xfs_inode_operation_duplicates_total counter +node_xfs_inode_operation_duplicates_total{device="sda1"} 0 +# HELP node_xfs_inode_operation_found_total Number of times the OS looked for and found an XFS inode in the inode cache. +# TYPE node_xfs_inode_operation_found_total counter +node_xfs_inode_operation_found_total{device="sda1"} 1 +# HELP node_xfs_inode_operation_missed_total Number of times the OS looked for an XFS inode in the cache, but did not find it. +# TYPE node_xfs_inode_operation_missed_total counter +node_xfs_inode_operation_missed_total{device="sda1"} 4 +# HELP node_xfs_inode_operation_reclaims_total Number of times the OS reclaimed an XFS inode from the inode cache to free memory for another purpose. 
+# TYPE node_xfs_inode_operation_reclaims_total counter +node_xfs_inode_operation_reclaims_total{device="sda1"} 0 +# HELP node_xfs_inode_operation_recycled_total Number of times the OS found an XFS inode in the cache, but could not use it as it was being recycled. +# TYPE node_xfs_inode_operation_recycled_total counter +node_xfs_inode_operation_recycled_total{device="sda1"} 0 +# HELP node_xfs_read_calls_total Number of read(2) system calls made to files in a filesystem. +# TYPE node_xfs_read_calls_total counter +node_xfs_read_calls_total{device="sda1"} 0 +# HELP node_xfs_vnode_active_total Number of vnodes not on free lists for a filesystem. +# TYPE node_xfs_vnode_active_total counter +node_xfs_vnode_active_total{device="sda1"} 4 +# HELP node_xfs_vnode_allocate_total Number of times vn_alloc called for a filesystem. +# TYPE node_xfs_vnode_allocate_total counter +node_xfs_vnode_allocate_total{device="sda1"} 0 +# HELP node_xfs_vnode_get_total Number of times vn_get called for a filesystem. +# TYPE node_xfs_vnode_get_total counter +node_xfs_vnode_get_total{device="sda1"} 0 +# HELP node_xfs_vnode_hold_total Number of times vn_hold called for a filesystem. +# TYPE node_xfs_vnode_hold_total counter +node_xfs_vnode_hold_total{device="sda1"} 0 +# HELP node_xfs_vnode_reclaim_total Number of times vn_reclaim called for a filesystem. +# TYPE node_xfs_vnode_reclaim_total counter +node_xfs_vnode_reclaim_total{device="sda1"} 1 +# HELP node_xfs_vnode_release_total Number of times vn_rele called for a filesystem. +# TYPE node_xfs_vnode_release_total counter +node_xfs_vnode_release_total{device="sda1"} 1 +# HELP node_xfs_vnode_remove_total Number of times vn_remove called for a filesystem. +# TYPE node_xfs_vnode_remove_total counter +node_xfs_vnode_remove_total{device="sda1"} 1 +# HELP node_xfs_write_calls_total Number of write(2) system calls made to files in a filesystem. 
+# TYPE node_xfs_write_calls_total counter +node_xfs_write_calls_total{device="sda1"} 28 +# HELP node_zfs_abd_linear_cnt kstat.zfs.misc.abdstats.linear_cnt +# TYPE node_zfs_abd_linear_cnt untyped +node_zfs_abd_linear_cnt 62 +# HELP node_zfs_abd_linear_data_size kstat.zfs.misc.abdstats.linear_data_size +# TYPE node_zfs_abd_linear_data_size untyped +node_zfs_abd_linear_data_size 223232 +# HELP node_zfs_abd_scatter_chunk_waste kstat.zfs.misc.abdstats.scatter_chunk_waste +# TYPE node_zfs_abd_scatter_chunk_waste untyped +node_zfs_abd_scatter_chunk_waste 0 +# HELP node_zfs_abd_scatter_cnt kstat.zfs.misc.abdstats.scatter_cnt +# TYPE node_zfs_abd_scatter_cnt untyped +node_zfs_abd_scatter_cnt 1 +# HELP node_zfs_abd_scatter_data_size kstat.zfs.misc.abdstats.scatter_data_size +# TYPE node_zfs_abd_scatter_data_size untyped +node_zfs_abd_scatter_data_size 16384 +# HELP node_zfs_abd_scatter_order_0 kstat.zfs.misc.abdstats.scatter_order_0 +# TYPE node_zfs_abd_scatter_order_0 untyped +node_zfs_abd_scatter_order_0 0 +# HELP node_zfs_abd_scatter_order_1 kstat.zfs.misc.abdstats.scatter_order_1 +# TYPE node_zfs_abd_scatter_order_1 untyped +node_zfs_abd_scatter_order_1 0 +# HELP node_zfs_abd_scatter_order_10 kstat.zfs.misc.abdstats.scatter_order_10 +# TYPE node_zfs_abd_scatter_order_10 untyped +node_zfs_abd_scatter_order_10 0 +# HELP node_zfs_abd_scatter_order_2 kstat.zfs.misc.abdstats.scatter_order_2 +# TYPE node_zfs_abd_scatter_order_2 untyped +node_zfs_abd_scatter_order_2 1 +# HELP node_zfs_abd_scatter_order_3 kstat.zfs.misc.abdstats.scatter_order_3 +# TYPE node_zfs_abd_scatter_order_3 untyped +node_zfs_abd_scatter_order_3 0 +# HELP node_zfs_abd_scatter_order_4 kstat.zfs.misc.abdstats.scatter_order_4 +# TYPE node_zfs_abd_scatter_order_4 untyped +node_zfs_abd_scatter_order_4 0 +# HELP node_zfs_abd_scatter_order_5 kstat.zfs.misc.abdstats.scatter_order_5 +# TYPE node_zfs_abd_scatter_order_5 untyped +node_zfs_abd_scatter_order_5 0 +# HELP node_zfs_abd_scatter_order_6 
kstat.zfs.misc.abdstats.scatter_order_6 +# TYPE node_zfs_abd_scatter_order_6 untyped +node_zfs_abd_scatter_order_6 0 +# HELP node_zfs_abd_scatter_order_7 kstat.zfs.misc.abdstats.scatter_order_7 +# TYPE node_zfs_abd_scatter_order_7 untyped +node_zfs_abd_scatter_order_7 0 +# HELP node_zfs_abd_scatter_order_8 kstat.zfs.misc.abdstats.scatter_order_8 +# TYPE node_zfs_abd_scatter_order_8 untyped +node_zfs_abd_scatter_order_8 0 +# HELP node_zfs_abd_scatter_order_9 kstat.zfs.misc.abdstats.scatter_order_9 +# TYPE node_zfs_abd_scatter_order_9 untyped +node_zfs_abd_scatter_order_9 0 +# HELP node_zfs_abd_scatter_page_alloc_retry kstat.zfs.misc.abdstats.scatter_page_alloc_retry +# TYPE node_zfs_abd_scatter_page_alloc_retry untyped +node_zfs_abd_scatter_page_alloc_retry 0 +# HELP node_zfs_abd_scatter_page_multi_chunk kstat.zfs.misc.abdstats.scatter_page_multi_chunk +# TYPE node_zfs_abd_scatter_page_multi_chunk untyped +node_zfs_abd_scatter_page_multi_chunk 0 +# HELP node_zfs_abd_scatter_page_multi_zone kstat.zfs.misc.abdstats.scatter_page_multi_zone +# TYPE node_zfs_abd_scatter_page_multi_zone untyped +node_zfs_abd_scatter_page_multi_zone 0 +# HELP node_zfs_abd_scatter_sg_table_retry kstat.zfs.misc.abdstats.scatter_sg_table_retry +# TYPE node_zfs_abd_scatter_sg_table_retry untyped +node_zfs_abd_scatter_sg_table_retry 0 +# HELP node_zfs_abd_struct_size kstat.zfs.misc.abdstats.struct_size +# TYPE node_zfs_abd_struct_size untyped +node_zfs_abd_struct_size 2520 # HELP node_zfs_arc_anon_evictable_data kstat.zfs.misc.arcstats.anon_evictable_data # TYPE node_zfs_arc_anon_evictable_data untyped node_zfs_arc_anon_evictable_data 0 @@ -2978,6 +3673,195 @@ node_zfs_arc_prefetch_metadata_misses 16071 # HELP node_zfs_arc_size kstat.zfs.misc.arcstats.size # TYPE node_zfs_arc_size untyped node_zfs_arc_size 1.603939792e+09 +# HELP node_zfs_dbuf_dbuf_cache_count kstat.zfs.misc.dbuf_stats.dbuf_cache_count +# TYPE node_zfs_dbuf_dbuf_cache_count untyped +node_zfs_dbuf_dbuf_cache_count 27 +# HELP 
node_zfs_dbuf_dbuf_cache_hiwater_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_hiwater_bytes +# TYPE node_zfs_dbuf_dbuf_cache_hiwater_bytes untyped +node_zfs_dbuf_dbuf_cache_hiwater_bytes 6.9117804e+07 +# HELP node_zfs_dbuf_dbuf_cache_level_0 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_0 +# TYPE node_zfs_dbuf_dbuf_cache_level_0 untyped +node_zfs_dbuf_dbuf_cache_level_0 27 +# HELP node_zfs_dbuf_dbuf_cache_level_0_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_0_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_0_bytes untyped +node_zfs_dbuf_dbuf_cache_level_0_bytes 302080 +# HELP node_zfs_dbuf_dbuf_cache_level_1 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_1 +# TYPE node_zfs_dbuf_dbuf_cache_level_1 untyped +node_zfs_dbuf_dbuf_cache_level_1 0 +# HELP node_zfs_dbuf_dbuf_cache_level_10 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_10 +# TYPE node_zfs_dbuf_dbuf_cache_level_10 untyped +node_zfs_dbuf_dbuf_cache_level_10 0 +# HELP node_zfs_dbuf_dbuf_cache_level_10_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_10_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_10_bytes untyped +node_zfs_dbuf_dbuf_cache_level_10_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_11 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_11 +# TYPE node_zfs_dbuf_dbuf_cache_level_11 untyped +node_zfs_dbuf_dbuf_cache_level_11 0 +# HELP node_zfs_dbuf_dbuf_cache_level_11_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_11_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_11_bytes untyped +node_zfs_dbuf_dbuf_cache_level_11_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_1_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_1_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_1_bytes untyped +node_zfs_dbuf_dbuf_cache_level_1_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_2 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_2 +# TYPE node_zfs_dbuf_dbuf_cache_level_2 untyped +node_zfs_dbuf_dbuf_cache_level_2 0 +# HELP node_zfs_dbuf_dbuf_cache_level_2_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_2_bytes +# TYPE 
node_zfs_dbuf_dbuf_cache_level_2_bytes untyped +node_zfs_dbuf_dbuf_cache_level_2_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_3 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_3 +# TYPE node_zfs_dbuf_dbuf_cache_level_3 untyped +node_zfs_dbuf_dbuf_cache_level_3 0 +# HELP node_zfs_dbuf_dbuf_cache_level_3_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_3_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_3_bytes untyped +node_zfs_dbuf_dbuf_cache_level_3_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_4 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_4 +# TYPE node_zfs_dbuf_dbuf_cache_level_4 untyped +node_zfs_dbuf_dbuf_cache_level_4 0 +# HELP node_zfs_dbuf_dbuf_cache_level_4_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_4_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_4_bytes untyped +node_zfs_dbuf_dbuf_cache_level_4_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_5 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_5 +# TYPE node_zfs_dbuf_dbuf_cache_level_5 untyped +node_zfs_dbuf_dbuf_cache_level_5 0 +# HELP node_zfs_dbuf_dbuf_cache_level_5_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_5_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_5_bytes untyped +node_zfs_dbuf_dbuf_cache_level_5_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_6 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_6 +# TYPE node_zfs_dbuf_dbuf_cache_level_6 untyped +node_zfs_dbuf_dbuf_cache_level_6 0 +# HELP node_zfs_dbuf_dbuf_cache_level_6_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_6_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_6_bytes untyped +node_zfs_dbuf_dbuf_cache_level_6_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_7 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_7 +# TYPE node_zfs_dbuf_dbuf_cache_level_7 untyped +node_zfs_dbuf_dbuf_cache_level_7 0 +# HELP node_zfs_dbuf_dbuf_cache_level_7_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_7_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_7_bytes untyped +node_zfs_dbuf_dbuf_cache_level_7_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_8 
kstat.zfs.misc.dbuf_stats.dbuf_cache_level_8 +# TYPE node_zfs_dbuf_dbuf_cache_level_8 untyped +node_zfs_dbuf_dbuf_cache_level_8 0 +# HELP node_zfs_dbuf_dbuf_cache_level_8_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_8_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_8_bytes untyped +node_zfs_dbuf_dbuf_cache_level_8_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_level_9 kstat.zfs.misc.dbuf_stats.dbuf_cache_level_9 +# TYPE node_zfs_dbuf_dbuf_cache_level_9 untyped +node_zfs_dbuf_dbuf_cache_level_9 0 +# HELP node_zfs_dbuf_dbuf_cache_level_9_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_level_9_bytes +# TYPE node_zfs_dbuf_dbuf_cache_level_9_bytes untyped +node_zfs_dbuf_dbuf_cache_level_9_bytes 0 +# HELP node_zfs_dbuf_dbuf_cache_lowater_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_lowater_bytes +# TYPE node_zfs_dbuf_dbuf_cache_lowater_bytes untyped +node_zfs_dbuf_dbuf_cache_lowater_bytes 5.6550932e+07 +# HELP node_zfs_dbuf_dbuf_cache_max_bytes kstat.zfs.misc.dbuf_stats.dbuf_cache_max_bytes +# TYPE node_zfs_dbuf_dbuf_cache_max_bytes untyped +node_zfs_dbuf_dbuf_cache_max_bytes 6.2834368e+07 +# HELP node_zfs_dbuf_dbuf_cache_size kstat.zfs.misc.dbuf_stats.dbuf_cache_size +# TYPE node_zfs_dbuf_dbuf_cache_size untyped +node_zfs_dbuf_dbuf_cache_size 302080 +# HELP node_zfs_dbuf_dbuf_cache_size_max kstat.zfs.misc.dbuf_stats.dbuf_cache_size_max +# TYPE node_zfs_dbuf_dbuf_cache_size_max untyped +node_zfs_dbuf_dbuf_cache_size_max 394240 +# HELP node_zfs_dbuf_dbuf_cache_total_evicts kstat.zfs.misc.dbuf_stats.dbuf_cache_total_evicts +# TYPE node_zfs_dbuf_dbuf_cache_total_evicts untyped +node_zfs_dbuf_dbuf_cache_total_evicts 0 +# HELP node_zfs_dbuf_hash_chain_max kstat.zfs.misc.dbuf_stats.hash_chain_max +# TYPE node_zfs_dbuf_hash_chain_max untyped +node_zfs_dbuf_hash_chain_max 0 +# HELP node_zfs_dbuf_hash_chains kstat.zfs.misc.dbuf_stats.hash_chains +# TYPE node_zfs_dbuf_hash_chains untyped +node_zfs_dbuf_hash_chains 0 +# HELP node_zfs_dbuf_hash_collisions 
kstat.zfs.misc.dbuf_stats.hash_collisions +# TYPE node_zfs_dbuf_hash_collisions untyped +node_zfs_dbuf_hash_collisions 0 +# HELP node_zfs_dbuf_hash_dbuf_level_0 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_0 +# TYPE node_zfs_dbuf_hash_dbuf_level_0 untyped +node_zfs_dbuf_hash_dbuf_level_0 37 +# HELP node_zfs_dbuf_hash_dbuf_level_0_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_0_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_0_bytes untyped +node_zfs_dbuf_hash_dbuf_level_0_bytes 465920 +# HELP node_zfs_dbuf_hash_dbuf_level_1 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_1 +# TYPE node_zfs_dbuf_hash_dbuf_level_1 untyped +node_zfs_dbuf_hash_dbuf_level_1 10 +# HELP node_zfs_dbuf_hash_dbuf_level_10 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_10 +# TYPE node_zfs_dbuf_hash_dbuf_level_10 untyped +node_zfs_dbuf_hash_dbuf_level_10 0 +# HELP node_zfs_dbuf_hash_dbuf_level_10_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_10_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_10_bytes untyped +node_zfs_dbuf_hash_dbuf_level_10_bytes 0 +# HELP node_zfs_dbuf_hash_dbuf_level_11 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_11 +# TYPE node_zfs_dbuf_hash_dbuf_level_11 untyped +node_zfs_dbuf_hash_dbuf_level_11 0 +# HELP node_zfs_dbuf_hash_dbuf_level_11_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_11_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_11_bytes untyped +node_zfs_dbuf_hash_dbuf_level_11_bytes 0 +# HELP node_zfs_dbuf_hash_dbuf_level_1_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_1_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_1_bytes untyped +node_zfs_dbuf_hash_dbuf_level_1_bytes 1.31072e+06 +# HELP node_zfs_dbuf_hash_dbuf_level_2 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_2 +# TYPE node_zfs_dbuf_hash_dbuf_level_2 untyped +node_zfs_dbuf_hash_dbuf_level_2 2 +# HELP node_zfs_dbuf_hash_dbuf_level_2_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_2_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_2_bytes untyped +node_zfs_dbuf_hash_dbuf_level_2_bytes 262144 +# HELP node_zfs_dbuf_hash_dbuf_level_3 
kstat.zfs.misc.dbuf_stats.hash_dbuf_level_3 +# TYPE node_zfs_dbuf_hash_dbuf_level_3 untyped +node_zfs_dbuf_hash_dbuf_level_3 2 +# HELP node_zfs_dbuf_hash_dbuf_level_3_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_3_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_3_bytes untyped +node_zfs_dbuf_hash_dbuf_level_3_bytes 262144 +# HELP node_zfs_dbuf_hash_dbuf_level_4 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_4 +# TYPE node_zfs_dbuf_hash_dbuf_level_4 untyped +node_zfs_dbuf_hash_dbuf_level_4 2 +# HELP node_zfs_dbuf_hash_dbuf_level_4_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_4_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_4_bytes untyped +node_zfs_dbuf_hash_dbuf_level_4_bytes 262144 +# HELP node_zfs_dbuf_hash_dbuf_level_5 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_5 +# TYPE node_zfs_dbuf_hash_dbuf_level_5 untyped +node_zfs_dbuf_hash_dbuf_level_5 2 +# HELP node_zfs_dbuf_hash_dbuf_level_5_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_5_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_5_bytes untyped +node_zfs_dbuf_hash_dbuf_level_5_bytes 262144 +# HELP node_zfs_dbuf_hash_dbuf_level_6 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_6 +# TYPE node_zfs_dbuf_hash_dbuf_level_6 untyped +node_zfs_dbuf_hash_dbuf_level_6 0 +# HELP node_zfs_dbuf_hash_dbuf_level_6_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_6_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_6_bytes untyped +node_zfs_dbuf_hash_dbuf_level_6_bytes 0 +# HELP node_zfs_dbuf_hash_dbuf_level_7 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_7 +# TYPE node_zfs_dbuf_hash_dbuf_level_7 untyped +node_zfs_dbuf_hash_dbuf_level_7 0 +# HELP node_zfs_dbuf_hash_dbuf_level_7_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_7_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_7_bytes untyped +node_zfs_dbuf_hash_dbuf_level_7_bytes 0 +# HELP node_zfs_dbuf_hash_dbuf_level_8 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_8 +# TYPE node_zfs_dbuf_hash_dbuf_level_8 untyped +node_zfs_dbuf_hash_dbuf_level_8 0 +# HELP node_zfs_dbuf_hash_dbuf_level_8_bytes 
kstat.zfs.misc.dbuf_stats.hash_dbuf_level_8_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_8_bytes untyped +node_zfs_dbuf_hash_dbuf_level_8_bytes 0 +# HELP node_zfs_dbuf_hash_dbuf_level_9 kstat.zfs.misc.dbuf_stats.hash_dbuf_level_9 +# TYPE node_zfs_dbuf_hash_dbuf_level_9 untyped +node_zfs_dbuf_hash_dbuf_level_9 0 +# HELP node_zfs_dbuf_hash_dbuf_level_9_bytes kstat.zfs.misc.dbuf_stats.hash_dbuf_level_9_bytes +# TYPE node_zfs_dbuf_hash_dbuf_level_9_bytes untyped +node_zfs_dbuf_hash_dbuf_level_9_bytes 0 +# HELP node_zfs_dbuf_hash_elements kstat.zfs.misc.dbuf_stats.hash_elements +# TYPE node_zfs_dbuf_hash_elements untyped +node_zfs_dbuf_hash_elements 55 +# HELP node_zfs_dbuf_hash_elements_max kstat.zfs.misc.dbuf_stats.hash_elements_max +# TYPE node_zfs_dbuf_hash_elements_max untyped +node_zfs_dbuf_hash_elements_max 55 +# HELP node_zfs_dbuf_hash_hits kstat.zfs.misc.dbuf_stats.hash_hits +# TYPE node_zfs_dbuf_hash_hits untyped +node_zfs_dbuf_hash_hits 108807 +# HELP node_zfs_dbuf_hash_insert_race kstat.zfs.misc.dbuf_stats.hash_insert_race +# TYPE node_zfs_dbuf_hash_insert_race untyped +node_zfs_dbuf_hash_insert_race 0 +# HELP node_zfs_dbuf_hash_misses kstat.zfs.misc.dbuf_stats.hash_misses +# TYPE node_zfs_dbuf_hash_misses untyped +node_zfs_dbuf_hash_misses 1851 # HELP node_zfs_dmu_tx_dmu_tx_assigned kstat.zfs.misc.dmu_tx.dmu_tx_assigned # TYPE node_zfs_dmu_tx_dmu_tx_assigned untyped node_zfs_dmu_tx_dmu_tx_assigned 3.532844e+06 @@ -3011,6 +3895,90 @@ node_zfs_dmu_tx_dmu_tx_quota 0 # HELP node_zfs_dmu_tx_dmu_tx_suspended kstat.zfs.misc.dmu_tx.dmu_tx_suspended # TYPE node_zfs_dmu_tx_dmu_tx_suspended untyped node_zfs_dmu_tx_dmu_tx_suspended 0 +# HELP node_zfs_dnode_dnode_alloc_next_block kstat.zfs.misc.dnodestats.dnode_alloc_next_block +# TYPE node_zfs_dnode_dnode_alloc_next_block untyped +node_zfs_dnode_dnode_alloc_next_block 0 +# HELP node_zfs_dnode_dnode_alloc_next_chunk kstat.zfs.misc.dnodestats.dnode_alloc_next_chunk +# TYPE node_zfs_dnode_dnode_alloc_next_chunk untyped 
+node_zfs_dnode_dnode_alloc_next_chunk 0 +# HELP node_zfs_dnode_dnode_alloc_race kstat.zfs.misc.dnodestats.dnode_alloc_race +# TYPE node_zfs_dnode_dnode_alloc_race untyped +node_zfs_dnode_dnode_alloc_race 0 +# HELP node_zfs_dnode_dnode_allocate kstat.zfs.misc.dnodestats.dnode_allocate +# TYPE node_zfs_dnode_dnode_allocate untyped +node_zfs_dnode_dnode_allocate 0 +# HELP node_zfs_dnode_dnode_buf_evict kstat.zfs.misc.dnodestats.dnode_buf_evict +# TYPE node_zfs_dnode_dnode_buf_evict untyped +node_zfs_dnode_dnode_buf_evict 17 +# HELP node_zfs_dnode_dnode_hold_alloc_hits kstat.zfs.misc.dnodestats.dnode_hold_alloc_hits +# TYPE node_zfs_dnode_dnode_hold_alloc_hits untyped +node_zfs_dnode_dnode_hold_alloc_hits 37617 +# HELP node_zfs_dnode_dnode_hold_alloc_interior kstat.zfs.misc.dnodestats.dnode_hold_alloc_interior +# TYPE node_zfs_dnode_dnode_hold_alloc_interior untyped +node_zfs_dnode_dnode_hold_alloc_interior 0 +# HELP node_zfs_dnode_dnode_hold_alloc_lock_misses kstat.zfs.misc.dnodestats.dnode_hold_alloc_lock_misses +# TYPE node_zfs_dnode_dnode_hold_alloc_lock_misses untyped +node_zfs_dnode_dnode_hold_alloc_lock_misses 0 +# HELP node_zfs_dnode_dnode_hold_alloc_lock_retry kstat.zfs.misc.dnodestats.dnode_hold_alloc_lock_retry +# TYPE node_zfs_dnode_dnode_hold_alloc_lock_retry untyped +node_zfs_dnode_dnode_hold_alloc_lock_retry 0 +# HELP node_zfs_dnode_dnode_hold_alloc_misses kstat.zfs.misc.dnodestats.dnode_hold_alloc_misses +# TYPE node_zfs_dnode_dnode_hold_alloc_misses untyped +node_zfs_dnode_dnode_hold_alloc_misses 0 +# HELP node_zfs_dnode_dnode_hold_alloc_type_none kstat.zfs.misc.dnodestats.dnode_hold_alloc_type_none +# TYPE node_zfs_dnode_dnode_hold_alloc_type_none untyped +node_zfs_dnode_dnode_hold_alloc_type_none 0 +# HELP node_zfs_dnode_dnode_hold_dbuf_hold kstat.zfs.misc.dnodestats.dnode_hold_dbuf_hold +# TYPE node_zfs_dnode_dnode_hold_dbuf_hold untyped +node_zfs_dnode_dnode_hold_dbuf_hold 0 +# HELP node_zfs_dnode_dnode_hold_dbuf_read 
kstat.zfs.misc.dnodestats.dnode_hold_dbuf_read +# TYPE node_zfs_dnode_dnode_hold_dbuf_read untyped +node_zfs_dnode_dnode_hold_dbuf_read 0 +# HELP node_zfs_dnode_dnode_hold_free_hits kstat.zfs.misc.dnodestats.dnode_hold_free_hits +# TYPE node_zfs_dnode_dnode_hold_free_hits untyped +node_zfs_dnode_dnode_hold_free_hits 0 +# HELP node_zfs_dnode_dnode_hold_free_lock_misses kstat.zfs.misc.dnodestats.dnode_hold_free_lock_misses +# TYPE node_zfs_dnode_dnode_hold_free_lock_misses untyped +node_zfs_dnode_dnode_hold_free_lock_misses 0 +# HELP node_zfs_dnode_dnode_hold_free_lock_retry kstat.zfs.misc.dnodestats.dnode_hold_free_lock_retry +# TYPE node_zfs_dnode_dnode_hold_free_lock_retry untyped +node_zfs_dnode_dnode_hold_free_lock_retry 0 +# HELP node_zfs_dnode_dnode_hold_free_misses kstat.zfs.misc.dnodestats.dnode_hold_free_misses +# TYPE node_zfs_dnode_dnode_hold_free_misses untyped +node_zfs_dnode_dnode_hold_free_misses 0 +# HELP node_zfs_dnode_dnode_hold_free_overflow kstat.zfs.misc.dnodestats.dnode_hold_free_overflow +# TYPE node_zfs_dnode_dnode_hold_free_overflow untyped +node_zfs_dnode_dnode_hold_free_overflow 0 +# HELP node_zfs_dnode_dnode_hold_free_refcount kstat.zfs.misc.dnodestats.dnode_hold_free_refcount +# TYPE node_zfs_dnode_dnode_hold_free_refcount untyped +node_zfs_dnode_dnode_hold_free_refcount 0 +# HELP node_zfs_dnode_dnode_hold_free_txg kstat.zfs.misc.dnodestats.dnode_hold_free_txg +# TYPE node_zfs_dnode_dnode_hold_free_txg untyped +node_zfs_dnode_dnode_hold_free_txg 0 +# HELP node_zfs_dnode_dnode_move_active kstat.zfs.misc.dnodestats.dnode_move_active +# TYPE node_zfs_dnode_dnode_move_active untyped +node_zfs_dnode_dnode_move_active 0 +# HELP node_zfs_dnode_dnode_move_handle kstat.zfs.misc.dnodestats.dnode_move_handle +# TYPE node_zfs_dnode_dnode_move_handle untyped +node_zfs_dnode_dnode_move_handle 0 +# HELP node_zfs_dnode_dnode_move_invalid kstat.zfs.misc.dnodestats.dnode_move_invalid +# TYPE node_zfs_dnode_dnode_move_invalid untyped 
+node_zfs_dnode_dnode_move_invalid 0 +# HELP node_zfs_dnode_dnode_move_recheck1 kstat.zfs.misc.dnodestats.dnode_move_recheck1 +# TYPE node_zfs_dnode_dnode_move_recheck1 untyped +node_zfs_dnode_dnode_move_recheck1 0 +# HELP node_zfs_dnode_dnode_move_recheck2 kstat.zfs.misc.dnodestats.dnode_move_recheck2 +# TYPE node_zfs_dnode_dnode_move_recheck2 untyped +node_zfs_dnode_dnode_move_recheck2 0 +# HELP node_zfs_dnode_dnode_move_rwlock kstat.zfs.misc.dnodestats.dnode_move_rwlock +# TYPE node_zfs_dnode_dnode_move_rwlock untyped +node_zfs_dnode_dnode_move_rwlock 0 +# HELP node_zfs_dnode_dnode_move_special kstat.zfs.misc.dnodestats.dnode_move_special +# TYPE node_zfs_dnode_dnode_move_special untyped +node_zfs_dnode_dnode_move_special 0 +# HELP node_zfs_dnode_dnode_reallocate kstat.zfs.misc.dnodestats.dnode_reallocate +# TYPE node_zfs_dnode_dnode_reallocate untyped +node_zfs_dnode_dnode_reallocate 0 # HELP node_zfs_fm_erpt_dropped kstat.zfs.misc.fm.erpt-dropped # TYPE node_zfs_fm_erpt_dropped untyped node_zfs_fm_erpt_dropped 18 @@ -3032,6 +4000,27 @@ node_zfs_vdev_cache_hits 0 # HELP node_zfs_vdev_cache_misses kstat.zfs.misc.vdev_cache_stats.misses # TYPE node_zfs_vdev_cache_misses untyped node_zfs_vdev_cache_misses 0 +# HELP node_zfs_vdev_mirror_non_rotating_linear kstat.zfs.misc.vdev_mirror_stats.non_rotating_linear +# TYPE node_zfs_vdev_mirror_non_rotating_linear untyped +node_zfs_vdev_mirror_non_rotating_linear 0 +# HELP node_zfs_vdev_mirror_non_rotating_seek kstat.zfs.misc.vdev_mirror_stats.non_rotating_seek +# TYPE node_zfs_vdev_mirror_non_rotating_seek untyped +node_zfs_vdev_mirror_non_rotating_seek 0 +# HELP node_zfs_vdev_mirror_preferred_found kstat.zfs.misc.vdev_mirror_stats.preferred_found +# TYPE node_zfs_vdev_mirror_preferred_found untyped +node_zfs_vdev_mirror_preferred_found 0 +# HELP node_zfs_vdev_mirror_preferred_not_found kstat.zfs.misc.vdev_mirror_stats.preferred_not_found +# TYPE node_zfs_vdev_mirror_preferred_not_found untyped 
+node_zfs_vdev_mirror_preferred_not_found 94 +# HELP node_zfs_vdev_mirror_rotating_linear kstat.zfs.misc.vdev_mirror_stats.rotating_linear +# TYPE node_zfs_vdev_mirror_rotating_linear untyped +node_zfs_vdev_mirror_rotating_linear 0 +# HELP node_zfs_vdev_mirror_rotating_offset kstat.zfs.misc.vdev_mirror_stats.rotating_offset +# TYPE node_zfs_vdev_mirror_rotating_offset untyped +node_zfs_vdev_mirror_rotating_offset 0 +# HELP node_zfs_vdev_mirror_rotating_seek kstat.zfs.misc.vdev_mirror_stats.rotating_seek +# TYPE node_zfs_vdev_mirror_rotating_seek untyped +node_zfs_vdev_mirror_rotating_seek 0 # HELP node_zfs_xuio_onloan_read_buf kstat.zfs.misc.xuio_stats.onloan_read_buf # TYPE node_zfs_xuio_onloan_read_buf untyped node_zfs_xuio_onloan_read_buf 32 @@ -3118,10 +4107,46 @@ node_zfs_zil_zil_itx_metaslab_slog_bytes 0 node_zfs_zil_zil_itx_metaslab_slog_count 0 # HELP node_zfs_zil_zil_itx_needcopy_bytes kstat.zfs.misc.zil.zil_itx_needcopy_bytes # TYPE node_zfs_zil_zil_itx_needcopy_bytes untyped -node_zfs_zil_zil_itx_needcopy_bytes 0 +node_zfs_zil_zil_itx_needcopy_bytes 1.8446744073709537e+19 # HELP node_zfs_zil_zil_itx_needcopy_count kstat.zfs.misc.zil.zil_itx_needcopy_count # TYPE node_zfs_zil_zil_itx_needcopy_count untyped node_zfs_zil_zil_itx_needcopy_count 0 +# HELP node_zfs_zpool_dataset_nread kstat.zfs.misc.objset.nread +# TYPE node_zfs_zpool_dataset_nread untyped +node_zfs_zpool_dataset_nread{dataset="pool1",zpool="pool1"} 0 +node_zfs_zpool_dataset_nread{dataset="pool1/dataset1",zpool="pool1"} 28 +node_zfs_zpool_dataset_nread{dataset="poolz1",zpool="poolz1"} 0 +node_zfs_zpool_dataset_nread{dataset="poolz1/dataset1",zpool="poolz1"} 28 +# HELP node_zfs_zpool_dataset_nunlinked kstat.zfs.misc.objset.nunlinked +# TYPE node_zfs_zpool_dataset_nunlinked untyped +node_zfs_zpool_dataset_nunlinked{dataset="pool1",zpool="pool1"} 0 +node_zfs_zpool_dataset_nunlinked{dataset="pool1/dataset1",zpool="pool1"} 3 +node_zfs_zpool_dataset_nunlinked{dataset="poolz1",zpool="poolz1"} 0 
+node_zfs_zpool_dataset_nunlinked{dataset="poolz1/dataset1",zpool="poolz1"} 14 +# HELP node_zfs_zpool_dataset_nunlinks kstat.zfs.misc.objset.nunlinks +# TYPE node_zfs_zpool_dataset_nunlinks untyped +node_zfs_zpool_dataset_nunlinks{dataset="pool1",zpool="pool1"} 0 +node_zfs_zpool_dataset_nunlinks{dataset="pool1/dataset1",zpool="pool1"} 3 +node_zfs_zpool_dataset_nunlinks{dataset="poolz1",zpool="poolz1"} 0 +node_zfs_zpool_dataset_nunlinks{dataset="poolz1/dataset1",zpool="poolz1"} 14 +# HELP node_zfs_zpool_dataset_nwritten kstat.zfs.misc.objset.nwritten +# TYPE node_zfs_zpool_dataset_nwritten untyped +node_zfs_zpool_dataset_nwritten{dataset="pool1",zpool="pool1"} 0 +node_zfs_zpool_dataset_nwritten{dataset="pool1/dataset1",zpool="pool1"} 12302 +node_zfs_zpool_dataset_nwritten{dataset="poolz1",zpool="poolz1"} 0 +node_zfs_zpool_dataset_nwritten{dataset="poolz1/dataset1",zpool="poolz1"} 32806 +# HELP node_zfs_zpool_dataset_reads kstat.zfs.misc.objset.reads +# TYPE node_zfs_zpool_dataset_reads untyped +node_zfs_zpool_dataset_reads{dataset="pool1",zpool="pool1"} 0 +node_zfs_zpool_dataset_reads{dataset="pool1/dataset1",zpool="pool1"} 2 +node_zfs_zpool_dataset_reads{dataset="poolz1",zpool="poolz1"} 0 +node_zfs_zpool_dataset_reads{dataset="poolz1/dataset1",zpool="poolz1"} 2 +# HELP node_zfs_zpool_dataset_writes kstat.zfs.misc.objset.writes +# TYPE node_zfs_zpool_dataset_writes untyped +node_zfs_zpool_dataset_writes{dataset="pool1",zpool="pool1"} 0 +node_zfs_zpool_dataset_writes{dataset="pool1/dataset1",zpool="pool1"} 4 +node_zfs_zpool_dataset_writes{dataset="poolz1",zpool="poolz1"} 0 +node_zfs_zpool_dataset_writes{dataset="poolz1/dataset1",zpool="poolz1"} 10 # HELP node_zfs_zpool_nread kstat.zfs.misc.io.nread # TYPE node_zfs_zpool_nread untyped node_zfs_zpool_nread{zpool="pool1"} 1.88416e+06 @@ -3150,6 +4175,20 @@ node_zfs_zpool_rtime{zpool="poolz1"} 9.82909164e+09 # TYPE node_zfs_zpool_rupdate untyped node_zfs_zpool_rupdate{zpool="pool1"} 7.921048984922e+13 
node_zfs_zpool_rupdate{zpool="poolz1"} 1.10734831944501e+14 +# HELP node_zfs_zpool_state kstat.zfs.misc.state +# TYPE node_zfs_zpool_state gauge +node_zfs_zpool_state{state="degraded",zpool="pool1"} 0 +node_zfs_zpool_state{state="degraded",zpool="poolz1"} 1 +node_zfs_zpool_state{state="faulted",zpool="pool1"} 0 +node_zfs_zpool_state{state="faulted",zpool="poolz1"} 0 +node_zfs_zpool_state{state="offline",zpool="pool1"} 0 +node_zfs_zpool_state{state="offline",zpool="poolz1"} 0 +node_zfs_zpool_state{state="online",zpool="pool1"} 1 +node_zfs_zpool_state{state="online",zpool="poolz1"} 0 +node_zfs_zpool_state{state="removed",zpool="pool1"} 0 +node_zfs_zpool_state{state="removed",zpool="poolz1"} 0 +node_zfs_zpool_state{state="unavail",zpool="pool1"} 0 +node_zfs_zpool_state{state="unavail",zpool="poolz1"} 0 # HELP node_zfs_zpool_wcnt kstat.zfs.misc.io.wcnt # TYPE node_zfs_zpool_wcnt untyped node_zfs_zpool_wcnt{zpool="pool1"} 0 @@ -3170,6 +4209,177 @@ node_zfs_zpool_wtime{zpool="poolz1"} 9.673715628e+09 # TYPE node_zfs_zpool_wupdate untyped node_zfs_zpool_wupdate{zpool="pool1"} 7.9210489694949e+13 node_zfs_zpool_wupdate{zpool="poolz1"} 1.10734831833266e+14 +# HELP node_zoneinfo_high_pages Zone watermark pages_high +# TYPE node_zoneinfo_high_pages gauge +node_zoneinfo_high_pages{node="0",zone="DMA"} 14 +node_zoneinfo_high_pages{node="0",zone="DMA32"} 2122 +node_zoneinfo_high_pages{node="0",zone="Device"} 0 +node_zoneinfo_high_pages{node="0",zone="Movable"} 0 +node_zoneinfo_high_pages{node="0",zone="Normal"} 31113 +# HELP node_zoneinfo_low_pages Zone watermark pages_low +# TYPE node_zoneinfo_low_pages gauge +node_zoneinfo_low_pages{node="0",zone="DMA"} 11 +node_zoneinfo_low_pages{node="0",zone="DMA32"} 1600 +node_zoneinfo_low_pages{node="0",zone="Device"} 0 +node_zoneinfo_low_pages{node="0",zone="Movable"} 0 +node_zoneinfo_low_pages{node="0",zone="Normal"} 23461 +# HELP node_zoneinfo_managed_pages Present pages managed by the buddy system +# TYPE node_zoneinfo_managed_pages 
gauge +node_zoneinfo_managed_pages{node="0",zone="DMA"} 3973 +node_zoneinfo_managed_pages{node="0",zone="DMA32"} 530339 +node_zoneinfo_managed_pages{node="0",zone="Device"} 0 +node_zoneinfo_managed_pages{node="0",zone="Movable"} 0 +node_zoneinfo_managed_pages{node="0",zone="Normal"} 7.654794e+06 +# HELP node_zoneinfo_min_pages Zone watermark pages_min +# TYPE node_zoneinfo_min_pages gauge +node_zoneinfo_min_pages{node="0",zone="DMA"} 8 +node_zoneinfo_min_pages{node="0",zone="DMA32"} 1078 +node_zoneinfo_min_pages{node="0",zone="Device"} 0 +node_zoneinfo_min_pages{node="0",zone="Movable"} 0 +node_zoneinfo_min_pages{node="0",zone="Normal"} 15809 +# HELP node_zoneinfo_nr_active_anon_pages Number of anonymous pages recently more used +# TYPE node_zoneinfo_nr_active_anon_pages gauge +node_zoneinfo_nr_active_anon_pages{node="0",zone="DMA"} 1.175853e+06 +# HELP node_zoneinfo_nr_active_file_pages Number of active pages with file-backing +# TYPE node_zoneinfo_nr_active_file_pages gauge +node_zoneinfo_nr_active_file_pages{node="0",zone="DMA"} 688810 +# HELP node_zoneinfo_nr_anon_pages Number of anonymous pages currently used by the system +# TYPE node_zoneinfo_nr_anon_pages gauge +node_zoneinfo_nr_anon_pages{node="0",zone="DMA"} 1.156608e+06 +# HELP node_zoneinfo_nr_anon_transparent_hugepages Number of anonymous transparent huge pages currently used by the system +# TYPE node_zoneinfo_nr_anon_transparent_hugepages gauge +node_zoneinfo_nr_anon_transparent_hugepages{node="0",zone="DMA"} 0 +# HELP node_zoneinfo_nr_dirtied_total Page dirtyings since bootup +# TYPE node_zoneinfo_nr_dirtied_total counter +node_zoneinfo_nr_dirtied_total{node="0",zone="DMA"} 1.189097e+06 +# HELP node_zoneinfo_nr_dirty_pages Number of dirty pages +# TYPE node_zoneinfo_nr_dirty_pages gauge +node_zoneinfo_nr_dirty_pages{node="0",zone="DMA"} 103 +# HELP node_zoneinfo_nr_file_pages Number of file pages +# TYPE node_zoneinfo_nr_file_pages gauge +node_zoneinfo_nr_file_pages{node="0",zone="DMA"} 1.740118e+06 
+# HELP node_zoneinfo_nr_free_pages Total number of free pages in the zone +# TYPE node_zoneinfo_nr_free_pages gauge +node_zoneinfo_nr_free_pages{node="0",zone="DMA"} 2949 +node_zoneinfo_nr_free_pages{node="0",zone="DMA32"} 528427 +node_zoneinfo_nr_free_pages{node="0",zone="Normal"} 4.539739e+06 +# HELP node_zoneinfo_nr_inactive_anon_pages Number of anonymous pages recently less used +# TYPE node_zoneinfo_nr_inactive_anon_pages gauge +node_zoneinfo_nr_inactive_anon_pages{node="0",zone="DMA"} 95612 +# HELP node_zoneinfo_nr_inactive_file_pages Number of inactive pages with file-backing +# TYPE node_zoneinfo_nr_inactive_file_pages gauge +node_zoneinfo_nr_inactive_file_pages{node="0",zone="DMA"} 723339 +# HELP node_zoneinfo_nr_isolated_anon_pages Temporary isolated pages from anon lru +# TYPE node_zoneinfo_nr_isolated_anon_pages gauge +node_zoneinfo_nr_isolated_anon_pages{node="0",zone="DMA"} 0 +# HELP node_zoneinfo_nr_isolated_file_pages Temporary isolated pages from file lru +# TYPE node_zoneinfo_nr_isolated_file_pages gauge +node_zoneinfo_nr_isolated_file_pages{node="0",zone="DMA"} 0 +# HELP node_zoneinfo_nr_kernel_stacks Number of kernel stacks +# TYPE node_zoneinfo_nr_kernel_stacks gauge +node_zoneinfo_nr_kernel_stacks{node="0",zone="DMA"} 0 +node_zoneinfo_nr_kernel_stacks{node="0",zone="DMA32"} 0 +node_zoneinfo_nr_kernel_stacks{node="0",zone="Normal"} 18864 +# HELP node_zoneinfo_nr_mapped_pages Number of mapped pages +# TYPE node_zoneinfo_nr_mapped_pages gauge +node_zoneinfo_nr_mapped_pages{node="0",zone="DMA"} 423143 +# HELP node_zoneinfo_nr_shmem_pages Number of shmem pages (included tmpfs/GEM pages) +# TYPE node_zoneinfo_nr_shmem_pages gauge +node_zoneinfo_nr_shmem_pages{node="0",zone="DMA"} 330517 +# HELP node_zoneinfo_nr_slab_reclaimable_pages Number of reclaimable slab pages +# TYPE node_zoneinfo_nr_slab_reclaimable_pages gauge +node_zoneinfo_nr_slab_reclaimable_pages{node="0",zone="DMA"} 121763 +# HELP node_zoneinfo_nr_slab_unreclaimable_pages Number of 
unreclaimable slab pages +# TYPE node_zoneinfo_nr_slab_unreclaimable_pages gauge +node_zoneinfo_nr_slab_unreclaimable_pages{node="0",zone="DMA"} 56182 +# HELP node_zoneinfo_nr_unevictable_pages Number of unevictable pages +# TYPE node_zoneinfo_nr_unevictable_pages gauge +node_zoneinfo_nr_unevictable_pages{node="0",zone="DMA"} 213111 +# HELP node_zoneinfo_nr_writeback_pages Number of writeback pages +# TYPE node_zoneinfo_nr_writeback_pages gauge +node_zoneinfo_nr_writeback_pages{node="0",zone="DMA"} 0 +# HELP node_zoneinfo_nr_written_total Page writings since bootup +# TYPE node_zoneinfo_nr_written_total counter +node_zoneinfo_nr_written_total{node="0",zone="DMA"} 1.181554e+06 +# HELP node_zoneinfo_numa_foreign_total Was intended here, hit elsewhere +# TYPE node_zoneinfo_numa_foreign_total counter +node_zoneinfo_numa_foreign_total{node="0",zone="DMA"} 0 +node_zoneinfo_numa_foreign_total{node="0",zone="DMA32"} 0 +node_zoneinfo_numa_foreign_total{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_numa_hit_total Allocated in intended node +# TYPE node_zoneinfo_numa_hit_total counter +node_zoneinfo_numa_hit_total{node="0",zone="DMA"} 1 +node_zoneinfo_numa_hit_total{node="0",zone="DMA32"} 13 +node_zoneinfo_numa_hit_total{node="0",zone="Normal"} 6.2836441e+07 +# HELP node_zoneinfo_numa_interleave_total Interleaver preferred this zone +# TYPE node_zoneinfo_numa_interleave_total counter +node_zoneinfo_numa_interleave_total{node="0",zone="DMA"} 1 +node_zoneinfo_numa_interleave_total{node="0",zone="DMA32"} 1 +node_zoneinfo_numa_interleave_total{node="0",zone="Normal"} 23174 +# HELP node_zoneinfo_numa_local_total Allocation from local node +# TYPE node_zoneinfo_numa_local_total counter +node_zoneinfo_numa_local_total{node="0",zone="DMA"} 1 +node_zoneinfo_numa_local_total{node="0",zone="DMA32"} 13 +node_zoneinfo_numa_local_total{node="0",zone="Normal"} 6.2836441e+07 +# HELP node_zoneinfo_numa_miss_total Allocated in non intended node +# TYPE node_zoneinfo_numa_miss_total counter 
+node_zoneinfo_numa_miss_total{node="0",zone="DMA"} 0 +node_zoneinfo_numa_miss_total{node="0",zone="DMA32"} 0 +node_zoneinfo_numa_miss_total{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_numa_other_total Allocation from other node +# TYPE node_zoneinfo_numa_other_total counter +node_zoneinfo_numa_other_total{node="0",zone="DMA"} 0 +node_zoneinfo_numa_other_total{node="0",zone="DMA32"} 0 +node_zoneinfo_numa_other_total{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_present_pages Physical pages existing within the zone +# TYPE node_zoneinfo_present_pages gauge +node_zoneinfo_present_pages{node="0",zone="DMA"} 3997 +node_zoneinfo_present_pages{node="0",zone="DMA32"} 546847 +node_zoneinfo_present_pages{node="0",zone="Device"} 0 +node_zoneinfo_present_pages{node="0",zone="Movable"} 0 +node_zoneinfo_present_pages{node="0",zone="Normal"} 7.806976e+06 +# HELP node_zoneinfo_protection_0 Protection array 0. field +# TYPE node_zoneinfo_protection_0 gauge +node_zoneinfo_protection_0{node="0",zone="DMA"} 0 +node_zoneinfo_protection_0{node="0",zone="DMA32"} 0 +node_zoneinfo_protection_0{node="0",zone="Device"} 0 +node_zoneinfo_protection_0{node="0",zone="Movable"} 0 +node_zoneinfo_protection_0{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_protection_1 Protection array 1. field +# TYPE node_zoneinfo_protection_1 gauge +node_zoneinfo_protection_1{node="0",zone="DMA"} 2039 +node_zoneinfo_protection_1{node="0",zone="DMA32"} 0 +node_zoneinfo_protection_1{node="0",zone="Device"} 0 +node_zoneinfo_protection_1{node="0",zone="Movable"} 0 +node_zoneinfo_protection_1{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_protection_2 Protection array 2. 
field +# TYPE node_zoneinfo_protection_2 gauge +node_zoneinfo_protection_2{node="0",zone="DMA"} 31932 +node_zoneinfo_protection_2{node="0",zone="DMA32"} 29893 +node_zoneinfo_protection_2{node="0",zone="Device"} 0 +node_zoneinfo_protection_2{node="0",zone="Movable"} 0 +node_zoneinfo_protection_2{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_protection_3 Protection array 3. field +# TYPE node_zoneinfo_protection_3 gauge +node_zoneinfo_protection_3{node="0",zone="DMA"} 31932 +node_zoneinfo_protection_3{node="0",zone="DMA32"} 29893 +node_zoneinfo_protection_3{node="0",zone="Device"} 0 +node_zoneinfo_protection_3{node="0",zone="Movable"} 0 +node_zoneinfo_protection_3{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_protection_4 Protection array 4. field +# TYPE node_zoneinfo_protection_4 gauge +node_zoneinfo_protection_4{node="0",zone="DMA"} 31932 +node_zoneinfo_protection_4{node="0",zone="DMA32"} 29893 +node_zoneinfo_protection_4{node="0",zone="Device"} 0 +node_zoneinfo_protection_4{node="0",zone="Movable"} 0 +node_zoneinfo_protection_4{node="0",zone="Normal"} 0 +# HELP node_zoneinfo_spanned_pages Total pages spanned by the zone, including holes +# TYPE node_zoneinfo_spanned_pages gauge +node_zoneinfo_spanned_pages{node="0",zone="DMA"} 4095 +node_zoneinfo_spanned_pages{node="0",zone="DMA32"} 1.04448e+06 +node_zoneinfo_spanned_pages{node="0",zone="Device"} 0 +node_zoneinfo_spanned_pages{node="0",zone="Movable"} 0 +node_zoneinfo_spanned_pages{node="0",zone="Normal"} 7.806976e+06 # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds_total counter # HELP process_max_fds Maximum number of open file descriptors. @@ -3182,6 +4392,20 @@ node_zfs_zpool_wupdate{zpool="poolz1"} 1.10734831833266e+14 # TYPE process_start_time_seconds gauge # HELP process_virtual_memory_bytes Virtual memory size in bytes. 
# TYPE process_virtual_memory_bytes gauge +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. +# TYPE process_virtual_memory_max_bytes gauge +# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler. +# TYPE promhttp_metric_handler_errors_total counter +promhttp_metric_handler_errors_total{cause="encoding"} 0 +promhttp_metric_handler_errors_total{cause="gathering"} 0 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. +# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight 1 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. +# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{code="200"} 0 +promhttp_metric_handler_requests_total{code="500"} 0 +promhttp_metric_handler_requests_total{code="503"} 0 # HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom # TYPE testmetric1_1 untyped testmetric1_1{foo="bar"} 10 @@ -3190,7 +4414,7 @@ testmetric1_1{foo="bar"} 10 testmetric1_2{foo="baz"} 20 # HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom # TYPE testmetric2_1 untyped -testmetric2_1{foo="bar"} 30 1441205977284 +testmetric2_1{foo="bar"} 30 # HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom # TYPE testmetric2_2 untyped -testmetric2_2{foo="baz"} 40 1441205977284 +testmetric2_2{foo="baz"} 40 diff --git a/collector/fixtures/ethtool/bond0/statistics b/collector/fixtures/ethtool/bond0/statistics new file mode 100644 index 0000000000..42e4a141d6 --- /dev/null +++ b/collector/fixtures/ethtool/bond0/statistics @@ -0,0 +1 @@ +ERROR: 1 \ No newline at end of file diff --git a/collector/fixtures/ethtool/eth0/driver b/collector/fixtures/ethtool/eth0/driver new file mode 100644 index 
0000000000..7ec84a81ed --- /dev/null +++ b/collector/fixtures/ethtool/eth0/driver @@ -0,0 +1,11 @@ +# ethtool -i eth0 +driver: e1000e +version: 5.11.0-22-generic +firmware-version: 0.5-4 +expansion-rom-version: +bus-info: 0000:00:1f.6 +supports-statistics: yes +supports-test: yes +supports-eeprom-access: yes +supports-register-dump: yes +supports-priv-flags: yes diff --git a/collector/fixtures/ethtool/eth0/settings b/collector/fixtures/ethtool/eth0/settings new file mode 100644 index 0000000000..47989a816e --- /dev/null +++ b/collector/fixtures/ethtool/eth0/settings @@ -0,0 +1,27 @@ +# ethtool eth0 +Settings for eth0: + Supported ports: [ TP MII ] + Supported link modes: 10baseT/Half 10baseT/Full + 100baseT/Half 100baseT/Full + 1000baseT/Full 10000baseT/Full + Supported pause frame use: Symmetric + Supports auto-negotiation: Yes + Supported FEC modes: Not reported + Advertised link modes: 10baseT/Half 10baseT/Full + 100baseT/Half 100baseT/Full + 1000baseT/Full + Advertised pause frame use: Symmetric + Advertised auto-negotiation: Yes + Advertised FEC modes: Not reported + Speed: 1000Mb/s + Duplex: Full + Auto-negotiation: on + Port: Twisted Pair + PHYAD: 1 + Transceiver: internal + MDI-X: off (auto) +netlink error: Operation not permitted + Current message level: 0x00000007 (7) + drv probe link + Link detected: yes + diff --git a/collector/fixtures/ethtool/eth0/statistics b/collector/fixtures/ethtool/eth0/statistics new file mode 100644 index 0000000000..80423bd6c3 --- /dev/null +++ b/collector/fixtures/ethtool/eth0/statistics @@ -0,0 +1,17 @@ +# ethtool -S eth0 +NIC statistics: + tx_packets: 961500 + rx_packets: 1260062 + tx_errors: 0 + rx_errors: 0 + rx_missed: 401 + align_errors: 0 + tx_single_collisions: 0 + tx_multi_collisions: 0 + rx_unicast: 1230297 + rx_broadcast: 5792 + rx_multicast: 23973 + tx_aborted: 0 + tx_underrun: 0 + duplicate metric: 1 + duplicate_metric: 2 diff --git a/collector/fixtures/ip_vs_result.txt b/collector/fixtures/ip_vs_result.txt index 
2202dc8a5c..2d141bae64 100644 --- a/collector/fixtures/ip_vs_result.txt +++ b/collector/fixtures/ip_vs_result.txt @@ -1,33 +1,39 @@ # HELP node_ipvs_backend_connections_active The current active connections by local and remote address. # TYPE node_ipvs_backend_connections_active gauge -node_ipvs_backend_connections_active{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 248 -node_ipvs_backend_connections_active{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 248 -node_ipvs_backend_connections_active{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 248 -node_ipvs_backend_connections_active{local_address="192.168.0.55",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 -node_ipvs_backend_connections_active{local_address="192.168.0.55",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 -node_ipvs_backend_connections_active{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 1498 -node_ipvs_backend_connections_active{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 1499 -node_ipvs_backend_connections_active{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 +node_ipvs_backend_connections_active{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 321 +node_ipvs_backend_connections_active{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 64 +node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 248 
+node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 248 +node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 248 +node_ipvs_backend_connections_active{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 +node_ipvs_backend_connections_active{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 +node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 1498 +node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 1499 +node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_backend_connections_inactive The current inactive connections by local and remote address. 
# TYPE node_ipvs_backend_connections_inactive gauge -node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 2 -node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 1 -node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 2 -node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 -node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 -node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 0 -node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 0 -node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 5 +node_ipvs_backend_connections_inactive{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 1 +node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 2 +node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 1 
+node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 2 +node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_backend_weight The current backend weight by local and remote address. 
# TYPE node_ipvs_backend_weight gauge -node_ipvs_backend_weight{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 100 -node_ipvs_backend_weight{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 100 -node_ipvs_backend_weight{local_address="192.168.0.22",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 100 -node_ipvs_backend_weight{local_address="192.168.0.55",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 100 -node_ipvs_backend_weight{local_address="192.168.0.55",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 -node_ipvs_backend_weight{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 100 -node_ipvs_backend_weight{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 100 -node_ipvs_backend_weight{local_address="192.168.0.57",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 +node_ipvs_backend_weight{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 20 +node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 100 
+node_ipvs_backend_weight{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 +node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_connections_total The total number of connections made. # TYPE node_ipvs_connections_total counter node_ipvs_connections_total 2.3765872e+07 diff --git a/collector/fixtures/ip_vs_result_lbs_local_address_local_port.txt b/collector/fixtures/ip_vs_result_lbs_local_address_local_port.txt new file mode 100644 index 0000000000..61c77c7e39 --- /dev/null +++ b/collector/fixtures/ip_vs_result_lbs_local_address_local_port.txt @@ -0,0 +1,33 @@ +# HELP node_ipvs_backend_connections_active The current active connections by local and remote address. +# TYPE node_ipvs_backend_connections_active gauge +node_ipvs_backend_connections_active{local_address="",local_port="0"} 385 +node_ipvs_backend_connections_active{local_address="192.168.0.22",local_port="3306"} 744 +node_ipvs_backend_connections_active{local_address="192.168.0.55",local_port="3306"} 0 +node_ipvs_backend_connections_active{local_address="192.168.0.57",local_port="3306"} 2997 +# HELP node_ipvs_backend_connections_inactive The current inactive connections by local and remote address. 
+# TYPE node_ipvs_backend_connections_inactive gauge +node_ipvs_backend_connections_inactive{local_address="",local_port="0"} 6 +node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_port="3306"} 5 +node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_port="3306"} 0 +node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_port="3306"} 0 +# HELP node_ipvs_backend_weight The current backend weight by local and remote address. +# TYPE node_ipvs_backend_weight gauge +node_ipvs_backend_weight{local_address="",local_port="0"} 120 +node_ipvs_backend_weight{local_address="192.168.0.22",local_port="3306"} 300 +node_ipvs_backend_weight{local_address="192.168.0.55",local_port="3306"} 100 +node_ipvs_backend_weight{local_address="192.168.0.57",local_port="3306"} 200 +# HELP node_ipvs_connections_total The total number of connections made. +# TYPE node_ipvs_connections_total counter +node_ipvs_connections_total 2.3765872e+07 +# HELP node_ipvs_incoming_bytes_total The total amount of incoming data. +# TYPE node_ipvs_incoming_bytes_total counter +node_ipvs_incoming_bytes_total 8.9991519156915e+13 +# HELP node_ipvs_incoming_packets_total The total number of incoming packets. +# TYPE node_ipvs_incoming_packets_total counter +node_ipvs_incoming_packets_total 3.811989221e+09 +# HELP node_ipvs_outgoing_bytes_total The total amount of outgoing data. +# TYPE node_ipvs_outgoing_bytes_total counter +node_ipvs_outgoing_bytes_total 0 +# HELP node_ipvs_outgoing_packets_total The total number of outgoing packets. 
+# TYPE node_ipvs_outgoing_packets_total counter +node_ipvs_outgoing_packets_total 0 diff --git a/collector/fixtures/ip_vs_result_lbs_local_port.txt b/collector/fixtures/ip_vs_result_lbs_local_port.txt new file mode 100644 index 0000000000..ef931ffbec --- /dev/null +++ b/collector/fixtures/ip_vs_result_lbs_local_port.txt @@ -0,0 +1,27 @@ +# HELP node_ipvs_backend_connections_active The current active connections by local and remote address. +# TYPE node_ipvs_backend_connections_active gauge +node_ipvs_backend_connections_active{local_port="0"} 385 +node_ipvs_backend_connections_active{local_port="3306"} 3741 +# HELP node_ipvs_backend_connections_inactive The current inactive connections by local and remote address. +# TYPE node_ipvs_backend_connections_inactive gauge +node_ipvs_backend_connections_inactive{local_port="0"} 6 +node_ipvs_backend_connections_inactive{local_port="3306"} 5 +# HELP node_ipvs_backend_weight The current backend weight by local and remote address. +# TYPE node_ipvs_backend_weight gauge +node_ipvs_backend_weight{local_port="0"} 120 +node_ipvs_backend_weight{local_port="3306"} 600 +# HELP node_ipvs_connections_total The total number of connections made. +# TYPE node_ipvs_connections_total counter +node_ipvs_connections_total 2.3765872e+07 +# HELP node_ipvs_incoming_bytes_total The total amount of incoming data. +# TYPE node_ipvs_incoming_bytes_total counter +node_ipvs_incoming_bytes_total 8.9991519156915e+13 +# HELP node_ipvs_incoming_packets_total The total number of incoming packets. +# TYPE node_ipvs_incoming_packets_total counter +node_ipvs_incoming_packets_total 3.811989221e+09 +# HELP node_ipvs_outgoing_bytes_total The total amount of outgoing data. +# TYPE node_ipvs_outgoing_bytes_total counter +node_ipvs_outgoing_bytes_total 0 +# HELP node_ipvs_outgoing_packets_total The total number of outgoing packets. 
+# TYPE node_ipvs_outgoing_packets_total counter +node_ipvs_outgoing_packets_total 0 diff --git a/collector/fixtures/ip_vs_result_lbs_none.txt b/collector/fixtures/ip_vs_result_lbs_none.txt new file mode 100644 index 0000000000..621aa54776 --- /dev/null +++ b/collector/fixtures/ip_vs_result_lbs_none.txt @@ -0,0 +1,24 @@ +# HELP node_ipvs_backend_connections_active The current active connections by local and remote address. +# TYPE node_ipvs_backend_connections_active gauge +node_ipvs_backend_connections_active 4126 +# HELP node_ipvs_backend_connections_inactive The current inactive connections by local and remote address. +# TYPE node_ipvs_backend_connections_inactive gauge +node_ipvs_backend_connections_inactive 11 +# HELP node_ipvs_backend_weight The current backend weight by local and remote address. +# TYPE node_ipvs_backend_weight gauge +node_ipvs_backend_weight 720 +# HELP node_ipvs_connections_total The total number of connections made. +# TYPE node_ipvs_connections_total counter +node_ipvs_connections_total 2.3765872e+07 +# HELP node_ipvs_incoming_bytes_total The total amount of incoming data. +# TYPE node_ipvs_incoming_bytes_total counter +node_ipvs_incoming_bytes_total 8.9991519156915e+13 +# HELP node_ipvs_incoming_packets_total The total number of incoming packets. +# TYPE node_ipvs_incoming_packets_total counter +node_ipvs_incoming_packets_total 3.811989221e+09 +# HELP node_ipvs_outgoing_bytes_total The total amount of outgoing data. +# TYPE node_ipvs_outgoing_bytes_total counter +node_ipvs_outgoing_bytes_total 0 +# HELP node_ipvs_outgoing_packets_total The total number of outgoing packets. 
+# TYPE node_ipvs_outgoing_packets_total counter +node_ipvs_outgoing_packets_total 0 diff --git a/collector/fixtures/megacli b/collector/fixtures/megacli deleted file mode 100755 index e912e36c3e..0000000000 --- a/collector/fixtures/megacli +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash - -cat "$(dirname "$0")/megacli_disks.txt" diff --git a/collector/fixtures/megacli_adapter.txt b/collector/fixtures/megacli_adapter.txt deleted file mode 100644 index 97c55766a6..0000000000 --- a/collector/fixtures/megacli_adapter.txt +++ /dev/null @@ -1,280 +0,0 @@ -Adapter #0 - -============================================================================== - Versions - ================ -Product Name : PERC 6/i Integrated -Serial No : 1234567890123456 -FW Package Build: 6.3.3.0002 - - Mfg. Data - ================ -Mfg. Date : 06/24/08 -Rework Date : 06/24/08 -Revision No : -Battery FRU : N/A - - Image Versions in Flash: - ================ -FW Version : 1.22.52-1909 -BIOS Version : 2.04.00 -WebBIOS Version : 1.1-46-e_15-Rel -Ctrl-R Version : 1.02-015B -Preboot CLI Version: 01.00-022:#%00005 -Boot Block Version : 1.00.00.01-0011 - - Pending Images in Flash - ================ -None - - PCI Info - ================ -Controller Id : 0000 -Vendor Id : 1000 -Device Id : 0060 -SubVendorId : 1028 -SubDeviceId : 1f0c - -Host Interface : PCIE - -Link Speed : 0 -Number of Frontend Port: 0 -Device Interface : PCIE - -Number of Backend Port: 8 -Port : Address -0 5000c50028f2083d -1 5000c50023cb3f39 -2 5000c50023cea805 -3 5000c50029124491 -4 0000000000000000 -5 0000000000000000 -6 0000000000000000 -7 0000000000000000 - - HW Configuration - ================ -SAS Address : 5a4badb01e219100 -BBU : Present -Alarm : Absent -NVRAM : Present -Serial Debugger : Present -Memory : Present -Flash : Present -Memory Size : 256MB -TPM : Absent -On board Expander: Absent -Upgrade Key : Absent -Temperature sensor for ROC : Absent -Temperature sensor for controller : Absent - - - Settings - ================ 
-Current Time : 14:55:23 7/4, 2014 -Predictive Fail Poll Interval : 300sec -Interrupt Throttle Active Count : 16 -Interrupt Throttle Completion : 50us -Rebuild Rate : 30% -PR Rate : 30% -BGI Rate : 30% -Check Consistency Rate : 30% -Reconstruction Rate : 30% -Cache Flush Interval : 4s -Max Drives to Spinup at One Time : 2 -Delay Among Spinup Groups : 12s -Physical Drive Coercion Mode : 128MB -Cluster Mode : Disabled -Alarm : Disabled -Auto Rebuild : Enabled -Battery Warning : Enabled -Ecc Bucket Size : 15 -Ecc Bucket Leak Rate : 1440 Minutes -Restore HotSpare on Insertion : Disabled -Expose Enclosure Devices : Disabled -Maintain PD Fail History : Disabled -Host Request Reordering : Enabled -Auto Detect BackPlane Enabled : SGPIO/i2c SEP -Load Balance Mode : Auto -Use FDE Only : No -Security Key Assigned : No -Security Key Failed : No -Security Key Not Backedup : No -Default LD PowerSave Policy : Controller Defined -Maximum number of direct attached drives to spin up in 1 min : 0 -Auto Enhanced Import : No -Any Offline VD Cache Preserved : No -Allow Boot with Preserved Cache : No -Disable Online Controller Reset : No -PFK in NVRAM : No -Use disk activity for locate : No -POST delay : 90 seconds -BIOS Error Handling : Stop On Errors -Current Boot Mode :Normal - Capabilities - ================ -RAID Level Supported : RAID0, RAID1, RAID5, RAID6, RAID00, RAID10, RAID50, RAID60, PRL 11, PRL 11 with spanning, SRL 3 supported, PRL11-RLQ0 DDF layout with no span, PRL11-RLQ0 DDF layout with span -Supported Drives : SAS, SATA - -Allowed Mixing: - -Mix in Enclosure Allowed - - Status - ================ -ECC Bucket Count : 0 - - Limitations - ================ -Max Arms Per VD : 32 -Max Spans Per VD : 8 -Max Arrays : 128 -Max Number of VDs : 64 -Max Parallel Commands : 1008 -Max SGE Count : 80 -Max Data Transfer Size : 8192 sectors -Max Strips PerIO : 42 -Max LD per array : 16 -Min Strip Size : 8 KB -Max Strip Size : 1.0 MB -Max Configurable CacheCade Size: 0 GB -Current Size of 
CacheCade : 0 GB -Current Size of FW Cache : 0 MB - - Device Present - ================ -Virtual Drives : 1 - Degraded : 0 - Offline : 0 -Physical Devices : 5 - Disks : 4 - Critical Disks : 0 - Failed Disks : 0 - - Supported Adapter Operations - ================ -Rebuild Rate : Yes -CC Rate : Yes -BGI Rate : Yes -Reconstruct Rate : Yes -Patrol Read Rate : Yes -Alarm Control : Yes -Cluster Support : No -BBU : Yes -Spanning : Yes -Dedicated Hot Spare : Yes -Revertible Hot Spares : Yes -Foreign Config Import : Yes -Self Diagnostic : Yes -Allow Mixed Redundancy on Array : No -Global Hot Spares : Yes -Deny SCSI Passthrough : No -Deny SMP Passthrough : No -Deny STP Passthrough : No -Support Security : No -Snapshot Enabled : No -Support the OCE without adding drives : No -Support PFK : No -Support PI : No -Support Boot Time PFK Change : No -Disable Online PFK Change : No -Support Shield State : No -Block SSD Write Disk Cache Change: No - - Supported VD Operations - ================ -Read Policy : Yes -Write Policy : Yes -IO Policy : Yes -Access Policy : Yes -Disk Cache Policy : Yes -Reconstruction : Yes -Deny Locate : No -Deny CC : No -Allow Ctrl Encryption: No -Enable LDBBM : No -Support Breakmirror : No -Power Savings : No - - Supported PD Operations - ================ -Force Online : Yes -Force Offline : Yes -Force Rebuild : Yes -Deny Force Failed : No -Deny Force Good/Bad : No -Deny Missing Replace : No -Deny Clear : No -Deny Locate : No -Support Temperature : No -NCQ : No -Disable Copyback : No -Enable JBOD : No -Enable Copyback on SMART : No -Enable Copyback to SSD on SMART Error : No -Enable SSD Patrol Read : No -PR Correct Unconfigured Areas : Yes - Error Counters - ================ -Memory Correctable Errors : 0 -Memory Uncorrectable Errors : 0 - - Cluster Information - ================ -Cluster Permitted : No -Cluster Active : No - - Default Settings - ================ -Phy Polarity : 0 -Phy PolaritySplit : 0 -Background Rate : 30 -Strip Size : 64kB -Flush Time 
: 4 seconds -Write Policy : WB -Read Policy : None -Cache When BBU Bad : Disabled -Cached IO : No -SMART Mode : Mode 6 -Alarm Disable : No -Coercion Mode : 128MB -ZCR Config : Unknown -Dirty LED Shows Drive Activity : No -BIOS Continue on Error : 0 -Spin Down Mode : None -Allowed Device Type : SAS/SATA Mix -Allow Mix in Enclosure : Yes -Allow HDD SAS/SATA Mix in VD : No -Allow SSD SAS/SATA Mix in VD : No -Allow HDD/SSD Mix in VD : No -Allow SATA in Cluster : No -Max Chained Enclosures : 1 -Disable Ctrl-R : No -Enable Web BIOS : No -Direct PD Mapping : Yes -BIOS Enumerate VDs : Yes -Restore Hot Spare on Insertion : No -Expose Enclosure Devices : No -Maintain PD Fail History : No -Disable Puncturing : No -Zero Based Enclosure Enumeration : Yes -PreBoot CLI Enabled : No -LED Show Drive Activity : No -Cluster Disable : Yes -SAS Disable : No -Auto Detect BackPlane Enable : SGPIO/i2c SEP -Use FDE Only : No -Enable Led Header : No -Delay during POST : 0 -EnableCrashDump : No -Disable Online Controller Reset : No -EnableLDBBM : No -Un-Certified Hard Disk Drives : Block -Treat Single span R1E as R10 : No -Max LD per array : 16 -Power Saving option : All power saving options are enabled -Default spin down time in minutes: 0 -Enable JBOD : No -Time taken to detect CME : 60s - -Exit Code: 0x00 diff --git a/collector/fixtures/megacli_disks.txt b/collector/fixtures/megacli_disks.txt deleted file mode 100644 index e65652743c..0000000000 --- a/collector/fixtures/megacli_disks.txt +++ /dev/null @@ -1,197 +0,0 @@ - -Adapter #0 - -Enclosure Device ID: 32 -Slot Number: 0 -Drive's position: DiskGroup: 0, Span: 0, Arm: 0 -Enclosure position: N/A -Device Id: 0 -WWN: -Sequence Number: 2 -Media Error Count: 0 -Other Error Count: 0 -Predictive Failure Count: 0 -Last Predictive Failure Event Seq Number: 0 -PD Type: SAS - -Raw Size: 419.186 GB [0x3465f870 Sectors] -Non Coerced Size: 418.686 GB [0x3455f870 Sectors] -Coerced Size: 418.625 GB [0x34540000 Sectors] -Sector Size: 0 -Firmware state: 
Online, Spun Up -Device Firmware Level: ES64 -Shield Counter: 0 -Successful diagnostics completion on : N/A -SAS Address(0): 0x5000c50028f2083d -SAS Address(1): 0x0 -Connected Port Number: 0(path0) -Inquiry Data: SEAGATE ST3450857SS ES643SK26856 -FDE Capable: Not Capable -FDE Enable: Disable -Secured: Unsecured -Locked: Unlocked -Needs EKM Attention: No -Foreign State: None -Device Speed: Unknown -Link Speed: Unknown -Media Type: Hard Disk Device -Drive Temperature :37C (98.60 F) -PI Eligibility: No -Drive is formatted for PI information: No -PI: No PI -Port-0 : -Port status: Active -Port's Linkspeed: Unknown -Port-1 : -Port status: Active -Port's Linkspeed: Unknown -Drive has flagged a S.M.A.R.T alert : No - - - -Enclosure Device ID: 32 -Slot Number: 1 -Drive's position: DiskGroup: 0, Span: 0, Arm: 1 -Enclosure position: N/A -Device Id: 1 -WWN: -Sequence Number: 2 -Media Error Count: 0 -Other Error Count: 0 -Predictive Failure Count: 0 -Last Predictive Failure Event Seq Number: 0 -PD Type: SAS - -Raw Size: 419.186 GB [0x3465f870 Sectors] -Non Coerced Size: 418.686 GB [0x3455f870 Sectors] -Coerced Size: 418.625 GB [0x34540000 Sectors] -Sector Size: 0 -Firmware state: Online, Spun Up -Device Firmware Level: ES62 -Shield Counter: 0 -Successful diagnostics completion on : N/A -SAS Address(0): 0x5000c50023cb3f39 -SAS Address(1): 0x0 -Connected Port Number: 1(path0) -Inquiry Data: SEAGATE ST3450857SS ES623SK16HLC -FDE Capable: Not Capable -FDE Enable: Disable -Secured: Unsecured -Locked: Unlocked -Needs EKM Attention: No -Foreign State: None -Device Speed: Unknown -Link Speed: Unknown -Media Type: Hard Disk Device -Drive Temperature : N/A -PI Eligibility: No -Drive is formatted for PI information: No -PI: No PI -Port-0 : -Port status: Active -Port's Linkspeed: Unknown -Port-1 : -Port status: Active -Port's Linkspeed: Unknown -Drive has flagged a S.M.A.R.T alert : No - - - -Enclosure Device ID: 32 -Slot Number: 2 -Drive's position: DiskGroup: 0, Span: 1, Arm: 0 
-Enclosure position: N/A -Device Id: 2 -WWN: -Sequence Number: 2 -Media Error Count: 0 -Other Error Count: 0 -Predictive Failure Count: 0 -Last Predictive Failure Event Seq Number: 0 -PD Type: SAS - -Raw Size: 419.186 GB [0x3465f870 Sectors] -Non Coerced Size: 418.686 GB [0x3455f870 Sectors] -Coerced Size: 418.625 GB [0x34540000 Sectors] -Sector Size: 0 -Firmware state: Online, Spun Up -Device Firmware Level: ES62 -Shield Counter: 0 -Successful diagnostics completion on : N/A -SAS Address(0): 0x5000c50023cea805 -SAS Address(1): 0x0 -Connected Port Number: 2(path0) -Inquiry Data: SEAGATE ST3450857SS ES623SK189BR -FDE Capable: Not Capable -FDE Enable: Disable -Secured: Unsecured -Locked: Unlocked -Needs EKM Attention: No -Foreign State: None -Device Speed: Unknown -Link Speed: Unknown -Media Type: Hard Disk Device -Drive Temperature :39C (102.20 F) -PI Eligibility: No -Drive is formatted for PI information: No -PI: No PI -Port-0 : -Port status: Active -Port's Linkspeed: Unknown -Port-1 : -Port status: Active -Port's Linkspeed: Unknown -Drive has flagged a S.M.A.R.T alert : No - - - -Enclosure Device ID: 32 -Slot Number: 3 -Drive's position: DiskGroup: 0, Span: 1, Arm: 1 -Enclosure position: N/A -Device Id: 3 -WWN: -Sequence Number: 2 -Media Error Count: 0 -Other Error Count: 0 -Predictive Failure Count: 23 -Last Predictive Failure Event Seq Number: 0 -PD Type: SAS - -Raw Size: 419.186 GB [0x3465f870 Sectors] -Non Coerced Size: 418.686 GB [0x3455f870 Sectors] -Coerced Size: 418.625 GB [0x34540000 Sectors] -Sector Size: 0 -Firmware state: Online, Spun Up -Device Firmware Level: ES64 -Shield Counter: 0 -Successful diagnostics completion on : N/A -SAS Address(0): 0x5000c50029124491 -SAS Address(1): 0x0 -Connected Port Number: 3(path0) -Inquiry Data: SEAGATE ST3450857SS ES643SK27GQ9 -FDE Capable: Not Capable -FDE Enable: Disable -Secured: Unsecured -Locked: Unlocked -Needs EKM Attention: No -Foreign State: None -Device Speed: Unknown -Link Speed: Unknown -Media Type: Hard 
Disk Device -Drive Temperature :38C (100.40 F) -PI Eligibility: No -Drive is formatted for PI information: No -PI: No PI -Port-0 : -Port status: Active -Port's Linkspeed: Unknown -Port-1 : -Port status: Active -Port's Linkspeed: Unknown -Drive has flagged a S.M.A.R.T alert : No - - - - -Exit Code: 0x00 diff --git a/collector/fixtures/proc/mounts b/collector/fixtures/proc/1/mounts similarity index 82% rename from collector/fixtures/proc/mounts rename to collector/fixtures/proc/1/mounts index 97d5f6fc1f..7452d495a6 100644 --- a/collector/fixtures/proc/mounts +++ b/collector/fixtures/proc/1/mounts @@ -28,3 +28,5 @@ rpc_pipefs /run/rpc_pipefs rpc_pipefs rw,relatime 0 0 binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,relatime 0 0 tmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=808860k,mode=700,uid=1000,gid=1000 0 0 gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 +/dev/sda /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\040bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk ext4 rw,relatime,data=ordered 0 0 +/dev/sda /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\011bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk ext4 rw,relatime,data=ordered 0 0 diff --git a/collector/fixtures/proc/1/stat b/collector/fixtures/proc/1/stat new file mode 100644 index 0000000000..b6bc64d683 --- /dev/null +++ b/collector/fixtures/proc/1/stat @@ -0,0 +1 @@ +1 (systemd) S 0 1 1 0 -1 4194560 9061 9416027 94 2620 36 98 54406 13885 20 0 1 0 29 109604864 2507 18446744073709551615 1 1 0 0 0 0 671173123 4096 1260 0 0 0 17 0 0 0 19 0 0 0 0 0 0 0 0 0 0 diff --git a/collector/fixtures/proc/10/mountinfo b/collector/fixtures/proc/10/mountinfo new file mode 100644 index 0000000000..5ab70c2b83 --- /dev/null +++ b/collector/fixtures/proc/10/mountinfo @@ -0,0 +1,7 @@ +1 1 0:5 / 
/root rw,nosuid shared:8 - rootfs rootfs rw +16 21 0:16 / /sys rw,nosuid,nodev,noexec,relatime shared:7 - sysfs sysfs rw +17 21 0:4 / /proc rw,nosuid,nodev,noexec,relatime shared:12 - proc proc rw +21 0 8:1 / / rw,relatime shared:1 - ext4 /dev/sda1 rw,errors=remount-ro,data=ordered +194 21 0:42 / /mnt/nfs/test rw shared:144 - nfs4 192.168.1.1:/srv/test rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,addr=192.168.1.1,local_lock=none +177 21 0:42 / /mnt/nfs/test rw shared:130 - nfs4 192.168.1.1:/srv/test rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,addr=192.168.1.1,local_lock=none +1398 798 0:44 / /mnt/nfs/test rw,relatime shared:1154 - nfs 192.168.1.1:/srv/test rw,vers=3,rsize=32768,wsize=32768,namlen=255,hard,proto=udp,timeo=11,retrans=3,sec=sys,mountaddr=192.168.1.1,mountvers=3,mountport=49602,mountproto=udp,local_lock=none,addr=192.168.1.1 diff --git a/collector/fixtures/proc/10/mountstats b/collector/fixtures/proc/10/mountstats index fdda67655c..f90f237c1c 100644 --- a/collector/fixtures/proc/10/mountstats +++ b/collector/fixtures/proc/10/mountstats @@ -31,4 +31,20 @@ device 192.168.1.1:/srv/test mounted on /mnt/nfs/test-dupe with fstype nfs4 stat NULL: 0 0 0 0 0 0 0 0 READ: 1298 1298 0 207680 1210292152 6 79386 79407 WRITE: 0 0 0 0 0 0 0 0 + ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 +device 192.168.1.1:/srv/test mounted on /mnt/nfs/test-dupe with fstype nfs statvers=1.1 + opts: rw,vers=3,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=udp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none,mountaddr=192.168.1.1,mountproto=udp,mountport=47853 + age: 13968 + caps: 
caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured + sec: flavor=1,pseudoflavor=1 + events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 + bytes: 1207640230 0 0 0 1210214218 0 295483 0 + RPC iostats version: 1.0 p/v: 100003/4 (nfs) + xprt: udp 832 0 6428 6428 0 12154 0 24 26 5726 + per-op statistics + NULL: 0 0 0 0 0 0 0 0 + READ: 1298 1298 0 207680 1210292152 6 79386 79407 + WRITE: 0 0 0 0 0 0 0 0 + ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 diff --git a/collector/fixtures/proc/10/stat b/collector/fixtures/proc/10/stat new file mode 100644 index 0000000000..1451c8a44e --- /dev/null +++ b/collector/fixtures/proc/10/stat @@ -0,0 +1 @@ +17 (khungtaskd) S 2 0 0 0 -1 2129984 0 0 0 0 14 0 0 0 20 0 1 0 24 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 0 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 \ No newline at end of file diff --git a/collector/fixtures/proc/11/.missing_stat b/collector/fixtures/proc/11/.missing_stat new file mode 100644 index 0000000000..e69de29bb2 diff --git a/collector/fixtures/proc/11/stat b/collector/fixtures/proc/11/stat new file mode 100644 index 0000000000..1d91e2c19a --- /dev/null +++ b/collector/fixtures/proc/11/stat @@ -0,0 +1 @@ +11 (rcu_preempt) I 2 0 0 0 -1 2129984 0 0 0 0 0 346 0 0 -2 0 1 0 32 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 0 0 0 17 2 1 1 0 0 0 0 0 0 0 0 0 0 0 diff --git a/collector/fixtures/proc/cpuinfo b/collector/fixtures/proc/cpuinfo new file mode 100644 index 0000000000..f297405ae9 --- /dev/null +++ b/collector/fixtures/proc/cpuinfo @@ -0,0 +1,216 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 799.998 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : 
yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.037 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx 
smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.010 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 2 +cpu cores : 4 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.028 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 3 +cpu cores : 4 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes 
+cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 799.989 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap 
clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.083 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.017 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 2 +cpu cores : 4 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes 
+cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.030 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 3 +cpu cores : 4 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap 
clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + diff --git a/collector/fixtures/proc/diskstats b/collector/fixtures/proc/diskstats index 2457197ff6..3a75de515b 100644 --- a/collector/fixtures/proc/diskstats +++ b/collector/fixtures/proc/diskstats @@ -44,3 +44,8 @@ 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 + 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 + 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 + 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 + 8 0 sdc 126552 141 1657779 14 11822 1895 172905 1007 0 10730 17070 18851 0 125173784 11130 1555 1944 + 8 1 sdc1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 0 0 diff --git a/collector/fixtures/proc/mdstat b/collector/fixtures/proc/mdstat index 3f8c9c3e3d..a19bf5e3f0 100644 --- a/collector/fixtures/proc/mdstat +++ b/collector/fixtures/proc/mdstat @@ -1,5 +1,6 @@ Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] + +md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] md127 : active raid1 sdi2[0] sdj2[1] @@ -8,31 +9,35 @@ md127 : active raid1 sdi2[0] sdj2[1] md0 : active raid1 sdi1[0] sdj1[1] 248896 blocks [2/2] [UU] -md4 : inactive raid1 sda3[0] sdb3[1] +md4 : inactive raid1 sda3[0](F) sdb3[1](S) 4883648 blocks [2/2] 
[UU] -md6 : active raid1 sdb2[2] sda2[0] +md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0] 195310144 blocks [2/1] [U_] [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec -md8 : active raid1 sdb1[1] sda1[0] - 195310144 blocks [2/2] [UU] - [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec +md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) + 195310144 blocks [2/2] [UU] + [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md201 : active raid1 sda3[0] sdb3[1] + 1993728 blocks super 1.2 [2/2] [UU] + [=>...................] check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] +md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] bitmap: 0/30 pages [0KB], 65536KB chunk -md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] +md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S) 523968 blocks super 1.2 [4/4] [UUUU] - resync=DELAYED + resync=DELAYED md10 : active raid0 sda1[0] sdb1[1] - 314159265 blocks 64k chunks + 314159265 blocks 64k chunks -md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] +md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S) 4190208 blocks super 1.2 [2/2] [UU] - resync=PENDING + resync=PENDING md12 : active raid0 sdc2[0] sdd2[1] 3886394368 blocks super 1.2 512k chunks @@ -41,9 +46,15 @@ md126 : active raid0 sdb[1] sdc[0] 1855870976 blocks super external:/md127/0 128k chunks md219 : inactive sdb[2](S) sdc[1](S) sda[0](S) - 7932 blocks super external:imsm + 7932 blocks super external:imsm md00 : active raid0 xvdb[0] 4186624 blocks super 1.2 256k chunks +md120 : active linear sda1[1] sdb1[0] + 2095104 blocks super 1.2 0k rounding + +md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0] + 322560 blocks super 1.2 512k chunks + unused 
devices: diff --git a/collector/fixtures/proc/mdstat_invalid b/collector/fixtures/proc/mdstat_invalid deleted file mode 100644 index c60c77be9f..0000000000 --- a/collector/fixtures/proc/mdstat_invalid +++ /dev/null @@ -1,5 +0,0 @@ -Personalities : [invalid] -md3 : invalid - 314159265 blocks 64k chunks - -unused devices: diff --git a/collector/fixtures/proc/net/dev b/collector/fixtures/proc/net/dev index 96dffa05b9..a3534c1309 100644 --- a/collector/fixtures/proc/net/dev +++ b/collector/fixtures/proc/net/dev @@ -7,3 +7,6 @@ veth4B09XN: 648 8 0 0 0 0 0 0 1943284 lxcbr0: 0 0 0 0 0 0 0 0 2630299 28339 0 0 0 0 0 0 wlan0: 10437182923 13899359 0 0 0 0 0 0 2851649360 11726200 0 0 0 0 0 0 docker0: 64910168 1065585 0 0 0 0 0 0 2681662018 1929779 0 0 0 0 0 0 +ibr10:30: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +flannel.1: 18144009813 228499337 0 0 0 0 0 0 20758990068 258369223 0 64 0 0 0 0 + 💩0: 57750104 105557 0 0 0 0 0 72 404570255 304261 0 0 0 0 0 0 diff --git a/collector/fixtures/proc/net/ip_vs b/collector/fixtures/proc/net/ip_vs index 6a6a97d7d6..e7c56a7294 100644 --- a/collector/fixtures/proc/net/ip_vs +++ b/collector/fixtures/proc/net/ip_vs @@ -12,3 +12,6 @@ TCP C0A80039:0CEA wlc TCP C0A80037:0CEA wlc -> C0A8321A:0CEA Tunnel 0 0 0 -> C0A83120:0CEA Tunnel 100 0 0 +FWM 10001000 wlc + -> C0A8321A:0CEA Tunnel 20 64 1 + -> C0A83120:0CEA Tunnel 100 321 5 diff --git a/collector/fixtures/proc/net/rpc/nfsd b/collector/fixtures/proc/net/rpc/nfsd new file mode 100644 index 0000000000..754f19d9d5 --- /dev/null +++ b/collector/fixtures/proc/net/rpc/nfsd @@ -0,0 +1,11 @@ +rc 0 6 18622 +fh 0 0 0 0 0 +io 157286400 72864 +th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 +ra 32 0 0 0 0 0 0 0 0 0 0 0 +net 972 55 917 1 +rpc 18628 3 1 2 0 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 +proc4 2 2 10853 +proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 diff --git a/collector/fixtures/proc/net/snmp b/collector/fixtures/proc/net/snmp index 80a92b4730..271073a642 100644 --- a/collector/fixtures/proc/net/snmp +++ b/collector/fixtures/proc/net/snmp @@ -7,6 +7,6 @@ IcmpMsg: 104 120 Tcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens PassiveOpens AttemptFails EstabResets CurrEstab InSegs OutSegs RetransSegs InErrs OutRsts InCsumErrors Tcp: 1 200 120000 -1 3556 230 341 161 0 57252008 54915039 227 5 1003 0 Udp: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors -Udp: 88542 120 0 53028 0 0 0 +Udp: 88542 120 0 53028 9 8 0 UdpLite: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors UdpLite: 0 0 0 0 0 0 0 diff --git a/collector/fixtures/proc/net/snmp6 b/collector/fixtures/proc/net/snmp6 index 8442d98e74..3640bb1b4f 100644 --- a/collector/fixtures/proc/net/snmp6 +++ b/collector/fixtures/proc/net/snmp6 @@ -72,8 +72,8 @@ Udp6InDatagrams 0 Udp6NoPorts 0 Udp6InErrors 0 Udp6OutDatagrams 0 -Udp6RcvbufErrors 0 -Udp6SndbufErrors 0 +Udp6RcvbufErrors 9 +Udp6SndbufErrors 8 Udp6InCsumErrors 0 Udp6IgnoredMulti 0 UdpLite6InDatagrams 0 diff --git a/collector/fixtures/proc/net/sockstat6 b/collector/fixtures/proc/net/sockstat6 new file mode 100644 index 0000000000..de5806d8d1 --- /dev/null +++ b/collector/fixtures/proc/net/sockstat6 @@ -0,0 +1,5 @@ +TCP6: inuse 17 +UDP6: inuse 9 +UDPLITE6: inuse 0 +RAW6: inuse 1 +FRAG6: inuse 0 memory 0 diff --git a/collector/fixtures/proc/net/sockstat_rhe4 b/collector/fixtures/proc/net/sockstat_rhe4 deleted file mode 100644 index 1e178f366c..0000000000 --- a/collector/fixtures/proc/net/sockstat_rhe4 +++ /dev/null @@ -1,5 +0,0 @@ -sockets: used 229 -TCP: inuse 4 orphan 0 tw 4 alloc 17 mem 1 -UDP: inuse 0 -RAW: inuse 0 -FRAG: inuse 0 memory 0 diff --git a/collector/fixtures/proc/net/softnet_stat b/collector/fixtures/proc/net/softnet_stat new file mode 100644 index 0000000000..d5a5a20b7b 
--- /dev/null +++ b/collector/fixtures/proc/net/softnet_stat @@ -0,0 +1,4 @@ +00049279 00000000 00000001 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +000dfb82 00000029 0000000a 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +00551c3f 00000000 00000055 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +002f8339 00000000 00000032 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 \ No newline at end of file diff --git a/collector/fixtures/proc/net/stat/arp_cache b/collector/fixtures/proc/net/stat/arp_cache new file mode 100644 index 0000000000..3176ca6134 --- /dev/null +++ b/collector/fixtures/proc/net/stat/arp_cache @@ -0,0 +1,3 @@ +entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls +00000014 00000001 00000002 00000003 00000004 00000005 00000006 00000007 00000008 00000009 0000000a 0000000b 0000000c +00000014 0000000d 0000000e 0000000f 00000010 00000011 00000012 00000013 00000014 00000015 00000016 00000017 00000018 diff --git a/collector/fixtures/proc/net/stat/ndisc_cache b/collector/fixtures/proc/net/stat/ndisc_cache new file mode 100644 index 0000000000..fd4c2f8f49 --- /dev/null +++ b/collector/fixtures/proc/net/stat/ndisc_cache @@ -0,0 +1,3 @@ +entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls +00000024 000000f0 000000f1 000000f2 000000f3 000000f4 000000f5 000000f6 000000f7 000000f8 000000f9 000000fa 000000fb +00000024 000000fc 000000fd 000000fe 000000ff 00000100 00000101 00000102 00000103 00000104 00000105 00000106 00000107 diff --git a/collector/fixtures/proc/net/stat/nf_conntrack b/collector/fixtures/proc/net/stat/nf_conntrack new file mode 100644 index 0000000000..c833c40e46 --- /dev/null +++ b/collector/fixtures/proc/net/stat/nf_conntrack @@ -0,0 +1,5 @@ 
+entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart +00000021 00000000 00000000 00000000 00000003 0000588a 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +00000021 00000000 00000000 00000000 00000002 000056a4 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000002 +00000021 00000000 00000000 00000000 00000001 000058d4 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000001 +00000021 00000000 00000000 00000000 0000002f 00005688 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000004 diff --git a/collector/fixtures/proc/net/tcpstat b/collector/fixtures/proc/net/tcpstat index 8b3777a969..352c00bbf3 100644 --- a/collector/fixtures/proc/net/tcpstat +++ b/collector/fixtures/proc/net/tcpstat @@ -1,3 +1,3 @@ sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 00000000:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 0F02000A:0016 0202000A:8B6B 01 00000000:00000000 02:000AC99B 00000000 0 0 3652 4 ffff88003d3ae040 21 4 31 47 46 + 0: 00000000:0016 00000000:0000 0A 00000015:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 1: 0F02000A:0016 0202000A:8B6B 01 00000015:00000001 02:000AC99B 00000000 0 0 3652 4 ffff88003d3ae040 21 4 31 47 46 diff --git a/collector/fixtures/proc/net/udp b/collector/fixtures/proc/net/udp new file mode 100644 index 0000000000..3c5052400a --- /dev/null +++ b/collector/fixtures/proc/net/udp @@ -0,0 +1,2 @@ + sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode + 0: 00000000:0016 00000000:0000 0A 00000015:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 diff --git 
a/collector/fixtures/proc/pressure/cpu b/collector/fixtures/proc/pressure/cpu new file mode 100644 index 0000000000..14acc3a325 --- /dev/null +++ b/collector/fixtures/proc/pressure/cpu @@ -0,0 +1 @@ +some avg10=0.00 avg60=0.00 avg300=0.00 total=14036781 diff --git a/collector/fixtures/proc/pressure/io b/collector/fixtures/proc/pressure/io new file mode 100644 index 0000000000..4cdc413548 --- /dev/null +++ b/collector/fixtures/proc/pressure/io @@ -0,0 +1,2 @@ +some avg10=0.18 avg60=0.34 avg300=0.10 total=159886802 +full avg10=0.18 avg60=0.34 avg300=0.10 total=159229614 diff --git a/collector/fixtures/proc/pressure/memory b/collector/fixtures/proc/pressure/memory new file mode 100644 index 0000000000..30c03cc404 --- /dev/null +++ b/collector/fixtures/proc/pressure/memory @@ -0,0 +1,2 @@ +some avg10=0.00 avg60=0.00 avg300=0.00 total=0 +full avg10=0.00 avg60=0.00 avg300=0.00 total=0 diff --git a/collector/fixtures/proc/schedstat b/collector/fixtures/proc/schedstat new file mode 100644 index 0000000000..5a555c00fe --- /dev/null +++ b/collector/fixtures/proc/schedstat @@ -0,0 +1,6 @@ +version 15 +timestamp 15819019232 +cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306 +domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0 +cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945 +domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0 diff --git a/collector/fixtures/proc/spl/kstat/zfs/abdstats 
b/collector/fixtures/proc/spl/kstat/zfs/abdstats new file mode 100644 index 0000000000..56e169db4d --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/abdstats @@ -0,0 +1,23 @@ +7 1 0x01 21 5712 73163810083184 309946154984654 +name type data +struct_size 4 2520 +linear_cnt 4 62 +linear_data_size 4 223232 +scatter_cnt 4 1 +scatter_data_size 4 16384 +scatter_chunk_waste 4 0 +scatter_order_0 4 0 +scatter_order_1 4 0 +scatter_order_2 4 1 +scatter_order_3 4 0 +scatter_order_4 4 0 +scatter_order_5 4 0 +scatter_order_6 4 0 +scatter_order_7 4 0 +scatter_order_8 4 0 +scatter_order_9 4 0 +scatter_order_10 4 0 +scatter_page_multi_chunk 4 0 +scatter_page_multi_zone 4 0 +scatter_page_alloc_retry 4 0 +scatter_sg_table_retry 4 0 diff --git a/collector/fixtures/proc/spl/kstat/zfs/dbuf_stats b/collector/fixtures/proc/spl/kstat/zfs/dbuf_stats new file mode 100644 index 0000000000..d627eacd08 --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/dbuf_stats @@ -0,0 +1,65 @@ +15 1 0x01 63 17136 73163812943503 309964267073187 +name type data +dbuf_cache_count 4 27 +dbuf_cache_size 4 302080 +dbuf_cache_size_max 4 394240 +dbuf_cache_max_bytes 4 62834368 +dbuf_cache_lowater_bytes 4 56550932 +dbuf_cache_hiwater_bytes 4 69117804 +dbuf_cache_total_evicts 4 0 +dbuf_cache_level_0 4 27 +dbuf_cache_level_1 4 0 +dbuf_cache_level_2 4 0 +dbuf_cache_level_3 4 0 +dbuf_cache_level_4 4 0 +dbuf_cache_level_5 4 0 +dbuf_cache_level_6 4 0 +dbuf_cache_level_7 4 0 +dbuf_cache_level_8 4 0 +dbuf_cache_level_9 4 0 +dbuf_cache_level_10 4 0 +dbuf_cache_level_11 4 0 +dbuf_cache_level_0_bytes 4 302080 +dbuf_cache_level_1_bytes 4 0 +dbuf_cache_level_2_bytes 4 0 +dbuf_cache_level_3_bytes 4 0 +dbuf_cache_level_4_bytes 4 0 +dbuf_cache_level_5_bytes 4 0 +dbuf_cache_level_6_bytes 4 0 +dbuf_cache_level_7_bytes 4 0 +dbuf_cache_level_8_bytes 4 0 +dbuf_cache_level_9_bytes 4 0 +dbuf_cache_level_10_bytes 4 0 +dbuf_cache_level_11_bytes 4 0 +hash_hits 4 108807 +hash_misses 4 1851 +hash_collisions 4 0 +hash_elements 4 55 
+hash_elements_max 4 55 +hash_chains 4 0 +hash_chain_max 4 0 +hash_insert_race 4 0 +hash_dbuf_level_0 4 37 +hash_dbuf_level_1 4 10 +hash_dbuf_level_2 4 2 +hash_dbuf_level_3 4 2 +hash_dbuf_level_4 4 2 +hash_dbuf_level_5 4 2 +hash_dbuf_level_6 4 0 +hash_dbuf_level_7 4 0 +hash_dbuf_level_8 4 0 +hash_dbuf_level_9 4 0 +hash_dbuf_level_10 4 0 +hash_dbuf_level_11 4 0 +hash_dbuf_level_0_bytes 4 465920 +hash_dbuf_level_1_bytes 4 1310720 +hash_dbuf_level_2_bytes 4 262144 +hash_dbuf_level_3_bytes 4 262144 +hash_dbuf_level_4_bytes 4 262144 +hash_dbuf_level_5_bytes 4 262144 +hash_dbuf_level_6_bytes 4 0 +hash_dbuf_level_7_bytes 4 0 +hash_dbuf_level_8_bytes 4 0 +hash_dbuf_level_9_bytes 4 0 +hash_dbuf_level_10_bytes 4 0 +hash_dbuf_level_11_bytes 4 0 diff --git a/collector/fixtures/proc/spl/kstat/zfs/dnodestats b/collector/fixtures/proc/spl/kstat/zfs/dnodestats new file mode 100644 index 0000000000..36730fbcf6 --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/dnodestats @@ -0,0 +1,30 @@ +10 1 0x01 28 7616 73163810135894 309969103316276 +name type data +dnode_hold_dbuf_hold 4 0 +dnode_hold_dbuf_read 4 0 +dnode_hold_alloc_hits 4 37617 +dnode_hold_alloc_misses 4 0 +dnode_hold_alloc_interior 4 0 +dnode_hold_alloc_lock_retry 4 0 +dnode_hold_alloc_lock_misses 4 0 +dnode_hold_alloc_type_none 4 0 +dnode_hold_free_hits 4 0 +dnode_hold_free_misses 4 0 +dnode_hold_free_lock_misses 4 0 +dnode_hold_free_lock_retry 4 0 +dnode_hold_free_overflow 4 0 +dnode_hold_free_refcount 4 0 +dnode_hold_free_txg 4 0 +dnode_allocate 4 0 +dnode_reallocate 4 0 +dnode_buf_evict 4 17 +dnode_alloc_next_chunk 4 0 +dnode_alloc_race 4 0 +dnode_alloc_next_block 4 0 +dnode_move_invalid 4 0 +dnode_move_recheck1 4 0 +dnode_move_recheck2 4 0 +dnode_move_special 4 0 +dnode_move_handle 4 0 +dnode_move_rwlock 4 0 +dnode_move_active 4 0 diff --git a/collector/fixtures/proc/spl/kstat/zfs/pool1/objset-1 b/collector/fixtures/proc/spl/kstat/zfs/pool1/objset-1 new file mode 100644 index 0000000000..28a9d2e3ce --- /dev/null 
+++ b/collector/fixtures/proc/spl/kstat/zfs/pool1/objset-1 @@ -0,0 +1,9 @@ +23 1 0x01 7 2160 221578688875 6665999035587 +name type data +dataset_name 7 pool1 +writes 4 0 +nwritten 4 0 +reads 4 0 +nread 4 0 +nunlinks 4 0 +nunlinked 4 0 diff --git a/collector/fixtures/proc/spl/kstat/zfs/pool1/objset-2 b/collector/fixtures/proc/spl/kstat/zfs/pool1/objset-2 new file mode 100644 index 0000000000..79ee31d289 --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/pool1/objset-2 @@ -0,0 +1,9 @@ +24 1 0x01 7 2160 221611904716 7145015038451 +name type data +dataset_name 7 pool1/dataset1 +writes 4 4 +nwritten 4 12302 +reads 4 2 +nread 4 28 +nunlinks 4 3 +nunlinked 4 3 diff --git a/collector/fixtures/proc/spl/kstat/zfs/pool1/state b/collector/fixtures/proc/spl/kstat/zfs/pool1/state new file mode 100644 index 0000000000..1424865cf5 --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/pool1/state @@ -0,0 +1 @@ +ONLINE diff --git a/collector/fixtures/proc/spl/kstat/zfs/poolz1/objset-1 b/collector/fixtures/proc/spl/kstat/zfs/poolz1/objset-1 new file mode 100644 index 0000000000..189b65550b --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/poolz1/objset-1 @@ -0,0 +1,9 @@ +30 1 0x01 7 2160 217993779684 2621674546179 +name type data +dataset_name 7 poolz1 +writes 4 0 +nwritten 4 0 +reads 4 0 +nread 4 0 +nunlinks 4 0 +nunlinked 4 0 \ No newline at end of file diff --git a/collector/fixtures/proc/spl/kstat/zfs/poolz1/objset-2 b/collector/fixtures/proc/spl/kstat/zfs/poolz1/objset-2 new file mode 100644 index 0000000000..3d446235a9 --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/poolz1/objset-2 @@ -0,0 +1,9 @@ +31 1 0x01 7 2160 218133979890 3024169078920 +name type data +dataset_name 7 poolz1/dataset1 +writes 4 10 +nwritten 4 32806 +reads 4 2 +nread 4 28 +nunlinks 4 14 +nunlinked 4 14 \ No newline at end of file diff --git a/collector/fixtures/proc/spl/kstat/zfs/poolz1/state b/collector/fixtures/proc/spl/kstat/zfs/poolz1/state new file mode 100644 index 
0000000000..be5b2ef03f --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/poolz1/state @@ -0,0 +1 @@ +DEGRADED diff --git a/collector/fixtures/proc/spl/kstat/zfs/vdev_mirror_stats b/collector/fixtures/proc/spl/kstat/zfs/vdev_mirror_stats new file mode 100644 index 0000000000..7e4be9bd67 --- /dev/null +++ b/collector/fixtures/proc/spl/kstat/zfs/vdev_mirror_stats @@ -0,0 +1,9 @@ +18 1 0x01 7 1904 73163813004224 309980651991187 +name type data +rotating_linear 4 0 +rotating_offset 4 0 +rotating_seek 4 0 +non_rotating_linear 4 0 +non_rotating_seek 4 0 +preferred_found 4 0 +preferred_not_found 4 94 diff --git a/collector/fixtures/proc/spl/kstat/zfs/zil b/collector/fixtures/proc/spl/kstat/zfs/zil index b9f50e8a06..61e95d719e 100644 --- a/collector/fixtures/proc/spl/kstat/zfs/zil +++ b/collector/fixtures/proc/spl/kstat/zfs/zil @@ -8,7 +8,7 @@ zil_itx_indirect_bytes 4 0 zil_itx_copied_count 4 0 zil_itx_copied_bytes 4 0 zil_itx_needcopy_count 4 0 -zil_itx_needcopy_bytes 4 0 +zil_itx_needcopy_bytes 4 18446744073709537686 zil_itx_metaslab_normal_count 4 0 zil_itx_metaslab_normal_bytes 4 0 zil_itx_metaslab_slog_count 4 0 diff --git a/collector/fixtures/proc/stat b/collector/fixtures/proc/stat index dabb96f747..96346503b9 100644 --- a/collector/fixtures/proc/stat +++ b/collector/fixtures/proc/stat @@ -1,12 +1,12 @@ -cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 -cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 -cpu1 47869 23 16474 1110787 591 0 46 0 0 0 -cpu2 46504 36 15916 1112321 441 0 326 0 0 0 -cpu3 47054 102 15683 1113230 533 0 60 0 0 0 -cpu4 28413 25 10776 1140321 217 0 8 0 0 0 -cpu5 29271 101 11586 1136270 672 0 30 0 0 0 -cpu6 29152 36 10276 1139721 319 0 29 0 0 0 -cpu7 29098 268 10164 1139282 555 0 31 0 0 0 +cpu 301854 612 111922 8979004 3552 2 3944 0 44 36 +cpu0 44490 19 21045 1087069 220 1 3410 0 2 1 +cpu1 47869 23 16474 1110787 591 0 46 0 3 2 +cpu2 46504 36 15916 1112321 441 0 326 0 4 3 +cpu3 47054 102 15683 1113230 533 0 60 0 5 4 +cpu4 28413 25 10776 1140321 
217 0 8 0 6 5 +cpu5 29271 101 11586 1136270 672 0 30 0 7 6 +cpu6 29152 36 10276 1139721 319 0 29 0 8 7 +cpu7 29098 268 10164 1139282 555 0 31 0 9 8 intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 38014093 btime 1418183276 diff --git a/collector/fixtures/proc/sys/kernel/pid_max b/collector/fixtures/proc/sys/kernel/pid_max new file mode 100644 index 0000000000..190a18037c --- /dev/null +++ b/collector/fixtures/proc/sys/kernel/pid_max @@ -0,0 +1 @@ +123 diff --git 
a/collector/fixtures/proc/sys/kernel/random/poolsize b/collector/fixtures/proc/sys/kernel/random/poolsize new file mode 100644 index 0000000000..801c306ed3 --- /dev/null +++ b/collector/fixtures/proc/sys/kernel/random/poolsize @@ -0,0 +1 @@ +4096 diff --git a/collector/fixtures/proc/sys/kernel/threads-max b/collector/fixtures/proc/sys/kernel/threads-max new file mode 100644 index 0000000000..0ccbf457eb --- /dev/null +++ b/collector/fixtures/proc/sys/kernel/threads-max @@ -0,0 +1 @@ +7801 \ No newline at end of file diff --git a/collector/fixtures/proc/sys/pid_max b/collector/fixtures/proc/sys/pid_max new file mode 100644 index 0000000000..190a18037c --- /dev/null +++ b/collector/fixtures/proc/sys/pid_max @@ -0,0 +1 @@ +123 diff --git a/collector/fixtures/proc/sys/threads-max b/collector/fixtures/proc/sys/threads-max new file mode 100644 index 0000000000..0ccbf457eb --- /dev/null +++ b/collector/fixtures/proc/sys/threads-max @@ -0,0 +1 @@ +7801 \ No newline at end of file diff --git a/collector/fixtures/proc/vmstat b/collector/fixtures/proc/vmstat new file mode 100644 index 0000000000..d431eb4556 --- /dev/null +++ b/collector/fixtures/proc/vmstat @@ -0,0 +1,120 @@ +nr_free_pages 977769 +nr_alloc_batch 4158 +nr_inactive_anon 125031 +nr_active_anon 622512 +nr_inactive_file 92317 +nr_active_file 324014 +nr_unevictable 12 +nr_mlock 12 +nr_anon_pages 713633 +nr_mapped 118021 +nr_file_pages 450840 +nr_dirty 21 +nr_writeback 0 +nr_slab_reclaimable 85763 +nr_slab_unreclaimable 431112 +nr_page_table_pages 12504 +nr_kernel_stack 1156 +nr_overhead 4956 +nr_unstable 0 +nr_bounce 0 +nr_vmscan_write 35050 +nr_vmscan_immediate_reclaim 27 +nr_writeback_temp 0 +nr_isolated_anon 0 +nr_isolated_file 0 +nr_shmem 20623 +nr_dirtied 11127183 +nr_written 11122061 +nr_pages_scanned 0 +numa_hit 2601972389 +numa_miss 0 +numa_foreign 0 +numa_interleave 32353 +numa_local 2601972389 +numa_other 0 +workingset_refault 157066 +workingset_activate 104270 +workingset_nodereclaim 0 
+nr_anon_transparent_hugepages 556 +nr_free_cma 0 +nr_dirty_threshold 270390 +nr_dirty_background_threshold 135030 +pgpgin 7344136 +pgpgout 1541180581 +pswpin 1476 +pswpout 35045 +pgalloc_dma 12 +pgalloc_dma32 611781566 +pgalloc_normal 2287227526 +pgalloc_movable 0 +pgfree 2938719870 +pgactivate 152952989 +pgdeactivate 898450 +pgfault 2320168809 +pgmajfault 507162 +pgrefill_dma 0 +pgrefill_dma32 186367 +pgrefill_normal 603970 +pgrefill_movable 0 +pgsteal_kswapd_dma 0 +pgsteal_kswapd_dma32 78783 +pgsteal_kswapd_normal 254128 +pgsteal_kswapd_movable 0 +pgsteal_direct_dma 0 +pgsteal_direct_dma32 44 +pgsteal_direct_normal 6484 +pgsteal_direct_movable 0 +pgscan_kswapd_dma 0 +pgscan_kswapd_dma32 107656 +pgscan_kswapd_normal 358784 +pgscan_kswapd_movable 0 +pgscan_direct_dma 0 +pgscan_direct_dma32 67 +pgscan_direct_normal 6796 +pgscan_direct_movable 0 +pgscan_direct_throttle 0 +zone_reclaim_failed 0 +pginodesteal 412258 +slabs_scanned 14355346 +kswapd_inodesteal 288891 +kswapd_low_wmark_hit_quickly 109 +kswapd_high_wmark_hit_quickly 45 +pageoutrun 247 +allocstall 83165 +pgrotated 35014 +drop_pagecache 0 +drop_slab 0 +numa_pte_updates 0 +numa_huge_pte_updates 0 +numa_hint_faults 0 +numa_hint_faults_local 0 +numa_pages_migrated 0 +pgmigrate_success 37070309 +pgmigrate_fail 36815 +compact_migrate_scanned 830267783 +compact_free_scanned 12336622550 +compact_isolated 82707414 +compact_stall 210959 +compact_fail 164840 +compact_success 46119 +htlb_buddy_alloc_success 0 +htlb_buddy_alloc_fail 0 +unevictable_pgs_culled 2188 +unevictable_pgs_scanned 0 +unevictable_pgs_rescued 3962 +unevictable_pgs_mlocked 3994 +unevictable_pgs_munlocked 3968 +unevictable_pgs_cleared 14 +unevictable_pgs_stranded 14 +thp_fault_alloc 142261 +thp_fault_fallback 98119 +thp_collapse_alloc 88421 +thp_collapse_alloc_failed 20954 +thp_split 69984 +thp_zero_page_alloc 9 +thp_zero_page_alloc_failed 20 +balloon_inflate 0 +balloon_deflate 0 +balloon_migrate 0 +oom_kill 0 diff --git 
a/collector/fixtures/proc/zoneinfo b/collector/fixtures/proc/zoneinfo new file mode 100644 index 0000000000..82ff3d6315 --- /dev/null +++ b/collector/fixtures/proc/zoneinfo @@ -0,0 +1,265 @@ +Node 0, zone DMA + per-node stats + nr_inactive_anon 95612 + nr_active_anon 1175853 + nr_inactive_file 723339 + nr_active_file 688810 + nr_unevictable 213111 + nr_slab_reclaimable 121763 + nr_slab_unreclaimable 56182 + nr_isolated_anon 0 + nr_isolated_file 0 + workingset_nodes 0 + workingset_refault 0 + workingset_activate 0 + workingset_restore 0 + workingset_nodereclaim 0 + nr_anon_pages 1156608 + nr_mapped 423143 + nr_file_pages 1740118 + nr_dirty 103 + nr_writeback 0 + nr_writeback_temp 0 + nr_shmem 330517 + nr_shmem_hugepages 0 + nr_shmem_pmdmapped 0 + nr_file_hugepages 0 + nr_file_pmdmapped 0 + nr_anon_transparent_hugepages 0 + nr_vmscan_write 0 + nr_vmscan_immediate_reclaim 0 + nr_dirtied 1189097 + nr_written 1181554 + nr_kernel_misc_reclaimable 0 + nr_foll_pin_acquired 3 + nr_foll_pin_released 3 + pages free 2949 + min 8 + low 11 + high 14 + spanned 4095 + present 3997 + managed 3973 + protection: (0, 2039, 31932, 31932, 31932) + nr_free_pages 2949 + nr_zone_inactive_anon 0 + nr_zone_active_anon 0 + nr_zone_inactive_file 0 + nr_zone_active_file 0 + nr_zone_unevictable 0 + nr_zone_write_pending 0 + nr_mlock 0 + nr_page_table_pages 0 + nr_kernel_stack 0 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 1 + numa_miss 0 + numa_foreign 0 + numa_interleave 1 + numa_local 1 + numa_other 0 + pagesets + cpu: 0 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 1 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 2 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 3 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 4 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 5 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 6 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 7 + count: 0 + 
high: 0 + batch: 1 + vm stats threshold: 8 + node_unreclaimable: 0 + start_pfn: 1 +Node 0, zone DMA32 + pages free 528427 + min 1078 + low 1600 + high 2122 + spanned 1044480 + present 546847 + managed 530339 + protection: (0, 0, 29893, 29893, 29893) + nr_free_pages 528427 + nr_zone_inactive_anon 0 + nr_zone_active_anon 0 + nr_zone_inactive_file 0 + nr_zone_active_file 0 + nr_zone_unevictable 0 + nr_zone_write_pending 0 + nr_mlock 0 + nr_page_table_pages 0 + nr_kernel_stack 0 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 13 + numa_miss 0 + numa_foreign 0 + numa_interleave 1 + numa_local 13 + numa_other 0 + pagesets + cpu: 0 + count: 357 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 1 + count: 0 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 2 + count: 338 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 3 + count: 0 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 4 + count: 62 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 5 + count: 63 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 6 + count: 0 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 7 + count: 63 + high: 378 + batch: 63 + vm stats threshold: 48 + node_unreclaimable: 0 + start_pfn: 4096 +Node 0, zone Normal + pages free 4539739 + min 15809 + low 23461 + high 31113 + spanned 7806976 + present 7806976 + managed 7654794 + protection: (0, 0, 0, 0, 0) + nr_free_pages 4539739 + nr_zone_inactive_anon 95612 + nr_zone_active_anon 1175853 + nr_zone_inactive_file 723339 + nr_zone_active_file 688810 + nr_zone_unevictable 213111 + nr_zone_write_pending 103 + nr_mlock 12 + nr_page_table_pages 13921 + nr_kernel_stack 18864 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 62836441 + numa_miss 0 + numa_foreign 0 + numa_interleave 23174 + numa_local 62836441 + numa_other 0 + pagesets + cpu: 0 + count: 351 + high: 378 + batch: 63 + vm stats threshold: 72 + cpu: 1 + count: 112 + high: 378 + batch: 63 + vm stats threshold: 72 + cpu: 2 + count: 368 + 
high: 378 + batch: 63 + vm stats threshold: 72 + cpu: 3 + count: 358 + high: 378 + batch: 63 + vm stats threshold: 72 + cpu: 4 + count: 304 + high: 378 + batch: 63 + vm stats threshold: 72 + cpu: 5 + count: 112 + high: 378 + batch: 63 + vm stats threshold: 72 + cpu: 6 + count: 488 + high: 378 + batch: 63 + vm stats threshold: 72 + cpu: 7 + count: 342 + high: 378 + batch: 63 + vm stats threshold: 72 + node_unreclaimable: 0 + start_pfn: 1048576 +Node 0, zone Movable + pages free 0 + min 0 + low 0 + high 0 + spanned 0 + present 0 + managed 0 + protection: (0, 0, 0, 0, 0) +Node 0, zone Device + pages free 0 + min 0 + low 0 + high 0 + spanned 0 + present 0 + managed 0 + protection: (0, 0, 0, 0, 0) \ No newline at end of file diff --git a/collector/fixtures/sys.ttar b/collector/fixtures/sys.ttar index 829425abf1..a08e6b6119 100644 --- a/collector/fixtures/sys.ttar +++ b/collector/fixtures/sys.ttar @@ -11,149 +11,282 @@ Mode: 755 Directory: sys/bus/cpu/devices Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/cpu/devices/cpu0 +Path: sys/bus/cpu/devices/cpu0 +SymlinkTo: ../../../devices/system/cpu/cpu0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/bus/cpu/devices/cpu1 +SymlinkTo: ../../../devices/system/cpu/cpu1 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/bus/cpu/devices/cpu2 +SymlinkTo: ../../../devices/system/cpu/cpu2 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/bus/cpu/devices/cpu3 +SymlinkTo: ../../../devices/system/cpu/cpu3 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/bus/node +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/bus/node/devices Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
sys/bus/cpu/devices/cpu0/cpufreq +Path: sys/bus/node/devices/node0 +SymlinkTo: ../../../devices/system/node/node0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/bus/node/devices/node1 +SymlinkTo: ../../../devices/system/node/node1 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu0/cpufreq/scaling_cur_freq +Directory: sys/class/dmi +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/dmi/id +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/bios_date Lines: 1 -1699981 -Mode: 644 +04/12/2021 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu0/cpufreq/scaling_max_freq +Path: sys/class/dmi/id/bios_release Lines: 1 -3700000 -Mode: 644 +2.2 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu0/cpufreq/scaling_min_freq +Path: sys/class/dmi/id/bios_vendor Lines: 1 -800000 -Mode: 644 +Dell Inc. +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/cpu/devices/cpu0/thermal_throttle -Mode: 755 +Path: sys/class/dmi/id/bios_version +Lines: 1 +2.2.4 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu0/thermal_throttle/core_throttle_count +Path: sys/class/dmi/id/board_name Lines: 1 -5 -Mode: 644 +07PXPY +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu0/thermal_throttle/package_throttle_count +Path: sys/class/dmi/id/board_serial Lines: 1 -30 +.7N62AI2.GRTCL6944100GP. 
+Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/board_vendor +Lines: 1 +Dell Inc. +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/board_version +Lines: 1 +A01 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/chassis_asset_tag +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/chassis_serial +Lines: 1 +7N62AI2 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/chassis_type +Lines: 1 +23 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/chassis_vendor +Lines: 1 +Dell Inc. +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/chassis_version +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/modalias +Lines: 1 +dmi:bvnDellInc.:bvr2.2.4:bd04/12/2021:br2.2:svnDellInc.:pnPowerEdgeR6515:pvr:rvnDellInc.:rn07PXPY:rvrA01:cvnDellInc.:ct23:cvr: +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/product_family +Lines: 1 +PowerEdge +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/product_name +Lines: 1 +PowerEdge R6515 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/product_serial +Lines: 1 +7N62AI2 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/product_sku +Lines: 1 +SKU=NotProvided;ModelName=PowerEdge R6515 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - +Path: sys/class/dmi/id/product_uuid +Lines: 1 +83340ca8-cb49-4474-8c29-d2088ca84dd9 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/product_version +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/sys_vendor +Lines: 1 +Dell Inc. +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/dmi/id/uevent +Lines: 1 +MODALIAS=dmi:bvnDellInc.:bvr2.2.4:bd04/12/2021:br2.2:svnDellInc.:pnPowerEdgeR6515:pvr:rvnDellInc.:rn07PXPY:rvrA01:cvnDellInc.:ct23:cvr: Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/cpu/devices/cpu1 +Directory: sys/class/fc_host Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/cpu/devices/cpu1/cpufreq +Directory: sys/class/fc_host/host0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu1/cpufreq/scaling_cur_freq +Path: sys/class/fc_host/host0/dev_loss_tmo Lines: 1 -1699981 +30 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu1/cpufreq/scaling_max_freq +Path: sys/class/fc_host/host0/fabric_name Lines: 1 -3700000 +0x0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu1/cpufreq/scaling_min_freq +Path: sys/class/fc_host/host0/node_name Lines: 1 -800000 +0x2000e0071bce95f2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/cpu/devices/cpu1/thermal_throttle -Mode: 755 +Path: sys/class/fc_host/host0/port_id +Lines: 1 +0x000002 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/bus/cpu/devices/cpu1/thermal_throttle/core_throttle_count +Path: sys/class/fc_host/host0/port_name Lines: 1 -0 +0x1000e0071bce95f2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu1/thermal_throttle/package_throttle_count +Path: sys/class/fc_host/host0/port_state Lines: 1 -30 +Online Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/cpu/devices/cpu2 -Mode: 755 +Path: sys/class/fc_host/host0/port_type +Lines: 1 +Point-To-Point (direct nport connection) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/fc_host/host0/speed +Lines: 1 +16 Gbit +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/cpu/devices/cpu2/thermal_throttle +Directory: sys/class/fc_host/host0/statistics Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu2/thermal_throttle/core_throttle_count +Path: sys/class/fc_host/host0/statistics/dumped_frames Lines: 1 -40 +0xffffffffffffffff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu2/thermal_throttle/package_throttle_count +Path: sys/class/fc_host/host0/statistics/error_frames Lines: 1 -6 +0x0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/cpu/devices/cpu3 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/cpu/devices/cpu3/cpufreq -Mode: 755 +Path: sys/class/fc_host/host0/statistics/fcp_packet_aborts +Lines: 1 +0x13 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu3/cpufreq/scaling_cur_freq +Path: sys/class/fc_host/host0/statistics/invalid_crc_count Lines: 1 -8000 +0x2 Mode: 644 # ttar 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu3/cpufreq/scaling_max_freq +Path: sys/class/fc_host/host0/statistics/invalid_tx_word_count Lines: 1 -4200000 +0x8 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/cpu/devices/cpu3/cpufreq/scaling_min_freq +Path: sys/class/fc_host/host0/statistics/link_failure_count Lines: 1 -1000 +0x9 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/node -Mode: 755 +Path: sys/class/fc_host/host0/statistics/loss_of_signal_count +Lines: 1 +0x11 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/node/devices -Mode: 755 +Path: sys/class/fc_host/host0/statistics/loss_of_sync_count +Lines: 1 +0x10 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/node/devices/node0 -Mode: 755 +Path: sys/class/fc_host/host0/statistics/nos_count +Lines: 1 +0x12 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/node/devices/node0/cpu0 -Mode: 755 +Path: sys/class/fc_host/host0/statistics/rx_frames +Lines: 1 +0x3 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/node/devices/node0/cpu0/thermal_throttle -Mode: 755 +Path: sys/class/fc_host/host0/statistics/rx_words +Lines: 1 +0x4 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/node/devices/node0/cpu0/thermal_throttle/package_throttle_count +Path: sys/class/fc_host/host0/statistics/seconds_since_last_reset Lines: 1 -30 +0x7 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/node/devices/node0/cpu1 -Mode: 755 +Path: sys/class/fc_host/host0/statistics/tx_frames +Lines: 1 +0x5 +Mode: 644 # ttar - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/bus/node/devices/node0/cpu1/thermal_throttle -Mode: 755 +Path: sys/class/fc_host/host0/statistics/tx_words +Lines: 1 +0x6 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/node/devices/node0/cpu1/thermal_throttle/package_throttle_count +Path: sys/class/fc_host/host0/supported_classes Lines: 1 -30 +Class 3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/bus/node/devices/node0/cpulist +Path: sys/class/fc_host/host0/supported_speeds Lines: 1 -0-3 +4 Gbit, 8 Gbit, 16 Gbit Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class -Mode: 755 +Path: sys/class/fc_host/host0/symbolic_name +Lines: 1 +Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/hwmon Mode: 755 @@ -226,9 +359,151 @@ Mode: 644 Directory: sys/class/infiniband Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/infiniband/i40iw0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/board_id +Lines: 1 +I40IW Board ID +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/fw_ver +Lines: 1 +0.2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/hca_type +Lines: 1 +I40IW +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/infiniband/i40iw0/ports +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/infiniband/i40iw0/ports/1 +Mode: 755 +# ttar - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/infiniband/i40iw0/ports/1/counters +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/VL15_dropped +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/excessive_buffer_overrun_errors +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/link_downed +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/link_error_recovery +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/local_link_integrity_errors +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_constraint_errors +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_data +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_errors +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_packets +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_remote_physical_errors +Lines: 1 +N/A (no PMA) +Mode: 644 +# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_switch_relay_errors +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_constraint_errors +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_data +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_discards +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_packets +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_wait +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/counters/symbol_error +Lines: 1 +N/A (no PMA) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/phys_state +Lines: 1 +5: LinkUp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/rate +Lines: 1 +10 Gb/sec (4X) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/i40iw0/ports/1/state +Lines: 1 +4: ACTIVE +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband/mlx4_0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - +Path: sys/class/infiniband/mlx4_0/board_id +Lines: 1 +SM_1141000001000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/fw_ver +Lines: 1 +2.31.5050 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/hca_type +Lines: 1 +MT4099 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband/mlx4_0/ports Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -258,16 +533,56 @@ Lines: 1 16 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data Lines: 1 4631917 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_discards +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets +Lines: 1 +6825908347 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data Lines: 1 3733440 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards +Lines: 1 +5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets +Lines: 1 +6235865 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait +Lines: 1 +4294967295 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/unicast_rcv_packets Lines: 1 61148 @@ -321,6 +636,21 @@ Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/1/phys_state +Lines: 1 +5: LinkUp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/1/rate +Lines: 1 +40 Gb/sec (4X QDR) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/1/state +Lines: 1 +4: ACTIVE +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband/mlx4_0/ports/2 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -410,12 +740,42 @@ Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/2/phys_state +Lines: 1 +5: LinkUp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/2/rate +Lines: 1 +40 Gb/sec (4X QDR) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/infiniband/mlx4_0/ports/2/state +Lines: 1 +4: ACTIVE +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: 
sys/class/net Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/bond0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/addr_assign_type +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/addr_len +Lines: 1 +6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/bond0/address +Lines: 1 +01:01:01:01:01:01 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/bond0/bonding Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -423,1047 +783,2884 @@ Path: sys/class/net/bond0/bonding/slaves Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/bonding_masters +Path: sys/class/net/bond0/broadcast Lines: 1 -bond0 dmz int +ff:ff:ff:ff:ff:ff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/dmz -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/dmz/bonding -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/bonding/slaves +Path: sys/class/net/bond0/carrier Lines: 1 -eth0 eth4 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/dmz/slave_eth0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/slave_eth0/operstate +Path: sys/class/net/bond0/carrier_changes Lines: 1 -up +2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/dmz/slave_eth4 -Mode: 755 -# ttar - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/dmz/slave_eth4/operstate +Path: sys/class/net/bond0/carrier_down_count Lines: 1 -up +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/int -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/int/bonding -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/bonding/slaves +Path: sys/class/net/bond0/carrier_up_count Lines: 1 -eth5 eth1 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/int/slave_eth1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/slave_eth1/operstate +Path: sys/class/net/bond0/dev_id Lines: 1 -down +0x20 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/class/net/int/slave_eth5 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/class/net/int/slave_eth5/operstate +Path: sys/class/net/bond0/dormant Lines: 1 -up +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache -Mode: 755 +Path: sys/class/net/bond0/duplex +Lines: 1 +full +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data +Path: sys/class/net/bond0/flags Lines: 1 -0 +0x1303 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day -Mode: 755 +Path: sys/class/net/bond0/ifalias +Lines: 0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed +Path: sys/class/net/bond0/ifindex Lines: 1 -0 +2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits +Path: sys/class/net/bond0/iflink Lines: 1 -0 +2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses +Path: sys/class/net/bond0/link_mode Lines: 1 -0 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio +Path: sys/class/net/bond0/mtu Lines: 1 -100 +1500 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits +Path: sys/class/net/bond0/name_assign_type Lines: 1 -289 +2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions +Path: sys/class/net/bond0/netdev_group Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses +Path: sys/class/net/bond0/operstate Lines: 1 -0 +up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads -Lines: 1 -0 +Path: sys/class/net/bond0/phys_port_id +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute -Mode: 755 +Path: sys/class/net/bond0/phys_port_name +Lines: 0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed -Lines: 1 -0 +Path: 
sys/class/net/bond0/phys_switch_id +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits +Path: sys/class/net/bond0/speed Lines: 1 -0 +-1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses +Path: sys/class/net/bond0/tx_queue_len Lines: 1 -0 +1000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio +Path: sys/class/net/bond0/type Lines: 1 -0 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits +Path: sys/class/net/bonding_masters Lines: 1 -0 +bond0 dmz int Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions +Directory: sys/class/net/dmz +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/addr_assign_type Lines: 1 -0 +3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses +Path: sys/class/net/dmz/addr_len Lines: 1 -0 +6 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads +Path: 
sys/class/net/dmz/address Lines: 1 -0 +01:01:01:01:01:01 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour +Directory: sys/class/net/dmz/bonding Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed +Path: sys/class/net/dmz/bonding/slaves Lines: 1 -0 +eth0 eth4 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits +Path: sys/class/net/dmz/broadcast Lines: 1 -0 +ff:ff:ff:ff:ff:ff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses +Path: sys/class/net/dmz/carrier Lines: 1 -0 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio +Path: sys/class/net/dmz/carrier_changes Lines: 1 -0 +2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits +Path: sys/class/net/dmz/carrier_down_count Lines: 1 -0 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions +Path: sys/class/net/dmz/carrier_up_count Lines: 1 -0 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses +Path: sys/class/net/dmz/dev_id Lines: 1 -0 +0x20 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads +Path: sys/class/net/dmz/dormant Lines: 1 -0 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total -Mode: 755 +Path: sys/class/net/dmz/duplex +Lines: 1 +full +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed +Path: sys/class/net/dmz/flags Lines: 1 -0 +0x1303 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits +Path: sys/class/net/dmz/ifalias +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/ifindex Lines: 1 -0 +2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses +Path: sys/class/net/dmz/iflink Lines: 1 -0 +2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio +Path: sys/class/net/dmz/link_mode Lines: 1 -100 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits +Path: sys/class/net/dmz/mtu Lines: 1 -546 +1500 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions +Path: sys/class/net/dmz/name_assign_type Lines: 1 -0 +2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses +Path: sys/class/net/dmz/netdev_group Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads +Path: sys/class/net/dmz/operstate Lines: 1 -0 +up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5 -Mode: 755 +Path: sys/class/net/dmz/phys_port_id +Lines: 0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 -Mode: 755 +Path: sys/class/net/dmz/phys_port_name +Lines: 0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 -Mode: 755 +Path: sys/class/net/dmz/phys_switch_id +Lines: 0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 +Directory: sys/class/net/dmz/slave_eth0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block +Directory: 
sys/class/net/dmz/slave_eth0/bonding_slave Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc +Path: sys/class/net/dmz/slave_eth0/bonding_slave/mii_status +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/dmz/slave_eth0/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/dmz/slave_eth4 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache +Directory: sys/class/net/dmz/slave_eth4/bonding_slave Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors +Path: sys/class/net/dmz/slave_eth4/bonding_slave/mii_status Lines: 1 -0 +up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written +Path: sys/class/net/dmz/slave_eth4/operstate Lines: 1 -512 +up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] +Path: sys/class/net/dmz/speed +Lines: 1 +1000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written +Path: 
sys/class/net/dmz/tx_queue_len Lines: 1 -0 +1000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/platform -Mode: 755 +Path: sys/class/net/dmz/type +Lines: 1 +1 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/platform/applesmc.768 +Path: sys/class/net/eth0 +SymlinkTo: ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/ +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/net/int Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/fan1_input +Path: sys/class/net/int/addr_assign_type Lines: 1 -0 +3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/fan1_label +Path: sys/class/net/int/addr_len Lines: 1 -Left side +6 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/fan1_manual +Path: sys/class/net/int/address Lines: 1 -0 +01:01:01:01:01:01 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/fan1_max +Directory: sys/class/net/int/bonding +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/net/int/bonding/slaves Lines: 1 -6156 +eth5 eth1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/fan1_min +Path: sys/class/net/int/broadcast Lines: 1 -2160 +ff:ff:ff:ff:ff:ff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/fan1_output +Path: sys/class/net/int/carrier Lines: 1 -2160 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Path: sys/devices/platform/applesmc.768/fan1_safe -Lines: 0 +Path: sys/class/net/int/carrier_changes +Lines: 1 +2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/fan2_input +Path: sys/class/net/int/carrier_down_count Lines: 1 -1998 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/fan2_label +Path: sys/class/net/int/carrier_up_count Lines: 1 -Right side +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/fan2_manual +Path: sys/class/net/int/dev_id Lines: 1 -0 +0x20 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/fan2_max +Path: sys/class/net/int/dormant Lines: 1 -5700 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/fan2_min +Path: sys/class/net/int/duplex Lines: 1 -2000 +full Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/fan2_output +Path: sys/class/net/int/flags Lines: 1 -2000 +0x1303 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/fan2_safe +Path: sys/class/net/int/ifalias Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/platform/applesmc.768/hwmon -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/platform/applesmc.768/hwmon/hwmon2 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/hwmon/hwmon2/device -SymlinkTo: ../../../applesmc.768 -# ttar - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - -Path: sys/devices/platform/applesmc.768/name +Path: sys/class/net/int/ifindex Lines: 1 -applesmc +2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/platform/coretemp.0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/platform/coretemp.0/hwmon -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/platform/coretemp.0/hwmon/hwmon0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/device -SymlinkTo: ../../../coretemp.0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/name +Path: sys/class/net/int/iflink Lines: 1 -coretemp +2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_crit +Path: sys/class/net/int/link_mode Lines: 1 -100000 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_crit_alarm +Path: sys/class/net/int/mtu Lines: 1 -0 +1500 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_input +Path: sys/class/net/int/name_assign_type Lines: 1 -55000 +2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_label +Path: sys/class/net/int/netdev_group Lines: 1 -Physical id 0 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_max +Path: sys/class/net/int/operstate Lines: 1 -84000 +up Mode: 644 # ttar - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_crit -Lines: 1 -100000 +Path: sys/class/net/int/phys_port_id +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_crit_alarm -Lines: 1 -0 +Path: sys/class/net/int/phys_port_name +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_input -Lines: 1 -54000 +Path: sys/class/net/int/phys_switch_id +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_label -Lines: 1 -Core 0 -Mode: 644 +Directory: sys/class/net/int/slave_eth1 +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_max -Lines: 1 -84000 -Mode: 644 +Directory: sys/class/net/int/slave_eth1/bonding_slave +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_crit +Path: sys/class/net/int/slave_eth1/bonding_slave/mii_status Lines: 1 -100000 +down Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_crit_alarm +Path: sys/class/net/int/slave_eth1/operstate Lines: 1 -0 +down Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_input -Lines: 1 -52000 -Mode: 644 +Directory: sys/class/net/int/slave_eth5 +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_label -Lines: 1 -Core 1 -Mode: 644 +Directory: 
sys/class/net/int/slave_eth5/bonding_slave +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_max +Path: sys/class/net/int/slave_eth5/bonding_slave/mii_status Lines: 1 -84000 +up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_crit +Path: sys/class/net/int/slave_eth5/operstate Lines: 1 -100000 +up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_crit_alarm +Path: sys/class/net/int/speed Lines: 1 -0 +1000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_input +Path: sys/class/net/int/tx_queue_len Lines: 1 -53000 +1000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_label +Path: sys/class/net/int/type Lines: 1 -Core 2 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_max -Lines: 1 -84000 -Mode: 644 +Directory: sys/class/nvme +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_crit -Lines: 1 -100000 -Mode: 644 +Directory: sys/class/nvme/nvme0 +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_crit_alarm +Path: sys/class/nvme/nvme0/firmware_rev Lines: 1 -0 -Mode: 644 +1B2QEXP7 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_input +Path: sys/class/nvme/nvme0/model Lines: 1 -50000 -Mode: 644 +Samsung SSD 
970 PRO 512GB +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_label +Path: sys/class/nvme/nvme0/serial Lines: 1 -Core 3 -Mode: 644 +S680HF8N190894I +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_max +Path: sys/class/nvme/nvme0/state Lines: 1 -84000 -Mode: 644 +live +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/platform/coretemp.1 +Directory: sys/class/power_supply Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/platform/coretemp.1/hwmon +Directory: sys/class/power_supply/AC Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/platform/coretemp.1/hwmon/hwmon1 -Mode: 755 +Path: sys/class/power_supply/AC/online +Lines: 1 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/device -SymlinkTo: ../../../coretemp.1 +Directory: sys/class/power_supply/AC/power +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/name +Path: sys/class/power_supply/AC/power/async Lines: 1 -coretemp +disabled Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_crit +Path: sys/class/power_supply/AC/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/power/control Lines: 1 -100000 +auto Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_crit_alarm +Path: sys/class/power_supply/AC/power/runtime_active_kids Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_input +Path: sys/class/power_supply/AC/power/runtime_active_time Lines: 1 -55000 -Mode: 644 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_label +Path: sys/class/power_supply/AC/power/runtime_enabled Lines: 1 -Physical id 0 -Mode: 644 +disabled +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_max +Path: sys/class/power_supply/AC/power/runtime_status Lines: 1 -84000 -Mode: 644 +unsupported +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_crit +Path: sys/class/power_supply/AC/power/runtime_suspended_time Lines: 1 -100000 -Mode: 644 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_crit_alarm +Path: sys/class/power_supply/AC/power/runtime_usage Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_input +Path: sys/class/power_supply/AC/power/wakeup Lines: 1 -54000 +enabled Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_label +Path: sys/class/power_supply/AC/power/wakeup_abort_count Lines: 1 -Core 0 -Mode: 644 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_max +Path: 
sys/class/power_supply/AC/power/wakeup_active Lines: 1 -84000 -Mode: 644 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_crit +Path: sys/class/power_supply/AC/power/wakeup_active_count Lines: 1 -100000 -Mode: 644 +1 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_crit_alarm +Path: sys/class/power_supply/AC/power/wakeup_count Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_input +Path: sys/class/power_supply/AC/power/wakeup_expire_count Lines: 1 -52000 -Mode: 644 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_label +Path: sys/class/power_supply/AC/power/wakeup_last_time_ms Lines: 1 -Core 1 -Mode: 644 +7888 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_max +Path: sys/class/power_supply/AC/power/wakeup_max_time_ms Lines: 1 -84000 -Mode: 644 +2 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_crit +Path: sys/class/power_supply/AC/power/wakeup_prevent_sleep_time_ms Lines: 1 -100000 -Mode: 644 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_crit_alarm +Path: sys/class/power_supply/AC/power/wakeup_total_time_ms Lines: 1 -0 -Mode: 644 +2 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_input +Path: sys/class/power_supply/AC/type Lines: 1 -53000 +Mains 
+Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/AC/uevent +Lines: 2 +POWER_SUPPLY_NAME=AC +POWER_SUPPLY_ONLINE=0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_label +Directory: sys/class/power_supply/BAT0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/alarm Lines: 1 -Core 2 +2253000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_max +Path: sys/class/power_supply/BAT0/capacity Lines: 1 -84000 +81 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/capacity_level +Lines: 1 +Normal +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/charge_start_threshold +Lines: 1 +95 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_crit +Path: sys/class/power_supply/BAT0/charge_stop_threshold Lines: 1 -100000 +100 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_crit_alarm +Path: sys/class/power_supply/BAT0/cycle_count Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_input +Path: sys/class/power_supply/BAT0/energy_full Lines: 1 -50000 -Mode: 644 +45070000 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_label +Path: sys/class/power_supply/BAT0/energy_full_design Lines: 1 -Core 3 
-Mode: 644 +47520000 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_max +Path: sys/class/power_supply/BAT0/energy_now Lines: 1 -84000 -Mode: 644 +36580000 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/platform/nct6775.656 -Mode: 755 +Path: sys/class/power_supply/BAT0/manufacturer +Lines: 1 +LGC +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/platform/nct6775.656/hwmon -Mode: 755 +Path: sys/class/power_supply/BAT0/model_name +Lines: 1 +LNV-45N1ÀÀ +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/platform/nct6775.656/hwmon/hwmon3 +Directory: sys/class/power_supply/BAT0/power Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_alarm +Path: sys/class/power_supply/BAT0/power/async Lines: 1 -0 +disabled Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_beep -Lines: 1 -0 +Path: sys/class/power_supply/BAT0/power/autosuspend_delay_ms +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_input +Path: sys/class/power_supply/BAT0/power/control Lines: 1 -1098 +auto Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_min +Path: sys/class/power_supply/BAT0/power/runtime_active_kids Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_pulses +Path: 
sys/class/power_supply/BAT0/power/runtime_active_time Lines: 1 -2 -Mode: 644 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_target +Path: sys/class/power_supply/BAT0/power/runtime_enabled Lines: 1 -27000 -Mode: 644 +disabled +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_tolerance +Path: sys/class/power_supply/BAT0/power/runtime_status Lines: 1 -0 -Mode: 644 +unsupported +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_alarm +Path: sys/class/power_supply/BAT0/power/runtime_suspended_time Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_beep +Path: sys/class/power_supply/BAT0/power/runtime_usage Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_input +Path: sys/class/power_supply/BAT0/power_now Lines: 1 -792 -Mode: 644 +5002000 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_max +Path: sys/class/power_supply/BAT0/present Lines: 1 -1744 -Mode: 644 +1 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_min +Path: sys/class/power_supply/BAT0/serial_number Lines: 1 -0 -Mode: 644 +38109 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_alarm +Path: sys/class/power_supply/BAT0/status Lines: 1 -1 -Mode: 644 +Discharging +Mode: 444 # ttar - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_beep +Path: sys/class/power_supply/BAT0/technology Lines: 1 -0 -Mode: 644 +Li-ion +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_input +Path: sys/class/power_supply/BAT0/type Lines: 1 -1024 +Battery +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/power_supply/BAT0/uevent +Lines: 16 +POWER_SUPPLY_NAME=BAT0 +POWER_SUPPLY_STATUS=Discharging +POWER_SUPPLY_PRESENT=1 +POWER_SUPPLY_TECHNOLOGY=Li-ion +POWER_SUPPLY_CYCLE_COUNT=0 +POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 +POWER_SUPPLY_VOLTAGE_NOW=11660000 +POWER_SUPPLY_POWER_NOW=5002000 +POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 +POWER_SUPPLY_ENERGY_FULL=45070000 +POWER_SUPPLY_ENERGY_NOW=36580000 +POWER_SUPPLY_CAPACITY=81 +POWER_SUPPLY_CAPACITY_LEVEL=Normal +POWER_SUPPLY_MODEL_NAME=LNV-45N1 +POWER_SUPPLY_MANUFACTURER=LGC +POWER_SUPPLY_SERIAL_NUMBER=38109 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_max +Path: sys/class/power_supply/BAT0/voltage_min_design Lines: 1 -0 -Mode: 644 +10800000 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_min +Path: sys/class/power_supply/BAT0/voltage_now Lines: 1 -0 -Mode: 644 +11660000 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/intrusion0_alarm +Directory: sys/class/powercap +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap/intel-rapl +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/class/powercap/intel-rapl/enabled Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/intrusion0_beep -Lines: 1 -0 +Path: sys/class/powercap/intel-rapl/uevent +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/intrusion1_alarm +Directory: sys/class/powercap/intel-rapl:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw Lines: 1 -1 +95000000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/intrusion1_beep +Path: sys/class/powercap/intel-rapl:0/constraint_0_name Lines: 1 -0 +long_term Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/name +Path: sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw Lines: 1 -nct6779 +4090000000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point1_pwm +Path: sys/class/powercap/intel-rapl:0/constraint_0_time_window_us Lines: 1 -153 +999424 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point1_temp +Path: sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw Lines: 1 -30000 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point2_pwm +Path: sys/class/powercap/intel-rapl:0/constraint_1_name Lines: 1 -255 +short_term Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point2_temp +Path: sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw Lines: 1 -70000 +4090000000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point3_pwm +Path: sys/class/powercap/intel-rapl:0/constraint_1_time_window_us Lines: 1 -255 +2440 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point3_temp +Path: sys/class/powercap/intel-rapl:0/enabled Lines: 1 -70000 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point4_pwm +Path: sys/class/powercap/intel-rapl:0/energy_uj Lines: 1 -255 +240422366267 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point4_temp +Path: sys/class/powercap/intel-rapl:0/max_energy_range_uj Lines: 1 -70000 +262143328850 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point5_pwm +Path: sys/class/powercap/intel-rapl:0/name Lines: 1 -255 +package-0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point5_temp -Lines: 1 -75000 +Path: sys/class/powercap/intel-rapl:0/uevent +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_crit_temp_tolerance -Lines: 1 -2000 +Directory: sys/class/powercap/intel-rapl:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw +Lines: 
0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_enable +Path: sys/class/powercap/intel-rapl:0:0/constraint_0_name Lines: 1 -5 +long_term Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_floor +Path: sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw Lines: 1 -1 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_mode +Path: sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us Lines: 1 -1 +976 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_start +Path: sys/class/powercap/intel-rapl:0:0/enabled Lines: 1 -1 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_step_down_time +Path: sys/class/powercap/intel-rapl:0:0/energy_uj Lines: 1 -100 +118821284256 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_step_up_time +Path: sys/class/powercap/intel-rapl:0:0/max_energy_range_uj Lines: 1 -100 +262143328850 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_stop_time +Path: sys/class/powercap/intel-rapl:0:0/name Lines: 1 -6000 +core Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_target_temp -Lines: 1 -0 +Path: sys/class/powercap/intel-rapl:0:0/uevent +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_temp_sel +Directory: sys/class/scsi_tape +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/nst0 +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/nst0a +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/nst0l +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/nst0m +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/st0 +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/st0a +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/st0l +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/scsi_tape/st0m +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - +Directory: sys/class/thermal +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/thermal/cooling_device0 +SymlinkTo: ../../devices/virtual/thermal/cooling_device0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/thermal/thermal_zone0 +SymlinkTo: ../../devices/virtual/thermal/thermal_zone0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/in_flight Lines: 1 -7 -Mode: 644 +1EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_temp_tolerance +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/io_ns Lines: 1 -0 -Mode: 644 +9247011087720EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_duty_base +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/other_cnt Lines: 1 -0 -Mode: 644 +1409EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_duty_step +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_byte_cnt Lines: 1 -0 -Mode: 644 +979383912EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_temp_sel +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_cnt Lines: 1 -1 -Mode: 644 +3741EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_temp_step +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_ns Lines: 1 -0 -Mode: 644 +33788355744EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_temp_step_base +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/resid_cnt Lines: 1 -0 -Mode: 644 +19EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_temp_step_tol +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_byte_cnt Lines: 1 -0 -Mode: 644 +1496246784000EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/system -Mode: 755 +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/system/edac -Mode: 755 +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/system/edac/mc +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/system/edac/mc/mc0 +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats Mode: 755 # ttar - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/system/edac/mc/mc0/ce_count +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/in_flight Lines: 1 -1 -Mode: 644 +1EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/system/edac/mc/mc0/ce_noinfo_count +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/io_ns Lines: 1 -2 -Mode: 644 +9247011087720EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/system/edac/mc/mc0/csrow0 -Mode: 755 +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/system/edac/mc/mc0/csrow0/ce_count +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_byte_cnt Lines: 1 -3 -Mode: 644 +979383912EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/system/edac/mc/mc0/csrow0/ue_count +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_cnt Lines: 1 -4 -Mode: 644 +3741EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/system/edac/mc/mc0/ue_count +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_ns Lines: 1 -5 -Mode: 644 +33788355744EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/system/edac/mc/mc0/ue_noinfo_count +Path: 
sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/resid_cnt Lines: 1 -6 -Mode: 644 +19EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/system/node +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/system/node/node0 +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/system/node/node0/meminfo -Lines: 30 - -Node 0 MemTotal: 134182340 kB -Node 0 MemFree: 53030372 kB -Node 0 MemUsed: 81151968 kB -Node 0 Active: 5456380 kB -Node 0 Inactive: 59150184 kB -Node 0 Active(anon): 691324 kB -Node 0 Inactive(anon): 340456 kB -Node 0 Active(file): 4765056 kB -Node 0 Inactive(file): 58809728 kB -Node 0 Unevictable: 0 kB -Node 0 Mlocked: 0 kB -Node 0 Dirty: 20 kB -Node 0 Writeback: 0 kB -Node 0 FilePages: 70170916 kB -Node 0 Mapped: 894240 kB -Node 0 AnonPages: 788196 kB -Node 0 Shmem: 47860 
kB -Node 0 KernelStack: 34016 kB -Node 0 PageTables: 143304 kB -Node 0 NFS_Unstable: 0 kB -Node 0 Bounce: 0 kB -Node 0 WritebackTmp: 0 kB -Node 0 Slab: 6654304 kB -Node 0 SReclaimable: 4473124 kB -Node 0 SUnreclaim: 2181180 kB -Node 0 AnonHugePages: 147456 kB -Node 0 HugePages_Total: 0 -Node 0 HugePages_Free: 0 -Node 0 HugePages_Surp: 0 -Mode: 644 +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/in_flight +Lines: 1 +1EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/system/node/node0/numastat -Lines: 6 -numa_hit 193460335812 -numa_miss 12624528 -numa_foreign 59858623300 -interleave_hit 57146 -local_node 193454780853 -other_node 18179487 -Mode: 644 +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/devices/system/node/node1 -Mode: 755 +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/system/node/node1/meminfo -Lines: 30 - -Node 1 MemTotal: 134217728 kB -Node 1 MemFree: 39634788 kB -Node 1 MemUsed: 94582940 kB +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/io_ns +Lines: 1 +9247011087720EOF 
+Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats +Mode: 
755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:03.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/addr_assign_type +Lines: 1 +3 +Mode: 644 +# ttar - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/addr_len +Lines: 1 +6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/address +Lines: 1 +01:01:01:01:01:01 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/broadcast +Lines: 1 +ff:ff:ff:ff:ff:ff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier_changes +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier_down_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier_up_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/dev_id +Lines: 1 +0x20 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/dormant +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/duplex +Lines: 1 +full +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/flags +Lines: 1 +0x1303 +Mode: 644 
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/ifalias +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/ifindex +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/iflink +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/link_mode +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/mtu +Lines: 1 +1500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/name_assign_type +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/netdev_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/phys_port_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/phys_port_name +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/phys_switch_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/speed +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/tx_queue_len +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/type +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data 
+Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/writeback_rate_debug +Lines: 7 +rate: 1.1M/sec +dirty: 20.4G +target: 20.4G +proportional: 427.5k +integral: 790.0k +change: 321.5k/sec +next io: 17ms +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written +Lines: 1 +512 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats +Lines: 5 +Unused: 99% +Metadata: 0% +Average: 10473 +Sectors per Q: 64 +Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/applesmc.768 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/fan1_input +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/fan1_label +Lines: 1 +Left side +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/fan1_manual +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/fan1_max +Lines: 1 +6156 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/fan1_min +Lines: 1 +2160 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/fan1_output +Lines: 1 +2160 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/fan1_safe +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/fan2_input +Lines: 1 +1998 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/fan2_label +Lines: 1 +Right side +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/fan2_manual +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/fan2_max +Lines: 1 +5700 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/fan2_min +Lines: 1 +2000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/fan2_output +Lines: 1 +2000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/fan2_safe +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/applesmc.768/hwmon +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/applesmc.768/hwmon/hwmon2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/hwmon/hwmon2/device +SymlinkTo: ../../../applesmc.768 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/applesmc.768/name +Lines: 1 +applesmc +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/coretemp.0 +Mode: 755 +# ttar - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/coretemp.0/hwmon +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/coretemp.0/hwmon/hwmon0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/device +SymlinkTo: ../../../coretemp.0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/name +Lines: 1 +coretemp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_crit +Lines: 1 +100000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_crit_alarm +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_input +Lines: 1 +55000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_label +Lines: 1 +Physical id 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_max +Lines: 1 +84000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_crit +Lines: 1 +100000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_crit_alarm +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_input +Lines: 1 +54000 +Mode: 644 +# ttar - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_label +Lines: 1 +Core 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_max +Lines: 1 +84000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_crit +Lines: 1 +100000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_crit_alarm +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_input +Lines: 1 +52000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_label +Lines: 1 +Core 1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_max +Lines: 1 +84000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_crit +Lines: 1 +100000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_crit_alarm +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_input +Lines: 1 +53000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_label +Lines: 1 +Core 2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_max +Lines: 1 +84000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_crit +Lines: 1 +100000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_crit_alarm +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_input +Lines: 1 +50000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_label +Lines: 1 +Core 3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_max +Lines: 1 +84000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/coretemp.1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/coretemp.1/hwmon +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/coretemp.1/hwmon/hwmon1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/device +SymlinkTo: ../../../coretemp.1 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/name +Lines: 1 +coretemp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_crit +Lines: 1 +100000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_crit_alarm +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_input +Lines: 1 +55000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_label +Lines: 1 +Physical id 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_max +Lines: 1 +84000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_crit +Lines: 1 +100000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_crit_alarm +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_input +Lines: 1 +54000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_label +Lines: 1 +Core 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_max +Lines: 1 +84000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_crit +Lines: 1 +100000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_crit_alarm +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_input +Lines: 1 +52000 +Mode: 644 +# ttar - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_label +Lines: 1 +Core 1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_max +Lines: 1 +84000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_crit +Lines: 1 +100000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_crit_alarm +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_input +Lines: 1 +53000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_label +Lines: 1 +Core 2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_max +Lines: 1 +84000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_crit +Lines: 1 +100000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_crit_alarm +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_input +Lines: 1 +50000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_label +Lines: 1 +Core 3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_max +Lines: 1 +84000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/nct6775.656 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/nct6775.656/hwmon +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/platform/nct6775.656/hwmon/hwmon3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_alarm +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_beep +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_input +Lines: 1 +1098 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_min +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_pulses +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_target +Lines: 1 +27000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_tolerance +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_alarm +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_beep +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_input +Lines: 1 +792 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_max +Lines: 1 +1744 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_min +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_alarm +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_beep +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_input +Lines: 1 +1024 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_max +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_min +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/intrusion0_alarm +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/intrusion0_beep +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/intrusion1_alarm +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/intrusion1_beep +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/name +Lines: 1 +nct6779 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point1_pwm +Lines: 1 +153 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point1_temp +Lines: 1 +30000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point2_pwm +Lines: 1 +255 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point2_temp +Lines: 1 +70000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point3_pwm +Lines: 1 +255 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point3_temp +Lines: 1 +70000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point4_pwm +Lines: 1 +255 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point4_temp +Lines: 1 +70000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point5_pwm +Lines: 1 +255 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point5_temp +Lines: 1 +75000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_crit_temp_tolerance +Lines: 1 +2000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_enable +Lines: 1 +5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_floor +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_mode +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_start +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_step_down_time +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_step_up_time +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_stop_time +Lines: 1 +6000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_target_temp +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_temp_sel +Lines: 1 +7 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_temp_tolerance +Lines: 1 +0 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_duty_base +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_duty_step +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_temp_sel +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_temp_step +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_temp_step_base +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_temp_step_tol +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/clocksource +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/clocksource/clocksource0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/clocksource/clocksource0/available_clocksource +Lines: 1 +tsc hpet acpi_pm +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/clocksource/clocksource0/current_clocksource +Lines: 1 +tsc +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu +Mode: 755 +# ttar 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu0/cpufreq +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_transition_latency +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu0/cpufreq/related_cpus +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors +Lines: 1 +performance powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq +Lines: 1 +1699981 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu0/cpufreq/scaling_driver +Lines: 1 +intel_pstate +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu0/cpufreq/scaling_governor +Lines: 1 +powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq +Lines: 1 +3700000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq +Lines: 1 +800000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed +Lines: 1 + +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu0/thermal_throttle +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count +Lines: 1 +5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count +Lines: 1 +30 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu0/topology +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu0/topology/core_id +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu0/topology/physical_package_id +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu1/cpufreq +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu1/cpufreq/related_cpus +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors +Lines: 1 +performance powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu1/cpufreq/scaling_cur_freq +Lines: 1 +1699981 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu1/cpufreq/scaling_driver +Lines: 1 +intel_pstate +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Path: sys/devices/system/cpu/cpu1/cpufreq/scaling_governor +Lines: 1 +powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq +Lines: 1 +3700000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq +Lines: 1 +800000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed +Lines: 1 + +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu1/thermal_throttle +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count +Lines: 1 +30 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu1/topology +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu1/topology/core_id +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu1/topology/physical_package_id +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu2/cpufreq +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_transition_latency +Lines: 1 +0 
+Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu2/cpufreq/related_cpus +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu2/cpufreq/scaling_available_governors +Lines: 1 +performance powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu2/cpufreq/scaling_cur_freq +Lines: 1 +8000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu2/cpufreq/scaling_driver +Lines: 1 +intel_pstate +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu2/cpufreq/scaling_governor +Lines: 1 +powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu2/cpufreq/scaling_max_freq +Lines: 1 +4200000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu2/cpufreq/scaling_min_freq +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu2/cpufreq/scaling_setspeed +Lines: 1 + +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu2/thermal_throttle +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu2/thermal_throttle/core_throttle_count +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu2/thermal_throttle/package_throttle_count +Lines: 1 +6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
sys/devices/system/cpu/cpu2/topology +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu2/topology/core_id +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu2/topology/physical_package_id +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu3/cpufreq +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu3/cpufreq/cpuinfo_transition_latency +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu3/cpufreq/related_cpus +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu3/cpufreq/scaling_available_governors +Lines: 1 +performance powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu3/cpufreq/scaling_cur_freq +Lines: 1 +8000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu3/cpufreq/scaling_driver +Lines: 1 +intel_pstate +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu3/cpufreq/scaling_governor +Lines: 1 +powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu3/cpufreq/scaling_max_freq +Lines: 1 +4200000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu3/cpufreq/scaling_min_freq 
+Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu3/cpufreq/scaling_setspeed +Lines: 1 + +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu3/thermal_throttle +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu3/thermal_throttle/core_throttle_count +Lines: 1 +9 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu3/thermal_throttle/package_throttle_count +Lines: 1 +6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/cpu/cpu3/topology +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu3/topology/core_id +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/cpu/cpu3/topology/physical_package_id +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/edac +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/edac/mc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/edac/mc/mc0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/edac/mc/mc0/ce_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/edac/mc/mc0/ce_noinfo_count +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/edac/mc/mc0/csrow0 +Mode: 755 
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/edac/mc/mc0/csrow0/ce_count +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/edac/mc/mc0/csrow0/ue_count +Lines: 1 +4 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/edac/mc/mc0/ue_count +Lines: 1 +5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/edac/mc/mc0/ue_noinfo_count +Lines: 1 +6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/node +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/node/node0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/node/node0/cpu0 +SymlinkTo: ../../cpu/cpu0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/node/node0/cpu1 +SymlinkTo: ../../cpu/cpu1 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/node/node0/cpulist +Lines: 1 +0-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/node/node0/meminfo +Lines: 29 +Node 0 MemTotal: 134182340 kB +Node 0 MemFree: 53030372 kB +Node 0 MemUsed: 81151968 kB +Node 0 Active: 5456380 kB +Node 0 Inactive: 59150184 kB +Node 0 Active(anon): 691324 kB +Node 0 Inactive(anon): 340456 kB +Node 0 Active(file): 4765056 kB +Node 0 Inactive(file): 58809728 kB +Node 0 Unevictable: 0 kB +Node 0 Mlocked: 0 kB +Node 0 Dirty: 20 kB +Node 0 Writeback: 0 kB +Node 0 FilePages: 70170916 kB +Node 0 Mapped: 894240 kB +Node 0 AnonPages: 788196 kB +Node 0 Shmem: 47860 kB +Node 0 KernelStack: 34016 kB +Node 0 
PageTables: 143304 kB +Node 0 NFS_Unstable: 0 kB +Node 0 Bounce: 0 kB +Node 0 WritebackTmp: 0 kB +Node 0 Slab: 6654304 kB +Node 0 SReclaimable: 4473124 kB +Node 0 SUnreclaim: 2181180 kB +Node 0 AnonHugePages: 147456 kB +Node 0 HugePages_Total: 0 +Node 0 HugePages_Free: 0 +Node 0 HugePages_Surp: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/node/node0/numastat +Lines: 6 +numa_hit 193460335812 +numa_miss 12624528 +numa_foreign 59858623300 +interleave_hit 57146 +local_node 193454780853 +other_node 18179487 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/node/node1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/node/node1/cpu2 +SymlinkTo: ../../cpu/cpu2 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/node/node1/cpu3 +SymlinkTo: ../../cpu/cpu3 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/node/node1/cpulist +Lines: 1 +2-3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/node/node1/meminfo +Lines: 29 +Node 1 MemTotal: 134217728 kB +Node 1 MemFree: 39634788 kB +Node 1 MemUsed: 94582940 kB Node 1 Active: 5604496 kB Node 1 Inactive: 71450592 kB Node 1 Active(anon): 590464 kB @@ -1492,256 +3689,944 @@ Node 1 HugePages_Free: 0 Node 1 HugePages_Surp: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/devices/system/node/node1/numastat -Lines: 6 -numa_hit 326720946761 -numa_miss 59858626709 -numa_foreign 12624528 -interleave_hit 57286 -local_node 326719046550 -other_node 59860526920 +Path: sys/devices/system/node/node1/numastat +Lines: 6 +numa_hit 326720946761 +numa_miss 59858626709 +numa_foreign 12624528 +interleave_hit 57286 
+local_node 326719046550 +other_node 59860526920 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/system/node/node2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/node/node2/cpulist +Lines: 1 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/node/node2/meminfo +Lines: 29 +Node 2 MemTotal: 134217728 kB +Node 2 MemFree: 39634788 kB +Node 2 MemUsed: 94582940 kB +Node 2 Active: 5604496 kB +Node 2 Inactive: 71450592 kB +Node 2 Active(anon): 590464 kB +Node 2 Inactive(anon): 285088 kB +Node 2 Active(file): 5014032 kB +Node 2 Inactive(file): 71165504 kB +Node 2 Unevictable: 0 kB +Node 2 Mlocked: 0 kB +Node 2 Dirty: 120 kB +Node 2 Writeback: 0 kB +Node 2 FilePages: 83579188 kB +Node 2 Mapped: 864112 kB +Node 2 AnonPages: 671932 kB +Node 2 Shmem: 87580 kB +Node 2 KernelStack: 31104 kB +Node 2 PageTables: 124272 kB +Node 2 NFS_Unstable: 0 kB +Node 2 Bounce: 0 kB +Node 2 WritebackTmp: 0 kB +Node 2 Slab: 7020716 kB +Node 2 SReclaimable: 4614084 kB +Node 2 SUnreclaim: 2406632 kB +Node 2 AnonHugePages: 90112 kB +Node 2 HugePages_Total: 0 +Node 2 HugePages_Free: 0 +Node 2 HugePages_Surp: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/system/node/node2/numastat +Lines: 6 +numa_hit 26720946761 +numa_miss 9858626709 +numa_foreign 2624528 +interleave_hit 7286 +local_node 26719046550 +other_node 9860526920 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/virtual +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/virtual/thermal +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/virtual/thermal/cooling_device0 +Mode: 755 +# ttar - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/virtual/thermal/cooling_device0/cur_state +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/virtual/thermal/cooling_device0/max_state +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/virtual/thermal/cooling_device0/type +Lines: 1 +Processor +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/devices/virtual/thermal/thermal_zone0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/virtual/thermal/thermal_zone0/policy +Lines: 1 +step_wise +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/virtual/thermal/thermal_zone0/temp +Lines: 1 +12376 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/devices/virtual/thermal/thermal_zone0/type +Lines: 1 +cpu-thermal +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 +SymlinkTo: ../../../devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 +SymlinkTo: ../../../devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us +Lines: 1 +1305 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day +Mode: 755 
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses 
+Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads +Lines: 1 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/fs -Mode: 755 +Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_may_use +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_readonly +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_used +Lines: 1 +808189952 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_total +Lines: 1 +2147483648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_used +Lines: 1 +808189952 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/flags +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/total_bytes +Lines: 1 +2147483648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/used_bytes +Lines: 1 +808189952 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes +Lines: 1 +2147483648 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/fs/bcache -Mode: 755 +Path: 
sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes_pinned +Lines: 1 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_reserved +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_size +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_may_use +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 -SymlinkTo: ../../../devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_readonly +Lines: 1 +131072 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_reserved Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 -SymlinkTo: ../../../devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_used +Lines: 1 +933888 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_total Lines: 1 -100 -Mode: 644 +2147483648 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_used Lines: 1 -0 -Mode: 644 +1867776 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/flags +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/total_bytes Lines: 1 -1 -Mode: 644 +1073741824 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/used_bytes Lines: 1 -0 -Mode: 644 +933888 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes Lines: 1 -1305 -Mode: 644 +1073741824 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent +Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use Lines: 1 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day -Mode: 755 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used Lines: 1 -100 -Mode: 644 +16384 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total Lines: 1 -289 -Mode: 644 +16777216 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used +Lines: 1 +32768 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes +Lines: 1 +8388608 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes +Lines: 1 +8388608 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned Lines: 1 0 
+Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size +Lines: 1 +20971520 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size +Lines: 1 +20971520 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref +Lines: 1 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref Lines: 1 -0 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label +Lines: 1 +fixture Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid +Lines: 1 +0abb23a9-579b-43e6-ad30-227ef47fcb9d +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute +Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed +Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total +Lines: 1 +644087808 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes +Lines: 1 +644087808 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes +Lines: 1 +644087808 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use Lines: 1 -0 -Mode: 644 +16777216 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly +Lines: 1 +262144 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used +Lines: 1 +114688 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total +Lines: 1 +429391872 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used +Lines: 1 +114688 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes +Lines: 1 +429391872 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes +Lines: 1 +114688 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes +Lines: 1 +429391872 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits +Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes Lines: 1 -0 -Mode: 644 +16777216 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes Lines: 1 -0 -Mode: 644 +16384 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned Lines: 1 0 
+Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22/size +Lines: 1 +20971520 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio +Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23/size Lines: 1 -100 +20971520 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits +Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24/size Lines: 1 -546 +20971520 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions +Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25/size Lines: 1 -0 +20971520 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses +Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata Lines: 1 -0 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref +Lines: 1 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref Lines: 1 -0 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56 +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid +Lines: 1 +7f07c59f-6136-449c-ab87-e1cf2328731b +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/xfs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -1833,7 +4718,3 @@ Lines: 1 20 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: sys/.unpacked -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/collector/fixtures/textfile/client_side_timestamp.out b/collector/fixtures/textfile/client_side_timestamp.out new file mode 100644 index 0000000000..75ad6924b7 --- /dev/null +++ b/collector/fixtures/textfile/client_side_timestamp.out @@ -0,0 +1,3 @@ +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 1 diff --git a/collector/fixtures/textfile/client_side_timestamp/metrics.prom b/collector/fixtures/textfile/client_side_timestamp/metrics.prom new file mode 100644 index 0000000000..fe3199bca1 --- /dev/null +++ b/collector/fixtures/textfile/client_side_timestamp/metrics.prom @@ -0,0 +1,2 @@ +metric_with_custom_timestamp 1 1441205977284 +normal_metric 2 diff --git a/collector/fixtures/textfile/different_metric_types.out b/collector/fixtures/textfile/different_metric_types.out new file mode 100644 index 0000000000..c01c197cce --- /dev/null +++ b/collector/fixtures/textfile/different_metric_types.out @@ -0,0 +1,32 @@ +# HELP event_duration_seconds_total Query timings +# TYPE event_duration_seconds_total summary +event_duration_seconds_total{baz="inner_eval",quantile="0.5"} 1.073e-06 
+event_duration_seconds_total{baz="inner_eval",quantile="0.9"} 1.928e-06 +event_duration_seconds_total{baz="inner_eval",quantile="0.99"} 4.35e-06 +event_duration_seconds_total_sum{baz="inner_eval"} 1.8652166505091474e+06 +event_duration_seconds_total_count{baz="inner_eval"} 1.492355615e+09 +event_duration_seconds_total{baz="prepare_time",quantile="0.5"} 4.283e-06 +event_duration_seconds_total{baz="prepare_time",quantile="0.9"} 7.796e-06 +event_duration_seconds_total{baz="prepare_time",quantile="0.99"} 2.2083e-05 +event_duration_seconds_total_sum{baz="prepare_time"} 840923.7919437207 +event_duration_seconds_total_count{baz="prepare_time"} 1.492355814e+09 +event_duration_seconds_total{baz="result_append",quantile="0.5"} 1.566e-06 +event_duration_seconds_total{baz="result_append",quantile="0.9"} 3.223e-06 +event_duration_seconds_total{baz="result_append",quantile="0.99"} 6.53e-06 +event_duration_seconds_total_sum{baz="result_append"} 4.404109951000078 +event_duration_seconds_total_count{baz="result_append"} 1.427647e+06 +event_duration_seconds_total{baz="result_sort",quantile="0.5"} 1.847e-06 +event_duration_seconds_total{baz="result_sort",quantile="0.9"} 2.975e-06 +event_duration_seconds_total{baz="result_sort",quantile="0.99"} 4.08e-06 +event_duration_seconds_total_sum{baz="result_sort"} 3.4123187829998307 +event_duration_seconds_total_count{baz="result_sort"} 1.427647e+06 +# HELP events_total this is a test metric +# TYPE events_total counter +events_total{foo="bar"} 10 +events_total{foo="baz"} 20 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
+# TYPE node_textfile_mtime_seconds gauge +node_textfile_mtime_seconds{file="fixtures/textfile/different_metric_types/metrics.prom"} 1 +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 diff --git a/collector/fixtures/textfile/different_metric_types/metrics.prom b/collector/fixtures/textfile/different_metric_types/metrics.prom new file mode 100644 index 0000000000..3ba8a65a57 --- /dev/null +++ b/collector/fixtures/textfile/different_metric_types/metrics.prom @@ -0,0 +1,28 @@ +# HELP events_total this is a test metric +# TYPE events_total counter +events_total{foo="bar"} 10 +events_total{foo="baz"} 20 + +# HELP event_duration_seconds_total Query timings +# TYPE event_duration_seconds_total summary +event_duration_seconds_total{baz="inner_eval",quantile="0.5"} 1.073e-06 +event_duration_seconds_total{baz="inner_eval",quantile="0.9"} 1.928e-06 +event_duration_seconds_total{baz="inner_eval",quantile="0.99"} 4.35e-06 +event_duration_seconds_total_sum{baz="inner_eval"} 1.8652166505091474e+06 +event_duration_seconds_total_count{baz="inner_eval"} 1.492355615e+09 +event_duration_seconds_total{baz="prepare_time",quantile="0.5"} 4.283e-06 +event_duration_seconds_total{baz="prepare_time",quantile="0.9"} 7.796e-06 +event_duration_seconds_total{baz="prepare_time",quantile="0.99"} 2.2083e-05 +event_duration_seconds_total_sum{baz="prepare_time"} 840923.7919437207 +event_duration_seconds_total_count{baz="prepare_time"} 1.492355814e+09 +event_duration_seconds_total{baz="result_append",quantile="0.5"} 1.566e-06 +event_duration_seconds_total{baz="result_append",quantile="0.9"} 3.223e-06 +event_duration_seconds_total{baz="result_append",quantile="0.99"} 6.53e-06 +event_duration_seconds_total_sum{baz="result_append"} 4.404109951000078 +event_duration_seconds_total_count{baz="result_append"} 1.427647e+06 +event_duration_seconds_total{baz="result_sort",quantile="0.5"} 
1.847e-06 +event_duration_seconds_total{baz="result_sort",quantile="0.9"} 2.975e-06 +event_duration_seconds_total{baz="result_sort",quantile="0.99"} 4.08e-06 +event_duration_seconds_total_sum{baz="result_sort"} 3.4123187829998307 +event_duration_seconds_total_count{baz="result_sort"} 1.427647e+06 + diff --git a/collector/fixtures/textfile/glob_extra_dimension.out b/collector/fixtures/textfile/glob_extra_dimension.out new file mode 100644 index 0000000000..bbf7f45413 --- /dev/null +++ b/collector/fixtures/textfile/glob_extra_dimension.out @@ -0,0 +1,49 @@ +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. +# TYPE node_textfile_mtime_seconds gauge +node_textfile_mtime_seconds{file="fixtures/textfile/histogram_extra_dimension/metrics.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/summary_extra_dimension/metrics.prom"} 1 +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 +# HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute. 
+# TYPE prometheus_rule_evaluation_duration_seconds summary +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="alerting",quantile="0.9"} 0.001765451 +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="alerting",quantile="0.99"} 0.018672076 +prometheus_rule_evaluation_duration_seconds_sum{handler="",rule_type="alerting"} 214.85081044700146 +prometheus_rule_evaluation_duration_seconds_count{handler="",rule_type="alerting"} 185209 +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.5"} 4.3132e-05 +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.9"} 8.9295e-05 +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.99"} 0.000193657 +prometheus_rule_evaluation_duration_seconds_sum{handler="",rule_type="recording"} 185091.01317759082 +prometheus_rule_evaluation_duration_seconds_count{handler="",rule_type="recording"} 1.0020195e+08 +prometheus_rule_evaluation_duration_seconds{handler="foo",rule_type="alerting",quantile="0.5"} 0.000571464 +prometheus_rule_evaluation_duration_seconds_sum{handler="foo",rule_type="alerting"} 0 +prometheus_rule_evaluation_duration_seconds_count{handler="foo",rule_type="alerting"} 0 +# HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction +# TYPE prometheus_tsdb_compaction_chunk_range histogram +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="100"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1600"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="25600"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="102400"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="409600"} 1.412839e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1.6384e+06"} 
1.69185e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6.5536e+06"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="2.62144e+07"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="+Inf"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_sum{foo="bar"} 6.71393432189e+11 +prometheus_tsdb_compaction_chunk_range_count{foo="bar"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="100"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1600"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="25600"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="102400"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="409600"} 1.412839e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1.6384e+06"} 1.69185e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6.5536e+06"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="2.62144e+07"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="+Inf"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_sum{foo="baz"} 6.71393432189e+11 +prometheus_tsdb_compaction_chunk_range_count{foo="baz"} 1.691853e+06 diff --git a/collector/fixtures/textfile/histogram.out b/collector/fixtures/textfile/histogram.out new file mode 100644 index 0000000000..f649e19a2f --- /dev/null +++ b/collector/fixtures/textfile/histogram.out @@ -0,0 +1,21 @@ +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
+# TYPE node_textfile_mtime_seconds gauge +node_textfile_mtime_seconds{file="fixtures/textfile/histogram/metrics.prom"} 1 +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 +# HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction +# TYPE prometheus_tsdb_compaction_chunk_range histogram +prometheus_tsdb_compaction_chunk_range_bucket{le="100"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{le="400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{le="1600"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{le="6400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{le="25600"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{le="102400"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{le="409600"} 1.412839e+06 +prometheus_tsdb_compaction_chunk_range_bucket{le="1.6384e+06"} 1.69185e+06 +prometheus_tsdb_compaction_chunk_range_bucket{le="6.5536e+06"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{le="2.62144e+07"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{le="+Inf"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_sum 6.71393432189e+11 +prometheus_tsdb_compaction_chunk_range_count 1.691853e+06 diff --git a/collector/fixtures/textfile/histogram/metrics.prom b/collector/fixtures/textfile/histogram/metrics.prom new file mode 100644 index 0000000000..ac8377a2af --- /dev/null +++ b/collector/fixtures/textfile/histogram/metrics.prom @@ -0,0 +1,15 @@ +# HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction +# TYPE prometheus_tsdb_compaction_chunk_range histogram +prometheus_tsdb_compaction_chunk_range_bucket{le="100"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{le="400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{le="1600"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{le="6400"} 0 
+prometheus_tsdb_compaction_chunk_range_bucket{le="25600"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{le="102400"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{le="409600"} 1.412839e+06 +prometheus_tsdb_compaction_chunk_range_bucket{le="1.6384e+06"} 1.69185e+06 +prometheus_tsdb_compaction_chunk_range_bucket{le="6.5536e+06"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{le="2.62144e+07"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{le="+Inf"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_sum 6.71393432189e+11 +prometheus_tsdb_compaction_chunk_range_count 1.691853e+06 diff --git a/collector/fixtures/textfile/histogram_extra_dimension.out b/collector/fixtures/textfile/histogram_extra_dimension.out new file mode 100644 index 0000000000..2f6aa854ad --- /dev/null +++ b/collector/fixtures/textfile/histogram_extra_dimension.out @@ -0,0 +1,34 @@ +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. +# TYPE node_textfile_mtime_seconds gauge +node_textfile_mtime_seconds{file="fixtures/textfile/histogram_extra_dimension/metrics.prom"} 1 +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 +# HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction +# TYPE prometheus_tsdb_compaction_chunk_range histogram +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="100"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1600"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="25600"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="102400"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="409600"} 1.412839e+06 
+prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1.6384e+06"} 1.69185e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6.5536e+06"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="2.62144e+07"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="+Inf"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_sum{foo="bar"} 6.71393432189e+11 +prometheus_tsdb_compaction_chunk_range_count{foo="bar"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="100"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1600"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="25600"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="102400"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="409600"} 1.412839e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1.6384e+06"} 1.69185e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6.5536e+06"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="2.62144e+07"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="+Inf"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_sum{foo="baz"} 6.71393432189e+11 +prometheus_tsdb_compaction_chunk_range_count{foo="baz"} 1.691853e+06 diff --git a/collector/fixtures/textfile/histogram_extra_dimension/metrics.prom b/collector/fixtures/textfile/histogram_extra_dimension/metrics.prom new file mode 100644 index 0000000000..55bb81e097 --- /dev/null +++ b/collector/fixtures/textfile/histogram_extra_dimension/metrics.prom @@ -0,0 +1,28 @@ +# HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction +# TYPE prometheus_tsdb_compaction_chunk_range histogram 
+prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="100"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1600"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="25600"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="102400"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="409600"} 1.412839e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1.6384e+06"} 1.69185e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6.5536e+06"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="2.62144e+07"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="+Inf"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_sum{foo="bar"} 6.71393432189e+11 +prometheus_tsdb_compaction_chunk_range_count{foo="bar"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="100"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1600"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6400"} 0 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="25600"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="102400"} 7 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="409600"} 1.412839e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1.6384e+06"} 1.69185e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6.5536e+06"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="2.62144e+07"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="+Inf"} 1.691853e+06 +prometheus_tsdb_compaction_chunk_range_sum{foo="baz"} 6.71393432189e+11 +prometheus_tsdb_compaction_chunk_range_count{foo="baz"} 1.691853e+06 diff --git 
a/collector/fixtures/textfile/inconsistent_metrics.out b/collector/fixtures/textfile/inconsistent_metrics.out new file mode 100644 index 0000000000..45ad4535e6 --- /dev/null +++ b/collector/fixtures/textfile/inconsistent_metrics.out @@ -0,0 +1,29 @@ +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines{foo=""} 20 +go_goroutines{foo="bar"} 229 +# HELP http_requests_total Total number of HTTP requests made. +# TYPE http_requests_total counter +http_requests_total{baz="",code="200",foo="",handler="",method="get"} 11 +http_requests_total{baz="",code="200",foo="",handler="alerts",method="get"} 35 +http_requests_total{baz="",code="200",foo="",handler="config",method="get"} 8 +http_requests_total{baz="",code="200",foo="",handler="flags",method="get"} 18 +http_requests_total{baz="",code="200",foo="",handler="graph",method="get"} 89 +http_requests_total{baz="",code="200",foo="",handler="prometheus",method="get"} 17051 +http_requests_total{baz="",code="200",foo="",handler="query",method="get"} 401 +http_requests_total{baz="",code="200",foo="",handler="query_range",method="get"} 15663 +http_requests_total{baz="",code="200",foo="",handler="rules",method="get"} 7 +http_requests_total{baz="",code="200",foo="",handler="series",method="get"} 221 +http_requests_total{baz="",code="200",foo="",handler="static",method="get"} 1647 +http_requests_total{baz="",code="200",foo="",handler="status",method="get"} 12 +http_requests_total{baz="",code="200",foo="bar",handler="",method="get"} 325 +http_requests_total{baz="",code="206",foo="",handler="static",method="get"} 2 +http_requests_total{baz="",code="400",foo="",handler="query_range",method="get"} 40 +http_requests_total{baz="",code="503",foo="",handler="query_range",method="get"} 3 +http_requests_total{baz="bar",code="200",foo="",handler="",method="get"} 93 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
+# TYPE node_textfile_mtime_seconds gauge +node_textfile_mtime_seconds{file="fixtures/textfile/inconsistent_metrics/metrics.prom"} 1 +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 diff --git a/collector/fixtures/textfile/inconsistent_metrics/metrics.prom b/collector/fixtures/textfile/inconsistent_metrics/metrics.prom new file mode 100644 index 0000000000..dfbdd62908 --- /dev/null +++ b/collector/fixtures/textfile/inconsistent_metrics/metrics.prom @@ -0,0 +1,24 @@ +# HELP http_requests_total Total number of HTTP requests made. +# TYPE http_requests_total counter +http_requests_total{code="200",handler="alerts",method="get"} 35 +http_requests_total{code="200",handler="config",method="get"} 8 +http_requests_total{code="200",method="get", foo="bar"} 325 +http_requests_total{code="200",handler="flags",method="get"} 18 +http_requests_total{code="200",handler="graph",method="get"} 89 +http_requests_total{code="200",method="get", baz="bar"} 93 +http_requests_total{code="200",handler="prometheus",method="get"} 17051 +http_requests_total{code="200",handler="query",method="get"} 401 +http_requests_total{code="200",handler="query_range",method="get"} 15663 +http_requests_total{code="200",handler="rules",method="get"} 7 +http_requests_total{code="200",handler="series",method="get"} 221 +http_requests_total{code="200",handler="static",method="get"} 1647 +http_requests_total{code="200",handler="status",method="get"} 12 +http_requests_total{code="200",method="get"} 11 +http_requests_total{code="206",handler="static",method="get"} 2 +http_requests_total{code="400",handler="query_range",method="get"} 40 +http_requests_total{code="503",handler="query_range",method="get"} 3 + +# HELP go_goroutines Number of goroutines that currently exist. 
+# TYPE go_goroutines gauge +go_goroutines{foo="bar"} 229 +go_goroutines 20 diff --git a/collector/fixtures/textfile/no_metric_files.out b/collector/fixtures/textfile/no_metric_files.out index 59a11b901e..cf36bd09c3 100644 --- a/collector/fixtures/textfile/no_metric_files.out +++ b/collector/fixtures/textfile/no_metric_files.out @@ -1,8 +1,3 @@ -name: "node_textfile_scrape_error" -help: "1 if there was an error opening or reading a file, 0 otherwise" -type: GAUGE -metric: < - gauge: < - value: 0 - > -> +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 diff --git a/collector/fixtures/textfile/nonexistent_path.out b/collector/fixtures/textfile/nonexistent_path.out index 83b1fd0dfe..75ad6924b7 100644 --- a/collector/fixtures/textfile/nonexistent_path.out +++ b/collector/fixtures/textfile/nonexistent_path.out @@ -1,8 +1,3 @@ -name: "node_textfile_scrape_error" -help: "1 if there was an error opening or reading a file, 0 otherwise" -type: GAUGE -metric: < - gauge: < - value: 1 - > -> +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 1 diff --git a/collector/fixtures/textfile/summary.out b/collector/fixtures/textfile/summary.out new file mode 100644 index 0000000000..c83dba97f3 --- /dev/null +++ b/collector/fixtures/textfile/summary.out @@ -0,0 +1,28 @@ +# HELP event_duration_seconds_total Query timings +# TYPE event_duration_seconds_total summary +event_duration_seconds_total{baz="inner_eval",quantile="0.5"} 1.073e-06 +event_duration_seconds_total{baz="inner_eval",quantile="0.9"} 1.928e-06 +event_duration_seconds_total{baz="inner_eval",quantile="0.99"} 4.35e-06 +event_duration_seconds_total_sum{baz="inner_eval"} 1.8652166505091474e+06 +event_duration_seconds_total_count{baz="inner_eval"} 1.492355615e+09 
+event_duration_seconds_total{baz="prepare_time",quantile="0.5"} 4.283e-06 +event_duration_seconds_total{baz="prepare_time",quantile="0.9"} 7.796e-06 +event_duration_seconds_total{baz="prepare_time",quantile="0.99"} 2.2083e-05 +event_duration_seconds_total_sum{baz="prepare_time"} 840923.7919437207 +event_duration_seconds_total_count{baz="prepare_time"} 1.492355814e+09 +event_duration_seconds_total{baz="result_append",quantile="0.5"} 1.566e-06 +event_duration_seconds_total{baz="result_append",quantile="0.9"} 3.223e-06 +event_duration_seconds_total{baz="result_append",quantile="0.99"} 6.53e-06 +event_duration_seconds_total_sum{baz="result_append"} 4.404109951000078 +event_duration_seconds_total_count{baz="result_append"} 1.427647e+06 +event_duration_seconds_total{baz="result_sort",quantile="0.5"} 1.847e-06 +event_duration_seconds_total{baz="result_sort",quantile="0.9"} 2.975e-06 +event_duration_seconds_total{baz="result_sort",quantile="0.99"} 4.08e-06 +event_duration_seconds_total_sum{baz="result_sort"} 3.4123187829998307 +event_duration_seconds_total_count{baz="result_sort"} 1.427647e+06 +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
+# TYPE node_textfile_mtime_seconds gauge +node_textfile_mtime_seconds{file="fixtures/textfile/summary/metrics.prom"} 1 +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 diff --git a/collector/fixtures/textfile/summary/metrics.prom b/collector/fixtures/textfile/summary/metrics.prom new file mode 100644 index 0000000000..b6cbccbac3 --- /dev/null +++ b/collector/fixtures/textfile/summary/metrics.prom @@ -0,0 +1,22 @@ +# HELP event_duration_seconds_total Query timings +# TYPE event_duration_seconds_total summary +event_duration_seconds_total{baz="inner_eval",quantile="0.5"} 1.073e-06 +event_duration_seconds_total{baz="inner_eval",quantile="0.9"} 1.928e-06 +event_duration_seconds_total{baz="inner_eval",quantile="0.99"} 4.35e-06 +event_duration_seconds_total_sum{baz="inner_eval"} 1.8652166505091474e+06 +event_duration_seconds_total_count{baz="inner_eval"} 1.492355615e+09 +event_duration_seconds_total{baz="prepare_time",quantile="0.5"} 4.283e-06 +event_duration_seconds_total{baz="prepare_time",quantile="0.9"} 7.796e-06 +event_duration_seconds_total{baz="prepare_time",quantile="0.99"} 2.2083e-05 +event_duration_seconds_total_sum{baz="prepare_time"} 840923.7919437207 +event_duration_seconds_total_count{baz="prepare_time"} 1.492355814e+09 +event_duration_seconds_total{baz="result_append",quantile="0.5"} 1.566e-06 +event_duration_seconds_total{baz="result_append",quantile="0.9"} 3.223e-06 +event_duration_seconds_total{baz="result_append",quantile="0.99"} 6.53e-06 +event_duration_seconds_total_sum{baz="result_append"} 4.404109951000078 +event_duration_seconds_total_count{baz="result_append"} 1.427647e+06 +event_duration_seconds_total{baz="result_sort",quantile="0.5"} 1.847e-06 +event_duration_seconds_total{baz="result_sort",quantile="0.9"} 2.975e-06 +event_duration_seconds_total{baz="result_sort",quantile="0.99"} 4.08e-06 
+event_duration_seconds_total_sum{baz="result_sort"} 3.4123187829998307 +event_duration_seconds_total_count{baz="result_sort"} 1.427647e+06 diff --git a/collector/fixtures/textfile/summary_extra_dimension.out b/collector/fixtures/textfile/summary_extra_dimension.out new file mode 100644 index 0000000000..d49e8a1d2f --- /dev/null +++ b/collector/fixtures/textfile/summary_extra_dimension.out @@ -0,0 +1,20 @@ +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. +# TYPE node_textfile_mtime_seconds gauge +node_textfile_mtime_seconds{file="fixtures/textfile/summary_extra_dimension/metrics.prom"} 1 +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 +# HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute. +# TYPE prometheus_rule_evaluation_duration_seconds summary +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="alerting",quantile="0.9"} 0.001765451 +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="alerting",quantile="0.99"} 0.018672076 +prometheus_rule_evaluation_duration_seconds_sum{handler="",rule_type="alerting"} 214.85081044700146 +prometheus_rule_evaluation_duration_seconds_count{handler="",rule_type="alerting"} 185209 +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.5"} 4.3132e-05 +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.9"} 8.9295e-05 +prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.99"} 0.000193657 +prometheus_rule_evaluation_duration_seconds_sum{handler="",rule_type="recording"} 185091.01317759082 +prometheus_rule_evaluation_duration_seconds_count{handler="",rule_type="recording"} 1.0020195e+08 +prometheus_rule_evaluation_duration_seconds{handler="foo",rule_type="alerting",quantile="0.5"} 0.000571464 
+prometheus_rule_evaluation_duration_seconds_sum{handler="foo",rule_type="alerting"} 0 +prometheus_rule_evaluation_duration_seconds_count{handler="foo",rule_type="alerting"} 0 diff --git a/collector/fixtures/textfile/summary_extra_dimension/metrics.prom b/collector/fixtures/textfile/summary_extra_dimension/metrics.prom new file mode 100644 index 0000000000..33ab859507 --- /dev/null +++ b/collector/fixtures/textfile/summary_extra_dimension/metrics.prom @@ -0,0 +1,12 @@ +# HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute. +# TYPE prometheus_rule_evaluation_duration_seconds summary +prometheus_rule_evaluation_duration_seconds{rule_type="alerting",quantile="0.5", handler="foo"} 0.000571464 +prometheus_rule_evaluation_duration_seconds{rule_type="alerting",quantile="0.9"} 0.001765451 +prometheus_rule_evaluation_duration_seconds{rule_type="alerting",quantile="0.99"} 0.018672076 +prometheus_rule_evaluation_duration_seconds_sum{rule_type="alerting"} 214.85081044700146 +prometheus_rule_evaluation_duration_seconds_count{rule_type="alerting"} 185209 +prometheus_rule_evaluation_duration_seconds{rule_type="recording",quantile="0.5"} 4.3132e-05 +prometheus_rule_evaluation_duration_seconds{rule_type="recording",quantile="0.9"} 8.9295e-05 +prometheus_rule_evaluation_duration_seconds{rule_type="recording",quantile="0.99"} 0.000193657 +prometheus_rule_evaluation_duration_seconds_sum{rule_type="recording"} 185091.01317759082 +prometheus_rule_evaluation_duration_seconds_count{rule_type="recording"} 1.0020195e+08 diff --git a/collector/fixtures/textfile/two_metric_files.out b/collector/fixtures/textfile/two_metric_files.out index 4fb60e2f26..fbff74dd5f 100644 --- a/collector/fixtures/textfile/two_metric_files.out +++ b/collector/fixtures/textfile/two_metric_files.out @@ -1,79 +1,19 @@ -name: "node_textfile_mtime" -help: "Unixtime mtime of textfiles successfully read." 
-type: GAUGE -metric: < - label: < - name: "file" - value: "metrics1.prom" - > - gauge: < - value: 1 - > -> -metric: < - label: < - name: "file" - value: "metrics2.prom" - > - gauge: < - value: 2 - > -> -name: "node_textfile_scrape_error" -help: "1 if there was an error opening or reading a file, 0 otherwise" -type: GAUGE -metric: < - gauge: < - value: 0 - > -> -name: "testmetric1_1" -help: "Metric read from fixtures/textfile/two_metric_files/metrics1.prom" -type: UNTYPED -metric: < - label: < - name: "foo" - value: "bar" - > - untyped: < - value: 10 - > -> -name: "testmetric1_2" -help: "Metric read from fixtures/textfile/two_metric_files/metrics1.prom" -type: UNTYPED -metric: < - label: < - name: "foo" - value: "baz" - > - untyped: < - value: 20 - > -> -name: "testmetric2_1" -help: "Metric read from fixtures/textfile/two_metric_files/metrics2.prom" -type: UNTYPED -metric: < - label: < - name: "foo" - value: "bar" - > - untyped: < - value: 30 - > - timestamp_ms: 1441205977284 -> -name: "testmetric2_2" -help: "Metric read from fixtures/textfile/two_metric_files/metrics2.prom" -type: UNTYPED -metric: < - label: < - name: "foo" - value: "baz" - > - untyped: < - value: 40 - > - timestamp_ms: 1441205977284 -> +# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
+# TYPE node_textfile_mtime_seconds gauge +node_textfile_mtime_seconds{file="fixtures/textfile/two_metric_files/metrics1.prom"} 1 +node_textfile_mtime_seconds{file="fixtures/textfile/two_metric_files/metrics2.prom"} 1 +# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise +# TYPE node_textfile_scrape_error gauge +node_textfile_scrape_error 0 +# HELP testmetric1_1 Metric read from fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_1 untyped +testmetric1_1{foo="bar"} 10 +# HELP testmetric1_2 Metric read from fixtures/textfile/two_metric_files/metrics1.prom +# TYPE testmetric1_2 untyped +testmetric1_2{foo="baz"} 20 +# HELP testmetric2_1 Metric read from fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_1 untyped +testmetric2_1{foo="bar"} 30 +# HELP testmetric2_2 Metric read from fixtures/textfile/two_metric_files/metrics2.prom +# TYPE testmetric2_2 untyped +testmetric2_2{foo="baz"} 40 diff --git a/collector/fixtures/textfile/two_metric_files/metrics2.prom b/collector/fixtures/textfile/two_metric_files/metrics2.prom index 9d089840b9..81234f9329 100644 --- a/collector/fixtures/textfile/two_metric_files/metrics2.prom +++ b/collector/fixtures/textfile/two_metric_files/metrics2.prom @@ -1,2 +1,2 @@ -testmetric2_1{foo="bar"} 30 1441205977284 -testmetric2_2{foo="baz"} 40 1441205977284 +testmetric2_1{foo="bar"} 30 +testmetric2_2{foo="baz"} 40 diff --git a/collector/fixtures/usr/lib/os-release b/collector/fixtures/usr/lib/os-release new file mode 100644 index 0000000000..f228f22264 --- /dev/null +++ b/collector/fixtures/usr/lib/os-release @@ -0,0 +1,12 @@ +NAME="Ubuntu" +VERSION="20.04.2 LTS (Focal Fossa)" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 20.04.2 LTS" +VERSION_ID="20.04" +HOME_URL="https://www.ubuntu.com/" +SUPPORT_URL="https://help.ubuntu.com/" +BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" +PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" 
+VERSION_CODENAME=focal +UBUNTU_CODENAME=focal diff --git a/collector/fixtures/wifi/wlan0/stationinfo.json b/collector/fixtures/wifi/wlan0/stationinfo.json index 84bcb6951b..8bb0f35ce5 100644 --- a/collector/fixtures/wifi/wlan0/stationinfo.json +++ b/collector/fixtures/wifi/wlan0/stationinfo.json @@ -1,10 +1,25 @@ -{ - "connected": 30000000000, - "inactive": 400000000, - "receivebitrate": 128000000, - "transmitbitrate": 164000000, - "signal": -52, - "transmitretries": 10, - "transmitfailed": 2, - "beaconloss": 1 -} +[ + { + "hardwareaddr": "qrvM3e7/", + "connected": 30000000000, + "inactive": 400000000, + "receivebitrate": 128000000, + "transmitbitrate": 164000000, + "signal": -52, + "transmitretries": 10, + "transmitfailed": 2, + "beaconloss": 1 + }, + { + "hardwareaddr": "AQIDBAUG", + "connected": 60000000000, + "inactive": 800000000, + "receivebitrate": 256000000, + "transmitbitrate": 328000000, + "signal": -26, + "transmitretries": 20, + "transmitfailed": 4, + "beaconloss": 2 + } +] + diff --git a/collector/fixtures_bindmount/proc/mounts b/collector/fixtures_bindmount/proc/mounts new file mode 100644 index 0000000000..32f9567e98 --- /dev/null +++ b/collector/fixtures_bindmount/proc/mounts @@ -0,0 +1,6 @@ +/dev/nvme1n0 /host ext4 rw,seclabel,relatime,data=ordered 0 0 +/dev/nvme1n1 /host/media/volume1 ext4 rw,seclabel,relatime,data=ordered 0 0 +/dev/nvme1n2 /host/media/volume2 ext4 rw,seclabel,relatime,data=ordered 0 0 +tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0 +tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0 +tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0 diff --git a/collector/fixtures_hidepid/proc/mounts b/collector/fixtures_hidepid/proc/mounts new file mode 100644 index 0000000000..fb6a96359c --- /dev/null +++ b/collector/fixtures_hidepid/proc/mounts @@ -0,0 +1 @@ +rootfs / rootfs rw 0 0 diff --git a/collector/ganglia/format.go b/collector/ganglia/format.go deleted file mode 100644 index 89ec044058..0000000000 --- 
a/collector/ganglia/format.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ganglia provides types for unmarshalling gmond's XML output. -// -// Not used elements in gmond's XML output are commented. -// In case you want to use them, please change the names so that one -// can understand without needing to know what the acronym stands for. -package ganglia - -import "encoding/xml" - -// ExtraElement describes EXTRA_ELEMENT elements. -type ExtraElement struct { - Name string `xml:"NAME,attr"` - Val string `xml:"VAL,attr"` -} - -// ExtraData describes EXTRA_DATA elements. -type ExtraData struct { - ExtraElements []ExtraElement `xml:"EXTRA_ELEMENT"` -} - -// Metric describes METRIC elements. -type Metric struct { - Name string `xml:"NAME,attr"` - Value float64 `xml:"VAL,attr"` - /* - Unit string `xml:"UNITS,attr"` - Slope string `xml:"SLOPE,attr"` - Tn int `xml:"TN,attr"` - Tmax int `xml:"TMAX,attr"` - Dmax int `xml:"DMAX,attr"` - */ - ExtraData ExtraData `xml:"EXTRA_DATA"` -} - -// Host describes HOST elements. 
-type Host struct { - Name string `xml:"NAME,attr"` - /* - Ip string `xml:"IP,attr"` - Tags string `xml:"TAGS,attr"` - Reported int `xml:"REPORTED,attr"` - Tn int `xml:"TN,attr"` - Tmax int `xml:"TMAX,attr"` - Dmax int `xml:"DMAX,attr"` - Location string `xml:"LOCATION,attr"` - GmondStarted int `xml:"GMOND_STARTED",attr"` - */ - Metrics []Metric `xml:"METRIC"` -} - -// Cluster describes CLUSTER elements. -type Cluster struct { - Name string `xml:"NAME,attr"` - /* - Owner string `xml:"OWNER,attr"` - LatLong string `xml:"LATLONG,attr"` - Url string `xml:"URL,attr"` - Localtime int `xml:"LOCALTIME,attr"` - */ - Hosts []Host `xml:"HOST"` -} - -// Ganglia describes the top-level XML structure. -type Ganglia struct { - XMLNAME xml.Name `xml:"GANGLIA_XML"` - Clusters []Cluster `xml:"CLUSTER"` -} diff --git a/collector/gmond.go b/collector/gmond.go deleted file mode 100644 index f0d4119b0a..0000000000 --- a/collector/gmond.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build !nogmond - -package collector - -import ( - "bufio" - "encoding/xml" - "fmt" - "io" - "net" - "regexp" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" - "github.com/prometheus/node_exporter/collector/ganglia" -) - -const ( - gangliaAddress = "127.0.0.1:8649" - gangliaProto = "tcp" - gangliaTimeout = 30 * time.Second - gangliaNamespace = "ganglia" -) - -type gmondCollector struct { - metrics map[string]*prometheus.GaugeVec -} - -func init() { - registerCollector("gmond", defaultDisabled, NewGmondCollector) -} - -var illegalCharsRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) - -// NewGmondCollector returns a new Collector scraping ganglia. -func NewGmondCollector() (Collector, error) { - warnDeprecated("gmond") - c := gmondCollector{ - metrics: map[string]*prometheus.GaugeVec{}, - } - - return &c, nil -} - -func (c *gmondCollector) Update(ch chan<- prometheus.Metric) error { - conn, err := net.Dial(gangliaProto, gangliaAddress) - log.Debugf("gmondCollector Update") - if err != nil { - return fmt.Errorf("can't connect to gmond: %s", err) - } - conn.SetDeadline(time.Now().Add(gangliaTimeout)) - - ganglia := ganglia.Ganglia{} - decoder := xml.NewDecoder(bufio.NewReader(conn)) - decoder.CharsetReader = toUtf8 - - err = decoder.Decode(&ganglia) - if err != nil { - return fmt.Errorf("couldn't parse xml: %s", err) - } - - for _, cluster := range ganglia.Clusters { - for _, host := range cluster.Hosts { - - for _, metric := range host.Metrics { - name := illegalCharsRE.ReplaceAllString(metric.Name, "_") - - c.setMetric(name, cluster.Name, metric) - } - } - } - for _, m := range c.metrics { - m.Collect(ch) - } - return err -} - -func (c *gmondCollector) setMetric(name, cluster string, metric ganglia.Metric) { - if _, ok := c.metrics[name]; !ok { - var desc string - var title string - for _, element := range metric.ExtraData.ExtraElements { - switch element.Name { - case "DESC": - desc = element.Val - case "TITLE": - 
title = element.Val - } - if title != "" && desc != "" { - break - } - } - log.Debugf("Register %s: %s", name, desc) - c.metrics[name] = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: gangliaNamespace, - Name: name, - Help: desc, - }, - []string{"cluster"}, - ) - } - log.Debugf("Set %s{cluster=%q}: %f", name, cluster, metric.Value) - c.metrics[name].WithLabelValues(cluster).Set(metric.Value) -} - -func toUtf8(charset string, input io.Reader) (io.Reader, error) { - return input, nil //FIXME -} diff --git a/collector/helper.go b/collector/helper.go index 2bf461e7b6..527fa74227 100644 --- a/collector/helper.go +++ b/collector/helper.go @@ -14,7 +14,9 @@ package collector import ( + "bytes" "io/ioutil" + "regexp" "strconv" "strings" ) @@ -30,3 +32,33 @@ func readUintFromFile(path string) (uint64, error) { } return value, nil } + +// Take a []byte{} and return a string based on null termination. +// This is useful for situations where the OS has returned a null terminated +// string to use. +// If this function happens to receive a byteArray that contains no nulls, we +// simply convert the array to a string with no bounding. +func bytesToString(byteArray []byte) string { + n := bytes.IndexByte(byteArray, 0) + if n < 0 { + return string(byteArray) + } + return string(byteArray[:n]) +} + +var metricNameRegex = regexp.MustCompile(`_*[^0-9A-Za-z_]+_*`) + +// Sanitize the given metric name by replacing invalid characters by underscores. +// +// OpenMetrics and the Prometheus exposition format require the metric name +// to consist only of alphanumericals and "_", ":" and they must not start +// with digits. Since colons in MetricFamily are reserved to signal that the +// MetricFamily is the result of a calculation or aggregation of a general +// purpose monitoring system, colons will be replaced as well. 
+// +// Note: If not subsequently prepending a namespace and/or subsystem (e.g., +// with prometheus.BuildFQName), the caller must ensure that the supplied +// metricName does not begin with a digit. +func SanitizeMetricName(metricName string) string { + return metricNameRegex.ReplaceAllString(metricName, "_") +} diff --git a/collector/helper_test.go b/collector/helper_test.go new file mode 100644 index 0000000000..15fe5054c1 --- /dev/null +++ b/collector/helper_test.go @@ -0,0 +1,82 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "testing" +) + +func TestBytesToString(t *testing.T) { + tests := []struct { + name string + b []byte + expected string + }{ + { + "Single null byte", + []byte{0}, + "", + }, + { + "Empty byte array", + []byte{}, + "", + }, + { + "Not null terminated", + []byte{65, 66, 67}, + "ABC", + }, + { + "Null randomly in array", + []byte{65, 66, 67, 0, 65, 0, 65}, + "ABC", + }, + { + "Array starts with null and contains other valid bytes", + []byte{0, 65, 66, 67, 0}, + "", + }, + } + + for _, tt := range tests { + name := tt.name + b := tt.b + result := bytesToString(b) + expected := tt.expected + + if result != expected { + t.Errorf("bytesToString(%#v): Name: %s, expected %#v, got %#v)", b, name, expected, result) + } + } +} + +func TestSanitizeMetricName(t *testing.T) { + testcases := map[string]string{ + "": "", + "rx_errors": "rx_errors", + "Queue[0] AllocFails": "Queue_0_AllocFails", + "Tx LPI entry count": "Tx_LPI_entry_count", + "port.VF_admin_queue_requests": "port_VF_admin_queue_requests", + "[3]: tx_bytes": "_3_tx_bytes", + " err": "_err", + } + + for metricName, expected := range testcases { + got := SanitizeMetricName(metricName) + if expected != got { + t.Errorf("Expected '%s' but got '%s'", expected, got) + } + } +} diff --git a/collector/hwmon_linux.go b/collector/hwmon_linux.go index 8e1ee4db24..3178b1ccce 100644 --- a/collector/hwmon_linux.go +++ b/collector/hwmon_linux.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nohwmon // +build !nohwmon package collector @@ -19,14 +20,15 @@ import ( "errors" "io/ioutil" "os" - "path" "path/filepath" "regexp" "strconv" "strings" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" + "golang.org/x/sys/unix" ) var ( @@ -45,12 +47,14 @@ func init() { registerCollector("hwmon", defaultEnabled, NewHwMonCollector) } -type hwMonCollector struct{} +type hwMonCollector struct { + logger log.Logger +} // NewHwMonCollector returns a new Collector exposing /sys/class/hwmon stats // (similar to lm-sensors). -func NewHwMonCollector() (Collector, error) { - return &hwMonCollector{}, nil +func NewHwMonCollector(logger log.Logger) (Collector, error) { + return &hwMonCollector{logger}, nil } func cleanMetricName(name string) string { @@ -61,8 +65,8 @@ func cleanMetricName(name string) string { } func addValueFile(data map[string]map[string]string, sensor string, prop string, file string) { - raw, e := ioutil.ReadFile(file) - if e != nil { + raw, err := sysReadFile(file) + if err != nil { return } value := strings.Trim(string(raw), "\n") @@ -74,6 +78,28 @@ func addValueFile(data map[string]map[string]string, sensor string, prop string, data[sensor][prop] = value } +// sysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +func sysReadFile(file string) ([]byte, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + // On some machines, hwmon drivers are broken and return EAGAIN. This causes + // Go's ioutil.ReadFile implementation to poll forever. + // + // Since we either want to read data or bail immediately, do the simplest + // possible read using system call directly. + b := make([]byte, 128) + n, err := unix.Read(int(f.Fd()), b) + if err != nil { + return nil, err + } + + return b[:n], nil +} + // explodeSensorFilename splits a sensor name into _. 
func explodeSensorFilename(filename string) (ok bool, sensorType string, sensorNum int, sensorProperty string) { matches := hwmonFilenameFormat.FindStringSubmatch(filename) @@ -115,7 +141,7 @@ func collectSensorData(dir string, data map[string]map[string]string) error { for _, t := range hwmonSensorTypes { if t == sensorType { - addValueFile(data, sensorType+strconv.Itoa(sensorNum), sensorProperty, path.Join(dir, file.Name())) + addValueFile(data, sensorType+strconv.Itoa(sensorNum), sensorProperty, filepath.Join(dir, file.Name())) break } } @@ -134,8 +160,8 @@ func (c *hwMonCollector) updateHwmon(ch chan<- prometheus.Metric, dir string) er if err != nil { return err } - if _, err := os.Stat(path.Join(dir, "device")); err == nil { - err := collectSensorData(path.Join(dir, "device"), data) + if _, err := os.Stat(filepath.Join(dir, "device")); err == nil { + err := collectSensorData(filepath.Join(dir, "device"), data) if err != nil { return err } @@ -251,6 +277,9 @@ func (c *hwMonCollector) updateHwmon(ch chan<- prometheus.Metric, dir string) er continue } if sensorType == "temp" && element != "type" { + if element == "" { + element = "input" + } desc := prometheus.NewDesc(name+"_celsius", "Hardware monitor for temperature ("+element+")", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, parsedValue*0.001, labels...) @@ -320,17 +349,17 @@ func (c *hwMonCollector) hwmonName(dir string) (string, error) { // However the path of the device has to be stable: // - /sys/devices// // Some hardware monitors have a "name" file that exports a human - // readbale name that can be used. + // readable name that can be used. 
// human readable names would be bat0 or coretemp, while a path string // could be platform_applesmc.768 // preference 1: construct a name based on device name, always unique - devicePath, devErr := filepath.EvalSymlinks(path.Join(dir, "device")) + devicePath, devErr := filepath.EvalSymlinks(filepath.Join(dir, "device")) if devErr == nil { - devPathPrefix, devName := path.Split(devicePath) - _, devType := path.Split(strings.TrimRight(devPathPrefix, "/")) + devPathPrefix, devName := filepath.Split(devicePath) + _, devType := filepath.Split(strings.TrimRight(devPathPrefix, "/")) cleanDevName := cleanMetricName(devName) cleanDevType := cleanMetricName(devType) @@ -345,7 +374,7 @@ func (c *hwMonCollector) hwmonName(dir string) (string, error) { } // preference 2: is there a name file - sysnameRaw, nameErr := ioutil.ReadFile(path.Join(dir, "name")) + sysnameRaw, nameErr := ioutil.ReadFile(filepath.Join(dir, "name")) if nameErr == nil && string(sysnameRaw) != "" { cleanName := cleanMetricName(string(sysnameRaw)) if cleanName != "" { @@ -362,7 +391,7 @@ func (c *hwMonCollector) hwmonName(dir string) (string, error) { } // take the last path element, this will be hwmonX - _, name := path.Split(realDir) + _, name := filepath.Split(realDir) cleanName := cleanMetricName(name) if cleanName != "" { return cleanName, nil @@ -373,7 +402,7 @@ func (c *hwMonCollector) hwmonName(dir string) (string, error) { // hwmonHumanReadableChipName is similar to the methods in hwmonName, but with // different precedences -- we can allow duplicates here. 
func (c *hwMonCollector) hwmonHumanReadableChipName(dir string) (string, error) { - sysnameRaw, nameErr := ioutil.ReadFile(path.Join(dir, "name")) + sysnameRaw, nameErr := ioutil.ReadFile(filepath.Join(dir, "name")) if nameErr != nil { return "", nameErr } @@ -392,20 +421,20 @@ func (c *hwMonCollector) Update(ch chan<- prometheus.Metric) error { // Step 1: scan /sys/class/hwmon, resolve all symlinks and call // updatesHwmon for each folder - hwmonPathName := path.Join(sysFilePath("class"), "hwmon") + hwmonPathName := filepath.Join(sysFilePath("class"), "hwmon") hwmonFiles, err := ioutil.ReadDir(hwmonPathName) if err != nil { - if os.IsNotExist(err) { - log.Debug("hwmon collector metrics are not available for this system") - return nil + if errors.Is(err, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "hwmon collector metrics are not available for this system") + return ErrNoData } return err } for _, hwDir := range hwmonFiles { - hwmonXPathName := path.Join(hwmonPathName, hwDir.Name()) + hwmonXPathName := filepath.Join(hwmonPathName, hwDir.Name()) if hwDir.Mode()&os.ModeSymlink > 0 { hwDir, err = os.Stat(hwmonXPathName) diff --git a/collector/infiniband_linux.go b/collector/infiniband_linux.go index de8ce1950c..3afb7c45b6 100644 --- a/collector/infiniband_linux.go +++ b/collector/infiniband_linux.go @@ -1,4 +1,4 @@ -// Copyright 2017 The Prometheus Authors +// Copyright 2017-2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,36 +11,28 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build linux -// +build !noinfiniband +//go:build linux && !noinfiniband +// +build linux,!noinfiniband package collector import ( "errors" + "fmt" "os" - "path/filepath" + "strconv" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" -) - -const infinibandPath = "class/infiniband" - -var ( - errInfinibandNoDevicesFound = errors.New("no InfiniBand devices detected") - errInfinibandNoPortsFound = errors.New("no InfiniBand ports detected") + "github.com/prometheus/procfs/sysfs" ) type infinibandCollector struct { - metricDescs map[string]*prometheus.Desc - counters map[string]infinibandMetric - legacyCounters map[string]infinibandMetric -} - -type infinibandMetric struct { - File string - Help string + fs sysfs.FS + metricDescs map[string]*prometheus.Desc + logger log.Logger + subsystem string } func init() { @@ -48,49 +40,60 @@ func init() { } // NewInfiniBandCollector returns a new Collector exposing InfiniBand stats. -func NewInfiniBandCollector() (Collector, error) { +func NewInfiniBandCollector(logger log.Logger) (Collector, error) { var i infinibandCollector + var err error - // Filenames of all InfiniBand counter metrics including a detailed description. 
- i.counters = map[string]infinibandMetric{ - "link_downed_total": {"link_downed", "Number of times the link failed to recover from an error state and went down"}, - "link_error_recovery_total": {"link_error_recovery", "Number of times the link successfully recovered from an error state"}, - "multicast_packets_received_total": {"multicast_rcv_packets", "Number of multicast packets received (including errors)"}, - "multicast_packets_transmitted_total": {"multicast_xmit_packets", "Number of multicast packets transmitted (including errors)"}, - "port_data_received_bytes": {"port_rcv_data", "Number of data octets received on all links"}, - "port_data_transmitted_bytes": {"port_xmit_data", "Number of data octets transmitted on all links"}, - "unicast_packets_received_total": {"unicast_rcv_packets", "Number of unicast packets received (including errors)"}, - "unicast_packets_transmitted_total": {"unicast_xmit_packets", "Number of unicast packets transmitted (including errors)"}, + i.fs, err = sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) } - - // Deprecated counters for some older versions of InfiniBand drivers. 
- i.legacyCounters = map[string]infinibandMetric{ - "legacy_multicast_packets_received_total": {"port_multicast_rcv_packets", "Number of multicast packets received"}, - "legacy_multicast_packets_transmitted_total": {"port_multicast_xmit_packets", "Number of multicast packets transmitted"}, - "legacy_data_received_bytes_total": {"port_rcv_data_64", "Number of data octets received on all links"}, - "legacy_packets_received_total": {"port_rcv_packets_64", "Number of data packets received on all links"}, - "legacy_unicast_packets_received_total": {"port_unicast_rcv_packets", "Number of unicast packets received"}, - "legacy_unicast_packets_transmitted_total": {"port_unicast_xmit_packets", "Number of unicast packets transmitted"}, - "legacy_data_transmitted_bytes_total": {"port_xmit_data_64", "Number of data octets transmitted on all links"}, - "legacy_packets_transmitted_total": {"port_xmit_packets_64", "Number of data packets received on all links"}, + i.logger = logger + + // Detailed description for all metrics. 
+ descriptions := map[string]string{ + "legacy_multicast_packets_received_total": "Number of multicast packets received", + "legacy_multicast_packets_transmitted_total": "Number of multicast packets transmitted", + "legacy_data_received_bytes_total": "Number of data octets received on all links", + "legacy_packets_received_total": "Number of data packets received on all links", + "legacy_unicast_packets_received_total": "Number of unicast packets received", + "legacy_unicast_packets_transmitted_total": "Number of unicast packets transmitted", + "legacy_data_transmitted_bytes_total": "Number of data octets transmitted on all links", + "legacy_packets_transmitted_total": "Number of data packets received on all links", + "excessive_buffer_overrun_errors_total": "Number of times that OverrunErrors consecutive flow control update periods occurred, each having at least one overrun error.", + "link_downed_total": "Number of times the link failed to recover from an error state and went down", + "link_error_recovery_total": "Number of times the link successfully recovered from an error state", + "local_link_integrity_errors_total": "Number of times that the count of local physical errors exceeded the threshold specified by LocalPhyErrors.", + "multicast_packets_received_total": "Number of multicast packets received (including errors)", + "multicast_packets_transmitted_total": "Number of multicast packets transmitted (including errors)", + "physical_state_id": "Physical state of the InfiniBand port (0: no change, 1: sleep, 2: polling, 3: disable, 4: shift, 5: link up, 6: link error recover, 7: phytest)", + "port_constraint_errors_received_total": "Number of packets received on the switch physical port that are discarded", + "port_constraint_errors_transmitted_total": "Number of packets not transmitted from the switch physical port", + "port_data_received_bytes_total": "Number of data octets received on all links", + "port_data_transmitted_bytes_total": "Number of data octets 
transmitted on all links", + "port_discards_received_total": "Number of inbound packets discarded by the port because the port is down or congested", + "port_discards_transmitted_total": "Number of outbound packets discarded by the port because the port is down or congested", + "port_errors_received_total": "Number of packets containing an error that were received on this port", + "port_packets_received_total": "Number of packets received on all VLs by this port (including errors)", + "port_packets_transmitted_total": "Number of packets transmitted on all VLs from this port (including errors)", + "port_transmit_wait_total": "Number of ticks during which the port had data to transmit but no data was sent during the entire tick", + "rate_bytes_per_second": "Maximum signal transfer rate", + "state_id": "State of the InfiniBand port (0: no change, 1: down, 2: init, 3: armed, 4: active, 5: act defer)", + "unicast_packets_received_total": "Number of unicast packets received (including errors)", + "unicast_packets_transmitted_total": "Number of unicast packets transmitted (including errors)", + "port_receive_remote_physical_errors_total": "Number of packets marked with the EBP (End of Bad Packet) delimiter received on the port.", + "port_receive_switch_relay_errors_total": "Number of packets that could not be forwarded by the switch.", + "symbol_error_total": "Number of minor link errors detected on one or more physical lanes.", + "vl15_dropped_total": "Number of incoming VL15 packets dropped due to resource limitations.", } - subsystem := "infiniband" i.metricDescs = make(map[string]*prometheus.Desc) + i.subsystem = "infiniband" - for metricName, infinibandMetric := range i.counters { - i.metricDescs[metricName] = prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, metricName), - infinibandMetric.Help, - []string{"device", "port"}, - nil, - ) - } - - for metricName, infinibandMetric := range i.legacyCounters { + for metricName, description := range 
descriptions { i.metricDescs[metricName] = prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, metricName), - infinibandMetric.Help, + prometheus.BuildFQName(namespace, i.subsystem, metricName), + description, []string{"device", "port"}, nil, ) @@ -99,132 +102,73 @@ func NewInfiniBandCollector() (Collector, error) { return &i, nil } -// infinibandDevices retrieves a list of InfiniBand devices. -func infinibandDevices(infinibandPath string) ([]string, error) { - devices, err := filepath.Glob(filepath.Join(infinibandPath, "/*")) - if err != nil { - return nil, err - } - - if len(devices) < 1 { - log.Debugf("Unable to detect InfiniBand devices") - err = errInfinibandNoDevicesFound - return nil, err - } - - // Extract just the filenames which equate to the device names. - for i, device := range devices { - devices[i] = filepath.Base(device) - } - - return devices, nil +func (c *infinibandCollector) pushMetric(ch chan<- prometheus.Metric, name string, value uint64, deviceName string, port string, valueType prometheus.ValueType) { + ch <- prometheus.MustNewConstMetric(c.metricDescs[name], valueType, float64(value), deviceName, port) } -// Retrieve a list of ports for the InfiniBand device. -func infinibandPorts(infinibandPath, device string) ([]string, error) { - ports, err := filepath.Glob(filepath.Join(infinibandPath, device, "ports/*")) - if err != nil { - return nil, err - } - - if len(ports) < 1 { - log.Debugf("Unable to detect ports for %s", device) - err = errInfinibandNoPortsFound - return nil, err - } - - // Extract just the filenames which equates to the port numbers. 
- for i, port := range ports { - ports[i] = filepath.Base(port) +func (c *infinibandCollector) pushCounter(ch chan<- prometheus.Metric, name string, value *uint64, deviceName string, port string) { + if value != nil { + c.pushMetric(ch, name, *value, deviceName, port, prometheus.CounterValue) } - - return ports, nil -} - -func readMetric(directory, metricFile string) (uint64, error) { - metric, err := readUintFromFile(filepath.Join(directory, metricFile)) - if err != nil { - log.Debugf("Error reading %q file", metricFile) - return 0, err - } - - // According to Mellanox, the following metrics "are divided by 4 unconditionally" - // as they represent the amount of data being transmitted and received per lane. - // Mellanox cards have 4 lanes per port, so all values must be multiplied by 4 - // to get the expected value. - switch metricFile { - case "port_rcv_data", "port_xmit_data", "port_rcv_data_64", "port_xmit_data_64": - metric *= 4 - } - - return metric, nil } func (c *infinibandCollector) Update(ch chan<- prometheus.Metric) error { - devices, err := infinibandDevices(sysFilePath(infinibandPath)) - - // If no devices are found or another error is raised while attempting to find devices, - // InfiniBand is likely not installed and the collector should be skipped. - switch err { - case nil: - case errInfinibandNoDevicesFound: - return nil - default: - return err + devices, err := c.fs.InfiniBandClass() + if err != nil { + if errors.Is(err, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "infiniband statistics not found, skipping") + return ErrNoData + } + return fmt.Errorf("error obtaining InfiniBand class info: %w", err) } for _, device := range devices { - ports, err := infinibandPorts(sysFilePath(infinibandPath), device) - - // If no ports are found for the specified device, skip to the next device. 
- switch err { - case nil: - case errInfinibandNoPortsFound: - continue - default: - return err - } - - for _, port := range ports { - portFiles := sysFilePath(filepath.Join(infinibandPath, device, "ports", port)) - - // Add metrics for the InfiniBand counters. - for metricName, infinibandMetric := range c.counters { - if _, err := os.Stat(filepath.Join(portFiles, "counters", infinibandMetric.File)); os.IsNotExist(err) { - continue - } - metric, err := readMetric(filepath.Join(portFiles, "counters"), infinibandMetric.File) - if err != nil { - return err - } - - ch <- prometheus.MustNewConstMetric( - c.metricDescs[metricName], - prometheus.CounterValue, - float64(metric), - device, - port, - ) - } - - // Add metrics for the legacy InfiniBand counters. - for metricName, infinibandMetric := range c.legacyCounters { - if _, err := os.Stat(filepath.Join(portFiles, "counters_ext", infinibandMetric.File)); os.IsNotExist(err) { - continue - } - metric, err := readMetric(filepath.Join(portFiles, "counters_ext"), infinibandMetric.File) - if err != nil { - return err - } - - ch <- prometheus.MustNewConstMetric( - c.metricDescs[metricName], - prometheus.CounterValue, - float64(metric), - device, - port, - ) - } + infoDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "info"), + "Non-numeric data from /sys/class/infiniband/, value is always 1.", + []string{"device", "board_id", "firmware_version", "hca_type"}, + nil, + ) + infoValue := 1.0 + ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, device.Name, device.BoardID, device.FirmwareVersion, device.HCAType) + + for _, port := range device.Ports { + portStr := strconv.FormatUint(uint64(port.Port), 10) + + c.pushMetric(ch, "state_id", uint64(port.StateID), port.Name, portStr, prometheus.GaugeValue) + c.pushMetric(ch, "physical_state_id", uint64(port.PhysStateID), port.Name, portStr, prometheus.GaugeValue) + c.pushMetric(ch, "rate_bytes_per_second", port.Rate, port.Name, 
portStr, prometheus.GaugeValue) + + c.pushCounter(ch, "legacy_multicast_packets_received_total", port.Counters.LegacyPortMulticastRcvPackets, port.Name, portStr) + c.pushCounter(ch, "legacy_multicast_packets_transmitted_total", port.Counters.LegacyPortMulticastXmitPackets, port.Name, portStr) + c.pushCounter(ch, "legacy_data_received_bytes_total", port.Counters.LegacyPortRcvData64, port.Name, portStr) + c.pushCounter(ch, "legacy_packets_received_total", port.Counters.LegacyPortRcvPackets64, port.Name, portStr) + c.pushCounter(ch, "legacy_unicast_packets_received_total", port.Counters.LegacyPortUnicastRcvPackets, port.Name, portStr) + c.pushCounter(ch, "legacy_unicast_packets_transmitted_total", port.Counters.LegacyPortUnicastXmitPackets, port.Name, portStr) + c.pushCounter(ch, "legacy_data_transmitted_bytes_total", port.Counters.LegacyPortXmitData64, port.Name, portStr) + c.pushCounter(ch, "legacy_packets_transmitted_total", port.Counters.LegacyPortXmitPackets64, port.Name, portStr) + c.pushCounter(ch, "excessive_buffer_overrun_errors_total", port.Counters.ExcessiveBufferOverrunErrors, port.Name, portStr) + c.pushCounter(ch, "link_downed_total", port.Counters.LinkDowned, port.Name, portStr) + c.pushCounter(ch, "link_error_recovery_total", port.Counters.LinkErrorRecovery, port.Name, portStr) + c.pushCounter(ch, "local_link_integrity_errors_total", port.Counters.LocalLinkIntegrityErrors, port.Name, portStr) + c.pushCounter(ch, "multicast_packets_received_total", port.Counters.MulticastRcvPackets, port.Name, portStr) + c.pushCounter(ch, "multicast_packets_transmitted_total", port.Counters.MulticastXmitPackets, port.Name, portStr) + c.pushCounter(ch, "port_constraint_errors_received_total", port.Counters.PortRcvConstraintErrors, port.Name, portStr) + c.pushCounter(ch, "port_constraint_errors_transmitted_total", port.Counters.PortXmitConstraintErrors, port.Name, portStr) + c.pushCounter(ch, "port_data_received_bytes_total", port.Counters.PortRcvData, port.Name, portStr) 
+ c.pushCounter(ch, "port_data_transmitted_bytes_total", port.Counters.PortXmitData, port.Name, portStr) + c.pushCounter(ch, "port_discards_received_total", port.Counters.PortRcvDiscards, port.Name, portStr) + c.pushCounter(ch, "port_discards_transmitted_total", port.Counters.PortXmitDiscards, port.Name, portStr) + c.pushCounter(ch, "port_errors_received_total", port.Counters.PortRcvErrors, port.Name, portStr) + c.pushCounter(ch, "port_packets_received_total", port.Counters.PortRcvPackets, port.Name, portStr) + c.pushCounter(ch, "port_packets_transmitted_total", port.Counters.PortXmitPackets, port.Name, portStr) + c.pushCounter(ch, "port_transmit_wait_total", port.Counters.PortXmitWait, port.Name, portStr) + c.pushCounter(ch, "unicast_packets_received_total", port.Counters.UnicastRcvPackets, port.Name, portStr) + c.pushCounter(ch, "unicast_packets_transmitted_total", port.Counters.UnicastXmitPackets, port.Name, portStr) + c.pushCounter(ch, "port_receive_remote_physical_errors_total", port.Counters.PortRcvRemotePhysicalErrors, port.Name, portStr) + c.pushCounter(ch, "port_receive_switch_relay_errors_total", port.Counters.PortRcvSwitchRelayErrors, port.Name, portStr) + c.pushCounter(ch, "symbol_error_total", port.Counters.SymbolError, port.Name, portStr) + c.pushCounter(ch, "vl15_dropped_total", port.Counters.VL15Dropped, port.Name, portStr) } } diff --git a/collector/infiniband_linux_test.go b/collector/infiniband_linux_test.go deleted file mode 100644 index 68370c0504..0000000000 --- a/collector/infiniband_linux_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "testing" -) - -func TestInfiniBandDevices(t *testing.T) { - devices, err := infinibandDevices("fixtures/sys/class/infiniband") - if err != nil { - t.Fatal(err) - } - - if l := len(devices); l != 1 { - t.Fatalf("Retrieved an unexpected number of InfiniBand devices: %d", l) - } -} - -func TestInfiniBandPorts(t *testing.T) { - ports, err := infinibandPorts("fixtures/sys/class/infiniband", "mlx4_0") - if err != nil { - t.Fatal(err) - } - - if l := len(ports); l != 2 { - t.Fatalf("Retrieved an unexpected number of InfiniBand ports: %d", l) - } -} diff --git a/collector/interrupts_common.go b/collector/interrupts_common.go index b1c5f27f48..eea703f97e 100644 --- a/collector/interrupts_common.go +++ b/collector/interrupts_common.go @@ -11,15 +11,20 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build (linux || openbsd) && !nointerrupts // +build linux openbsd // +build !nointerrupts package collector -import "github.com/prometheus/client_golang/prometheus" +import ( + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" +) type interruptsCollector struct { - desc typedDesc + desc typedDesc + logger log.Logger } func init() { @@ -27,12 +32,13 @@ func init() { } // NewInterruptsCollector returns a new Collector exposing interrupts stats. 
-func NewInterruptsCollector() (Collector, error) { +func NewInterruptsCollector(logger log.Logger) (Collector, error) { return &interruptsCollector{ desc: typedDesc{prometheus.NewDesc( - namespace+"_interrupts", + namespace+"_interrupts_total", "Interrupt details.", interruptLabelNames, nil, ), prometheus.CounterValue}, + logger: logger, }, nil } diff --git a/collector/interrupts_linux.go b/collector/interrupts_linux.go index 1ef899c89a..4d74c536a6 100644 --- a/collector/interrupts_linux.go +++ b/collector/interrupts_linux.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nointerrupts // +build !nointerrupts package collector @@ -28,19 +29,19 @@ import ( ) var ( - interruptLabelNames = []string{"CPU", "type", "info", "devices"} + interruptLabelNames = []string{"cpu", "type", "info", "devices"} ) func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) (err error) { interrupts, err := getInterrupts() if err != nil { - return fmt.Errorf("couldn't get interrupts: %s", err) + return fmt.Errorf("couldn't get interrupts: %w", err) } for name, interrupt := range interrupts { for cpuNo, value := range interrupt.values { fv, err := strconv.ParseFloat(value, 64) if err != nil { - return fmt.Errorf("invalid value %s in interrupts: %s", value, err) + return fmt.Errorf("invalid value %s in interrupts: %w", value, err) } ch <- c.desc.mustNewConstMetric(fv, strconv.Itoa(cpuNo), name, interrupt.info, interrupt.devices) } @@ -82,7 +83,7 @@ func parseInterrupts(r io.Reader) (map[string]interrupt, error) { } intName := parts[0][:len(parts[0])-1] // remove trailing : intr := interrupt{ - values: parts[1:cpuNum], + values: parts[1 : cpuNum+1], } if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt diff --git a/collector/interrupts_linux_test.go b/collector/interrupts_linux_test.go index f076fe8a3a..02acb896a0 100644 --- a/collector/interrupts_linux_test.go +++ 
b/collector/interrupts_linux_test.go @@ -33,4 +33,8 @@ func TestInterrupts(t *testing.T) { if want, got := "5031", interrupts["NMI"].values[1]; want != got { t.Errorf("want interrupts %s, got %s", want, got) } + + if want, got := "4968", interrupts["NMI"].values[3]; want != got { + t.Errorf("want interrupts %s, got %s", want, got) + } } diff --git a/collector/interrupts_openbsd.go b/collector/interrupts_openbsd.go index c0fca98f57..ba73406954 100644 --- a/collector/interrupts_openbsd.go +++ b/collector/interrupts_openbsd.go @@ -11,7 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nointerrupts +//go:build openbsd && !amd64 && !nointerrupts +// +build openbsd,!amd64,!nointerrupts package collector @@ -95,20 +96,20 @@ sysctl_intr(struct intr *intr, int idx) import "C" var ( - interruptLabelNames = []string{"CPU", "type", "devices"} + interruptLabelNames = []string{"cpu", "type", "devices"} ) func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) error { interrupts, err := getInterrupts() if err != nil { - return fmt.Errorf("couldn't get interrupts: %s", err) + return fmt.Errorf("couldn't get interrupts: %w", err) } for dev, interrupt := range interrupts { for cpuNo, value := range interrupt.values { ch <- c.desc.mustNewConstMetric( value, strconv.Itoa(cpuNo), - fmt.Sprintf("%d", interrupt.vector), + strconv.Itoa(interrupt.vector), dev, ) } diff --git a/collector/interrupts_openbsd_amd64.go b/collector/interrupts_openbsd_amd64.go new file mode 100644 index 0000000000..ba1091c417 --- /dev/null +++ b/collector/interrupts_openbsd_amd64.go @@ -0,0 +1,110 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nointerrupts +// +build !nointerrupts + +package collector + +import ( + "fmt" + "strconv" + "unsafe" + + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" +) + +const ( + KERN_INTRCNT = 63 + KERN_INTRCNT_NUM = 1 + KERN_INTRCNT_CNT = 2 + KERN_INTRCNT_NAME = 3 + KERN_INTRCNT_VECTOR = 4 +) + +func nintr() _C_int { + mib := [3]_C_int{unix.CTL_KERN, KERN_INTRCNT, KERN_INTRCNT_NUM} + buf, err := sysctl(mib[:]) + if err != nil { + return 0 + } + return *(*_C_int)(unsafe.Pointer(&buf[0])) +} + +func intr(idx _C_int) (itr interrupt, err error) { + mib := [4]_C_int{unix.CTL_KERN, KERN_INTRCNT, KERN_INTRCNT_NAME, idx} + buf, err := sysctl(mib[:]) + if err != nil { + return + } + dev := *(*[128]byte)(unsafe.Pointer(&buf[0])) + itr.device = string(dev[:]) + + mib[2] = KERN_INTRCNT_VECTOR + buf, err = sysctl(mib[:]) + if err != nil { + return + } + itr.vector = *(*int)(unsafe.Pointer(&buf[0])) + + mib[2] = KERN_INTRCNT_CNT + buf, err = sysctl(mib[:]) + if err != nil { + return + } + count := *(*uint64)(unsafe.Pointer(&buf[0])) + itr.values = []float64{float64(count)} + return +} + +var interruptLabelNames = []string{"cpu", "type", "devices"} + +func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) error { + interrupts, err := getInterrupts() + if err != nil { + return fmt.Errorf("couldn't get interrupts: %s", err) + } + for dev, interrupt := range interrupts { + for cpuNo, value := range interrupt.values { + ch <- c.desc.mustNewConstMetric( + value, + strconv.Itoa(cpuNo), + fmt.Sprintf("%d", interrupt.vector), 
+ dev, + ) + } + } + return nil +} + +type interrupt struct { + vector int + device string + values []float64 +} + +func getInterrupts() (map[string]interrupt, error) { + var interrupts = map[string]interrupt{} + n := nintr() + + for i := _C_int(0); i < n; i++ { + itr, err := intr(i) + if err != nil { + return nil, err + } + interrupts[itr.device] = itr + } + + return interrupts, nil +} diff --git a/collector/ipvs_linux.go b/collector/ipvs_linux.go index 373919331a..4a9c14b6d3 100644 --- a/collector/ipvs_linux.go +++ b/collector/ipvs_linux.go @@ -11,54 +11,87 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !noipvs // +build !noipvs package collector import ( + "errors" "fmt" "os" + "sort" "strconv" + "strings" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" "github.com/prometheus/procfs" + kingpin "gopkg.in/alecthomas/kingpin.v2" ) type ipvsCollector struct { Collector fs procfs.FS + backendLabels []string backendConnectionsActive, backendConnectionsInact, backendWeight typedDesc connections, incomingPackets, outgoingPackets, incomingBytes, outgoingBytes typedDesc + logger log.Logger } +type ipvsBackendStatus struct { + ActiveConn uint64 + InactConn uint64 + Weight uint64 +} + +const ( + ipvsLabelLocalAddress = "local_address" + ipvsLabelLocalPort = "local_port" + ipvsLabelRemoteAddress = "remote_address" + ipvsLabelRemotePort = "remote_port" + ipvsLabelProto = "proto" + ipvsLabelLocalMark = "local_mark" +) + +var ( + fullIpvsBackendLabels = []string{ + ipvsLabelLocalAddress, + ipvsLabelLocalPort, + ipvsLabelRemoteAddress, + ipvsLabelRemotePort, + ipvsLabelProto, + ipvsLabelLocalMark, + } + ipvsLabels = kingpin.Flag("collector.ipvs.backend-labels", "Comma separated list for IPVS backend stats labels.").Default(strings.Join(fullIpvsBackendLabels, ",")).String() +) + func init() { 
registerCollector("ipvs", defaultEnabled, NewIPVSCollector) } // NewIPVSCollector sets up a new collector for IPVS metrics. It accepts the // "procfs" config parameter to override the default proc location (/proc). -func NewIPVSCollector() (Collector, error) { - return newIPVSCollector() +func NewIPVSCollector(logger log.Logger) (Collector, error) { + return newIPVSCollector(logger) } -func newIPVSCollector() (*ipvsCollector, error) { +func newIPVSCollector(logger log.Logger) (*ipvsCollector, error) { var ( - ipvsBackendLabelNames = []string{ - "local_address", - "local_port", - "remote_address", - "remote_port", - "proto", - } c ipvsCollector err error subsystem = "ipvs" ) + if c.backendLabels, err = c.parseIpvsLabels(*ipvsLabels); err != nil { + return nil, err + } + + c.logger = logger c.fs, err = procfs.NewFS(*procPath) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to open procfs: %w", err) } c.connections = typedDesc{prometheus.NewDesc( @@ -89,31 +122,31 @@ func newIPVSCollector() (*ipvsCollector, error) { c.backendConnectionsActive = typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "backend_connections_active"), "The current active connections by local and remote address.", - ipvsBackendLabelNames, nil, + c.backendLabels, nil, ), prometheus.GaugeValue} c.backendConnectionsInact = typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "backend_connections_inactive"), "The current inactive connections by local and remote address.", - ipvsBackendLabelNames, nil, + c.backendLabels, nil, ), prometheus.GaugeValue} c.backendWeight = typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "backend_weight"), "The current backend weight by local and remote address.", - ipvsBackendLabelNames, nil, + c.backendLabels, nil, ), prometheus.GaugeValue} return &c, nil } func (c *ipvsCollector) Update(ch chan<- prometheus.Metric) error { - ipvsStats, err := c.fs.NewIPVSStats() + ipvsStats, err 
:= c.fs.IPVSStats() if err != nil { // Cannot access ipvs metrics, report no error. - if os.IsNotExist(err) { - log.Debug("ipvs collector metrics are not available for this system") - return nil + if errors.Is(err, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "ipvs collector metrics are not available for this system") + return ErrNoData } - return fmt.Errorf("could not get IPVS stats: %s", err) + return fmt.Errorf("could not get IPVS stats: %w", err) } ch <- c.connections.mustNewConstMetric(float64(ipvsStats.Connections)) ch <- c.incomingPackets.mustNewConstMetric(float64(ipvsStats.IncomingPackets)) @@ -121,22 +154,79 @@ func (c *ipvsCollector) Update(ch chan<- prometheus.Metric) error { ch <- c.incomingBytes.mustNewConstMetric(float64(ipvsStats.IncomingBytes)) ch <- c.outgoingBytes.mustNewConstMetric(float64(ipvsStats.OutgoingBytes)) - backendStats, err := c.fs.NewIPVSBackendStatus() + backendStats, err := c.fs.IPVSBackendStatus() if err != nil { - return fmt.Errorf("could not get backend status: %s", err) + return fmt.Errorf("could not get backend status: %w", err) } + sums := map[string]ipvsBackendStatus{} + labelValues := map[string][]string{} for _, backend := range backendStats { - labelValues := []string{ - backend.LocalAddress.String(), - strconv.FormatUint(uint64(backend.LocalPort), 10), - backend.RemoteAddress.String(), - strconv.FormatUint(uint64(backend.RemotePort), 10), - backend.Proto, + localAddress := "" + if backend.LocalAddress.String() != "" { + localAddress = backend.LocalAddress.String() + } + kv := make([]string, len(c.backendLabels)) + for i, label := range c.backendLabels { + var labelValue string + switch label { + case ipvsLabelLocalAddress: + labelValue = localAddress + case ipvsLabelLocalPort: + labelValue = strconv.FormatUint(uint64(backend.LocalPort), 10) + case ipvsLabelRemoteAddress: + labelValue = backend.RemoteAddress.String() + case ipvsLabelRemotePort: + labelValue = strconv.FormatUint(uint64(backend.RemotePort), 10) + 
case ipvsLabelProto: + labelValue = backend.Proto + case ipvsLabelLocalMark: + labelValue = backend.LocalMark + } + kv[i] = labelValue } - ch <- c.backendConnectionsActive.mustNewConstMetric(float64(backend.ActiveConn), labelValues...) - ch <- c.backendConnectionsInact.mustNewConstMetric(float64(backend.InactConn), labelValues...) - ch <- c.backendWeight.mustNewConstMetric(float64(backend.Weight), labelValues...) + key := strings.Join(kv, "-") + status := sums[key] + status.ActiveConn += backend.ActiveConn + status.InactConn += backend.InactConn + status.Weight += backend.Weight + sums[key] = status + labelValues[key] = kv + } + for key, status := range sums { + kv := labelValues[key] + ch <- c.backendConnectionsActive.mustNewConstMetric(float64(status.ActiveConn), kv...) + ch <- c.backendConnectionsInact.mustNewConstMetric(float64(status.InactConn), kv...) + ch <- c.backendWeight.mustNewConstMetric(float64(status.Weight), kv...) } return nil } + +func (c *ipvsCollector) parseIpvsLabels(labelString string) ([]string, error) { + labels := strings.Split(labelString, ",") + labelSet := make(map[string]bool, len(labels)) + results := make([]string, 0, len(labels)) + for _, label := range labels { + if label != "" { + labelSet[label] = true + } + } + + for _, label := range fullIpvsBackendLabels { + if labelSet[label] { + results = append(results, label) + } + delete(labelSet, label) + } + + if len(labelSet) > 0 { + keys := make([]string, 0, len(labelSet)) + for label := range labelSet { + keys = append(keys, label) + } + sort.Strings(keys) + return nil, fmt.Errorf("unknown IPVS backend labels: %q", strings.Join(keys, ", ")) + } + + return results, nil +} diff --git a/collector/ipvs_linux_test.go b/collector/ipvs_linux_test.go index d390f1648e..cf12a216ce 100644 --- a/collector/ipvs_linux_test.go +++ b/collector/ipvs_linux_test.go @@ -14,6 +14,7 @@ package collector import ( + "errors" "fmt" "io/ioutil" "net/http" @@ -21,39 +22,123 @@ import ( "strings" "testing" + 
"github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "gopkg.in/alecthomas/kingpin.v2" ) func TestIPVSCollector(t *testing.T) { - if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", "fixtures/proc"}); err != nil { - t.Fatal(err) - } - collector, err := newIPVSCollector() - if err != nil { - t.Fatal(err) + testcases := []struct { + labels string + expects []string + err error + }{ + { + "", + []string{ + prometheus.NewDesc("node_ipvs_connections_total", "The total number of connections made.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_incoming_packets_total", "The total number of incoming packets.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_outgoing_packets_total", "The total number of outgoing packets.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_incoming_bytes_total", "The total amount of incoming data.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_outgoing_bytes_total", "The total amount of outgoing data.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_backend_connections_active", "The current active connections by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto", "local_mark"}, nil).String(), + prometheus.NewDesc("node_ipvs_backend_connections_inactive", "The current inactive connections by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto", "local_mark"}, nil).String(), + prometheus.NewDesc("node_ipvs_backend_weight", "The current backend weight by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto", "local_mark"}, nil).String(), + }, + nil, + }, + { + "", + []string{ + prometheus.NewDesc("node_ipvs_connections_total", "The total number of connections made.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_incoming_packets_total", "The total number of 
incoming packets.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_outgoing_packets_total", "The total number of outgoing packets.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_incoming_bytes_total", "The total amount of incoming data.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_outgoing_bytes_total", "The total amount of outgoing data.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_backend_connections_active", "The current active connections by local and remote address.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_backend_connections_inactive", "The current inactive connections by local and remote address.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_backend_weight", "The current backend weight by local and remote address.", nil, nil).String(), + }, + nil, + }, + { + "local_port", + []string{ + prometheus.NewDesc("node_ipvs_connections_total", "The total number of connections made.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_incoming_packets_total", "The total number of incoming packets.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_outgoing_packets_total", "The total number of outgoing packets.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_incoming_bytes_total", "The total amount of incoming data.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_outgoing_bytes_total", "The total amount of outgoing data.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_backend_connections_active", "The current active connections by local and remote address.", []string{"local_port"}, nil).String(), + prometheus.NewDesc("node_ipvs_backend_connections_inactive", "The current inactive connections by local and remote address.", []string{"local_port"}, nil).String(), + prometheus.NewDesc("node_ipvs_backend_weight", "The current backend weight by local and remote address.", []string{"local_port"}, nil).String(), + }, + nil, + }, + { + "local_address,local_port", + []string{ + 
prometheus.NewDesc("node_ipvs_connections_total", "The total number of connections made.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_incoming_packets_total", "The total number of incoming packets.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_outgoing_packets_total", "The total number of outgoing packets.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_incoming_bytes_total", "The total amount of incoming data.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_outgoing_bytes_total", "The total amount of outgoing data.", nil, nil).String(), + prometheus.NewDesc("node_ipvs_backend_connections_active", "The current active connections by local and remote address.", []string{"local_address", "local_port"}, nil).String(), + prometheus.NewDesc("node_ipvs_backend_connections_inactive", "The current inactive connections by local and remote address.", []string{"local_address", "local_port"}, nil).String(), + prometheus.NewDesc("node_ipvs_backend_weight", "The current backend weight by local and remote address.", []string{"local_address", "local_port"}, nil).String(), + }, + nil, + }, + { + "invalid_label", + nil, + errors.New(`unknown IPVS backend labels: "invalid_label"`), + }, + { + "invalid_label,bad_label", + nil, + errors.New(`unknown IPVS backend labels: "bad_label, invalid_label"`), + }, } - sink := make(chan prometheus.Metric) - go func() { - err = collector.Update(sink) - if err != nil { - panic(fmt.Sprintf("failed to update collector: %v", err)) - } - }() - for expected, got := range map[string]string{ - prometheus.NewDesc("node_ipvs_connections_total", "The total number of connections made.", nil, nil).String(): (<-sink).Desc().String(), - prometheus.NewDesc("node_ipvs_incoming_packets_total", "The total number of incoming packets.", nil, nil).String(): (<-sink).Desc().String(), - prometheus.NewDesc("node_ipvs_outgoing_packets_total", "The total number of outgoing packets.", nil, nil).String(): (<-sink).Desc().String(), - 
prometheus.NewDesc("node_ipvs_incoming_bytes_total", "The total amount of incoming data.", nil, nil).String(): (<-sink).Desc().String(), - prometheus.NewDesc("node_ipvs_outgoing_bytes_total", "The total amount of outgoing data.", nil, nil).String(): (<-sink).Desc().String(), - prometheus.NewDesc("node_ipvs_backend_connections_active", "The current active connections by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto"}, nil).String(): (<-sink).Desc().String(), - prometheus.NewDesc("node_ipvs_backend_connections_inactive", "The current inactive connections by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto"}, nil).String(): (<-sink).Desc().String(), - prometheus.NewDesc("node_ipvs_backend_weight", "The current backend weight by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto"}, nil).String(): (<-sink).Desc().String(), - } { - if expected != got { - t.Fatalf("Expected '%s' but got '%s'", expected, got) - } + for _, test := range testcases { + t.Run(test.labels, func(t *testing.T) { + args := []string{"--path.procfs", "fixtures/proc"} + if test.labels != "" { + args = append(args, "--collector.ipvs.backend-labels="+test.labels) + } + if _, err := kingpin.CommandLine.Parse(args); err != nil { + t.Fatal(err) + } + collector, err := newIPVSCollector(log.NewNopLogger()) + if err != nil { + if test.err == nil { + t.Fatal(err) + } + if !strings.Contains(err.Error(), test.err.Error()) { + t.Fatalf("expect error: %v contains %v", err, test.err) + } + return + } + if test.err != nil { + t.Fatalf("expect error: %v but got no error", test.err) + } + + sink := make(chan prometheus.Metric) + go func() { + err = collector.Update(sink) + if err != nil { + panic(fmt.Sprintf("failed to update collector: %v", err)) + } + }() + for _, expected := range test.expects { + got := (<-sink).Desc().String() + if 
expected != got { + t.Fatalf("Expected '%s' but got '%s'", expected, got) + } + } + }) } } @@ -76,44 +161,61 @@ func (c miniCollector) Describe(ch chan<- *prometheus.Desc) { } func TestIPVSCollectorResponse(t *testing.T) { - if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", "fixtures/proc"}); err != nil { - t.Fatal(err) - } - collector, err := NewIPVSCollector() - if err != nil { - t.Fatal(err) + testcases := []struct { + labels string + metricsFile string + }{ + {"", "fixtures/ip_vs_result.txt"}, + {"", "fixtures/ip_vs_result_lbs_none.txt"}, + {"local_port", "fixtures/ip_vs_result_lbs_local_port.txt"}, + {"local_address,local_port", "fixtures/ip_vs_result_lbs_local_address_local_port.txt"}, } - prometheus.MustRegister(miniCollector{c: collector}) + for _, test := range testcases { + t.Run(test.labels, func(t *testing.T) { + args := []string{"--path.procfs", "fixtures/proc"} + if test.labels != "" { + args = append(args, "--collector.ipvs.backend-labels="+test.labels) + } + if _, err := kingpin.CommandLine.Parse(args); err != nil { + t.Fatal(err) + } + collector, err := NewIPVSCollector(log.NewNopLogger()) + if err != nil { + t.Fatal(err) + } + registry := prometheus.NewRegistry() + registry.MustRegister(miniCollector{c: collector}) - rw := httptest.NewRecorder() - promhttp.Handler().ServeHTTP(rw, &http.Request{}) + rw := httptest.NewRecorder() + promhttp.InstrumentMetricHandler(registry, promhttp.HandlerFor(registry, promhttp.HandlerOpts{})).ServeHTTP(rw, &http.Request{}) - metricsFile := "fixtures/ip_vs_result.txt" - wantMetrics, err := ioutil.ReadFile(metricsFile) - if err != nil { - t.Fatalf("unable to read input test file %s: %s", metricsFile, err) - } + wantMetrics, err := ioutil.ReadFile(test.metricsFile) + if err != nil { + t.Fatalf("unable to read input test file %s: %s", test.metricsFile, err) + } - wantLines := strings.Split(string(wantMetrics), "\n") - gotLines := strings.Split(string(rw.Body.String()), "\n") - gotLinesIdx := 0 + 
wantLines := strings.Split(string(wantMetrics), "\n") + gotLines := strings.Split(string(rw.Body.String()), "\n") + gotLinesIdx := 0 - // Until the Prometheus Go client library offers better testability - // (https://github.com/prometheus/client_golang/issues/58), we simply compare - // verbatim text-format metrics outputs, but ignore any lines we don't have - // in the fixture. Put differently, we are only testing that each line from - // the fixture is present, in the order given. -wantLoop: - for _, want := range wantLines { - for _, got := range gotLines[gotLinesIdx:] { - if want == got { - // this is a line we are interested in, and it is correct - continue wantLoop - } else { - gotLinesIdx++ + // Until the Prometheus Go client library offers better testability + // (https://github.com/prometheus/client_golang/issues/58), we simply compare + // verbatim text-format metrics outputs, but ignore any lines we don't have + // in the fixture. Put differently, we are only testing that each line from + // the fixture is present, in the order given. + wantLoop: + for _, want := range wantLines { + for _, got := range gotLines[gotLinesIdx:] { + if want == got { + // this is a line we are interested in, and it is correct + continue wantLoop + } else { + gotLinesIdx++ + } + } + // if this point is reached, the line we want was missing + t.Fatalf("Missing expected output line(s), first missing line is %s", want) } - } - // if this point is reached, the line we want was missing - t.Fatalf("Missing expected output line(s), first missing line is %s", want) + }) } } diff --git a/collector/ksmd_linux.go b/collector/ksmd_linux.go index c220e08755..6d4142ae9a 100644 --- a/collector/ksmd_linux.go +++ b/collector/ksmd_linux.go @@ -11,14 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !noksmd // +build !noksmd package collector import ( "fmt" - "path" + "path/filepath" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -29,6 +31,7 @@ var ( type ksmdCollector struct { metricDescs map[string]*prometheus.Desc + logger log.Logger } func init() { @@ -47,7 +50,7 @@ func getCanonicalMetricName(filename string) string { } // NewKsmdCollector returns a new Collector exposing kernel/system statistics. -func NewKsmdCollector() (Collector, error) { +func NewKsmdCollector(logger log.Logger) (Collector, error) { subsystem := "ksmd" descs := make(map[string]*prometheus.Desc) @@ -56,13 +59,13 @@ func NewKsmdCollector() (Collector, error) { prometheus.BuildFQName(namespace, subsystem, getCanonicalMetricName(n)), fmt.Sprintf("ksmd '%s' file.", n), nil, nil) } - return &ksmdCollector{descs}, nil + return &ksmdCollector{descs, logger}, nil } // Update implements Collector and exposes kernel and system statistics. func (c *ksmdCollector) Update(ch chan<- prometheus.Metric) error { for _, n := range ksmdFiles { - val, err := readUintFromFile(sysFilePath(path.Join("kernel/mm/ksm", n))) + val, err := readUintFromFile(sysFilePath(filepath.Join("kernel/mm/ksm", n))) if err != nil { return err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/collector/kvm_bsd.c similarity index 50% rename from vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go rename to collector/kvm_bsd.c index 8bb9b8b68f..ad86d33e3a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go +++ b/collector/kvm_bsd.c @@ -11,34 +11,42 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build !go1.8 +// +build !nomeminfo +// +build freebsd dragonfly -package promhttp +#include +#include +#include +#include +#include -import ( - "io" - "net/http" -) +int _kvm_swap_used_pages(uint64_t *out) { + const int total_only = 1; // from kvm_getswapinfo(3) -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } + kvm_t *kd; + struct kvm_swap current; - id := 0 - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier + kd = kvm_open(NULL, _PATH_DEVNULL, NULL, O_RDONLY, NULL); + if (kd == NULL) { + return -1; } - if _, ok := w.(http.Flusher); ok { - id += flusher + + if (kvm_getswapinfo(kd, ¤t, total_only, 0) == -1) { + goto error1; } - if _, ok := w.(http.Hijacker); ok { - id += hijacker + + if (kvm_close(kd) != 0) { + return -1; } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom + kd = NULL; + + *out = current.ksw_used; + return 0; + +error1: + if (kd != NULL) { + kvm_close(kd); } - return pickDelegator[id](d) + return -1; } diff --git a/collector/loadavg_unix.go b/collector/kvm_bsd.go similarity index 55% rename from collector/loadavg_unix.go rename to collector/kvm_bsd.go index 4d6588529c..c4a4479aff 100644 --- a/collector/loadavg_unix.go +++ b/collector/kvm_bsd.go @@ -1,4 +1,4 @@ -// Copyright 2015 The Prometheus Authors +// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,23 +11,33 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build darwin dragonfly netbsd openbsd -// +build !noloadavg +//go:build !nokvm && (freebsd || dragonfly) +// +build !nokvm +// +build freebsd dragonfly package collector import ( - "errors" + "fmt" + "sync" ) -// #include +// #cgo LDFLAGS: -lkvm +// #include "kvm_bsd.h" import "C" -func getLoad() ([]float64, error) { - var loadavg [3]C.double - samples := C.getloadavg(&loadavg[0], 3) - if samples != 3 { - return nil, errors.New("failed to get load average") +type kvm struct { + mu sync.Mutex + hasErr bool +} + +func (k *kvm) SwapUsedPages() (value uint64, err error) { + k.mu.Lock() + defer k.mu.Unlock() + if C._kvm_swap_used_pages((*C.uint64_t)(&value)) == -1 { + k.hasErr = true + return 0, fmt.Errorf("couldn't get kvm stats") } - return []float64{float64(loadavg[0]), float64(loadavg[1]), float64(loadavg[2])}, nil + + return value, nil } diff --git a/vendor/github.com/prometheus/procfs/sysfs/doc.go b/collector/kvm_bsd.h similarity index 82% rename from vendor/github.com/prometheus/procfs/sysfs/doc.go rename to collector/kvm_bsd.h index 9a6c244e91..0cfbfa702e 100644 --- a/vendor/github.com/prometheus/procfs/sysfs/doc.go +++ b/collector/kvm_bsd.h @@ -11,6 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package sysfs provides functions to retrieve system and kernel metrics -// from the pseudo-filesystem sys. -package sysfs +// +build !nomeminfo +// +build freebsd dragonfly + +#include + +int _kvm_swap_used_pages(uint64_t *out); diff --git a/collector/lnstat_linux.go b/collector/lnstat_linux.go new file mode 100644 index 0000000000..bf67cfa570 --- /dev/null +++ b/collector/lnstat_linux.go @@ -0,0 +1,72 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nolnstat +// +build !nolnstat + +package collector + +import ( + "fmt" + "strconv" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +type lnstatCollector struct { + logger log.Logger +} + +func init() { + registerCollector("lnstat", defaultDisabled, NewLnstatCollector) +} + +func NewLnstatCollector(logger log.Logger) (Collector, error) { + return &lnstatCollector{logger}, nil +} + +func (c *lnstatCollector) Update(ch chan<- prometheus.Metric) error { + const ( + subsystem = "lnstat" + ) + + fs, err := procfs.NewFS(*procPath) + if err != nil { + return fmt.Errorf("failed to open procfs: %w", err) + } + + netStats, err := fs.NetStat() + if err != nil { + return fmt.Errorf("Lnstat error: %s", err) + } + + for _, netStatFile := range netStats { + labelNames := []string{"subsystem", "cpu"} + for header, stats := range netStatFile.Stats { + for cpu, value := range stats { + labelValues := []string{netStatFile.Filename, strconv.Itoa(cpu)} + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, header+"_total"), + "linux network cache stats", + labelNames, nil, + ), + prometheus.CounterValue, float64(value), labelValues..., + ) + } + } + } + return nil +} diff --git a/collector/loadavg.go b/collector/loadavg.go index 5568faa4b2..cb7b2cb142 100644 --- a/collector/loadavg.go +++ b/collector/loadavg.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !noloadavg // +build darwin dragonfly freebsd linux netbsd openbsd solaris // +build !noloadavg @@ -19,12 +20,14 @@ package collector import ( "fmt" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" ) type loadavgCollector struct { metric []typedDesc + logger log.Logger } func init() { @@ -32,23 +35,24 @@ func init() { } // NewLoadavgCollector returns a new Collector exposing load average stats. -func NewLoadavgCollector() (Collector, error) { +func NewLoadavgCollector(logger log.Logger) (Collector, error) { return &loadavgCollector{ metric: []typedDesc{ {prometheus.NewDesc(namespace+"_load1", "1m load average.", nil, nil), prometheus.GaugeValue}, {prometheus.NewDesc(namespace+"_load5", "5m load average.", nil, nil), prometheus.GaugeValue}, {prometheus.NewDesc(namespace+"_load15", "15m load average.", nil, nil), prometheus.GaugeValue}, }, + logger: logger, }, nil } func (c *loadavgCollector) Update(ch chan<- prometheus.Metric) error { loads, err := getLoad() if err != nil { - return fmt.Errorf("couldn't get load: %s", err) + return fmt.Errorf("couldn't get load: %w", err) } for i, load := range loads { - log.Debugf("return load %d: %f", i, load) + level.Debug(c.logger).Log("msg", "return load", "index", i, "load", load) ch <- c.metric[i].mustNewConstMetric(load) } return err diff --git a/collector/loadavg_freebsd.go b/collector/loadavg_bsd.go similarity index 89% rename from collector/loadavg_freebsd.go rename to collector/loadavg_bsd.go index e919c50e4b..a1f5ae6643 100644 --- a/collector/loadavg_freebsd.go +++ b/collector/loadavg_bsd.go @@ -11,6 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && !noloadavg +// +build darwin dragonfly freebsd netbsd openbsd // +build !noloadavg package collector diff --git a/collector/loadavg_linux.go b/collector/loadavg_linux.go index 668d3eda37..d118eaba5f 100644 --- a/collector/loadavg_linux.go +++ b/collector/loadavg_linux.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !noloadavg // +build !noloadavg package collector @@ -45,7 +46,7 @@ func parseLoad(data string) (loads []float64, err error) { for i, load := range parts[0:3] { loads[i], err = strconv.ParseFloat(load, 64) if err != nil { - return nil, fmt.Errorf("could not parse load '%s': %s", load, err) + return nil, fmt.Errorf("could not parse load '%s': %w", load, err) } } return loads, nil diff --git a/collector/loadavg_solaris.go b/collector/loadavg_solaris.go index 3fc3b1637e..316233fa98 100644 --- a/collector/loadavg_solaris.go +++ b/collector/loadavg_solaris.go @@ -11,28 +11,55 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !noloadavg // +build !noloadavg package collector import ( - "errors" + "fmt" + "strconv" + + "github.com/illumos/go-kstat" ) -/* -// Define "__stack_chk_fail" and "__stack_chk_guard" symbols. 
-#cgo LDFLAGS: -fno-stack-protector -lssp -// Ensure "hrtime_t" is defined for sys/loadavg.h -#include -#include -*/ +// #include import "C" +func kstatToFloat(ks *kstat.KStat, kstatKey string) float64 { + kstatValue, err := ks.GetNamed(kstatKey) + + if err != nil { + panic(err) + } + + kstatLoadavg, err := strconv.ParseFloat( + fmt.Sprintf("%.2f", float64(kstatValue.UintVal)/C.FSCALE), 64) + + if err != nil { + panic(err) + } + + return kstatLoadavg +} + func getLoad() ([]float64, error) { - var loadavg [3]C.double - samples := C.getloadavg(&loadavg[0], 3) - if samples != 3 { - return nil, errors.New("failed to get load average") + tok, err := kstat.Open() + if err != nil { + panic(err) } - return []float64{float64(loadavg[0]), float64(loadavg[1]), float64(loadavg[2])}, nil + + defer tok.Close() + + ks, err := tok.Lookup("unix", 0, "system_misc") + + if err != nil { + panic(err) + } + + loadavg1Min := kstatToFloat(ks, "avenrun_1min") + loadavg5Min := kstatToFloat(ks, "avenrun_5min") + loadavg15Min := kstatToFloat(ks, "avenrun_15min") + + return []float64{loadavg1Min, loadavg5Min, loadavg15Min}, nil } diff --git a/collector/logind_linux.go b/collector/logind_linux.go index ba23f118aa..06703e9640 100644 --- a/collector/logind_linux.go +++ b/collector/logind_linux.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nologind // +build !nologind package collector @@ -20,6 +21,7 @@ import ( "os" "strconv" + "github.com/go-kit/log" "github.com/godbus/dbus" "github.com/prometheus/client_golang/prometheus" ) @@ -43,7 +45,9 @@ var ( ) ) -type logindCollector struct{} +type logindCollector struct { + logger log.Logger +} type logindDbus struct { conn *dbus.Conn @@ -82,14 +86,14 @@ func init() { } // NewLogindCollector returns a new Collector exposing logind statistics. 
-func NewLogindCollector() (Collector, error) { - return &logindCollector{}, nil +func NewLogindCollector(logger log.Logger) (Collector, error) { + return &logindCollector{logger}, nil } func (lc *logindCollector) Update(ch chan<- prometheus.Metric) error { c, err := newDbus() if err != nil { - return fmt.Errorf("unable to connect to dbus: %s", err) + return fmt.Errorf("unable to connect to dbus: %w", err) } defer c.conn.Close() @@ -99,12 +103,12 @@ func (lc *logindCollector) Update(ch chan<- prometheus.Metric) error { func collectMetrics(ch chan<- prometheus.Metric, c logindInterface) error { seats, err := c.listSeats() if err != nil { - return fmt.Errorf("unable to get seats: %s", err) + return fmt.Errorf("unable to get seats: %w", err) } sessionList, err := c.listSessions() if err != nil { - return fmt.Errorf("unable to get sessions: %s", err) + return fmt.Errorf("unable to get sessions: %w", err) } sessions := make(map[logindSession]float64) diff --git a/collector/mdadm_linux.go b/collector/mdadm_linux.go index bc29fbc9d7..89c56b75d0 100644 --- a/collector/mdadm_linux.go +++ b/collector/mdadm_linux.go @@ -11,230 +11,76 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nomdadm // +build !nomdadm package collector import ( + "errors" "fmt" - "io/ioutil" "os" - "regexp" - "strconv" - "strings" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" + "github.com/prometheus/procfs" ) -var ( - statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) - raid0lineRE = regexp.MustCompile(`(\d+) blocks .*\d+k chunks`) - buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) - unknownPersonalityLineRE = regexp.MustCompile(`(\d+) blocks (.*)`) - raidPersonalityRE = regexp.MustCompile(`raid[0-9]+`) -) - -type mdStatus struct { - name string - active bool - disksActive int64 - disksTotal int64 - blocksTotal int64 - blocksSynced int64 +type mdadmCollector struct { + logger log.Logger } -type mdadmCollector struct{} - func init() { registerCollector("mdadm", defaultEnabled, NewMdadmCollector) } -func evalStatusline(statusline string) (active, total, size int64, err error) { - matches := statuslineRE.FindStringSubmatch(statusline) - - // +1 to make it more obvious that the whole string containing the info is also returned as matches[0]. 
- if len(matches) < 3+1 { - return 0, 0, 0, fmt.Errorf("too few matches found in statusline: %s", statusline) - } else if len(matches) > 3+1 { - return 0, 0, 0, fmt.Errorf("too many matches found in statusline: %s", statusline) - } - - size, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline) - } - - total, err = strconv.ParseInt(matches[2], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline) - } - active, err = strconv.ParseInt(matches[3], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline) - } - - return active, total, size, nil -} - -func evalRaid0line(statusline string) (size int64, err error) { - matches := raid0lineRE.FindStringSubmatch(statusline) - - if len(matches) < 2 { - return 0, fmt.Errorf("invalid raid0 status line: %s", statusline) - } - - size, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, fmt.Errorf("%s in statusline: %s", err, statusline) - } - - return size, nil -} - -func evalUnknownPersonalitylineRE(statusline string) (size int64, err error) { - matches := unknownPersonalityLineRE.FindStringSubmatch(statusline) - - if len(matches) != 2+1 { - return 0, fmt.Errorf("invalid unknown personality status line: %s", statusline) - } - - size, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, fmt.Errorf("%s in statusline: %s", err, statusline) - } - - return size, nil -} - -// evalBuildline gets the size that has already been synced out of the sync-line. -func evalBuildline(buildline string) (int64, error) { - matches := buildlineRE.FindStringSubmatch(buildline) - - // +1 to make it more obvious that the whole string containing the info is also returned as matches[0]. 
- if len(matches) < 1+1 { - return 0, fmt.Errorf("too few matches found in buildline: %s", buildline) - } - - if len(matches) > 1+1 { - return 0, fmt.Errorf("too many matches found in buildline: %s", buildline) - } - - syncedSize, err := strconv.ParseInt(matches[1], 10, 64) - - if err != nil { - return 0, fmt.Errorf("%s in buildline: %s", err, buildline) - } - - return syncedSize, nil -} - -// parseMdstat parses an mdstat-file and returns a struct with the relevant infos. -func parseMdstat(mdStatusFilePath string) ([]mdStatus, error) { - content, err := ioutil.ReadFile(mdStatusFilePath) - if err != nil { - return []mdStatus{}, err - } - - lines := strings.Split(string(content), "\n") - // Each md has at least the deviceline, statusline and one empty line afterwards - // so we will have probably something of the order len(lines)/3 devices - // so we use that for preallocation. - mdStates := make([]mdStatus, 0, len(lines)/3) - for i, line := range lines { - if line == "" { - continue - } - if line[0] == ' ' || line[0] == '\t' { - // Lines starting with white space are not the beginning of a md-section. - continue - } - if strings.HasPrefix(line, "Personalities") || strings.HasPrefix(line, "unused") { - // These lines contain general information. - continue - } - - mainLine := strings.Split(line, " ") - if len(mainLine) < 4 { - return mdStates, fmt.Errorf("error parsing mdline: %s", line) - } - md := mdStatus{ - name: mainLine[0], - active: mainLine[2] == "active", - } - - if len(lines) <= i+3 { - return mdStates, fmt.Errorf("error parsing mdstat: entry for %s has fewer lines than expected", md.name) - } - - personality := "" - for _, possiblePersonality := range mainLine[3:] { - if raidPersonalityRE.MatchString(possiblePersonality) { - personality = possiblePersonality - break - } - } - switch { - case personality == "raid0": - md.disksActive = int64(len(mainLine) - 4) // Get the number of devices from the main line. 
- md.disksTotal = md.disksActive // Raid0 active and total is always the same if active. - md.blocksTotal, err = evalRaid0line(lines[i+1]) - case raidPersonalityRE.MatchString(personality): - md.disksActive, md.disksTotal, md.blocksTotal, err = evalStatusline(lines[i+1]) - default: - log.Infof("Personality unknown: %s\n", mainLine) - md.blocksTotal, err = evalUnknownPersonalitylineRE(lines[i+1]) - } - if err != nil { - return mdStates, fmt.Errorf("error parsing mdstat: %s", err) - } - - syncLine := lines[i+2] - if strings.Contains(syncLine, "bitmap") { - syncLine = lines[i+3] - } - - // If device is syncing at the moment, get the number of currently synced bytes, - // otherwise that number equals the size of the device. - if strings.Contains(syncLine, "recovery") || - strings.Contains(syncLine, "resync") && - !strings.Contains(syncLine, "\tresync=") { - md.blocksSynced, err = evalBuildline(syncLine) - if err != nil { - return mdStates, fmt.Errorf("error parsing mdstat: %s", err) - } - } else { - md.blocksSynced = md.blocksTotal - } - - mdStates = append(mdStates, md) - } - - return mdStates, nil -} - // NewMdadmCollector returns a new Collector exposing raid statistics. 
-func NewMdadmCollector() (Collector, error) { - return &mdadmCollector{}, nil +func NewMdadmCollector(logger log.Logger) (Collector, error) { + return &mdadmCollector{logger}, nil } var ( - isActiveDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "md", "is_active"), - "Indicator whether the md-device is active or not.", + activeDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "md", "state"), + "Indicates the state of md-device.", []string{"device"}, - nil, + prometheus.Labels{"state": "active"}, ) - - disksActiveDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "md", "disks_active"), - "Number of active disks of device.", + inActiveDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "md", "state"), + "Indicates the state of md-device.", + []string{"device"}, + prometheus.Labels{"state": "inactive"}, + ) + recoveringDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "md", "state"), + "Indicates the state of md-device.", []string{"device"}, + prometheus.Labels{"state": "recovering"}, + ) + resyncDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "md", "state"), + "Indicates the state of md-device.", + []string{"device"}, + prometheus.Labels{"state": "resync"}, + ) + checkDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "md", "state"), + "Indicates the state of md-device.", + []string{"device"}, + prometheus.Labels{"state": "check"}, + ) + + disksDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "md", "disks"), + "Number of active/failed/spare disks of device.", + []string{"device", "state"}, nil, ) disksTotalDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "md", "disks"), + prometheus.BuildFQName(namespace, "md", "disks_required"), "Total number of disks of device.", []string{"device"}, nil, @@ -256,52 +102,103 @@ var ( ) func (c *mdadmCollector) Update(ch chan<- prometheus.Metric) error { - statusfile := procFilePath("mdstat") - mdstate, err := 
parseMdstat(statusfile) + fs, err := procfs.NewFS(*procPath) + if err != nil { - if os.IsNotExist(err) { - log.Debugf("Not collecting mdstat, file does not exist: %s", statusfile) - return nil - } - return fmt.Errorf("error parsing mdstatus: %s", err) + return fmt.Errorf("failed to open procfs: %w", err) } - for _, mds := range mdstate { - log.Debugf("collecting metrics for device %s", mds.name) + mdStats, err := fs.MDStat() - var active float64 - if mds.active { - active = 1 + if err != nil { + if errors.Is(err, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "Not collecting mdstat, file does not exist", "file", *procPath) + return ErrNoData } + + return fmt.Errorf("error parsing mdstatus: %w", err) + } + + for _, mdStat := range mdStats { + level.Debug(c.logger).Log("msg", "collecting metrics for device", "device", mdStat.Name) + + stateVals := make(map[string]float64) + stateVals[mdStat.ActivityState] = 1 + ch <- prometheus.MustNewConstMetric( - isActiveDesc, + disksTotalDesc, prometheus.GaugeValue, - active, - mds.name, + float64(mdStat.DisksTotal), + mdStat.Name, ) + ch <- prometheus.MustNewConstMetric( - disksActiveDesc, + disksDesc, prometheus.GaugeValue, - float64(mds.disksActive), - mds.name, + float64(mdStat.DisksActive), + mdStat.Name, + "active", ) ch <- prometheus.MustNewConstMetric( - disksTotalDesc, + disksDesc, + prometheus.GaugeValue, + float64(mdStat.DisksFailed), + mdStat.Name, + "failed", + ) + ch <- prometheus.MustNewConstMetric( + disksDesc, + prometheus.GaugeValue, + float64(mdStat.DisksSpare), + mdStat.Name, + "spare", + ) + ch <- prometheus.MustNewConstMetric( + activeDesc, + prometheus.GaugeValue, + stateVals["active"], + mdStat.Name, + ) + + ch <- prometheus.MustNewConstMetric( + inActiveDesc, + prometheus.GaugeValue, + stateVals["inactive"], + mdStat.Name, + ) + + ch <- prometheus.MustNewConstMetric( + recoveringDesc, + prometheus.GaugeValue, + stateVals["recovering"], + mdStat.Name, + ) + + ch <- prometheus.MustNewConstMetric( + 
resyncDesc, prometheus.GaugeValue, - float64(mds.disksTotal), - mds.name, + stateVals["resyncing"], + mdStat.Name, ) + + ch <- prometheus.MustNewConstMetric( + checkDesc, + prometheus.GaugeValue, + stateVals["checking"], + mdStat.Name, + ) + ch <- prometheus.MustNewConstMetric( blocksTotalDesc, prometheus.GaugeValue, - float64(mds.blocksTotal), - mds.name, + float64(mdStat.BlocksTotal), + mdStat.Name, ) ch <- prometheus.MustNewConstMetric( blocksSyncedDesc, prometheus.GaugeValue, - float64(mds.blocksSynced), - mds.name, + float64(mdStat.BlocksSynced), + mdStat.Name, ) } diff --git a/collector/mdadm_linux_test.go b/collector/mdadm_linux_test.go deleted file mode 100644 index 30776bf9d6..0000000000 --- a/collector/mdadm_linux_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package collector - -import ( - "testing" -) - -func TestMdadm(t *testing.T) { - mdStates, err := parseMdstat("fixtures/proc/mdstat") - if err != nil { - t.Fatalf("parsing of reference-file failed entirely: %s", err) - } - - refs := map[string]mdStatus{ - // { "", , , , , } - "md3": {"md3", true, 8, 8, 5853468288, 5853468288}, - "md127": {"md127", true, 2, 2, 312319552, 312319552}, - "md0": {"md0", true, 2, 2, 248896, 248896}, - "md4": {"md4", false, 2, 2, 4883648, 4883648}, - "md6": {"md6", true, 1, 2, 195310144, 16775552}, - "md8": {"md8", true, 2, 2, 195310144, 16775552}, - "md7": {"md7", true, 3, 4, 7813735424, 7813735424}, - "md9": {"md9", true, 4, 4, 523968, 523968}, - "md10": {"md10", true, 2, 2, 314159265, 314159265}, - "md11": {"md11", true, 2, 2, 4190208, 4190208}, - "md12": {"md12", true, 2, 2, 3886394368, 3886394368}, - "md126": {"md126", true, 2, 2, 1855870976, 1855870976}, - "md219": {"md219", false, 0, 0, 7932, 7932}, - "md00": {"md00", true, 1, 1, 4186624, 4186624}, - } - - for _, md := range mdStates { - if md != refs[md.name] { - t.Errorf("failed parsing md-device %s correctly: want %v, got %v", md.name, refs[md.name], md) - } - } - - if len(mdStates) != len(refs) { - t.Errorf("expected number of parsed md-device to be %d, but was %d", len(refs), len(mdStates)) - } -} - -func TestInvalidMdstat(t *testing.T) { - _, err := parseMdstat("fixtures/proc/mdstat_invalid") - if err == nil { - t.Fatalf("parsing of invalid reference file did not find any errors") - } -} diff --git a/collector/megacli.go b/collector/megacli.go deleted file mode 100644 index e24f1b4970..0000000000 --- a/collector/megacli.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !nomegacli - -package collector - -import ( - "bufio" - "io" - "os/exec" - "strconv" - "strings" - - "github.com/prometheus/client_golang/prometheus" - "gopkg.in/alecthomas/kingpin.v2" -) - -const ( - defaultMegaCli = "megacli" - adapterHeaderSep = "================" -) - -var ( - megacliCommand = kingpin.Flag("collector.megacli.command", "Command to run megacli.").Default(defaultMegaCli).String() -) - -type megaCliCollector struct { - cli string - - driveTemperature *prometheus.GaugeVec - driveCounters *prometheus.GaugeVec - drivePresence *prometheus.GaugeVec -} - -func init() { - registerCollector("megacli", defaultDisabled, NewMegaCliCollector) -} - -// NewMegaCliCollector returns a new Collector exposing RAID status through -// megacli. 
-func NewMegaCliCollector() (Collector, error) { - warnDeprecated("megacli") - return &megaCliCollector{ - cli: *megacliCommand, - driveTemperature: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "megacli_drive_temperature_celsius", - Help: "megacli: drive temperature", - }, []string{"enclosure", "slot"}), - driveCounters: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "megacli_drive_count", - Help: "megacli: drive error and event counters", - }, []string{"enclosure", "slot", "type"}), - drivePresence: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "megacli_adapter_disk_presence", - Help: "megacli: disk presence per adapter", - }, []string{"type"}), - }, nil -} - -func (c *megaCliCollector) Update(ch chan<- prometheus.Metric) error { - if err := c.updateAdapter(); err != nil { - return err - } - if err := c.updateDisks(); err != nil { - return err - } - c.driveTemperature.Collect(ch) - c.driveCounters.Collect(ch) - c.drivePresence.Collect(ch) - return nil -} - -func parseMegaCliDisks(r io.Reader) (map[int]map[int]map[string]string, error) { - var ( - stats = map[int]map[int]map[string]string{} - scanner = bufio.NewScanner(r) - curEnc = -1 - curSlot = -1 - ) - - for scanner.Scan() { - var err error - text := strings.TrimSpace(scanner.Text()) - parts := strings.SplitN(text, ":", 2) - if len(parts) != 2 { // Adapter #X - continue - } - key := strings.TrimSpace(parts[0]) - value := strings.TrimSpace(parts[1]) - switch { - case key == "Enclosure Device ID": - curEnc, err = strconv.Atoi(value) - if err != nil { - return nil, err - } - case key == "Slot Number": - curSlot, err = strconv.Atoi(value) - if err != nil { - return nil, err - } - case curSlot != -1 && curEnc != -1: - if _, ok := stats[curEnc]; !ok { - stats[curEnc] = map[int]map[string]string{} - } - if _, ok := stats[curEnc][curSlot]; !ok { - stats[curEnc][curSlot] = map[string]string{} - } - stats[curEnc][curSlot][key] = 
value - } - } - - return stats, scanner.Err() -} - -func parseMegaCliAdapter(r io.Reader) (map[string]map[string]string, error) { - var ( - raidStats = map[string]map[string]string{} - scanner = bufio.NewScanner(r) - header = "" - last = "" - ) - - for scanner.Scan() { - text := strings.TrimSpace(scanner.Text()) - if text == adapterHeaderSep { - header = last - raidStats[header] = map[string]string{} - continue - } - last = text - if header == "" { // skip Adapter #X and separator - continue - } - parts := strings.SplitN(text, ":", 2) - if len(parts) != 2 { // these section never include anything we are interested in - continue - } - key := strings.TrimSpace(parts[0]) - value := strings.TrimSpace(parts[1]) - - raidStats[header][key] = value - - } - - return raidStats, scanner.Err() -} - -func (c *megaCliCollector) updateAdapter() error { - cmd := exec.Command(c.cli, "-AdpAllInfo", "-aALL") - pipe, err := cmd.StdoutPipe() - if err != nil { - return err - } - - if err := cmd.Start(); err != nil { - return err - } - - stats, err := parseMegaCliAdapter(pipe) - if err != nil { - return err - } - if err := cmd.Wait(); err != nil { - return err - } - - for k, v := range stats["Device Present"] { - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return err - } - c.drivePresence.WithLabelValues(k).Set(value) - } - return nil -} - -func (c *megaCliCollector) updateDisks() error { - var counters = []string{"Media Error Count", "Other Error Count", "Predictive Failure Count"} - - cmd := exec.Command(c.cli, "-PDList", "-aALL") - pipe, err := cmd.StdoutPipe() - if err != nil { - return err - } - - if err := cmd.Start(); err != nil { - return err - } - - stats, err := parseMegaCliDisks(pipe) - if err != nil { - return err - } - if err := cmd.Wait(); err != nil { - return err - } - - for enc, encStats := range stats { - for slot, slotStats := range encStats { - encStr := strconv.Itoa(enc) - slotStr := strconv.Itoa(slot) - - tStr := slotStats["Drive Temperature"] - if 
strings.Index(tStr, "C") > 0 { - tStr = tStr[:strings.Index(tStr, "C")] - t, err := strconv.ParseFloat(tStr, 64) - if err != nil { - return err - } - c.driveTemperature.WithLabelValues(encStr, slotStr).Set(t) - } - - for _, i := range counters { - counter, err := strconv.ParseFloat(slotStats[i], 64) - if err != nil { - return err - } - - c.driveCounters.WithLabelValues(encStr, slotStr, i).Set(counter) - } - } - } - return nil -} diff --git a/collector/megacli_test.go b/collector/megacli_test.go deleted file mode 100644 index adb653532e..0000000000 --- a/collector/megacli_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build !nomegacli - -package collector - -import ( - "os" - "testing" - - "github.com/prometheus/client_golang/prometheus" - "gopkg.in/alecthomas/kingpin.v2" -) - -const ( - testMegaCliAdapter = "fixtures/megacli_adapter.txt" - testMegaCliDisks = "fixtures/megacli_disks.txt" - - physicalDevicesExpected = "5" - virtualDevicesDegraded = "0" -) - -func TestMegaCliAdapter(t *testing.T) { - data, err := os.Open(testMegaCliAdapter) - if err != nil { - t.Fatal(err) - } - stats, err := parseMegaCliAdapter(data) - if err != nil { - t.Fatal(err) - } - - if stats["Device Present"]["Physical Devices"] != physicalDevicesExpected { - t.Fatalf("Unexpected device count: %s != %s", stats["Device Present"]["Physical Devices"], physicalDevicesExpected) - } - - if stats["Device Present"]["Degraded"] != virtualDevicesDegraded { - t.Fatalf("Unexpected degraded device count: %s != %s", stats["Device Present"]["Degraded"], virtualDevicesDegraded) - } -} - -func TestMegaCliDisks(t *testing.T) { - data, err := os.Open(testMegaCliDisks) - if err != nil { - t.Fatal(err) - } - stats, err := parseMegaCliDisks(data) - if err != nil { - t.Fatal(err) - } - - if stats[32][0]["Drive Temperature"] != "37C (98.60 F)" { - t.Fatalf("Unexpected drive temperature: %s", stats[32][0]["Drive Temperature"]) - } - - if stats[32][1]["Drive Temperature"] != "N/A" { - t.Fatalf("Unexpected drive temperature: %s", stats[32][2]["Drive Temperature"]) - } - - if stats[32][3]["Predictive Failure Count"] != "23" { - t.Fatalf("Unexpected predictive failure count: %s", stats[32][3]["Predictive Failure Count"]) - } -} - -func TestMegaCliCollectorDoesntCrash(t *testing.T) { - if _, err := kingpin.CommandLine.Parse([]string{"--collector.megacli.command", "./fixtures/megacli"}); err != nil { - t.Fatal(err) - } - collector, err := NewMegaCliCollector() - if err != nil { - t.Fatal(err) - } - sink := make(chan prometheus.Metric) - go func() { - for { - <-sink - } - }() - - err = collector.Update(sink) - if err != nil { - 
t.Fatal(err) - } -} diff --git a/collector/meminfo.go b/collector/meminfo.go index 2fc5a0d757..8437f0f172 100644 --- a/collector/meminfo.go +++ b/collector/meminfo.go @@ -11,49 +11,60 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build darwin dragonfly freebsd linux +//go:build (darwin || linux || openbsd) && !nomeminfo +// +build darwin linux openbsd // +build !nomeminfo package collector import ( "fmt" + "strings" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" ) const ( memInfoSubsystem = "memory" ) -type meminfoCollector struct{} +type meminfoCollector struct { + logger log.Logger +} func init() { registerCollector("meminfo", defaultEnabled, NewMeminfoCollector) } // NewMeminfoCollector returns a new Collector exposing memory stats. -func NewMeminfoCollector() (Collector, error) { - return &meminfoCollector{}, nil +func NewMeminfoCollector(logger log.Logger) (Collector, error) { + return &meminfoCollector{logger}, nil } // Update calls (*meminfoCollector).getMemInfo to get the platform specific // memory metrics. 
func (c *meminfoCollector) Update(ch chan<- prometheus.Metric) error { + var metricType prometheus.ValueType memInfo, err := c.getMemInfo() if err != nil { - return fmt.Errorf("couldn't get meminfo: %s", err) + return fmt.Errorf("couldn't get meminfo: %w", err) } - log.Debugf("Set node_mem: %#v", memInfo) + level.Debug(c.logger).Log("msg", "Set node_mem", "memInfo", memInfo) for k, v := range memInfo { + if strings.HasSuffix(k, "_total") { + metricType = prometheus.CounterValue + } else { + metricType = prometheus.GaugeValue + } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, memInfoSubsystem, k), fmt.Sprintf("Memory information field %s.", k), nil, nil, ), - prometheus.GaugeValue, v, + metricType, v, ) } return nil diff --git a/collector/meminfo_bsd.go b/collector/meminfo_bsd.go deleted file mode 100644 index 0c722c243f..0000000000 --- a/collector/meminfo_bsd.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build freebsd dragonfly -// +build !nomeminfo - -package collector - -import ( - "fmt" - - "golang.org/x/sys/unix" -) - -func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { - info := make(map[string]float64) - - tmp32, err := unix.SysctlUint32("vm.stats.vm.v_page_size") - if err != nil { - return nil, fmt.Errorf("sysctl(vm.stats.vm.v_page_size) failed: %s", err) - } - size := float64(tmp32) - fromPage := func(v float64) float64 { - return v * size - } - - for _, ctl := range []bsdSysctl{ - {name: "active_bytes", mib: "vm.stats.vm.v_active_count", conversion: fromPage}, - {name: "inactive_bytes", mib: "vm.stats.vm.v_inactive_count", conversion: fromPage}, - {name: "wired_bytes", mib: "vm.stats.vm.v_wire_count", conversion: fromPage}, - {name: "cache_bytes", mib: "vm.stats.vm.v_cache_count", conversion: fromPage}, - {name: "buffer_bytes", mib: "vfs.bufspace"}, - {name: "free_bytes", mib: "vm.stats.vm.v_free_count", conversion: fromPage}, - {name: "size_bytes", mib: "vm.stats.vm.v_page_count", conversion: fromPage}, - {name: "swap_in_bytes_total", mib: "vm.stats.vm.v_swappgsin", conversion: fromPage}, - {name: "swap_out_bytes_total", mib: "vm.stats.vm.v_swappgsout", conversion: fromPage}, - {name: "swap_size_bytes", mib: "vm.swap_total", dataType: bsdSysctlTypeUint64}, - } { - v, err := ctl.Value() - if err != nil { - return nil, err - } - - info[ctl.name] = v - } - - return info, nil -} diff --git a/collector/meminfo_darwin.go b/collector/meminfo_darwin.go index 0aa35ce039..0c7f64ee83 100644 --- a/collector/meminfo_darwin.go +++ b/collector/meminfo_darwin.go @@ -11,28 +11,31 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nomeminfo // +build !nomeminfo package collector // #include +// #include +// typedef struct xsw_usage xsw_usage_t; import "C" import ( "encoding/binary" "fmt" - "syscall" "unsafe" "golang.org/x/sys/unix" ) func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { - infoCount := C.mach_msg_type_number_t(C.HOST_VM_INFO_COUNT) - vmstat := C.vm_statistics_data_t{} - ret := C.host_statistics( - C.host_t(C.mach_host_self()), - C.HOST_VM_INFO, + host := C.mach_host_self() + infoCount := C.mach_msg_type_number_t(C.HOST_VM_INFO64_COUNT) + vmstat := C.vm_statistics64_data_t{} + ret := C.host_statistics64( + C.host_t(host), + C.HOST_VM_INFO64, C.host_info_t(unsafe.Pointer(&vmstat)), &infoCount, ) @@ -43,17 +46,30 @@ func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { if err != nil { return nil, err } + + swapraw, err := unix.SysctlRaw("vm.swapusage") + if err != nil { + return nil, err + } + swap := (*C.xsw_usage_t)(unsafe.Pointer(&swapraw[0])) + // Syscall removes terminating NUL which we need to cast to uint64 total := binary.LittleEndian.Uint64([]byte(totalb + "\x00")) - ps := C.natural_t(syscall.Getpagesize()) + var pageSize C.vm_size_t + C.host_page_size(C.host_t(host), &pageSize) + + ps := float64(pageSize) return map[string]float64{ - "active_bytes_total": float64(ps * vmstat.active_count), - "inactive_bytes_total": float64(ps * vmstat.inactive_count), - "wired_bytes_total": float64(ps * vmstat.wire_count), - "free_bytes_total": float64(ps * vmstat.free_count), - "swapped_in_pages_total": float64(ps * vmstat.pageins), - "swapped_out_pages_total": float64(ps * vmstat.pageouts), - "bytes_total": float64(total), + "active_bytes": ps * float64(vmstat.active_count), + "compressed_bytes": ps * float64(vmstat.compressor_page_count), + "inactive_bytes": ps * float64(vmstat.inactive_count), + "wired_bytes": ps * float64(vmstat.wire_count), + "free_bytes": ps * float64(vmstat.free_count), + "swapped_in_bytes_total": ps * 
float64(vmstat.pageins), + "swapped_out_bytes_total": ps * float64(vmstat.pageouts), + "total_bytes": float64(total), + "swap_used_bytes": float64(swap.xsu_used), + "swap_total_bytes": float64(swap.xsu_total), }, nil } diff --git a/collector/meminfo_linux.go b/collector/meminfo_linux.go index e17626dcaa..cee295024f 100644 --- a/collector/meminfo_linux.go +++ b/collector/meminfo_linux.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nomeminfo // +build !nomeminfo package collector @@ -25,6 +26,10 @@ import ( "strings" ) +var ( + reParens = regexp.MustCompile(`\((.*)\)`) +) + func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { file, err := os.Open(procFilePath("meminfo")) if err != nil { @@ -39,26 +44,30 @@ func parseMemInfo(r io.Reader) (map[string]float64, error) { var ( memInfo = map[string]float64{} scanner = bufio.NewScanner(r) - re = regexp.MustCompile(`\((.*)\)`) ) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(line) + // Workaround for empty lines occasionally occur in CentOS 6.2 kernel 3.10.90. 
+ if len(parts) == 0 { + continue + } fv, err := strconv.ParseFloat(parts[1], 64) if err != nil { - return nil, fmt.Errorf("invalid value in meminfo: %s", err) + return nil, fmt.Errorf("invalid value in meminfo: %w", err) } + key := parts[0][:len(parts[0])-1] // remove trailing : from key + // Active(anon) -> Active_anon + key = reParens.ReplaceAllString(key, "_${1}") switch len(parts) { case 2: // no unit case 3: // has unit, we presume kB fv *= 1024 + key = key + "_bytes" default: return nil, fmt.Errorf("invalid line in meminfo: %s", line) } - key := parts[0][:len(parts[0])-1] // remove trailing : from key - // Active(anon) -> Active_anon - key = re.ReplaceAllString(key, "_${1}") memInfo[key] = fv } diff --git a/collector/meminfo_linux_test.go b/collector/meminfo_linux_test.go index c42bdf0303..dc0aff58d7 100644 --- a/collector/meminfo_linux_test.go +++ b/collector/meminfo_linux_test.go @@ -30,11 +30,11 @@ func TestMemInfo(t *testing.T) { t.Fatal(err) } - if want, got := 3831959552.0, memInfo["MemTotal"]; want != got { + if want, got := 3831959552.0, memInfo["MemTotal_bytes"]; want != got { t.Errorf("want memory total %f, got %f", want, got) } - if want, got := 3787456512.0, memInfo["DirectMap2M"]; want != got { + if want, got := 3787456512.0, memInfo["DirectMap2M_bytes"]; want != got { t.Errorf("want memory directMap2M %f, got %f", want, got) } } diff --git a/collector/meminfo_numa_linux.go b/collector/meminfo_numa_linux.go index b096164ae8..5ce08e99d9 100644 --- a/collector/meminfo_numa_linux.go +++ b/collector/meminfo_numa_linux.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nomeminfo_numa // +build !nomeminfo_numa package collector @@ -20,12 +21,12 @@ import ( "fmt" "io" "os" - "path" "path/filepath" "regexp" "strconv" "strings" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -44,6 +45,7 @@ type meminfoMetric struct { type meminfoNumaCollector struct { metricDescs map[string]*prometheus.Desc + logger log.Logger } func init() { @@ -51,16 +53,17 @@ func init() { } // NewMeminfoNumaCollector returns a new Collector exposing memory stats. -func NewMeminfoNumaCollector() (Collector, error) { +func NewMeminfoNumaCollector(logger log.Logger) (Collector, error) { return &meminfoNumaCollector{ metricDescs: map[string]*prometheus.Desc{}, + logger: logger, }, nil } func (c *meminfoNumaCollector) Update(ch chan<- prometheus.Metric) error { metrics, err := getMemInfoNuma() if err != nil { - return fmt.Errorf("couldn't get NUMA meminfo: %s", err) + return fmt.Errorf("couldn't get NUMA meminfo: %w", err) } for _, v := range metrics { desc, ok := c.metricDescs[v.metricName] @@ -86,7 +89,7 @@ func getMemInfoNuma() ([]meminfoMetric, error) { return nil, err } for _, node := range nodes { - meminfoFile, err := os.Open(path.Join(node, "meminfo")) + meminfoFile, err := os.Open(filepath.Join(node, "meminfo")) if err != nil { return nil, err } @@ -98,7 +101,7 @@ func getMemInfoNuma() ([]meminfoMetric, error) { } metrics = append(metrics, numaInfo...) 
- numastatFile, err := os.Open(path.Join(node, "numastat")) + numastatFile, err := os.Open(filepath.Join(node, "numastat")) if err != nil { return nil, err } @@ -135,7 +138,7 @@ func parseMemInfoNuma(r io.Reader) ([]meminfoMetric, error) { fv, err := strconv.ParseFloat(parts[3], 64) if err != nil { - return nil, fmt.Errorf("invalid value in meminfo: %s", err) + return nil, fmt.Errorf("invalid value in meminfo: %w", err) } switch l := len(parts); { case l == 4: // no unit @@ -172,7 +175,7 @@ func parseMemInfoNumaStat(r io.Reader, nodeNumber string) ([]meminfoMetric, erro fv, err := strconv.ParseFloat(parts[1], 64) if err != nil { - return nil, fmt.Errorf("invalid value in numastat: %s", err) + return nil, fmt.Errorf("invalid value in numastat: %w", err) } numaStat = append(numaStat, meminfoMetric{parts[0] + "_total", prometheus.CounterValue, nodeNumber, fv}) diff --git a/collector/meminfo_openbsd.go b/collector/meminfo_openbsd.go new file mode 100644 index 0000000000..2c81c50ccd --- /dev/null +++ b/collector/meminfo_openbsd.go @@ -0,0 +1,83 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build openbsd && !amd64 && !nomeminfo +// +build openbsd,!amd64,!nomeminfo + +package collector + +import ( + "fmt" +) + +/* +#include +#include +#include +#include + +int +sysctl_uvmexp(struct uvmexp *uvmexp) +{ + static int uvmexp_mib[] = {CTL_VM, VM_UVMEXP}; + size_t sz = sizeof(struct uvmexp); + + if(sysctl(uvmexp_mib, 2, uvmexp, &sz, NULL, 0) < 0) + return -1; + + return 0; +} + +int +sysctl_bcstats(struct bcachestats *bcstats) +{ + static int bcstats_mib[] = {CTL_VFS, VFS_GENERIC, VFS_BCACHESTAT}; + size_t sz = sizeof(struct bcachestats); + + if(sysctl(bcstats_mib, 3, bcstats, &sz, NULL, 0) < 0) + return -1; + + return 0; +} + +*/ +import "C" + +func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { + var uvmexp C.struct_uvmexp + var bcstats C.struct_bcachestats + + if _, err := C.sysctl_uvmexp(&uvmexp); err != nil { + return nil, fmt.Errorf("sysctl CTL_VM VM_UVMEXP failed: %w", err) + } + + if _, err := C.sysctl_bcstats(&bcstats); err != nil { + return nil, fmt.Errorf("sysctl CTL_VFS VFS_GENERIC VFS_BCACHESTAT failed: %w", err) + } + + ps := float64(uvmexp.pagesize) + + // see uvm(9) + return map[string]float64{ + "active_bytes": ps * float64(uvmexp.active), + "cache_bytes": ps * float64(bcstats.numbufpages), + "free_bytes": ps * float64(uvmexp.free), + "inactive_bytes": ps * float64(uvmexp.inactive), + "size_bytes": ps * float64(uvmexp.npages), + "swap_size_bytes": ps * float64(uvmexp.swpages), + "swap_used_bytes": ps * float64(uvmexp.swpginuse), + "swapped_in_pages_bytes_total": ps * float64(uvmexp.pgswapin), + "swapped_out_pages_bytes_total": ps * float64(uvmexp.pgswapout), + "wired_bytes": ps * float64(uvmexp.wired), + }, nil +} diff --git a/collector/meminfo_openbsd_amd64.go b/collector/meminfo_openbsd_amd64.go new file mode 100644 index 0000000000..41adebc3a4 --- /dev/null +++ b/collector/meminfo_openbsd_amd64.go @@ -0,0 +1,81 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the 
"License") +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nomeminfo +// +build !nomeminfo + +package collector + +import ( + "golang.org/x/sys/unix" + "unsafe" +) + +const ( + CTL_VFS = 10 + VFS_GENERIC = 0 + VFS_BCACHESTAT = 3 +) + +type bcachestats struct { + Numbufs int64 + Numbufpages int64 + Numdirtypages int64 + Numcleanpages int64 + Pendingwrites int64 + Pendingreads int64 + Numwrites int64 + Numreads int64 + Cachehits int64 + Busymapped int64 + Dmapages int64 + Highpages int64 + Delwribufs int64 + Kvaslots int64 + Kvaslots_avail int64 + Highflips int64 + Highflops int64 + Dmaflips int64 +} + +func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { + uvmexpb, err := unix.SysctlRaw("vm.uvmexp") + if err != nil { + return nil, err + } + + mib := [3]_C_int{CTL_VFS, VFS_GENERIC, VFS_BCACHESTAT} + bcstatsb, err := sysctl(mib[:]) + if err != nil { + return nil, err + } + + uvmexp := *(*unix.Uvmexp)(unsafe.Pointer(&uvmexpb[0])) + ps := float64(uvmexp.Pagesize) + + bcstats := *(*bcachestats)(unsafe.Pointer(&bcstatsb[0])) + + // see uvm(9) + return map[string]float64{ + "active_bytes": ps * float64(uvmexp.Active), + "cache_bytes": ps * float64(bcstats.Numbufpages), + "free_bytes": ps * float64(uvmexp.Free), + "inactive_bytes": ps * float64(uvmexp.Inactive), + "size_bytes": ps * float64(uvmexp.Npages), + "swap_size_bytes": ps * float64(uvmexp.Swpages), + "swap_used_bytes": ps * float64(uvmexp.Swpginuse), + "swapped_in_pages_bytes_total": ps * float64(uvmexp.Pgswapin), + 
"swapped_out_pages_bytes_total": ps * float64(uvmexp.Pgswapout), + "wired_bytes": ps * float64(uvmexp.Wired), + }, nil +} diff --git a/collector/memory_bsd.go b/collector/memory_bsd.go new file mode 100644 index 0000000000..5b312691e9 --- /dev/null +++ b/collector/memory_bsd.go @@ -0,0 +1,169 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build (freebsd || dragonfly) && !nomeminfo +// +build freebsd dragonfly +// +build !nomeminfo + +package collector + +import ( + "fmt" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" +) + +const ( + memorySubsystem = "memory" +) + +type memoryCollector struct { + pageSize uint64 + sysctls []bsdSysctl + kvm kvm + logger log.Logger +} + +func init() { + registerCollector("meminfo", defaultEnabled, NewMemoryCollector) +} + +// NewMemoryCollector returns a new Collector exposing memory stats. +func NewMemoryCollector(logger log.Logger) (Collector, error) { + tmp32, err := unix.SysctlUint32("vm.stats.vm.v_page_size") + if err != nil { + return nil, fmt.Errorf("sysctl(vm.stats.vm.v_page_size) failed: %w", err) + } + size := float64(tmp32) + + mibSwapTotal := "vm.swap_total" + /* swap_total is FreeBSD specific. Fall back to Dfly specific mib if not present. 
*/ + _, err = unix.SysctlUint64(mibSwapTotal) + if err != nil { + mibSwapTotal = "vm.swap_size" + } + + fromPage := func(v float64) float64 { + return v * size + } + + return &memoryCollector{ + logger: logger, + pageSize: uint64(tmp32), + sysctls: []bsdSysctl{ + // Descriptions via: https://wiki.freebsd.org/Memory + { + name: "active_bytes", + description: "Recently used by userland", + mib: "vm.stats.vm.v_active_count", + conversion: fromPage, + }, + { + name: "inactive_bytes", + description: "Not recently used by userland", + mib: "vm.stats.vm.v_inactive_count", + conversion: fromPage, + }, + { + name: "wired_bytes", + description: "Locked in memory by kernel, mlock, etc", + mib: "vm.stats.vm.v_wire_count", + conversion: fromPage, + }, + { + name: "cache_bytes", + description: "Almost free, backed by swap or files, available for re-allocation", + mib: "vm.stats.vm.v_cache_count", + conversion: fromPage, + }, + { + name: "buffer_bytes", + description: "Disk IO Cache entries for non ZFS filesystems, only usable by kernel", + mib: "vfs.bufspace", + dataType: bsdSysctlTypeCLong, + }, + { + name: "free_bytes", + description: "Unallocated, available for allocation", + mib: "vm.stats.vm.v_free_count", + conversion: fromPage, + }, + { + name: "size_bytes", + description: "Total physical memory size", + mib: "vm.stats.vm.v_page_count", + conversion: fromPage, + }, + { + name: "swap_size_bytes", + description: "Total swap memory size", + mib: mibSwapTotal, + dataType: bsdSysctlTypeUint64, + }, + // Descriptions via: top(1) + { + name: "swap_in_bytes_total", + description: "Bytes paged in from swap devices", + mib: "vm.stats.vm.v_swappgsin", + valueType: prometheus.CounterValue, + conversion: fromPage, + }, + { + name: "swap_out_bytes_total", + description: "Bytes paged out to swap devices", + mib: "vm.stats.vm.v_swappgsout", + valueType: prometheus.CounterValue, + conversion: fromPage, + }, + }, + }, nil +} + +// Update checks relevant sysctls for current memory usage, 
and kvm for swap +// usage. +func (c *memoryCollector) Update(ch chan<- prometheus.Metric) error { + for _, m := range c.sysctls { + v, err := m.Value() + if err != nil { + return fmt.Errorf("couldn't get memory: %w", err) + } + + // Most are gauges. + if m.valueType == 0 { + m.valueType = prometheus.GaugeValue + } + + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, memorySubsystem, m.name), + m.description, + nil, nil, + ), m.valueType, v) + } + + swapUsed, err := c.kvm.SwapUsedPages() + if err != nil { + return fmt.Errorf("couldn't get kvm: %w", err) + } + + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, memorySubsystem, "swap_used_bytes"), + "Currently allocated swap", + nil, nil, + ), prometheus.GaugeValue, float64(swapUsed*c.pageSize)) + + return nil +} diff --git a/collector/mountstats_linux.go b/collector/mountstats_linux.go index 62e715df20..9dea6fad71 100644 --- a/collector/mountstats_linux.go +++ b/collector/mountstats_linux.go @@ -11,17 +11,25 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nomountstats +// +build !nomountstats + package collector import ( "fmt" - "sort" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" "github.com/prometheus/procfs" ) +var ( + // 64-bit float mantissa: https://en.wikipedia.org/wiki/Double-precision_floating-point_format + float64Mantissa uint64 = 9007199254740992 +) + type mountStatsCollector struct { // General statistics NFSAgeSecondsTotal *prometheus.Desc @@ -87,6 +95,15 @@ type mountStatsCollector struct { NFSEventPNFSWriteTotal *prometheus.Desc proc procfs.Proc + + logger log.Logger +} + +// used to uniquely identify an NFS mount to prevent duplicates +type nfsDeviceIdentifier struct { + Device string + Protocol string + MountAddress string } func init() { @@ -94,15 +111,15 @@ func init() { } // NewMountStatsCollector returns a new Collector exposing NFS statistics. -func NewMountStatsCollector() (Collector, error) { +func NewMountStatsCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { - return nil, fmt.Errorf("failed to open procfs: %v", err) + return nil, fmt.Errorf("failed to open procfs: %w", err) } proc, err := fs.Self() if err != nil { - return nil, fmt.Errorf("failed to open /proc/self: %v", err) + return nil, fmt.Errorf("failed to open /proc/self: %w", err) } const ( @@ -111,8 +128,8 @@ func NewMountStatsCollector() (Collector, error) { ) var ( - labels = []string{"export"} - opLabels = []string{"export", "operation"} + labels = []string{"export", "protocol", "mountaddr"} + opLabels = []string{"export", "protocol", "mountaddr", "operation"} ) return &mountStatsCollector{ @@ -487,234 +504,246 @@ func NewMountStatsCollector() (Collector, error) { nil, ), - proc: proc, + proc: proc, + logger: logger, }, nil } func (c *mountStatsCollector) Update(ch chan<- prometheus.Metric) error { mounts, err := c.proc.MountStats() if err != nil { - return 
fmt.Errorf("failed to parse mountstats: %v", err) + return fmt.Errorf("failed to parse mountstats: %w", err) + } + + mountsInfo, err := c.proc.MountInfo() + if err != nil { + return fmt.Errorf("failed to parse mountinfo: %w", err) } - var deviceList []string - for _, m := range mounts { + // store all seen nfsDeviceIdentifiers for deduplication + deviceList := make(map[nfsDeviceIdentifier]bool) + + for idx, m := range mounts { // For the time being, only NFS statistics are available via this mechanism stats, ok := m.Stats.(*procfs.MountStatsNFS) + if !ok { continue } - sort.Strings(deviceList) - i := sort.SearchStrings(deviceList, m.Device) - if i < len(deviceList) && deviceList[i] == m.Device { - log.Debugf("Skipping duplicate device entry %q", m.Device) - } else { - deviceList = append(deviceList, m.Device) - c.updateNFSStats(ch, m.Device, stats) + var mountAddress string + if idx < len(mountsInfo) { + // The mount entry order in the /proc/self/mountstats and /proc/self/mountinfo is the same. 
+ miStats := mountsInfo[idx] + mountAddress = miStats.SuperOptions["addr"] + } + + deviceIdentifier := nfsDeviceIdentifier{m.Device, stats.Transport.Protocol, mountAddress} + i := deviceList[deviceIdentifier] + if i { + level.Debug(c.logger).Log("msg", "Skipping duplicate device entry", "device", deviceIdentifier) + continue } + + deviceList[deviceIdentifier] = true + c.updateNFSStats(ch, stats, m.Device, stats.Transport.Protocol, mountAddress) } return nil } -func (c *mountStatsCollector) updateNFSStats(ch chan<- prometheus.Metric, export string, s *procfs.MountStatsNFS) { +func (c *mountStatsCollector) updateNFSStats(ch chan<- prometheus.Metric, s *procfs.MountStatsNFS, export, protocol, mountAddress string) { + labelValues := []string{export, protocol, mountAddress} ch <- prometheus.MustNewConstMetric( c.NFSAgeSecondsTotal, prometheus.CounterValue, s.Age.Seconds(), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSReadBytesTotal, prometheus.CounterValue, float64(s.Bytes.Read), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSWriteBytesTotal, prometheus.CounterValue, float64(s.Bytes.Write), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSDirectReadBytesTotal, prometheus.CounterValue, float64(s.Bytes.DirectRead), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSDirectWriteBytesTotal, prometheus.CounterValue, float64(s.Bytes.DirectWrite), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTotalReadBytesTotal, prometheus.CounterValue, float64(s.Bytes.ReadTotal), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTotalWriteBytesTotal, prometheus.CounterValue, float64(s.Bytes.WriteTotal), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSReadPagesTotal, prometheus.CounterValue, float64(s.Bytes.ReadPages), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSWritePagesTotal, 
prometheus.CounterValue, float64(s.Bytes.WritePages), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportBindTotal, prometheus.CounterValue, float64(s.Transport.Bind), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportConnectTotal, prometheus.CounterValue, float64(s.Transport.Connect), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportIdleTimeSeconds, prometheus.GaugeValue, - s.Transport.IdleTime.Seconds(), - export, + float64(s.Transport.IdleTimeSeconds%float64Mantissa), + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportSendsTotal, prometheus.CounterValue, float64(s.Transport.Sends), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportReceivesTotal, prometheus.CounterValue, float64(s.Transport.Receives), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportBadTransactionIDsTotal, prometheus.CounterValue, float64(s.Transport.BadTransactionIDs), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportBacklogQueueTotal, prometheus.CounterValue, float64(s.Transport.CumulativeBacklog), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportMaximumRPCSlots, prometheus.GaugeValue, float64(s.Transport.MaximumRPCSlotsUsed), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportSendingQueueTotal, prometheus.CounterValue, float64(s.Transport.CumulativeSendingQueue), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportPendingQueueTotal, prometheus.CounterValue, float64(s.Transport.CumulativePendingQueue), - export, + labelValues..., ) for _, op := range s.Operations { + opLabelValues := []string{export, protocol, mountAddress, op.Operation} + ch <- prometheus.MustNewConstMetric( c.NFSOperationsRequestsTotal, prometheus.CounterValue, float64(op.Requests), - export, - op.Operation, + opLabelValues..., ) ch 
<- prometheus.MustNewConstMetric( c.NFSOperationsTransmissionsTotal, prometheus.CounterValue, float64(op.Transmissions), - export, - op.Operation, + opLabelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSOperationsMajorTimeoutsTotal, prometheus.CounterValue, float64(op.MajorTimeouts), - export, - op.Operation, + opLabelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSOperationsSentBytesTotal, prometheus.CounterValue, float64(op.BytesSent), - export, - op.Operation, + opLabelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSOperationsReceivedBytesTotal, prometheus.CounterValue, float64(op.BytesReceived), - export, - op.Operation, + opLabelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSOperationsQueueTimeSecondsTotal, prometheus.CounterValue, - op.CumulativeQueueTime.Seconds(), - export, - op.Operation, + float64(op.CumulativeQueueMilliseconds%float64Mantissa)/1000.0, + opLabelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSOperationsResponseTimeSecondsTotal, prometheus.CounterValue, - op.CumulativeTotalResponseTime.Seconds(), - export, - op.Operation, + float64(op.CumulativeTotalResponseMilliseconds%float64Mantissa)/1000.0, + opLabelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSOperationsRequestTimeSecondsTotal, prometheus.CounterValue, - op.CumulativeTotalRequestTime.Seconds(), - export, - op.Operation, + float64(op.CumulativeTotalRequestMilliseconds%float64Mantissa)/1000.0, + opLabelValues..., ) } @@ -722,181 +751,181 @@ func (c *mountStatsCollector) updateNFSStats(ch chan<- prometheus.Metric, export c.NFSEventInodeRevalidateTotal, prometheus.CounterValue, float64(s.Events.InodeRevalidate), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventDnodeRevalidateTotal, prometheus.CounterValue, float64(s.Events.DnodeRevalidate), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventDataInvalidateTotal, prometheus.CounterValue, float64(s.Events.DataInvalidate), - export, + 
labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventAttributeInvalidateTotal, prometheus.CounterValue, float64(s.Events.AttributeInvalidate), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSOpenTotal, prometheus.CounterValue, float64(s.Events.VFSOpen), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSLookupTotal, prometheus.CounterValue, float64(s.Events.VFSLookup), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSAccessTotal, prometheus.CounterValue, float64(s.Events.VFSAccess), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSUpdatePageTotal, prometheus.CounterValue, float64(s.Events.VFSUpdatePage), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSReadPageTotal, prometheus.CounterValue, float64(s.Events.VFSReadPage), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSReadPagesTotal, prometheus.CounterValue, float64(s.Events.VFSReadPages), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSWritePageTotal, prometheus.CounterValue, float64(s.Events.VFSWritePage), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSWritePagesTotal, prometheus.CounterValue, float64(s.Events.VFSWritePages), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSGetdentsTotal, prometheus.CounterValue, float64(s.Events.VFSGetdents), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSSetattrTotal, prometheus.CounterValue, float64(s.Events.VFSSetattr), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSFlushTotal, prometheus.CounterValue, float64(s.Events.VFSFlush), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSFsyncTotal, prometheus.CounterValue, float64(s.Events.VFSFsync), - export, + labelValues..., ) ch <- 
prometheus.MustNewConstMetric( c.NFSEventVFSLockTotal, prometheus.CounterValue, float64(s.Events.VFSLock), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSFileReleaseTotal, prometheus.CounterValue, float64(s.Events.VFSFileRelease), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventTruncationTotal, prometheus.CounterValue, float64(s.Events.Truncation), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventWriteExtensionTotal, prometheus.CounterValue, float64(s.Events.WriteExtension), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventSillyRenameTotal, prometheus.CounterValue, float64(s.Events.SillyRename), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventShortReadTotal, prometheus.CounterValue, float64(s.Events.ShortRead), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventShortWriteTotal, prometheus.CounterValue, float64(s.Events.ShortWrite), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventJukeboxDelayTotal, prometheus.CounterValue, float64(s.Events.JukeboxDelay), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventPNFSReadTotal, prometheus.CounterValue, float64(s.Events.PNFSRead), - export, + labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventPNFSWriteTotal, prometheus.CounterValue, float64(s.Events.PNFSWrite), - export, + labelValues..., ) } diff --git a/collector/netclass_linux.go b/collector/netclass_linux.go new file mode 100644 index 0000000000..a1a3fa72d8 --- /dev/null +++ b/collector/netclass_linux.go @@ -0,0 +1,204 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nonetclass && linux +// +build !nonetclass,linux + +package collector + +import ( + "errors" + "fmt" + "os" + "regexp" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" + "gopkg.in/alecthomas/kingpin.v2" +) + +var ( + netclassIgnoredDevices = kingpin.Flag("collector.netclass.ignored-devices", "Regexp of net devices to ignore for netclass collector.").Default("^$").String() + netclassInvalidSpeed = kingpin.Flag("collector.netclass.ignore-invalid-speed", "Ignore devices where the speed is invalid. This will be the default behavior in 2.x.").Bool() +) + +type netClassCollector struct { + fs sysfs.FS + subsystem string + ignoredDevicesPattern *regexp.Regexp + metricDescs map[string]*prometheus.Desc + logger log.Logger +} + +func init() { + registerCollector("netclass", defaultEnabled, NewNetClassCollector) +} + +// NewNetClassCollector returns a new Collector exposing network class stats. 
+func NewNetClassCollector(logger log.Logger) (Collector, error) { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + pattern := regexp.MustCompile(*netclassIgnoredDevices) + return &netClassCollector{ + fs: fs, + subsystem: "network", + ignoredDevicesPattern: pattern, + metricDescs: map[string]*prometheus.Desc{}, + logger: logger, + }, nil +} + +func (c *netClassCollector) Update(ch chan<- prometheus.Metric) error { + netClass, err := c.getNetClassInfo() + if err != nil { + if errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrPermission) { + level.Debug(c.logger).Log("msg", "Could not read netclass file", "err", err) + return ErrNoData + } + return fmt.Errorf("could not get net class info: %w", err) + } + for _, ifaceInfo := range netClass { + upDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "up"), + "Value is 1 if operstate is 'up', 0 otherwise.", + []string{"device"}, + nil, + ) + upValue := 0.0 + if ifaceInfo.OperState == "up" { + upValue = 1.0 + } + + ch <- prometheus.MustNewConstMetric(upDesc, prometheus.GaugeValue, upValue, ifaceInfo.Name) + + infoDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "info"), + "Non-numeric data from /sys/class/net/, value is always 1.", + []string{"device", "address", "broadcast", "duplex", "operstate", "ifalias"}, + nil, + ) + infoValue := 1.0 + + ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, ifaceInfo.Name, ifaceInfo.Address, ifaceInfo.Broadcast, ifaceInfo.Duplex, ifaceInfo.OperState, ifaceInfo.IfAlias) + + if ifaceInfo.AddrAssignType != nil { + pushMetric(ch, c.subsystem, "address_assign_type", *ifaceInfo.AddrAssignType, ifaceInfo.Name, prometheus.GaugeValue) + } + + if ifaceInfo.Carrier != nil { + pushMetric(ch, c.subsystem, "carrier", *ifaceInfo.Carrier, ifaceInfo.Name, prometheus.GaugeValue) + } + + if ifaceInfo.CarrierChanges != nil { + pushMetric(ch, 
c.subsystem, "carrier_changes_total", *ifaceInfo.CarrierChanges, ifaceInfo.Name, prometheus.CounterValue) + } + + if ifaceInfo.CarrierUpCount != nil { + pushMetric(ch, c.subsystem, "carrier_up_changes_total", *ifaceInfo.CarrierUpCount, ifaceInfo.Name, prometheus.CounterValue) + } + + if ifaceInfo.CarrierDownCount != nil { + pushMetric(ch, c.subsystem, "carrier_down_changes_total", *ifaceInfo.CarrierDownCount, ifaceInfo.Name, prometheus.CounterValue) + } + + if ifaceInfo.DevID != nil { + pushMetric(ch, c.subsystem, "device_id", *ifaceInfo.DevID, ifaceInfo.Name, prometheus.GaugeValue) + } + + if ifaceInfo.Dormant != nil { + pushMetric(ch, c.subsystem, "dormant", *ifaceInfo.Dormant, ifaceInfo.Name, prometheus.GaugeValue) + } + + if ifaceInfo.Flags != nil { + pushMetric(ch, c.subsystem, "flags", *ifaceInfo.Flags, ifaceInfo.Name, prometheus.GaugeValue) + } + + if ifaceInfo.IfIndex != nil { + pushMetric(ch, c.subsystem, "iface_id", *ifaceInfo.IfIndex, ifaceInfo.Name, prometheus.GaugeValue) + } + + if ifaceInfo.IfLink != nil { + pushMetric(ch, c.subsystem, "iface_link", *ifaceInfo.IfLink, ifaceInfo.Name, prometheus.GaugeValue) + } + + if ifaceInfo.LinkMode != nil { + pushMetric(ch, c.subsystem, "iface_link_mode", *ifaceInfo.LinkMode, ifaceInfo.Name, prometheus.GaugeValue) + } + + if ifaceInfo.MTU != nil { + pushMetric(ch, c.subsystem, "mtu_bytes", *ifaceInfo.MTU, ifaceInfo.Name, prometheus.GaugeValue) + } + + if ifaceInfo.NameAssignType != nil { + pushMetric(ch, c.subsystem, "name_assign_type", *ifaceInfo.NameAssignType, ifaceInfo.Name, prometheus.GaugeValue) + } + + if ifaceInfo.NetDevGroup != nil { + pushMetric(ch, c.subsystem, "net_dev_group", *ifaceInfo.NetDevGroup, ifaceInfo.Name, prometheus.GaugeValue) + } + + if ifaceInfo.Speed != nil { + // Some devices return -1 if the speed is unknown. 
+ if *ifaceInfo.Speed >= 0 || !*netclassInvalidSpeed { + speedBytes := int64(*ifaceInfo.Speed * 1000 * 1000 / 8) + pushMetric(ch, c.subsystem, "speed_bytes", speedBytes, ifaceInfo.Name, prometheus.GaugeValue) + } + } + + if ifaceInfo.TxQueueLen != nil { + pushMetric(ch, c.subsystem, "transmit_queue_length", *ifaceInfo.TxQueueLen, ifaceInfo.Name, prometheus.GaugeValue) + } + + if ifaceInfo.Type != nil { + pushMetric(ch, c.subsystem, "protocol_type", *ifaceInfo.Type, ifaceInfo.Name, prometheus.GaugeValue) + } + } + + return nil +} + +func pushMetric(ch chan<- prometheus.Metric, subsystem string, name string, value int64, ifaceName string, valueType prometheus.ValueType) { + fieldDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, name), + fmt.Sprintf("%s value of /sys/class/net/.", name), + []string{"device"}, + nil, + ) + + ch <- prometheus.MustNewConstMetric(fieldDesc, valueType, float64(value), ifaceName) +} + +func (c *netClassCollector) getNetClassInfo() (sysfs.NetClass, error) { + netClass := sysfs.NetClass{} + netDevices, err := c.fs.NetClassDevices() + if err != nil { + return netClass, err + } + + for _, device := range netDevices { + if c.ignoredDevicesPattern.MatchString(device) { + continue + } + interfaceClass, err := c.fs.NetClassByIface(device) + if err != nil { + return netClass, err + } + netClass[device] = *interfaceClass + } + + return netClass, nil +} diff --git a/collector/netdev_bsd.go b/collector/netdev_bsd.go index d846daf501..d472b2dfeb 100644 --- a/collector/netdev_bsd.go +++ b/collector/netdev_bsd.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nonetdev && (freebsd || dragonfly) // +build !nonetdev // +build freebsd dragonfly @@ -18,10 +19,9 @@ package collector import ( "errors" - "regexp" - "strconv" - "github.com/prometheus/common/log" + "github.com/go-kit/log" + "github.com/go-kit/log/level" ) /* @@ -34,8 +34,8 @@ import ( */ import "C" -func getNetDevStats(ignore *regexp.Regexp) (map[string]map[string]string, error) { - netDev := map[string]map[string]string{} +func getNetDevStats(filter *netDevFilter, logger log.Logger) (netDevStats, error) { + netDev := netDevStats{} var ifap, ifa *C.struct_ifaddrs if C.getifaddrs(&ifap) == -1 { @@ -44,33 +44,31 @@ func getNetDevStats(ignore *regexp.Regexp) (map[string]map[string]string, error) defer C.freeifaddrs(ifap) for ifa = ifap; ifa != nil; ifa = ifa.ifa_next { - if ifa.ifa_addr.sa_family == C.AF_LINK { - dev := C.GoString(ifa.ifa_name) - if ignore.MatchString(dev) { - log.Debugf("Ignoring device: %s", dev) - continue - } + if ifa.ifa_addr.sa_family != C.AF_LINK { + continue + } + + dev := C.GoString(ifa.ifa_name) + if filter.ignored(dev) { + level.Debug(logger).Log("msg", "Ignoring device", "device", dev) + continue + } - devStats := map[string]string{} - data := (*C.struct_if_data)(ifa.ifa_data) + data := (*C.struct_if_data)(ifa.ifa_data) - devStats["receive_packets"] = convertFreeBSDCPUTime(uint64(data.ifi_ipackets)) - devStats["transmit_packets"] = convertFreeBSDCPUTime(uint64(data.ifi_opackets)) - devStats["receive_errs"] = convertFreeBSDCPUTime(uint64(data.ifi_ierrors)) - devStats["transmit_errs"] = convertFreeBSDCPUTime(uint64(data.ifi_oerrors)) - devStats["receive_bytes"] = convertFreeBSDCPUTime(uint64(data.ifi_ibytes)) - devStats["transmit_bytes"] = convertFreeBSDCPUTime(uint64(data.ifi_obytes)) - devStats["receive_multicast"] = convertFreeBSDCPUTime(uint64(data.ifi_imcasts)) - devStats["transmit_multicast"] = convertFreeBSDCPUTime(uint64(data.ifi_omcasts)) - devStats["receive_drop"] = convertFreeBSDCPUTime(uint64(data.ifi_iqdrops)) - 
devStats["transmit_drop"] = convertFreeBSDCPUTime(uint64(data.ifi_oqdrops)) - netDev[dev] = devStats + netDev[dev] = map[string]uint64{ + "receive_packets": uint64(data.ifi_ipackets), + "transmit_packets": uint64(data.ifi_opackets), + "receive_errs": uint64(data.ifi_ierrors), + "transmit_errs": uint64(data.ifi_oerrors), + "receive_bytes": uint64(data.ifi_ibytes), + "transmit_bytes": uint64(data.ifi_obytes), + "receive_multicast": uint64(data.ifi_imcasts), + "transmit_multicast": uint64(data.ifi_omcasts), + "receive_drop": uint64(data.ifi_iqdrops), + "transmit_drop": uint64(data.ifi_oqdrops), } } return netDev, nil } - -func convertFreeBSDCPUTime(counter uint64) string { - return strconv.FormatUint(counter, 10) -} diff --git a/collector/netdev_bsd_test.go b/collector/netdev_bsd_test.go deleted file mode 100644 index a661c1cc0e..0000000000 --- a/collector/netdev_bsd_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build !nonetdev -// +build freebsd dragonfly - -package collector - -import "testing" - -type uintToStringTest struct { - in uint64 - out string -} - -var uinttostringtests = []uintToStringTest{ - // Copied base10 values from strconv's tests: - {0, "0"}, - {1, "1"}, - {12345678, "12345678"}, - {1<<31 - 1, "2147483647"}, - {1 << 31, "2147483648"}, - {1<<31 + 1, "2147483649"}, - {1<<32 - 1, "4294967295"}, - {1 << 32, "4294967296"}, - {1<<32 + 1, "4294967297"}, - {1 << 50, "1125899906842624"}, - {1<<63 - 1, "9223372036854775807"}, - - // Some values that convert correctly on amd64, but not on i386. - {0x1bf0c640a, "7500227594"}, - {0xbee5df75, "3202735989"}, -} - -func TestUintToString(t *testing.T) { - for _, test := range uinttostringtests { - is := convertFreeBSDCPUTime(test.in) - if is != test.out { - t.Errorf("convertFreeBSDCPUTime(%v) = %v want %v", - test.in, is, test.out) - } - } -} diff --git a/collector/netdev_common.go b/collector/netdev_common.go index dee69e13a0..0b40b99284 100644 --- a/collector/netdev_common.go +++ b/collector/netdev_common.go @@ -11,67 +11,167 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nonetdev && (linux || freebsd || openbsd || dragonfly || darwin) // +build !nonetdev // +build linux freebsd openbsd dragonfly darwin package collector import ( + "errors" "fmt" - "regexp" + "net" "strconv" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "gopkg.in/alecthomas/kingpin.v2" ) var ( - netdevIgnoredDevices = kingpin.Flag("collector.netdev.ignored-devices", "Regexp of net devices to ignore for netdev collector.").Default("^$").String() + netdevDeviceInclude = kingpin.Flag("collector.netdev.device-include", "Regexp of net devices to include (mutually exclusive to device-exclude).").String() + oldNetdevDeviceInclude = kingpin.Flag("collector.netdev.device-whitelist", "DEPRECATED: Use collector.netdev.device-include").Hidden().String() + netdevDeviceExclude = kingpin.Flag("collector.netdev.device-exclude", "Regexp of net devices to exclude (mutually exclusive to device-include).").String() + oldNetdevDeviceExclude = kingpin.Flag("collector.netdev.device-blacklist", "DEPRECATED: Use collector.netdev.device-exclude").Hidden().String() + netdevAddressInfo = kingpin.Flag("collector.netdev.address-info", "Collect address-info for every device").Bool() ) type netDevCollector struct { - subsystem string - ignoredDevicesPattern *regexp.Regexp - metricDescs map[string]*prometheus.Desc + subsystem string + deviceFilter netDevFilter + metricDescs map[string]*prometheus.Desc + logger log.Logger } +type netDevStats map[string]map[string]uint64 + func init() { registerCollector("netdev", defaultEnabled, NewNetDevCollector) } // NewNetDevCollector returns a new Collector exposing network device stats. 
-func NewNetDevCollector() (Collector, error) { - pattern := regexp.MustCompile(*netdevIgnoredDevices) +func NewNetDevCollector(logger log.Logger) (Collector, error) { + if *oldNetdevDeviceInclude != "" { + if *netdevDeviceInclude == "" { + level.Warn(logger).Log("msg", "--collector.netdev.device-whitelist is DEPRECATED and will be removed in 2.0.0, use --collector.netdev.device-include") + *netdevDeviceInclude = *oldNetdevDeviceInclude + } else { + return nil, errors.New("--collector.netdev.device-whitelist and --collector.netdev.device-include are mutually exclusive") + } + } + + if *oldNetdevDeviceExclude != "" { + if *netdevDeviceExclude == "" { + level.Warn(logger).Log("msg", "--collector.netdev.device-blacklist is DEPRECATED and will be removed in 2.0.0, use --collector.netdev.device-exclude") + *netdevDeviceExclude = *oldNetdevDeviceExclude + } else { + return nil, errors.New("--collector.netdev.device-blacklist and --collector.netdev.device-exclude are mutually exclusive") + } + } + + if *netdevDeviceExclude != "" && *netdevDeviceInclude != "" { + return nil, errors.New("device-exclude & device-include are mutually exclusive") + } + + if *netdevDeviceExclude != "" { + level.Info(logger).Log("msg", "Parsed flag --collector.netdev.device-exclude", "flag", *netdevDeviceExclude) + } + + if *netdevDeviceInclude != "" { + level.Info(logger).Log("msg", "Parsed Flag --collector.netdev.device-include", "flag", *netdevDeviceInclude) + } + return &netDevCollector{ - subsystem: "network", - ignoredDevicesPattern: pattern, - metricDescs: map[string]*prometheus.Desc{}, + subsystem: "network", + deviceFilter: newNetDevFilter(*netdevDeviceExclude, *netdevDeviceInclude), + metricDescs: map[string]*prometheus.Desc{}, + logger: logger, }, nil } func (c *netDevCollector) Update(ch chan<- prometheus.Metric) error { - netDev, err := getNetDevStats(c.ignoredDevicesPattern) + netDev, err := getNetDevStats(&c.deviceFilter, c.logger) if err != nil { - return fmt.Errorf("couldn't get 
netstats: %s", err) + return fmt.Errorf("couldn't get netstats: %w", err) } for dev, devStats := range netDev { for key, value := range devStats { desc, ok := c.metricDescs[key] if !ok { desc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, c.subsystem, key), + prometheus.BuildFQName(namespace, c.subsystem, key+"_total"), fmt.Sprintf("Network device statistic %s.", key), []string{"device"}, nil, ) c.metricDescs[key] = desc } - v, err := strconv.ParseFloat(value, 64) + ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, float64(value), dev) + } + } + if *netdevAddressInfo { + interfaces, err := net.Interfaces() + if err != nil { + return fmt.Errorf("could not get network interfaces: %w", err) + } + + desc := prometheus.NewDesc(prometheus.BuildFQName(namespace, "network_address", + "info"), "node network address by device", + []string{"device", "address", "netmask", "scope"}, nil) + + for _, addr := range getAddrsInfo(interfaces) { + ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1, + addr.device, addr.addr, addr.netmask, addr.scope) + } + } + return nil +} + +type addrInfo struct { + device string + addr string + scope string + netmask string +} + +func scope(ip net.IP) string { + if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() { + return "link-local" + } + + if ip.IsInterfaceLocalMulticast() { + return "interface-local" + } + + if ip.IsGlobalUnicast() { + return "global" + } + + return "" +} + +// getAddrsInfo returns interface name, address, scope and netmask for all interfaces. 
+func getAddrsInfo(interfaces []net.Interface) []addrInfo { + var res []addrInfo + + for _, ifs := range interfaces { + addrs, _ := ifs.Addrs() + for _, addr := range addrs { + ip, ipNet, err := net.ParseCIDR(addr.String()) if err != nil { - return fmt.Errorf("invalid value %s in netstats: %s", value, err) + continue } - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, dev) + size, _ := ipNet.Mask.Size() + + res = append(res, addrInfo{ + device: ifs.Name, + addr: ip.String(), + scope: scope(ip), + netmask: strconv.Itoa(size), + }) } } - return nil + + return res } diff --git a/collector/netdev_darwin.go b/collector/netdev_darwin.go index 9e7a14b54a..0f83a60665 100644 --- a/collector/netdev_darwin.go +++ b/collector/netdev_darwin.go @@ -11,57 +11,106 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nonetdev // +build !nonetdev package collector import ( - "errors" - "regexp" - "strconv" + "bytes" + "encoding/binary" + "fmt" + "net" - "github.com/prometheus/common/log" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "golang.org/x/sys/unix" ) -/* -#include -#include -#include -#include -#include -*/ -import "C" +func getNetDevStats(filter *netDevFilter, logger log.Logger) (netDevStats, error) { + netDev := netDevStats{} -func getNetDevStats(ignore *regexp.Regexp) (map[string]map[string]string, error) { - netDev := map[string]map[string]string{} - - var ifap, ifa *C.struct_ifaddrs - if C.getifaddrs(&ifap) == -1 { - return nil, errors.New("getifaddrs() failed") + ifs, err := net.Interfaces() + if err != nil { + return nil, fmt.Errorf("net.Interfaces() failed: %w", err) } - defer C.freeifaddrs(ifap) - for ifa = ifap; ifa != nil; ifa = ifa.ifa_next { - if ifa.ifa_addr.sa_family == C.AF_LINK { - dev := C.GoString(ifa.ifa_name) - if ignore.MatchString(dev) { - log.Debugf("Ignoring device: %s", dev) - continue - } + for _, iface := range ifs { + if 
filter.ignored(iface.Name) { + level.Debug(logger).Log("msg", "Ignoring device", "device", iface.Name) + continue + } + + ifaceData, err := getIfaceData(iface.Index) + if err != nil { + level.Debug(logger).Log("msg", "failed to load data for interface", "device", iface.Name, "err", err) + continue + } - devStats := map[string]string{} - data := (*C.struct_if_data)(ifa.ifa_data) - devStats["receive_packets"] = strconv.FormatUint(uint64(data.ifi_ipackets), 10) - devStats["transmit_packets"] = strconv.FormatUint(uint64(data.ifi_opackets), 10) - devStats["receive_errs"] = strconv.FormatUint(uint64(data.ifi_ierrors), 10) - devStats["transmit_errs"] = strconv.FormatUint(uint64(data.ifi_oerrors), 10) - devStats["receive_bytes"] = strconv.FormatUint(uint64(data.ifi_ibytes), 10) - devStats["transmit_bytes"] = strconv.FormatUint(uint64(data.ifi_obytes), 10) - devStats["receive_multicast"] = strconv.FormatUint(uint64(data.ifi_imcasts), 10) - devStats["transmit_multicast"] = strconv.FormatUint(uint64(data.ifi_omcasts), 10) - netDev[dev] = devStats + netDev[iface.Name] = map[string]uint64{ + "receive_packets": ifaceData.Data.Ipackets, + "transmit_packets": ifaceData.Data.Opackets, + "receive_errs": ifaceData.Data.Ierrors, + "transmit_errs": ifaceData.Data.Oerrors, + "receive_bytes": ifaceData.Data.Ibytes, + "transmit_bytes": ifaceData.Data.Obytes, + "receive_multicast": ifaceData.Data.Imcasts, + "transmit_multicast": ifaceData.Data.Omcasts, } } return netDev, nil } + +func getIfaceData(index int) (*ifMsghdr2, error) { + var data ifMsghdr2 + rawData, err := unix.SysctlRaw("net", unix.AF_ROUTE, 0, 0, unix.NET_RT_IFLIST2, index) + if err != nil { + return nil, err + } + err = binary.Read(bytes.NewReader(rawData), binary.LittleEndian, &data) + return &data, err +} + +type ifMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + SndLen int32 + SndMaxlen int32 + SndDrops int32 + Timer int32 + Data ifData64 +} + +type 
ifData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange unix.Timeval32 +} diff --git a/collector/netdev_filter.go b/collector/netdev_filter.go new file mode 100644 index 0000000000..e809c499e3 --- /dev/null +++ b/collector/netdev_filter.go @@ -0,0 +1,41 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "regexp" +) + +type netDevFilter struct { + ignorePattern *regexp.Regexp + acceptPattern *regexp.Regexp +} + +func newNetDevFilter(ignoredPattern, acceptPattern string) (f netDevFilter) { + if ignoredPattern != "" { + f.ignorePattern = regexp.MustCompile(ignoredPattern) + } + + if acceptPattern != "" { + f.acceptPattern = regexp.MustCompile(acceptPattern) + } + + return +} + +// ignores returns whether the device should be ignored +func (f *netDevFilter) ignored(name string) bool { + return ((f.ignorePattern != nil && f.ignorePattern.MatchString(name)) || + (f.acceptPattern != nil && !f.acceptPattern.MatchString(name))) +} diff --git a/collector/netdev_filter_test.go b/collector/netdev_filter_test.go new file mode 100644 index 0000000000..13cebbc68a --- /dev/null +++ b/collector/netdev_filter_test.go @@ -0,0 +1,43 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "testing" +) + +func TestNetDevFilter(t *testing.T) { + tests := []struct { + ignore string + accept string + name string + expectedResult bool + }{ + {"", "", "eth0", false}, + {"", "^💩0$", "💩0", false}, + {"", "^💩0$", "💩1", true}, + {"", "^💩0$", "veth0", true}, + {"^💩", "", "💩3", true}, + {"^💩", "", "veth0", false}, + } + + for _, test := range tests { + filter := newNetDevFilter(test.ignore, test.accept) + result := filter.ignored(test.name) + + if result != test.expectedResult { + t.Errorf("ignorePattern=%v acceptPattern=%v ifname=%v expected=%v result=%v", test.ignore, test.accept, test.name, test.expectedResult, result) + } + } +} diff --git a/collector/netdev_linux.go b/collector/netdev_linux.go index 82657a0bf0..e825db0855 100644 --- a/collector/netdev_linux.go +++ b/collector/netdev_linux.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nonetdev // +build !nonetdev package collector @@ -21,26 +22,29 @@ import ( "io" "os" "regexp" + "strconv" "strings" - "github.com/prometheus/common/log" + "github.com/go-kit/log" + "github.com/go-kit/log/level" ) var ( - procNetDevFieldSep = regexp.MustCompile("[ :] *") + procNetDevInterfaceRE = regexp.MustCompile(`^(.+): *(.+)$`) + procNetDevFieldSep = regexp.MustCompile(` +`) ) -func getNetDevStats(ignore *regexp.Regexp) (map[string]map[string]string, error) { +func getNetDevStats(filter *netDevFilter, logger log.Logger) (netDevStats, error) { file, err := os.Open(procFilePath("net/dev")) if err != nil { return nil, err } defer file.Close() - return parseNetDevStats(file, ignore) + return parseNetDevStats(file, filter, logger) } -func parseNetDevStats(r io.Reader, ignore *regexp.Regexp) (map[string]map[string]string, error) { +func parseNetDevStats(r io.Reader, filter *netDevFilter, logger log.Logger) (netDevStats, error) { scanner := bufio.NewScanner(r) scanner.Scan() // skip first header 
scanner.Scan() @@ -50,25 +54,49 @@ func parseNetDevStats(r io.Reader, ignore *regexp.Regexp) (map[string]map[string scanner.Text()) } - header := strings.Fields(parts[1]) - netDev := map[string]map[string]string{} + receiveHeader := strings.Fields(parts[1]) + transmitHeader := strings.Fields(parts[2]) + headerLength := len(receiveHeader) + len(transmitHeader) + + netDev := netDevStats{} for scanner.Scan() { line := strings.TrimLeft(scanner.Text(), " ") - parts := procNetDevFieldSep.Split(line, -1) - if len(parts) != 2*len(header)+1 { - return nil, fmt.Errorf("invalid line in net/dev: %s", scanner.Text()) + parts := procNetDevInterfaceRE.FindStringSubmatch(line) + if len(parts) != 3 { + return nil, fmt.Errorf("couldn't get interface name, invalid line in net/dev: %q", line) } - dev := parts[0][:len(parts[0])] - if ignore.MatchString(dev) { - log.Debugf("Ignoring device: %s", dev) + dev := parts[1] + if filter.ignored(dev) { + level.Debug(logger).Log("msg", "Ignoring device", "device", dev) continue } - netDev[dev] = map[string]string{} - for i, v := range header { - netDev[dev]["receive_"+v] = parts[i+1] - netDev[dev]["transmit_"+v] = parts[i+1+len(header)] + + values := procNetDevFieldSep.Split(strings.TrimLeft(parts[2], " "), -1) + if len(values) != headerLength { + return nil, fmt.Errorf("couldn't get values, invalid line in net/dev: %q", parts[2]) + } + + devStats := map[string]uint64{} + addStats := func(key, value string) { + v, err := strconv.ParseUint(value, 0, 64) + if err != nil { + level.Debug(logger).Log("msg", "invalid value in netstats", "key", key, "value", value, "err", err) + return + } + + devStats[key] = v + } + + for i := 0; i < len(receiveHeader); i++ { + addStats("receive_"+receiveHeader[i], values[i]) } + + for i := 0; i < len(transmitHeader); i++ { + addStats("transmit_"+transmitHeader[i], values[i+len(receiveHeader)]) + } + + netDev[dev] = devStats } return netDev, scanner.Err() } diff --git a/collector/netdev_linux_test.go 
b/collector/netdev_linux_test.go index 2adc12de32..8e227a2463 100644 --- a/collector/netdev_linux_test.go +++ b/collector/netdev_linux_test.go @@ -15,39 +15,71 @@ package collector import ( "os" - "regexp" "testing" + + "github.com/go-kit/log" ) -func TestNetDevStats(t *testing.T) { +func TestNetDevStatsIgnore(t *testing.T) { file, err := os.Open("fixtures/proc/net/dev") if err != nil { t.Fatal(err) } defer file.Close() - netStats, err := parseNetDevStats(file, regexp.MustCompile("^veth")) + filter := newNetDevFilter("^veth", "") + + netStats, err := parseNetDevStats(file, &filter, log.NewNopLogger()) if err != nil { t.Fatal(err) } - if want, got := "10437182923", netStats["wlan0"]["receive_bytes"]; want != got { - t.Errorf("want netstat wlan0 bytes %s, got %s", want, got) + if want, got := uint64(10437182923), netStats["wlan0"]["receive_bytes"]; want != got { + t.Errorf("want netstat wlan0 bytes %v, got %v", want, got) } - if want, got := "68210035552", netStats["eth0"]["receive_bytes"]; want != got { - t.Errorf("want netstat eth0 bytes %s, got %s", want, got) + if want, got := uint64(68210035552), netStats["eth0"]["receive_bytes"]; want != got { + t.Errorf("want netstat eth0 bytes %v, got %v", want, got) } - if want, got := "934", netStats["tun0"]["transmit_packets"]; want != got { - t.Errorf("want netstat tun0 packets %s, got %s", want, got) + if want, got := uint64(934), netStats["tun0"]["transmit_packets"]; want != got { + t.Errorf("want netstat tun0 packets %v, got %v", want, got) } - if want, got := 6, len(netStats); want != got { + if want, got := 9, len(netStats); want != got { t.Errorf("want count of devices to be %d, got %d", want, got) } if _, ok := netStats["veth4B09XN"]["transmit_bytes"]; ok { t.Error("want fixture interface veth4B09XN to not exist, but it does") } + + if want, got := uint64(0), netStats["ibr10:30"]["receive_fifo"]; want != got { + t.Error("want fixture interface ibr10:30 to exist, but it does not") + } + + if want, got := uint64(72), 
netStats["💩0"]["receive_multicast"]; want != got { + t.Error("want fixture interface 💩0 to exist, but it does not") + } +} + +func TestNetDevStatsAccept(t *testing.T) { + file, err := os.Open("fixtures/proc/net/dev") + if err != nil { + t.Fatal(err) + } + defer file.Close() + + filter := newNetDevFilter("", "^💩0$") + netStats, err := parseNetDevStats(file, &filter, log.NewNopLogger()) + if err != nil { + t.Fatal(err) + } + + if want, got := 1, len(netStats); want != got { + t.Errorf("want count of devices to be %d, got %d", want, got) + } + if want, got := uint64(72), netStats["💩0"]["receive_multicast"]; want != got { + t.Error("want fixture interface 💩0 to exist, but it does not") + } } diff --git a/collector/netdev_openbsd.go b/collector/netdev_openbsd.go index 0e9c900987..2be10a3d6c 100644 --- a/collector/netdev_openbsd.go +++ b/collector/netdev_openbsd.go @@ -11,16 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !nonetdev +//go:build openbsd && !amd64 && !nonetdev +// +build openbsd,!amd64,!nonetdev package collector import ( "errors" - "regexp" - "strconv" - "github.com/prometheus/common/log" + "github.com/go-kit/log" + "github.com/go-kit/log/level" ) /* @@ -31,8 +31,8 @@ import ( */ import "C" -func getNetDevStats(ignore *regexp.Regexp) (map[string]map[string]string, error) { - netDev := map[string]map[string]string{} +func getNetDevStats(filter *netDevFilter, logger log.Logger) (netDevStats, error) { + netDev := netDevStats{} var ifap, ifa *C.struct_ifaddrs if C.getifaddrs(&ifap) == -1 { @@ -41,26 +41,28 @@ func getNetDevStats(ignore *regexp.Regexp) (map[string]map[string]string, error) defer C.freeifaddrs(ifap) for ifa = ifap; ifa != nil; ifa = ifa.ifa_next { - if ifa.ifa_addr.sa_family == C.AF_LINK { - dev := C.GoString(ifa.ifa_name) - if ignore.MatchString(dev) { - log.Debugf("Ignoring device: %s", dev) - continue - } + if ifa.ifa_addr.sa_family != C.AF_LINK { + continue + } + + 
dev := C.GoString(ifa.ifa_name) + if filter.ignored(dev) { + level.Debug(logger).Log("msg", "Ignoring device", "device", dev) + continue + } - devStats := map[string]string{} - data := (*C.struct_if_data)(ifa.ifa_data) + data := (*C.struct_if_data)(ifa.ifa_data) - devStats["receive_packets"] = strconv.Itoa(int(data.ifi_ipackets)) - devStats["transmit_packets"] = strconv.Itoa(int(data.ifi_opackets)) - devStats["receive_errs"] = strconv.Itoa(int(data.ifi_ierrors)) - devStats["transmit_errs"] = strconv.Itoa(int(data.ifi_oerrors)) - devStats["receive_bytes"] = strconv.Itoa(int(data.ifi_ibytes)) - devStats["transmit_bytes"] = strconv.Itoa(int(data.ifi_obytes)) - devStats["receive_multicast"] = strconv.Itoa(int(data.ifi_imcasts)) - devStats["transmit_multicast"] = strconv.Itoa(int(data.ifi_omcasts)) - devStats["receive_drop"] = strconv.Itoa(int(data.ifi_iqdrops)) - netDev[dev] = devStats + netDev[dev] = map[string]uint64{ + "receive_packets": uint64(data.ifi_ipackets), + "transmit_packets": uint64(data.ifi_opackets), + "receive_errs": uint64(data.ifi_ierrors), + "transmit_errs": uint64(data.ifi_oerrors), + "receive_bytes": uint64(data.ifi_ibytes), + "transmit_bytes": uint64(data.ifi_obytes), + "receive_multicast": uint64(data.ifi_imcasts), + "transmit_multicast": uint64(data.ifi_omcasts), + "receive_drop": uint64(data.ifi_iqdrops), } } diff --git a/collector/netdev_openbsd_amd64.go b/collector/netdev_openbsd_amd64.go new file mode 100644 index 0000000000..8b2bfa0e73 --- /dev/null +++ b/collector/netdev_openbsd_amd64.go @@ -0,0 +1,74 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nonetdev +// +build !nonetdev + +package collector + +import ( + "github.com/go-kit/log" + "github.com/go-kit/log/level" + + "golang.org/x/sys/unix" + "unsafe" +) + +func getNetDevStats(filter *netDevFilter, logger log.Logger) (netDevStats, error) { + netDev := netDevStats{} + + mib := [6]_C_int{unix.CTL_NET, unix.AF_ROUTE, 0, 0, unix.NET_RT_IFLIST, 0} + buf, err := sysctl(mib[:]) + if err != nil { + return nil, err + } + n := uintptr(len(buf)) + index := uintptr(unsafe.Pointer(&buf[0])) + next := uintptr(0) + + var rtm *unix.RtMsghdr + + for next = index; next < (index + n); next += uintptr(rtm.Msglen) { + rtm = (*unix.RtMsghdr)(unsafe.Pointer(next)) + if rtm.Version != unix.RTM_VERSION || rtm.Type != unix.RTM_IFINFO { + continue + } + ifm := (*unix.IfMsghdr)(unsafe.Pointer(next)) + if ifm.Addrs&unix.RTA_IFP == 0 { + continue + } + dl := (*unix.RawSockaddrDatalink)(unsafe.Pointer(next + uintptr(rtm.Hdrlen))) + if dl.Family != unix.AF_LINK { + continue + } + data := ifm.Data + dev := int8ToString(dl.Data[:dl.Nlen]) + if filter.ignored(dev) { + level.Debug(logger).Log("msg", "Ignoring device", "device", dev) + continue + } + + netDev[dev] = map[string]uint64{ + "receive_packets": data.Ipackets, + "transmit_packets": data.Opackets, + "receive_errs": data.Ierrors, + "transmit_errs": data.Oerrors, + "receive_bytes": data.Ibytes, + "transmit_bytes": data.Obytes, + "receive_multicast": data.Imcasts, + "transmit_multicast": data.Omcasts, + "receive_drop": data.Iqdrops, + } + } + return netDev, nil +} diff --git a/collector/netstat_linux.go 
b/collector/netstat_linux.go index f16c8cf60a..a034ab6415 100644 --- a/collector/netstat_linux.go +++ b/collector/netstat_linux.go @@ -11,26 +11,38 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nonetstat // +build !nonetstat package collector import ( "bufio" + "errors" "fmt" "io" "os" + "regexp" "strconv" "strings" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" + "gopkg.in/alecthomas/kingpin.v2" ) const ( netStatsSubsystem = "netstat" ) -type netStatCollector struct{} +var ( + netStatFields = kingpin.Flag("collector.netstat.fields", "Regexp of fields to return for netstat collector.").Default("^(.*_(InErrors|InErrs)|Ip_Forwarding|Ip(6|Ext)_(InOctets|OutOctets)|Icmp6?_(InMsgs|OutMsgs)|TcpExt_(Listen.*|Syncookies.*|TCPSynRetrans|TCPTimeouts)|Tcp_(ActiveOpens|InSegs|OutSegs|OutRsts|PassiveOpens|RetransSegs|CurrEstab)|Udp6?_(InDatagrams|OutDatagrams|NoPorts|RcvbufErrors|SndbufErrors))$").String() +) + +type netStatCollector struct { + fieldPattern *regexp.Regexp + logger log.Logger +} func init() { registerCollector("netstat", defaultEnabled, NewNetStatCollector) @@ -38,22 +50,26 @@ func init() { // NewNetStatCollector takes and returns // a new Collector exposing network stats. 
-func NewNetStatCollector() (Collector, error) { - return &netStatCollector{}, nil +func NewNetStatCollector(logger log.Logger) (Collector, error) { + pattern := regexp.MustCompile(*netStatFields) + return &netStatCollector{ + fieldPattern: pattern, + logger: logger, + }, nil } func (c *netStatCollector) Update(ch chan<- prometheus.Metric) error { netStats, err := getNetStats(procFilePath("net/netstat")) if err != nil { - return fmt.Errorf("couldn't get netstats: %s", err) + return fmt.Errorf("couldn't get netstats: %w", err) } snmpStats, err := getNetStats(procFilePath("net/snmp")) if err != nil { - return fmt.Errorf("couldn't get SNMP stats: %s", err) + return fmt.Errorf("couldn't get SNMP stats: %w", err) } snmp6Stats, err := getSNMP6Stats(procFilePath("net/snmp6")) if err != nil { - return fmt.Errorf("couldn't get SNMP6 stats: %s", err) + return fmt.Errorf("couldn't get SNMP6 stats: %w", err) } // Merge the results of snmpStats into netStats (collisions are possible, but // we know that the keys are always unique for the given use case). @@ -68,7 +84,10 @@ func (c *netStatCollector) Update(ch chan<- prometheus.Metric) error { key := protocol + "_" + name v, err := strconv.ParseFloat(value, 64) if err != nil { - return fmt.Errorf("invalid value %s in netstats: %s", value, err) + return fmt.Errorf("invalid value %s in netstats: %w", value, err) + } + if !c.fieldPattern.MatchString(key) { + continue } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( @@ -121,6 +140,12 @@ func parseNetStats(r io.Reader, fileName string) (map[string]map[string]string, func getSNMP6Stats(fileName string) (map[string]map[string]string, error) { file, err := os.Open(fileName) if err != nil { + // On systems with IPv6 disabled, this file won't exist. + // Do nothing. 
+ if errors.Is(err, os.ErrNotExist) { + return nil, nil + } + return nil, err } defer file.Close() diff --git a/collector/netstat_linux_test.go b/collector/netstat_linux_test.go index fde77a08dc..a27382b416 100644 --- a/collector/netstat_linux_test.go +++ b/collector/netstat_linux_test.go @@ -20,6 +20,7 @@ import ( func TestNetStats(t *testing.T) { testNetStats(t, "fixtures/proc/net/netstat") + testSNMPStats(t, "fixtures/proc/net/snmp") testSNMP6Stats(t, "fixtures/proc/net/snmp6") } @@ -44,6 +45,27 @@ func testNetStats(t *testing.T, fileName string) { } } +func testSNMPStats(t *testing.T, fileName string) { + file, err := os.Open(fileName) + if err != nil { + t.Fatal(err) + } + defer file.Close() + + snmpStats, err := parseNetStats(file, fileName) + if err != nil { + t.Fatal(err) + } + + if want, got := "9", snmpStats["Udp"]["RcvbufErrors"]; want != got { + t.Errorf("want netstat Udp RcvbufErrors %s, got %s", want, got) + } + + if want, got := "8", snmpStats["Udp"]["SndbufErrors"]; want != got { + t.Errorf("want netstat Udp SndbufErrors %s, got %s", want, got) + } +} + func testSNMP6Stats(t *testing.T, fileName string) { file, err := os.Open(fileName) if err != nil { @@ -63,4 +85,12 @@ func testSNMP6Stats(t *testing.T, fileName string) { if want, got := "8", snmp6Stats["Icmp6"]["OutMsgs"]; want != got { t.Errorf("want netstat ICPM6 OutMsgs %s, got %s", want, got) } + + if want, got := "9", snmp6Stats["Udp6"]["RcvbufErrors"]; want != got { + t.Errorf("want netstat Udp6 RcvbufErrors %s, got %s", want, got) + } + + if want, got := "8", snmp6Stats["Udp6"]["SndbufErrors"]; want != got { + t.Errorf("want netstat Udp6 SndbufErrors %s, got %s", want, got) + } } diff --git a/collector/network_route_linux.go b/collector/network_route_linux.go new file mode 100644 index 0000000000..c77e175109 --- /dev/null +++ b/collector/network_route_linux.go @@ -0,0 +1,204 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// 
you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nonetworkroute +// +build !nonetworkroute + +package collector + +import ( + "fmt" + "golang.org/x/sys/unix" + "net" + "strconv" + + "github.com/go-kit/log" + "github.com/jsimonetti/rtnetlink" + "github.com/prometheus/client_golang/prometheus" +) + +type networkRouteCollector struct { + routeInfoDesc *prometheus.Desc + routesDesc *prometheus.Desc + logger log.Logger +} + +func init() { + registerCollector("network_route", defaultDisabled, NewNetworkRouteCollector) +} + +// NewNetworkRouteCollector returns a new Collector exposing systemd statistics. 
+func NewNetworkRouteCollector(logger log.Logger) (Collector, error) { + const subsystem = "network" + + routeInfoDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "route_info"), + "network routing table information", []string{"device", "src", "dest", "gw", "priority", "proto", "weight"}, nil, + ) + routesDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "routes"), + "network routes by interface", []string{"device"}, nil, + ) + + return &networkRouteCollector{ + routeInfoDesc: routeInfoDesc, + routesDesc: routesDesc, + logger: logger, + }, nil +} + +func (n networkRouteCollector) Update(ch chan<- prometheus.Metric) error { + deviceRoutes := make(map[string]int) + + conn, err := rtnetlink.Dial(nil) + if err != nil { + return fmt.Errorf("couldn't connect rtnetlink: %w", err) + } + defer conn.Close() + + links, err := conn.Link.List() + if err != nil { + return fmt.Errorf("couldn't get links: %w", err) + } + + routes, err := conn.Route.List() + if err != nil { + return fmt.Errorf("couldn't get routes: %w", err) + } + + for _, route := range routes { + if route.Type != unix.RTA_DST { + continue + } + if len(route.Attributes.Multipath) != 0 { + for _, nextHop := range route.Attributes.Multipath { + ifName := "" + for _, link := range links { + if link.Index == nextHop.Hop.IfIndex { + ifName = link.Attributes.Name + break + } + } + + labels := []string{ + ifName, // if + networkRouteIPToString(route.Attributes.Src), // src + networkRouteIPWithPrefixToString(route.Attributes.Dst, route.DstLength), // dest + networkRouteIPToString(nextHop.Gateway), // gw + strconv.FormatUint(uint64(route.Attributes.Priority), 10), // priority(metrics) + networkRouteProtocolToString(route.Protocol), // proto + strconv.Itoa(int(nextHop.Hop.Hops) + 1), // weight + } + ch <- prometheus.MustNewConstMetric(n.routeInfoDesc, prometheus.GaugeValue, 1, labels...) 
+ deviceRoutes[ifName]++ + } + } else { + ifName := "" + for _, link := range links { + if link.Index == route.Attributes.OutIface { + ifName = link.Attributes.Name + break + } + } + + labels := []string{ + ifName, // if + networkRouteIPToString(route.Attributes.Src), // src + networkRouteIPWithPrefixToString(route.Attributes.Dst, route.DstLength), // dest + networkRouteIPToString(route.Attributes.Gateway), // gw + strconv.FormatUint(uint64(route.Attributes.Priority), 10), // priority(metrics) + networkRouteProtocolToString(route.Protocol), // proto + "", // weight + } + ch <- prometheus.MustNewConstMetric(n.routeInfoDesc, prometheus.GaugeValue, 1, labels...) + deviceRoutes[ifName]++ + } + } + + for dev, total := range deviceRoutes { + ch <- prometheus.MustNewConstMetric(n.routesDesc, prometheus.GaugeValue, float64(total), dev) + } + + return nil +} + +func networkRouteIPWithPrefixToString(ip net.IP, len uint8) string { + if len == 0 { + return "default" + } + iplen := net.IPv4len + if ip.To4() == nil { + iplen = net.IPv6len + } + network := &net.IPNet{ + IP: ip, + Mask: net.CIDRMask(int(len), iplen*8), + } + return network.String() +} + +func networkRouteIPToString(ip net.IP) string { + if len(ip) == 0 { + return "" + } + return ip.String() +} + +func networkRouteProtocolToString(protocol uint8) string { + // from linux kernel 'include/uapi/linux/rtnetlink.h' + switch protocol { + case 0: + return "unspec" + case 1: + return "redirect" + case 2: + return "kernel" + case 3: + return "boot" + case 4: + return "static" + case 8: + return "gated" + case 9: + return "ra" + case 10: + return "mrt" + case 11: + return "zebra" + case 12: + return "bird" + case 13: + return "dnrouted" + case 14: + return "xorp" + case 15: + return "ntk" + case 16: + return "dhcp" + case 17: + return "mrouted" + case 42: + return "babel" + case 186: + return "bgp" + case 187: + return "isis" + case 188: + return "ospf" + case 189: + return "rip" + case 192: + return "eigrp" + } + return 
"unknown" +} diff --git a/collector/nfs_linux.go b/collector/nfs_linux.go index d1d75d41ba..e08acdbad7 100644 --- a/collector/nfs_linux.go +++ b/collector/nfs_linux.go @@ -11,167 +11,165 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nonfs +// +build !nonfs + package collector import ( "errors" - "io/ioutil" + "fmt" "os" - "regexp" - "strconv" - "strings" + "reflect" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" + "github.com/prometheus/procfs/nfs" ) -var ( - netLineRE = regexp.MustCompile(`^net \d+ (\d+) (\d+) (\d+)$`) - rpcLineRE = regexp.MustCompile(`^rpc (\d+) (\d+) (\d+)$`) - procLineRE = regexp.MustCompile(`^proc(\d+) \d+ (\d+( \d+)*)$`) - - nfsProcedures = map[string][]string{ - "2": { - "null", "getattr", "setattr", "root", "lookup", - "readlink", "read", "writecache", "write", "create", - "remove", "rename", "link", "symlink", "mkdir", - "rmdir", "readdir", "statfs", - }, - "3": { - "null", "getattr", "setattr", "lookup", "access", - "readlink", "read", "write", "create", "mkdir", - "symlink", "mknod", "remove", "rmdir", "rename", - "link", "readdir", "readdirplus", "fsstat", "fsinfo", - "pathconf", "commit", - }, - "4": { - "null", "read", "write", "commit", "open", - "open_confirm", "open_noattr", "open_downgrade", - "close", "setattr", "fsinfo", "renew", "setclientid", - "setclientid_confirm", "lock", "lockt", "locku", - "access", "getattr", "lookup", "lookup_root", "remove", - "rename", "link", "symlink", "create", "pathconf", - "statfs", "readlink", "readdir", "server_caps", - "delegreturn", "getacl", "setacl", "fs_locations", - "release_lockowner", "secinfo", "fsid_present", - "exchange_id", "create_session", "destroy_session", - "sequence", "get_lease_time", "reclaim_complete", - "layoutget", "getdeviceinfo", "layoutcommit", - "layoutreturn", "secinfo_no_name", "test_stateid", - 
"free_stateid", "getdevicelist", - "bind_conn_to_session", "destroy_clientid", "seek", - "allocate", "deallocate", "layoutstats", "clone", - "copy", - }, - } - - nfsNetReadsDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "nfs", "net_reads"), - "Number of reads at the network layer.", - []string{"protocol"}, - nil, - ) - nfsNetConnectionsDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "nfs", "net_connections"), - "Number of connections at the network layer.", - []string{"protocol"}, - nil, - ) - - nfsRPCOperationsDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "nfs", "rpc_operations"), - "Number of RPCs performed.", - nil, - nil, - ) - nfsRPCRetransmissionsDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "nfs", "rpc_retransmissions"), - "Number of RPC transmissions performed.", - nil, - nil, - ) - nfsRPCAuthenticationRefreshesDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "nfs", "rpc_authentication_refreshes"), - "Number of RPC authentication refreshes performed.", - nil, - nil, - ) - - nfsProceduresDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "nfs", "procedures"), - "Number of NFS procedures invoked.", - []string{"version", "procedure"}, - nil, - ) +const ( + nfsSubsystem = "nfs" ) -type nfsCollector struct{} +type nfsCollector struct { + fs nfs.FS + nfsNetReadsDesc *prometheus.Desc + nfsNetConnectionsDesc *prometheus.Desc + nfsRPCOperationsDesc *prometheus.Desc + nfsRPCRetransmissionsDesc *prometheus.Desc + nfsRPCAuthenticationRefreshesDesc *prometheus.Desc + nfsProceduresDesc *prometheus.Desc + logger log.Logger +} func init() { - registerCollector("nfs", defaultDisabled, NewNfsCollector) + registerCollector("nfs", defaultEnabled, NewNfsCollector) } // NewNfsCollector returns a new Collector exposing NFS statistics. 
-func NewNfsCollector() (Collector, error) { - return &nfsCollector{}, nil +func NewNfsCollector(logger log.Logger) (Collector, error) { + fs, err := nfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + + return &nfsCollector{ + fs: fs, + nfsNetReadsDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsSubsystem, "packets_total"), + "Total NFSd network packets (sent+received) by protocol type.", + []string{"protocol"}, + nil, + ), + nfsNetConnectionsDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsSubsystem, "connections_total"), + "Total number of NFSd TCP connections.", + nil, + nil, + ), + nfsRPCOperationsDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsSubsystem, "rpcs_total"), + "Total number of RPCs performed.", + nil, + nil, + ), + nfsRPCRetransmissionsDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsSubsystem, "rpc_retransmissions_total"), + "Number of RPC transmissions performed.", + nil, + nil, + ), + nfsRPCAuthenticationRefreshesDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsSubsystem, "rpc_authentication_refreshes_total"), + "Number of RPC authentication refreshes performed.", + nil, + nil, + ), + nfsProceduresDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsSubsystem, "requests_total"), + "Number of NFS procedures invoked.", + []string{"proto", "method"}, + nil, + ), + logger: logger, + }, nil } func (c *nfsCollector) Update(ch chan<- prometheus.Metric) error { - statsFile := procFilePath("net/rpc/nfs") - content, err := ioutil.ReadFile(statsFile) + stats, err := c.fs.ClientRPCStats() if err != nil { - if os.IsNotExist(err) { - log.Debugf("Not collecting NFS statistics, as %q does not exist", statsFile) - return nil + if errors.Is(err, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "Not collecting NFS metrics", "err", err) + return ErrNoData } - return err + return fmt.Errorf("failed to retrieve nfs 
stats: %w", err) } - for _, line := range strings.Split(string(content), "\n") { - if fields := netLineRE.FindStringSubmatch(line); fields != nil { - value, _ := strconv.ParseFloat(fields[1], 64) - ch <- prometheus.MustNewConstMetric( - nfsNetReadsDesc, prometheus.CounterValue, - value, "udp") - - value, _ = strconv.ParseFloat(fields[2], 64) - ch <- prometheus.MustNewConstMetric( - nfsNetReadsDesc, prometheus.CounterValue, - value, "tcp") - - value, _ = strconv.ParseFloat(fields[3], 64) - ch <- prometheus.MustNewConstMetric( - nfsNetConnectionsDesc, prometheus.CounterValue, - value, "tcp") - } else if fields := rpcLineRE.FindStringSubmatch(line); fields != nil { - value, _ := strconv.ParseFloat(fields[1], 64) - ch <- prometheus.MustNewConstMetric( - nfsRPCOperationsDesc, - prometheus.CounterValue, value) - - value, _ = strconv.ParseFloat(fields[2], 64) - ch <- prometheus.MustNewConstMetric( - nfsRPCRetransmissionsDesc, - prometheus.CounterValue, value) - - value, _ = strconv.ParseFloat(fields[3], 64) - ch <- prometheus.MustNewConstMetric( - nfsRPCAuthenticationRefreshesDesc, - prometheus.CounterValue, value) - } else if fields := procLineRE.FindStringSubmatch(line); fields != nil { - version := fields[1] - for procedure, count := range strings.Split(fields[2], " ") { - value, _ := strconv.ParseFloat(count, 64) - ch <- prometheus.MustNewConstMetric( - nfsProceduresDesc, - prometheus.CounterValue, - value, - version, - nfsProcedures[version][procedure]) - } - } else if line != "" { - return errors.New("Failed to parse line: " + line) - } - } + c.updateNFSNetworkStats(ch, &stats.Network) + c.updateNFSClientRPCStats(ch, &stats.ClientRPC) + c.updateNFSRequestsv2Stats(ch, &stats.V2Stats) + c.updateNFSRequestsv3Stats(ch, &stats.V3Stats) + c.updateNFSRequestsv4Stats(ch, &stats.ClientV4Stats) + return nil } + +// updateNFSNetworkStats collects statistics for network packets/connections. 
+func (c *nfsCollector) updateNFSNetworkStats(ch chan<- prometheus.Metric, s *nfs.Network) { + ch <- prometheus.MustNewConstMetric(c.nfsNetReadsDesc, prometheus.CounterValue, + float64(s.UDPCount), "udp") + ch <- prometheus.MustNewConstMetric(c.nfsNetReadsDesc, prometheus.CounterValue, + float64(s.TCPCount), "tcp") + ch <- prometheus.MustNewConstMetric(c.nfsNetConnectionsDesc, prometheus.CounterValue, + float64(s.TCPConnect)) +} + +// updateNFSClientRPCStats collects statistics for kernel server RPCs. +func (c *nfsCollector) updateNFSClientRPCStats(ch chan<- prometheus.Metric, s *nfs.ClientRPC) { + ch <- prometheus.MustNewConstMetric(c.nfsRPCOperationsDesc, prometheus.CounterValue, + float64(s.RPCCount)) + ch <- prometheus.MustNewConstMetric(c.nfsRPCRetransmissionsDesc, prometheus.CounterValue, + float64(s.Retransmissions)) + ch <- prometheus.MustNewConstMetric(c.nfsRPCAuthenticationRefreshesDesc, prometheus.CounterValue, + float64(s.AuthRefreshes)) +} + +// updateNFSRequestsv2Stats collects statistics for NFSv2 requests. +func (c *nfsCollector) updateNFSRequestsv2Stats(ch chan<- prometheus.Metric, s *nfs.V2Stats) { + const proto = "2" + + v := reflect.ValueOf(s).Elem() + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + + ch <- prometheus.MustNewConstMetric(c.nfsProceduresDesc, prometheus.CounterValue, + float64(field.Uint()), proto, v.Type().Field(i).Name) + } +} + +// updateNFSRequestsv3Stats collects statistics for NFSv3 requests. +func (c *nfsCollector) updateNFSRequestsv3Stats(ch chan<- prometheus.Metric, s *nfs.V3Stats) { + const proto = "3" + + v := reflect.ValueOf(s).Elem() + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + + ch <- prometheus.MustNewConstMetric(c.nfsProceduresDesc, prometheus.CounterValue, + float64(field.Uint()), proto, v.Type().Field(i).Name) + } +} + +// updateNFSRequestsv4Stats collects statistics for NFSv4 requests. 
+func (c *nfsCollector) updateNFSRequestsv4Stats(ch chan<- prometheus.Metric, s *nfs.ClientV4Stats) { + const proto = "4" + + v := reflect.ValueOf(s).Elem() + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + + ch <- prometheus.MustNewConstMetric(c.nfsProceduresDesc, prometheus.CounterValue, + float64(field.Uint()), proto, v.Type().Field(i).Name) + } +} diff --git a/collector/nfsd_linux.go b/collector/nfsd_linux.go new file mode 100644 index 0000000000..faa6c960b3 --- /dev/null +++ b/collector/nfsd_linux.go @@ -0,0 +1,404 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nonfsd +// +build !nonfsd + +package collector + +import ( + "errors" + "fmt" + "os" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/nfs" +) + +// A nfsdCollector is a Collector which gathers metrics from /proc/net/rpc/nfsd. +// See: https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ +type nfsdCollector struct { + fs nfs.FS + requestsDesc *prometheus.Desc + logger log.Logger +} + +func init() { + registerCollector("nfsd", defaultEnabled, NewNFSdCollector) +} + +const ( + nfsdSubsystem = "nfsd" +) + +// NewNFSdCollector returns a new Collector exposing /proc/net/rpc/nfsd statistics. 
+func NewNFSdCollector(logger log.Logger) (Collector, error) { + fs, err := nfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + + return &nfsdCollector{ + fs: fs, + requestsDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsdSubsystem, "requests_total"), + "Total number NFSd Requests by method and protocol.", + []string{"proto", "method"}, nil, + ), + logger: logger, + }, nil +} + +// Update implements Collector. +func (c *nfsdCollector) Update(ch chan<- prometheus.Metric) error { + stats, err := c.fs.ServerRPCStats() + if err != nil { + if errors.Is(err, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "Not collecting NFSd metrics", "err", err) + return ErrNoData + } + return fmt.Errorf("failed to retrieve nfsd stats: %w", err) + } + + c.updateNFSdReplyCacheStats(ch, &stats.ReplyCache) + c.updateNFSdFileHandlesStats(ch, &stats.FileHandles) + c.updateNFSdInputOutputStats(ch, &stats.InputOutput) + c.updateNFSdThreadsStats(ch, &stats.Threads) + c.updateNFSdReadAheadCacheStats(ch, &stats.ReadAheadCache) + c.updateNFSdNetworkStats(ch, &stats.Network) + c.updateNFSdServerRPCStats(ch, &stats.ServerRPC) + c.updateNFSdRequestsv2Stats(ch, &stats.V2Stats) + c.updateNFSdRequestsv3Stats(ch, &stats.V3Stats) + c.updateNFSdRequestsv4Stats(ch, &stats.V4Ops) + + return nil +} + +// updateNFSdReplyCacheStats collects statistics for the reply cache. 
+func (c *nfsdCollector) updateNFSdReplyCacheStats(ch chan<- prometheus.Metric, s *nfs.ReplyCache) { + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsdSubsystem, "reply_cache_hits_total"), + "Total number of NFSd Reply Cache hits (client lost server response).", + nil, + nil, + ), + prometheus.CounterValue, + float64(s.Hits)) + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsdSubsystem, "reply_cache_misses_total"), + "Total number of NFSd Reply Cache an operation that requires caching (idempotent).", + nil, + nil, + ), + prometheus.CounterValue, + float64(s.Misses)) + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsdSubsystem, "reply_cache_nocache_total"), + "Total number of NFSd Reply Cache non-idempotent operations (rename/delete/…).", + nil, + nil, + ), + prometheus.CounterValue, + float64(s.NoCache)) +} + +// updateNFSdFileHandlesStats collects statistics for the file handles. +func (c *nfsdCollector) updateNFSdFileHandlesStats(ch chan<- prometheus.Metric, s *nfs.FileHandles) { + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsdSubsystem, "file_handles_stale_total"), + "Total number of NFSd stale file handles", + nil, + nil, + ), + prometheus.CounterValue, + float64(s.Stale)) + // NOTE: Other FileHandles entries are unused in the kernel. +} + +// updateNFSdInputOutputStats collects statistics for the bytes in/out. 
+func (c *nfsdCollector) updateNFSdInputOutputStats(ch chan<- prometheus.Metric, s *nfs.InputOutput) { + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsdSubsystem, "disk_bytes_read_total"), + "Total NFSd bytes read.", + nil, + nil, + ), + prometheus.CounterValue, + float64(s.Read)) + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsdSubsystem, "disk_bytes_written_total"), + "Total NFSd bytes written.", + nil, + nil, + ), + prometheus.CounterValue, + float64(s.Write)) +} + +// updateNFSdThreadsStats collects statistics for kernel server threads. +func (c *nfsdCollector) updateNFSdThreadsStats(ch chan<- prometheus.Metric, s *nfs.Threads) { + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsdSubsystem, "server_threads"), + "Total number of NFSd kernel threads that are running.", + nil, + nil, + ), + prometheus.GaugeValue, + float64(s.Threads)) +} + +// updateNFSdReadAheadCacheStats collects statistics for the read ahead cache. +func (c *nfsdCollector) updateNFSdReadAheadCacheStats(ch chan<- prometheus.Metric, s *nfs.ReadAheadCache) { + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsdSubsystem, "read_ahead_cache_size_blocks"), + "How large the read ahead cache is in blocks.", + nil, + nil, + ), + prometheus.GaugeValue, + float64(s.CacheSize)) + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsdSubsystem, "read_ahead_cache_not_found_total"), + "Total number of NFSd read ahead cache not found.", + nil, + nil, + ), + prometheus.CounterValue, + float64(s.NotFound)) +} + +// updateNFSdNetworkStats collects statistics for network packets/connections. 
+func (c *nfsdCollector) updateNFSdNetworkStats(ch chan<- prometheus.Metric, s *nfs.Network) { + packetDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsdSubsystem, "packets_total"), + "Total NFSd network packets (sent+received) by protocol type.", + []string{"proto"}, + nil, + ) + ch <- prometheus.MustNewConstMetric( + packetDesc, + prometheus.CounterValue, + float64(s.UDPCount), "udp") + ch <- prometheus.MustNewConstMetric( + packetDesc, + prometheus.CounterValue, + float64(s.TCPCount), "tcp") + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsdSubsystem, "connections_total"), + "Total number of NFSd TCP connections.", + nil, + nil, + ), + prometheus.CounterValue, + float64(s.TCPConnect)) +} + +// updateNFSdServerRPCStats collects statistics for kernel server RPCs. +func (c *nfsdCollector) updateNFSdServerRPCStats(ch chan<- prometheus.Metric, s *nfs.ServerRPC) { + badRPCDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsdSubsystem, "rpc_errors_total"), + "Total number of NFSd RPC errors by error type.", + []string{"error"}, + nil, + ) + ch <- prometheus.MustNewConstMetric( + badRPCDesc, + prometheus.CounterValue, + float64(s.BadFmt), "fmt") + ch <- prometheus.MustNewConstMetric( + badRPCDesc, + prometheus.CounterValue, + float64(s.BadAuth), "auth") + ch <- prometheus.MustNewConstMetric( + badRPCDesc, + prometheus.CounterValue, + float64(s.BadcInt), "cInt") + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, nfsdSubsystem, "server_rpcs_total"), + "Total number of NFSd RPCs.", + nil, + nil, + ), + prometheus.CounterValue, + float64(s.RPCCount)) +} + +// updateNFSdRequestsv2Stats collects statistics for NFSv2 requests. 
+func (c *nfsdCollector) updateNFSdRequestsv2Stats(ch chan<- prometheus.Metric, s *nfs.V2Stats) { + const proto = "2" + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.GetAttr), proto, "GetAttr") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.SetAttr), proto, "SetAttr") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Root), proto, "Root") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Lookup), proto, "Lookup") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.ReadLink), proto, "ReadLink") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Read), proto, "Read") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.WrCache), proto, "WrCache") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Write), proto, "Write") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Create), proto, "Create") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Remove), proto, "Remove") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Rename), proto, "Rename") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Link), proto, "Link") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.SymLink), proto, "SymLink") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.MkDir), proto, "MkDir") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.RmDir), proto, "RmDir") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.ReadDir), proto, "ReadDir") + ch <- 
prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.FsStat), proto, "FsStat") +} + +// updateNFSdRequestsv3Stats collects statistics for NFSv3 requests. +func (c *nfsdCollector) updateNFSdRequestsv3Stats(ch chan<- prometheus.Metric, s *nfs.V3Stats) { + const proto = "3" + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.GetAttr), proto, "GetAttr") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.SetAttr), proto, "SetAttr") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Lookup), proto, "Lookup") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Access), proto, "Access") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.ReadLink), proto, "ReadLink") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Read), proto, "Read") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Write), proto, "Write") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Create), proto, "Create") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.MkDir), proto, "MkDir") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.SymLink), proto, "SymLink") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.MkNod), proto, "MkNod") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Remove), proto, "Remove") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.RmDir), proto, "RmDir") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Rename), proto, "Rename") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, 
prometheus.CounterValue, + float64(s.Link), proto, "Link") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.ReadDir), proto, "ReadDir") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.ReadDirPlus), proto, "ReadDirPlus") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.FsStat), proto, "FsStat") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.FsInfo), proto, "FsInfo") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.PathConf), proto, "PathConf") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Commit), proto, "Commit") +} + +// updateNFSdRequestsv4Stats collects statistics for NFSv4 requests. +func (c *nfsdCollector) updateNFSdRequestsv4Stats(ch chan<- prometheus.Metric, s *nfs.V4Ops) { + const proto = "4" + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Access), proto, "Access") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Close), proto, "Close") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Commit), proto, "Commit") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Create), proto, "Create") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.DelegPurge), proto, "DelegPurge") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.DelegReturn), proto, "DelegReturn") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.GetAttr), proto, "GetAttr") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.GetFH), proto, "GetFH") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, 
+ float64(s.Link), proto, "Link") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Lock), proto, "Lock") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Lockt), proto, "Lockt") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Locku), proto, "Locku") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Lookup), proto, "Lookup") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.LookupRoot), proto, "LookupRoot") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Nverify), proto, "Nverify") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Open), proto, "Open") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.OpenAttr), proto, "OpenAttr") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.OpenConfirm), proto, "OpenConfirm") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.OpenDgrd), proto, "OpenDgrd") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.PutFH), proto, "PutFH") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Read), proto, "Read") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.ReadDir), proto, "ReadDir") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.ReadLink), proto, "ReadLink") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Remove), proto, "Remove") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Rename), proto, "Rename") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, 
prometheus.CounterValue, + float64(s.Renew), proto, "Renew") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.RestoreFH), proto, "RestoreFH") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.SaveFH), proto, "SaveFH") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.SecInfo), proto, "SecInfo") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.SetAttr), proto, "SetAttr") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Verify), proto, "Verify") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.Write), proto, "Write") + ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, + float64(s.RelLockOwner), proto, "RelLockOwner") +} diff --git a/collector/ntp.go b/collector/ntp.go index 2580353ce8..25cb9452e8 100644 --- a/collector/ntp.go +++ b/collector/ntp.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nontp // +build !nontp package collector @@ -18,9 +19,11 @@ package collector import ( "fmt" "net" + "sync" "time" "github.com/beevik/ntp" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "gopkg.in/alecthomas/kingpin.v2" ) @@ -33,18 +36,20 @@ const ( var ( ntpServer = kingpin.Flag("collector.ntp.server", "NTP server to use for ntp collector").Default("127.0.0.1").String() ntpProtocolVersion = kingpin.Flag("collector.ntp.protocol-version", "NTP protocol version").Default("4").Int() - ntpServerIsLocal = kingpin.Flag("collector.ntp.server-is-local", "Certify that collector.ntp.server address is the same local host as this collector.").Default("false").Bool() + ntpServerIsLocal = kingpin.Flag("collector.ntp.server-is-local", "Certify that collector.ntp.server address is not a public ntp server").Default("false").Bool() ntpIPTTL = kingpin.Flag("collector.ntp.ip-ttl", "IP TTL to use while sending NTP query").Default("1").Int() // 3.46608s ~ 1.5s + PHI * (1 << maxPoll), where 1.5s is MAXDIST from ntp.org, it is 1.0 in RFC5905 // max-distance option is used as-is without phi*(1<, value is always 1.", + []string{"device", "firmware_revision", "model", "serial", "state"}, + nil, + ) + infoValue := 1.0 + ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, device.Name, device.FirmwareRevision, device.Model, device.Serial, device.State) + } + + return nil +} diff --git a/collector/os_release.go b/collector/os_release.go new file mode 100644 index 0000000000..cda1eb1f6a --- /dev/null +++ b/collector/os_release.go @@ -0,0 +1,178 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + "errors" + "io" + "os" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + envparse "github.com/hashicorp/go-envparse" + "github.com/prometheus/client_golang/prometheus" +) + +const ( + etcOSRelease = "/etc/os-release" + usrLibOSRelease = "/usr/lib/os-release" +) + +var ( + versionRegex = regexp.MustCompile(`^[0-9]+\.?[0-9]*`) +) + +type osRelease struct { + Name string + ID string + IDLike string + PrettyName string + Variant string + VariantID string + Version string + VersionID string + VersionCodename string + BuildID string + ImageID string + ImageVersion string +} + +type osReleaseCollector struct { + infoDesc *prometheus.Desc + logger log.Logger + os *osRelease + osFilename string // file name of cached release information + osMtime time.Time // mtime of cached release file + osMutex sync.Mutex + osReleaseFilenames []string // all os-release file names to check + version float64 + versionDesc *prometheus.Desc +} + +func init() { + registerCollector("os", defaultEnabled, NewOSCollector) +} + +// NewOSCollector returns a new Collector exposing os-release information. 
+func NewOSCollector(logger log.Logger) (Collector, error) { + return &osReleaseCollector{ + logger: logger, + infoDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "os", "info"), + "A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, "+ + "name, pretty_name, variant, variant_id, version, version_codename, version_id.", + []string{"build_id", "id", "id_like", "image_id", "image_version", "name", "pretty_name", + "variant", "variant_id", "version", "version_codename", "version_id"}, nil, + ), + osReleaseFilenames: []string{etcOSRelease, usrLibOSRelease}, + versionDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "os", "version"), + "Metric containing the major.minor part of the OS version.", + []string{"id", "id_like", "name"}, nil, + ), + }, nil +} + +func parseOSRelease(r io.Reader) (*osRelease, error) { + env, err := envparse.Parse(r) + return &osRelease{ + Name: env["NAME"], + ID: env["ID"], + IDLike: env["ID_LIKE"], + PrettyName: env["PRETTY_NAME"], + Variant: env["VARIANT"], + VariantID: env["VARIANT_ID"], + Version: env["VERSION"], + VersionID: env["VERSION_ID"], + VersionCodename: env["VERSION_CODENAME"], + BuildID: env["BUILD_ID"], + ImageID: env["IMAGE_ID"], + ImageVersion: env["IMAGE_VERSION"], + }, err +} + +func (c *osReleaseCollector) UpdateStruct(path string) error { + releaseFile, err := os.Open(path) + if err != nil { + return err + } + defer releaseFile.Close() + + stat, err := releaseFile.Stat() + if err != nil { + return err + } + + t := stat.ModTime() + if path == c.osFilename && t == c.osMtime { + // osReleaseCollector struct is already up-to-date. + return nil + } + + // Acquire a lock to update the osReleaseCollector struct. 
+ c.osMutex.Lock() + defer c.osMutex.Unlock() + + level.Debug(c.logger).Log("msg", "file modification time has changed", + "file", path, "old_value", c.osMtime, "new_value", t) + c.osFilename = path + c.osMtime = t + + c.os, err = parseOSRelease(releaseFile) + if err != nil { + return err + } + + majorMinor := versionRegex.FindString(c.os.VersionID) + if majorMinor != "" { + c.version, err = strconv.ParseFloat(majorMinor, 64) + if err != nil { + return err + } + } else { + c.version = 0 + } + return nil +} + +func (c *osReleaseCollector) Update(ch chan<- prometheus.Metric) error { + for i, path := range c.osReleaseFilenames { + err := c.UpdateStruct(*rootfsPath + path) + if err == nil { + break + } + if errors.Is(err, os.ErrNotExist) { + if i >= (len(c.osReleaseFilenames) - 1) { + level.Debug(c.logger).Log("msg", "no os-release file found", "files", strings.Join(c.osReleaseFilenames, ",")) + return ErrNoData + } + continue + } + return err + } + + ch <- prometheus.MustNewConstMetric(c.infoDesc, prometheus.GaugeValue, 1.0, + c.os.BuildID, c.os.ID, c.os.IDLike, c.os.ImageID, c.os.ImageVersion, c.os.Name, c.os.PrettyName, + c.os.Variant, c.os.VariantID, c.os.Version, c.os.VersionCodename, c.os.VersionID) + if c.version > 0 { + ch <- prometheus.MustNewConstMetric(c.versionDesc, prometheus.GaugeValue, c.version, + c.os.ID, c.os.IDLike, c.os.Name) + } + return nil +} diff --git a/collector/os_release_test.go b/collector/os_release_test.go new file mode 100644 index 0000000000..e37cc34cbb --- /dev/null +++ b/collector/os_release_test.go @@ -0,0 +1,105 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + "os" + "reflect" + "strings" + "testing" + + "github.com/go-kit/log" +) + +const debianBullseye string = `PRETTY_NAME="Debian GNU/Linux 11 (bullseye)" +NAME="Debian GNU/Linux" +VERSION_ID="11" +VERSION="11 (bullseye)" +VERSION_CODENAME=bullseye +ID=debian +HOME_URL="https://www.debian.org/" +SUPPORT_URL="https://www.debian.org/support" +BUG_REPORT_URL="https://bugs.debian.org/" +` + +func TestParseOSRelease(t *testing.T) { + want := &osRelease{ + Name: "Ubuntu", + ID: "ubuntu", + IDLike: "debian", + PrettyName: "Ubuntu 20.04.2 LTS", + Version: "20.04.2 LTS (Focal Fossa)", + VersionID: "20.04", + VersionCodename: "focal", + } + + osReleaseFile, err := os.Open("fixtures" + usrLibOSRelease) + if err != nil { + t.Fatal(err) + } + got, err := parseOSRelease(osReleaseFile) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(want, got) { + t.Fatalf("should have %+v osRelease: got %+v", want, got) + } + + want = &osRelease{ + Name: "Debian GNU/Linux", + ID: "debian", + PrettyName: "Debian GNU/Linux 11 (bullseye)", + Version: "11 (bullseye)", + VersionID: "11", + VersionCodename: "bullseye", + } + got, err = parseOSRelease(strings.NewReader(debianBullseye)) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(want, got) { + t.Fatalf("should have %+v osRelease: got %+v", want, got) + } +} + +func TestUpdateStruct(t *testing.T) { + wantedOS := &osRelease{ + Name: "Ubuntu", + ID: "ubuntu", + IDLike: "debian", + PrettyName: "Ubuntu 20.04.2 LTS", + Version: "20.04.2 LTS (Focal Fossa)", + VersionID: "20.04", + 
VersionCodename: "focal", + } + wantedVersion := 20.04 + + collector, err := NewOSCollector(log.NewNopLogger()) + if err != nil { + t.Fatal(err) + } + c := collector.(*osReleaseCollector) + + err = c.UpdateStruct("fixtures" + usrLibOSRelease) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(wantedOS, c.os) { + t.Fatalf("should have %+v osRelease: got %+v", wantedOS, c.os) + } + if wantedVersion != c.version { + t.Errorf("Expected '%v' but got '%v'", wantedVersion, c.version) + } +} diff --git a/collector/paths.go b/collector/paths.go index f8e6e6e535..5f5a7b44b3 100644 --- a/collector/paths.go +++ b/collector/paths.go @@ -14,22 +14,39 @@ package collector import ( - "path" + "path/filepath" + "strings" "github.com/prometheus/procfs" - "gopkg.in/alecthomas/kingpin.v2" + kingpin "gopkg.in/alecthomas/kingpin.v2" ) var ( // The path of the proc filesystem. - procPath = kingpin.Flag("path.procfs", "procfs mountpoint.").Default(procfs.DefaultMountPoint).String() - sysPath = kingpin.Flag("path.sysfs", "sysfs mountpoint.").Default("/sys").String() + procPath = kingpin.Flag("path.procfs", "procfs mountpoint.").Default(procfs.DefaultMountPoint).String() + sysPath = kingpin.Flag("path.sysfs", "sysfs mountpoint.").Default("/sys").String() + rootfsPath = kingpin.Flag("path.rootfs", "rootfs mountpoint.").Default("/").String() ) func procFilePath(name string) string { - return path.Join(*procPath, name) + return filepath.Join(*procPath, name) } func sysFilePath(name string) string { - return path.Join(*sysPath, name) + return filepath.Join(*sysPath, name) +} + +func rootfsFilePath(name string) string { + return filepath.Join(*rootfsPath, name) +} + +func rootfsStripPrefix(path string) string { + if *rootfsPath == "/" { + return path + } + stripped := strings.TrimPrefix(path, *rootfsPath) + if stripped == "" { + return "/" + } + return stripped } diff --git a/collector/perf_linux.go b/collector/perf_linux.go new file mode 100644 index 0000000000..7688352303 --- 
/dev/null +++ b/collector/perf_linux.go @@ -0,0 +1,824 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noperf +// +build !noperf + +package collector + +import ( + "fmt" + "runtime" + "strconv" + "strings" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/hodgesds/perf-utils" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" + kingpin "gopkg.in/alecthomas/kingpin.v2" +) + +const ( + perfSubsystem = "perf" +) + +var ( + perfCPUsFlag = kingpin.Flag("collector.perf.cpus", "List of CPUs from which perf metrics should be collected").Default("").String() + perfTracepointFlag = kingpin.Flag("collector.perf.tracepoint", "perf tracepoint that should be collected").Strings() +) + +func init() { + registerCollector(perfSubsystem, defaultDisabled, NewPerfCollector) +} + +// perfTracepointFlagToTracepoints returns the set of configured tracepoints. 
+func perfTracepointFlagToTracepoints(tracepointsFlag []string) ([]*perfTracepoint, error) { + tracepoints := make([]*perfTracepoint, len(tracepointsFlag)) + + for i, tracepoint := range tracepointsFlag { + split := strings.Split(tracepoint, ":") + if len(split) != 2 { + return nil, fmt.Errorf("Invalid tracepoint config %v", tracepoint) + } + tracepoints[i] = &perfTracepoint{ + subsystem: split[0], + event: split[1], + } + } + return tracepoints, nil +} + +// perfCPUFlagToCPUs returns a set of CPUs for the perf collectors to monitor. +func perfCPUFlagToCPUs(cpuFlag string) ([]int, error) { + var err error + cpus := []int{} + for _, subset := range strings.Split(cpuFlag, ",") { + // First parse a single CPU. + if !strings.Contains(subset, "-") { + cpu, err := strconv.Atoi(subset) + if err != nil { + return nil, err + } + cpus = append(cpus, cpu) + continue + } + + stride := 1 + // Handle strides, ie 1-10:5 should yield 1,5,10 + strideSet := strings.Split(subset, ":") + if len(strideSet) == 2 { + stride, err = strconv.Atoi(strideSet[1]) + if err != nil { + return nil, err + } + } + + rangeSet := strings.Split(strideSet[0], "-") + if len(rangeSet) != 2 { + return nil, fmt.Errorf("invalid flag value %q", cpuFlag) + } + start, err := strconv.Atoi(rangeSet[0]) + if err != nil { + return nil, err + } + end, err := strconv.Atoi(rangeSet[1]) + if err != nil { + return nil, err + } + for i := start; i <= end; i += stride { + cpus = append(cpus, i) + } + } + + return cpus, nil +} + +// perfTracepoint is a struct for holding tracepoint information. +type perfTracepoint struct { + subsystem string + event string +} + +// label returns the tracepoint name in the format of subsystem_tracepoint. +func (t *perfTracepoint) label() string { + return t.subsystem + "_" + t.event +} + +// tracepoint returns the tracepoint name in the format of subsystem:tracepoint. 
+func (t *perfTracepoint) tracepoint() string { + return t.subsystem + ":" + t.event +} + +// perfCollector is a Collector that uses the perf subsystem to collect +// metrics. It uses perf_event_open and ioctls for profiling. Due to the fact +// that the perf subsystem is highly dependent on kernel configuration and +// settings not all profiler values may be exposed on the target system at any +// given time. +type perfCollector struct { + hwProfilerCPUMap map[*perf.HardwareProfiler]int + swProfilerCPUMap map[*perf.SoftwareProfiler]int + cacheProfilerCPUMap map[*perf.CacheProfiler]int + perfHwProfilers map[int]*perf.HardwareProfiler + perfSwProfilers map[int]*perf.SoftwareProfiler + perfCacheProfilers map[int]*perf.CacheProfiler + desc map[string]*prometheus.Desc + logger log.Logger + tracepointCollector *perfTracepointCollector +} + +type perfTracepointCollector struct { + // desc is the mapping of subsystem to tracepoint *prometheus.Desc. + descs map[string]map[string]*prometheus.Desc + // collection order is the sorted configured collection order of the profiler. + collectionOrder []string + + logger log.Logger + profilers map[int]perf.GroupProfiler +} + +// update is used to collect all tracepoints across all tracepoint profilers. +func (c *perfTracepointCollector) update(ch chan<- prometheus.Metric) error { + for cpu := range c.profilers { + if err := c.updateCPU(cpu, ch); err != nil { + return err + } + } + return nil +} + +// updateCPU is used to update metrics per CPU profiler. +func (c *perfTracepointCollector) updateCPU(cpu int, ch chan<- prometheus.Metric) error { + profiler := c.profilers[cpu] + p, err := profiler.Profile() + if err != nil { + level.Error(c.logger).Log("msg", "Failed to collect tracepoint profile", "err", err) + return err + } + + cpuid := strconv.Itoa(cpu) + + for i, value := range p.Values { + // Get the Desc from the ordered group value.
+ descKey := c.collectionOrder[i] + descKeySlice := strings.Split(descKey, ":") + ch <- prometheus.MustNewConstMetric( + c.descs[descKeySlice[0]][descKeySlice[1]], + prometheus.CounterValue, + float64(value), + cpuid, + ) + } + return nil +} + +// newPerfTracepointCollector returns a configured perfTracepointCollector. +func newPerfTracepointCollector( + logger log.Logger, + tracepointsFlag []string, + cpus []int, +) (*perfTracepointCollector, error) { + tracepoints, err := perfTracepointFlagToTracepoints(tracepointsFlag) + if err != nil { + return nil, err + } + + collectionOrder := make([]string, len(tracepoints)) + descs := map[string]map[string]*prometheus.Desc{} + eventAttrs := make([]unix.PerfEventAttr, len(tracepoints)) + + for i, tracepoint := range tracepoints { + eventAttr, err := perf.TracepointEventAttr(tracepoint.subsystem, tracepoint.event) + if err != nil { + return nil, err + } + eventAttrs[i] = *eventAttr + collectionOrder[i] = tracepoint.tracepoint() + if _, ok := descs[tracepoint.subsystem]; !ok { + descs[tracepoint.subsystem] = map[string]*prometheus.Desc{} + } + descs[tracepoint.subsystem][tracepoint.event] = prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + tracepoint.label(), + ), + "Perf tracepoint "+tracepoint.tracepoint(), + []string{"cpu"}, + nil, + ) + } + + profilers := make(map[int]perf.GroupProfiler, len(cpus)) + for _, cpu := range cpus { + profiler, err := perf.NewGroupProfiler(-1, cpu, 0, eventAttrs...) + if err != nil { + return nil, err + } + profilers[cpu] = profiler + } + + c := &perfTracepointCollector{ + descs: descs, + collectionOrder: collectionOrder, + profilers: profilers, + logger: logger, + } + + for _, profiler := range c.profilers { + if err := profiler.Start(); err != nil { + return nil, err + } + } + return c, nil +} + +// NewPerfCollector returns a new perf based collector, it creates a profiler +// per CPU. 
+func NewPerfCollector(logger log.Logger) (Collector, error) { + collector := &perfCollector{ + perfHwProfilers: map[int]*perf.HardwareProfiler{}, + perfSwProfilers: map[int]*perf.SoftwareProfiler{}, + perfCacheProfilers: map[int]*perf.CacheProfiler{}, + hwProfilerCPUMap: map[*perf.HardwareProfiler]int{}, + swProfilerCPUMap: map[*perf.SoftwareProfiler]int{}, + cacheProfilerCPUMap: map[*perf.CacheProfiler]int{}, + logger: logger, + } + + var ( + cpus []int + err error + ) + if perfCPUsFlag != nil && *perfCPUsFlag != "" { + cpus, err = perfCPUFlagToCPUs(*perfCPUsFlag) + if err != nil { + return nil, err + } + } else { + cpus = make([]int, runtime.NumCPU()) + for i := range cpus { + cpus[i] = i + } + } + + // First configure any tracepoints. + if *perfTracepointFlag != nil && len(*perfTracepointFlag) > 0 { + tracepointCollector, err := newPerfTracepointCollector(logger, *perfTracepointFlag, cpus) + if err != nil { + return nil, err + } + collector.tracepointCollector = tracepointCollector + } + + // Configure all profilers for the specified CPUs. 
+ for _, cpu := range cpus { + // Use -1 to profile all processes on the CPU, see: + // man perf_event_open + hwProf, err := perf.NewHardwareProfiler(-1, cpu) + if err != nil { + return nil, err + } + if err := hwProf.Start(); err != nil { + return nil, err + } + collector.perfHwProfilers[cpu] = &hwProf + collector.hwProfilerCPUMap[&hwProf] = cpu + + swProf, err := perf.NewSoftwareProfiler(-1, cpu) + if err != nil { + return nil, err + } + if err := swProf.Start(); err != nil { + return nil, err + } + collector.perfSwProfilers[cpu] = &swProf + collector.swProfilerCPUMap[&swProf] = cpu + + cacheProf, err := perf.NewCacheProfiler(-1, cpu) + if err != nil { + return nil, err + } + if err := cacheProf.Start(); err != nil { + return nil, err + } + collector.perfCacheProfilers[cpu] = &cacheProf + collector.cacheProfilerCPUMap[&cacheProf] = cpu + } + + collector.desc = map[string]*prometheus.Desc{ + "cpucycles_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cpucycles_total", + ), + "Number of CPU cycles (frequency scaled)", + []string{"cpu"}, + nil, + ), + "instructions_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "instructions_total", + ), + "Number of CPU instructions", + []string{"cpu"}, + nil, + ), + "branch_instructions_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "branch_instructions_total", + ), + "Number of CPU branch instructions", + []string{"cpu"}, + nil, + ), + "branch_misses_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "branch_misses_total", + ), + "Number of CPU branch misses", + []string{"cpu"}, + nil, + ), + "cache_refs_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_refs_total", + ), + "Number of cache references (non frequency scaled)", + []string{"cpu"}, + nil, + ), + "cache_misses_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + 
perfSubsystem, + "cache_misses_total", + ), + "Number of cache misses", + []string{"cpu"}, + nil, + ), + "ref_cpucycles_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "ref_cpucycles_total", + ), + "Number of CPU cycles", + []string{"cpu"}, + nil, + ), + "page_faults_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "page_faults_total", + ), + "Number of page faults", + []string{"cpu"}, + nil, + ), + "context_switches_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "context_switches_total", + ), + "Number of context switches", + []string{"cpu"}, + nil, + ), + "cpu_migrations_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cpu_migrations_total", + ), + "Number of CPU process migrations", + []string{"cpu"}, + nil, + ), + "minor_faults_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "minor_faults_total", + ), + "Number of minor page faults", + []string{"cpu"}, + nil, + ), + "major_faults_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "major_faults_total", + ), + "Number of major page faults", + []string{"cpu"}, + nil, + ), + "cache_l1d_read_hits_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_l1d_read_hits_total", + ), + "Number L1 data cache read hits", + []string{"cpu"}, + nil, + ), + "cache_l1d_read_misses_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_l1d_read_misses_total", + ), + "Number L1 data cache read misses", + []string{"cpu"}, + nil, + ), + "cache_l1d_write_hits_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_l1d_write_hits_total", + ), + "Number L1 data cache write hits", + []string{"cpu"}, + nil, + ), + "cache_l1_instr_read_misses_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, 
+ perfSubsystem, + "cache_l1_instr_read_misses_total", + ), + "Number instruction L1 instruction read misses", + []string{"cpu"}, + nil, + ), + "cache_tlb_instr_read_hits_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_tlb_instr_read_hits_total", + ), + "Number instruction TLB read hits", + []string{"cpu"}, + nil, + ), + "cache_tlb_instr_read_misses_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_tlb_instr_read_misses_total", + ), + "Number instruction TLB read misses", + []string{"cpu"}, + nil, + ), + "cache_ll_read_hits_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_ll_read_hits_total", + ), + "Number last level read hits", + []string{"cpu"}, + nil, + ), + "cache_ll_read_misses_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_ll_read_misses_total", + ), + "Number last level read misses", + []string{"cpu"}, + nil, + ), + "cache_ll_write_hits_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_ll_write_hits_total", + ), + "Number last level write hits", + []string{"cpu"}, + nil, + ), + "cache_ll_write_misses_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_ll_write_misses_total", + ), + "Number last level write misses", + []string{"cpu"}, + nil, + ), + "cache_bpu_read_hits_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_bpu_read_hits_total", + ), + "Number BPU read hits", + []string{"cpu"}, + nil, + ), + "cache_bpu_read_misses_total": prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + perfSubsystem, + "cache_bpu_read_misses_total", + ), + "Number BPU read misses", + []string{"cpu"}, + nil, + ), + } + + return collector, nil +} + +// Update implements the Collector interface and will collect metrics per CPU. 
+func (c *perfCollector) Update(ch chan<- prometheus.Metric) error { + if err := c.updateHardwareStats(ch); err != nil { + return err + } + + if err := c.updateSoftwareStats(ch); err != nil { + return err + } + + if err := c.updateCacheStats(ch); err != nil { + return err + } + if c.tracepointCollector != nil { + return c.tracepointCollector.update(ch) + } + + return nil +} + +func (c *perfCollector) updateHardwareStats(ch chan<- prometheus.Metric) error { + for _, profiler := range c.perfHwProfilers { + hwProfile, err := (*profiler).Profile() + if err != nil { + return err + } + if hwProfile == nil { + continue + } + + cpuid := strconv.Itoa(c.hwProfilerCPUMap[profiler]) + + if hwProfile.CPUCycles != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cpucycles_total"], + prometheus.CounterValue, float64(*hwProfile.CPUCycles), + cpuid, + ) + } + + if hwProfile.Instructions != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["instructions_total"], + prometheus.CounterValue, float64(*hwProfile.Instructions), + cpuid, + ) + } + + if hwProfile.BranchInstr != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["branch_instructions_total"], + prometheus.CounterValue, float64(*hwProfile.BranchInstr), + cpuid, + ) + } + + if hwProfile.BranchMisses != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["branch_misses_total"], + prometheus.CounterValue, float64(*hwProfile.BranchMisses), + cpuid, + ) + } + + if hwProfile.CacheRefs != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_refs_total"], + prometheus.CounterValue, float64(*hwProfile.CacheRefs), + cpuid, + ) + } + + if hwProfile.CacheMisses != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_misses_total"], + prometheus.CounterValue, float64(*hwProfile.CacheMisses), + cpuid, + ) + } + + if hwProfile.RefCPUCycles != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["ref_cpucycles_total"], + prometheus.CounterValue, float64(*hwProfile.RefCPUCycles), + cpuid, + ) + } + } + + return 
nil +} + +func (c *perfCollector) updateSoftwareStats(ch chan<- prometheus.Metric) error { + for _, profiler := range c.perfSwProfilers { + swProfile, err := (*profiler).Profile() + if err != nil { + return err + } + if swProfile == nil { + continue + } + + cpuid := strconv.Itoa(c.swProfilerCPUMap[profiler]) + + if swProfile.PageFaults != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["page_faults_total"], + prometheus.CounterValue, float64(*swProfile.PageFaults), + cpuid, + ) + } + + if swProfile.ContextSwitches != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["context_switches_total"], + prometheus.CounterValue, float64(*swProfile.ContextSwitches), + cpuid, + ) + } + + if swProfile.CPUMigrations != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cpu_migrations_total"], + prometheus.CounterValue, float64(*swProfile.CPUMigrations), + cpuid, + ) + } + + if swProfile.MinorPageFaults != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["minor_faults_total"], + prometheus.CounterValue, float64(*swProfile.MinorPageFaults), + cpuid, + ) + } + + if swProfile.MajorPageFaults != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["major_faults_total"], + prometheus.CounterValue, float64(*swProfile.MajorPageFaults), + cpuid, + ) + } + } + + return nil +} + +func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error { + for _, profiler := range c.perfCacheProfilers { + cacheProfile, err := (*profiler).Profile() + if err != nil { + return err + } + if cacheProfile == nil { + continue + } + + cpuid := strconv.Itoa(c.cacheProfilerCPUMap[profiler]) + + if cacheProfile.L1DataReadHit != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_l1d_read_hits_total"], + prometheus.CounterValue, float64(*cacheProfile.L1DataReadHit), + cpuid, + ) + } + + if cacheProfile.L1DataReadMiss != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_l1d_read_misses_total"], + prometheus.CounterValue, float64(*cacheProfile.L1DataReadMiss), + 
cpuid, + ) + } + + if cacheProfile.L1DataWriteHit != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_l1d_write_hits_total"], + prometheus.CounterValue, float64(*cacheProfile.L1DataWriteHit), + cpuid, + ) + } + + if cacheProfile.L1InstrReadMiss != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_l1_instr_read_misses_total"], + prometheus.CounterValue, float64(*cacheProfile.L1InstrReadMiss), + cpuid, + ) + } + + if cacheProfile.InstrTLBReadHit != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_tlb_instr_read_hits_total"], + prometheus.CounterValue, float64(*cacheProfile.InstrTLBReadHit), + cpuid, + ) + } + + if cacheProfile.InstrTLBReadMiss != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_tlb_instr_read_misses_total"], + prometheus.CounterValue, float64(*cacheProfile.InstrTLBReadMiss), + cpuid, + ) + } + + if cacheProfile.LastLevelReadHit != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_ll_read_hits_total"], + prometheus.CounterValue, float64(*cacheProfile.LastLevelReadHit), + cpuid, + ) + } + + if cacheProfile.LastLevelReadMiss != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_ll_read_misses_total"], + prometheus.CounterValue, float64(*cacheProfile.LastLevelReadMiss), + cpuid, + ) + } + + if cacheProfile.LastLevelWriteHit != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_ll_write_hits_total"], + prometheus.CounterValue, float64(*cacheProfile.LastLevelWriteHit), + cpuid, + ) + } + + if cacheProfile.LastLevelWriteMiss != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_ll_write_misses_total"], + prometheus.CounterValue, float64(*cacheProfile.LastLevelWriteMiss), + cpuid, + ) + } + + if cacheProfile.BPUReadHit != nil { + ch <- prometheus.MustNewConstMetric( + c.desc["cache_bpu_read_hits_total"], + prometheus.CounterValue, float64(*cacheProfile.BPUReadHit), + cpuid, + ) + } + + if cacheProfile.BPUReadMiss != nil { + ch <- prometheus.MustNewConstMetric( + 
c.desc["cache_bpu_read_misses_total"], + prometheus.CounterValue, float64(*cacheProfile.BPUReadMiss), + cpuid, + ) + } + } + + return nil +} diff --git a/collector/perf_linux_test.go b/collector/perf_linux_test.go new file mode 100644 index 0000000000..54ef199acf --- /dev/null +++ b/collector/perf_linux_test.go @@ -0,0 +1,257 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noprocesses +// +build !noprocesses + +package collector + +import ( + "io/ioutil" + "runtime" + "strconv" + "strings" + "testing" + + "github.com/go-kit/log" + + "github.com/prometheus/client_golang/prometheus" +) + +func canTestPerf(t *testing.T) { + paranoidBytes, err := ioutil.ReadFile("/proc/sys/kernel/perf_event_paranoid") + if err != nil { + t.Skip("Procfs not mounted, skipping perf tests") + } + paranoidStr := strings.Replace(string(paranoidBytes), "\n", "", -1) + paranoid, err := strconv.Atoi(paranoidStr) + if err != nil { + t.Fatalf("Expected perf_event_paranoid to be an int, got: %s", paranoidStr) + } + if paranoid >= 1 { + t.Skip("Skipping perf tests, set perf_event_paranoid to 0") + } +} + +func TestPerfCollector(t *testing.T) { + canTestPerf(t) + collector, err := NewPerfCollector(log.NewNopLogger()) + if err != nil { + t.Fatal(err) + } + + // Setup background goroutine to capture metrics. 
+ metrics := make(chan prometheus.Metric) + defer close(metrics) + go func() { + for range metrics { + } + }() + if err := collector.Update(metrics); err != nil { + t.Fatal(err) + } +} + +func TestPerfCollectorStride(t *testing.T) { + canTestPerf(t) + + tests := []struct { + name string + flag string + exCPUs []int + }{ + { + name: "valid single CPU", + flag: "1", + exCPUs: []int{1}, + }, + { + name: "valid range CPUs", + flag: "1-5", + exCPUs: []int{1, 2, 3, 4, 5}, + }, + { + name: "valid stride", + flag: "1-8:2", + exCPUs: []int{1, 3, 5, 7}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ncpu := runtime.NumCPU() + for _, cpu := range test.exCPUs { + if cpu > ncpu { + t.Skipf("Skipping test because runtime.NumCPU < %d", cpu) + } + } + perfCPUsFlag = &test.flag + collector, err := NewPerfCollector(log.NewNopLogger()) + if err != nil { + t.Fatal(err) + } + + c := collector.(*perfCollector) + for _, cpu := range test.exCPUs { + if _, ok := c.perfHwProfilers[cpu]; !ok { + t.Fatalf("Expected CPU %v in hardware profilers", cpu) + } + if _, ok := c.perfSwProfilers[cpu]; !ok { + t.Fatalf("Expected CPU %v in software profilers", cpu) + } + if _, ok := c.perfCacheProfilers[cpu]; !ok { + t.Fatalf("Expected CPU %v in cache profilers", cpu) + } + } + }) + } +} + +func TestPerfCPUFlagToCPUs(t *testing.T) { + tests := []struct { + name string + flag string + exCpus []int + errStr string + }{ + { + name: "valid single CPU", + flag: "1", + exCpus: []int{1}, + }, + { + name: "valid range CPUs", + flag: "1-5", + exCpus: []int{1, 2, 3, 4, 5}, + }, + { + name: "valid double digit", + flag: "10", + exCpus: []int{10}, + }, + { + name: "valid double digit range", + flag: "10-12", + exCpus: []int{10, 11, 12}, + }, + { + name: "valid double digit stride", + flag: "10-20:5", + exCpus: []int{10, 15, 20}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cpus, err := perfCPUFlagToCPUs(test.flag) + if test.errStr != "" { + 
if err == nil { + t.Fatal("expected error to not be nil") + } + if test.errStr != err.Error() { + t.Fatalf( + "expected error %q, got %q", + test.errStr, + err.Error(), + ) + } + return + } + if err != nil { + t.Fatal(err) + } + if len(cpus) != len(test.exCpus) { + t.Fatalf( + "expected CPUs %v, got %v", + test.exCpus, + cpus, + ) + } + for i := range cpus { + if test.exCpus[i] != cpus[i] { + t.Fatalf( + "expected CPUs %v, got %v", + test.exCpus[i], + cpus[i], + ) + } + } + }) + } +} + +func TestPerfTracepointFlagToTracepoints(t *testing.T) { + tests := []struct { + name string + flag []string + exTracepoints []*perfTracepoint + errStr string + }{ + { + name: "valid single tracepoint", + flag: []string{"sched:sched_kthread_stop"}, + exTracepoints: []*perfTracepoint{ + { + subsystem: "sched", + event: "sched_kthread_stop", + }, + }, + }, + { + name: "valid multiple tracepoints", + flag: []string{"sched:sched_kthread_stop", "sched:sched_process_fork"}, + exTracepoints: []*perfTracepoint{ + { + subsystem: "sched", + event: "sched_kthread_stop", + }, + { + subsystem: "sched", + event: "sched_process_fork", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + tracepoints, err := perfTracepointFlagToTracepoints(test.flag) + if test.errStr != "" { + if err == nil { + t.Fatal("expected error to not be nil") + } + if test.errStr != err.Error() { + t.Fatalf( + "expected error %q, got %q", + test.errStr, + err.Error(), + ) + } + return + } + if err != nil { + t.Fatal(err) + } + for i := range tracepoints { + if test.exTracepoints[i].event != tracepoints[i].event || + test.exTracepoints[i].subsystem != tracepoints[i].subsystem { + t.Fatalf( + "expected tracepoint %v, got %v", + test.exTracepoints[i], + tracepoints[i], + ) + } + } + }) + } +} diff --git a/collector/powersupplyclass.go b/collector/powersupplyclass.go new file mode 100644 index 0000000000..ffa4d4e61d --- /dev/null +++ b/collector/powersupplyclass.go @@ -0,0 +1,51 @@ +// 
Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nopowersupplyclass && (linux || darwin) +// +build !nopowersupplyclass +// +build linux darwin + +package collector + +import ( + "regexp" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "gopkg.in/alecthomas/kingpin.v2" +) + +var ( + powerSupplyClassIgnoredPowerSupplies = kingpin.Flag("collector.powersupply.ignored-supplies", "Regexp of power supplies to ignore for powersupplyclass collector.").Default("^$").String() +) + +type powerSupplyClassCollector struct { + subsystem string + ignoredPattern *regexp.Regexp + metricDescs map[string]*prometheus.Desc + logger log.Logger +} + +func init() { + registerCollector("powersupplyclass", defaultEnabled, NewPowerSupplyClassCollector) +} + +func NewPowerSupplyClassCollector(logger log.Logger) (Collector, error) { + pattern := regexp.MustCompile(*powerSupplyClassIgnoredPowerSupplies) + return &powerSupplyClassCollector{ + subsystem: "power_supply", + ignoredPattern: pattern, + metricDescs: map[string]*prometheus.Desc{}, + logger: logger, + }, nil +} diff --git a/collector/powersupplyclass_darwin.go b/collector/powersupplyclass_darwin.go new file mode 100644 index 0000000000..a070f64d19 --- /dev/null +++ b/collector/powersupplyclass_darwin.go @@ -0,0 +1,418 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use 
this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nopowersupplyclass +// +build !nopowersupplyclass + +package collector + +/* +#cgo LDFLAGS: -framework IOKit -framework CoreFoundation +#include +#include +#include + +// values collected from IOKit Power Source APIs +// Functions documentation available at +// https://developer.apple.com/documentation/iokit/iopowersources_h +// CFDictionary keys definition +// https://developer.apple.com/documentation/iokit/iopskeys_h/defines +struct macos_powersupply { + char *Name; + char *PowerSourceState; + char *Type; + char *TransportType; + char *BatteryHealth; + char *HardwareSerialNumber; + + int *PowerSourceID; + int *CurrentCapacity; + int *MaxCapacity; + int *DesignCapacity; + int *NominalCapacity; + + int *TimeToEmpty; + int *TimeToFullCharge; + + int *Voltage; + int *Current; + + int *Temperature; + + // boolean values + int *IsCharged; + int *IsCharging; + int *InternalFailure; + int *IsPresent; +}; + +int *CFDictionaryGetInt(CFDictionaryRef theDict, const void *key) { + CFNumberRef tmp; + int *value; + + tmp = CFDictionaryGetValue(theDict, key); + + if (tmp == NULL) + return NULL; + + value = (int*)malloc(sizeof(int)); + if (CFNumberGetValue(tmp, kCFNumberIntType, value)) { + return value; + } + + free(value); + return NULL; +} + +int *CFDictionaryGetBoolean(CFDictionaryRef theDict, const void *key) { + CFBooleanRef tmp; + int *value; + + tmp = CFDictionaryGetValue(theDict, key); + + if (tmp == NULL) + return NULL; + + value = (int*)malloc(sizeof(int)); + if (CFBooleanGetValue(tmp)) { 
+ *value = 1; + } else { + *value = 0; + } + + return value; +} + +char *CFDictionaryGetSring(CFDictionaryRef theDict, const void *key) { + CFStringRef tmp; + CFIndex size; + char *value; + + tmp = CFDictionaryGetValue(theDict, key); + + if (tmp == NULL) + return NULL; + + size = CFStringGetLength(tmp) + 1; + value = (char*)malloc(size); + + if(CFStringGetCString(tmp, value, size, kCFStringEncodingUTF8)) { + return value; + } + + free(value); + return NULL; +} + +struct macos_powersupply* getPowerSupplyInfo(CFDictionaryRef powerSourceInformation) { + struct macos_powersupply *ret; + + if (powerSourceInformation == NULL) + return NULL; + + ret = (struct macos_powersupply*)malloc(sizeof(struct macos_powersupply)); + + ret->PowerSourceID = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSPowerSourceIDKey)); + ret->CurrentCapacity = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSCurrentCapacityKey)); + ret->MaxCapacity = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSMaxCapacityKey)); + ret->DesignCapacity = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSDesignCapacityKey)); + ret->NominalCapacity = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSNominalCapacityKey)); + ret->TimeToEmpty = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSTimeToEmptyKey)); + ret->TimeToFullCharge = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSTimeToFullChargeKey)); + ret->Voltage = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSVoltageKey)); + ret->Current = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSCurrentKey)); + ret->Temperature = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSTemperatureKey)); + + ret->Name = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSNameKey)); + ret->PowerSourceState = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSPowerSourceStateKey)); + ret->Type = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSTypeKey)); + ret->TransportType = 
CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSTransportTypeKey)); + ret->BatteryHealth = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSBatteryHealthKey)); + ret->HardwareSerialNumber = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSHardwareSerialNumberKey)); + + ret->IsCharged = CFDictionaryGetBoolean(powerSourceInformation, CFSTR(kIOPSIsChargedKey)); + ret->IsCharging = CFDictionaryGetBoolean(powerSourceInformation, CFSTR(kIOPSIsChargingKey)); + ret->InternalFailure = CFDictionaryGetBoolean(powerSourceInformation, CFSTR(kIOPSInternalFailureKey)); + ret->IsPresent = CFDictionaryGetBoolean(powerSourceInformation, CFSTR(kIOPSIsPresentKey)); + + return ret; +} + + + +void releasePowerSupply(struct macos_powersupply *ps) { + free(ps->Name); + free(ps->PowerSourceState); + free(ps->Type); + free(ps->TransportType); + free(ps->BatteryHealth); + free(ps->HardwareSerialNumber); + + free(ps->PowerSourceID); + free(ps->CurrentCapacity); + free(ps->MaxCapacity); + free(ps->DesignCapacity); + free(ps->NominalCapacity); + free(ps->TimeToEmpty); + free(ps->TimeToFullCharge); + free(ps->Voltage); + free(ps->Current); + free(ps->Temperature); + + free(ps->IsCharged); + free(ps->IsCharging); + free(ps->InternalFailure); + free(ps->IsPresent); + + free(ps); +} +*/ +import "C" + +import ( + "fmt" + "strconv" + + "github.com/prometheus/client_golang/prometheus" +) + +func (c *powerSupplyClassCollector) Update(ch chan<- prometheus.Metric) error { + psList, err := getPowerSourceList() + if err != nil { + return fmt.Errorf("couldn't get IOPPowerSourcesList: %w", err) + } + + for _, info := range psList { + labels := getPowerSourceDescriptorLabels(info) + powerSupplyName := labels["power_supply"] + + if c.ignoredPattern.MatchString(powerSupplyName) { + continue + } + + for name, value := range getPowerSourceDescriptorMap(info) { + if value == nil { + continue + } + + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, 
c.subsystem, name), + fmt.Sprintf("IOKit Power Source information field %s for .", name), + []string{"power_supply"}, nil, + ), + prometheus.GaugeValue, *value, powerSupplyName, + ) + } + + pushEnumMetric( + ch, + getPowerSourceDescriptorState(info), + "power_source_state", + c.subsystem, + powerSupplyName, + ) + + pushEnumMetric( + ch, + getPowerSourceDescriptorBatteryHealth(info), + "battery_health", + c.subsystem, + powerSupplyName, + ) + + var ( + keys []string + values []string + ) + for name, value := range labels { + if value != "" { + keys = append(keys, name) + values = append(values, value) + } + } + fieldDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "info"), + "IOKit Power Source information for .", + keys, + nil, + ) + ch <- prometheus.MustNewConstMetric(fieldDesc, prometheus.GaugeValue, 1.0, values...) + + C.releasePowerSupply(info) + } + + return nil +} + +// getPowerSourceList fetches information from IOKit APIs +// +// Data is provided as opaque CoreFoundation references +// C.getPowerSupplyInfo will convert those objects in something +// easily manageable in Go. 
+// https://developer.apple.com/documentation/iokit/iopowersources_h +func getPowerSourceList() ([]*C.struct_macos_powersupply, error) { + infos, err := C.IOPSCopyPowerSourcesInfo() + if err != nil { + return nil, err + } + defer C.CFRelease(infos) + + psList, err := C.IOPSCopyPowerSourcesList(infos) + if err != nil { + return nil, err + } + + if psList == C.CFArrayRef(0) { + return nil, nil + } + defer C.CFRelease(C.CFTypeRef(psList)) + + size, err := C.CFArrayGetCount(psList) + if err != nil { + return nil, err + } + + ret := make([]*C.struct_macos_powersupply, size) + for i := C.CFIndex(0); i < size; i++ { + ps, err := C.CFArrayGetValueAtIndex(psList, i) + if err != nil { + return nil, err + } + + dict, err := C.IOPSGetPowerSourceDescription(infos, (C.CFTypeRef)(ps)) + if err != nil { + return nil, err + } + + info, err := C.getPowerSupplyInfo(dict) + if err != nil { + return nil, err + } + + ret[int(i)] = info + } + + return ret, nil +} + +func getPowerSourceDescriptorMap(info *C.struct_macos_powersupply) map[string]*float64 { + return map[string]*float64{ + "current_capacity": convertValue(info.CurrentCapacity), + "max_capacity": convertValue(info.MaxCapacity), + "design_capacity": convertValue(info.DesignCapacity), + "nominal_capacity": convertValue(info.NominalCapacity), + "time_to_empty_seconds": minutesToSeconds(info.TimeToEmpty), + "time_to_full_seconds": minutesToSeconds(info.TimeToFullCharge), + "voltage_volt": scaleValue(info.Voltage, 1e3), + "current_ampere": scaleValue(info.Current, 1e3), + "temp_celsius": convertValue(info.Temperature), + "present": convertValue(info.IsPresent), + "charging": convertValue(info.IsCharging), + "charged": convertValue(info.IsCharged), + "internal_failure": convertValue(info.InternalFailure), + } +} + +func getPowerSourceDescriptorLabels(info *C.struct_macos_powersupply) map[string]string { + return map[string]string{ + "id": strconv.FormatInt(int64(*info.PowerSourceID), 10), + "power_supply": C.GoString(info.Name), + 
"type": C.GoString(info.Type), + "transport_type": C.GoString(info.TransportType), + "serial_number": C.GoString(info.HardwareSerialNumber), + } +} + +func getPowerSourceDescriptorState(info *C.struct_macos_powersupply) map[string]float64 { + stateMap := map[string]float64{ + "Off Line": 0, + "AC Power": 0, + "Battery Power": 0, + } + + // This field is always present + // https://developer.apple.com/documentation/iokit/kiopspowersourcestatekey + stateMap[C.GoString(info.PowerSourceState)] = 1 + + return stateMap +} + +func getPowerSourceDescriptorBatteryHealth(info *C.struct_macos_powersupply) map[string]float64 { + // This field is optional + // https://developer.apple.com/documentation/iokit/kiopsBatteryHealthkey + if info.BatteryHealth == nil { + return nil + } + + stateMap := map[string]float64{ + "Good": 0, + "Fair": 0, + "Poor": 0, + } + + stateMap[C.GoString(info.BatteryHealth)] = 1 + + return stateMap +} + +func convertValue(value *C.int) *float64 { + if value == nil { + return nil + } + + ret := new(float64) + *ret = (float64)(*value) + return ret +} + +func scaleValue(value *C.int, scale float64) *float64 { + ret := convertValue(value) + if ret == nil { + return nil + } + + *ret /= scale + + return ret +} + +// minutesToSeconds converts *C.int minutes into *float64 seconds. +// +// Only positive values will be scaled to seconds, because negative ones +// have special meanings. I.e. 
-1 indicates "Still Calculating the Time" +func minutesToSeconds(minutes *C.int) *float64 { + ret := convertValue(minutes) + if ret == nil { + return nil + } + + if *ret > 0 { + *ret *= 60 + } + + return ret +} + +func pushEnumMetric(ch chan<- prometheus.Metric, values map[string]float64, name, subsystem, powerSupply string) { + for state, value := range values { + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, name), + fmt.Sprintf("IOKit Power Source information field %s for .", name), + []string{"power_supply", "state"}, nil, + ), + prometheus.GaugeValue, value, powerSupply, state, + ) + } +} diff --git a/collector/powersupplyclass_linux.go b/collector/powersupplyclass_linux.go new file mode 100644 index 0000000000..86e81fb6a2 --- /dev/null +++ b/collector/powersupplyclass_linux.go @@ -0,0 +1,176 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !nopowersupplyclass +// +build !nopowersupplyclass + +package collector + +import ( + "errors" + "fmt" + "os" + "regexp" + "strings" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +func (c *powerSupplyClassCollector) Update(ch chan<- prometheus.Metric) error { + powerSupplyClass, err := getPowerSupplyClassInfo(c.ignoredPattern) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return ErrNoData + } + return fmt.Errorf("could not get power_supply class info: %w", err) + } + for _, powerSupply := range powerSupplyClass { + + for name, value := range map[string]*int64{ + "authentic": powerSupply.Authentic, + "calibrate": powerSupply.Calibrate, + "capacity": powerSupply.Capacity, + "capacity_alert_max": powerSupply.CapacityAlertMax, + "capacity_alert_min": powerSupply.CapacityAlertMin, + "cyclecount": powerSupply.CycleCount, + "online": powerSupply.Online, + "present": powerSupply.Present, + "time_to_empty_seconds": powerSupply.TimeToEmptyNow, + "time_to_full_seconds": powerSupply.TimeToFullNow, + } { + if value != nil { + pushPowerSupplyMetric(ch, c.subsystem, name, float64(*value), powerSupply.Name, prometheus.GaugeValue) + } + } + + for name, value := range map[string]*int64{ + "current_boot": powerSupply.CurrentBoot, + "current_max": powerSupply.CurrentMax, + "current_ampere": powerSupply.CurrentNow, + "energy_empty": powerSupply.EnergyEmpty, + "energy_empty_design": powerSupply.EnergyEmptyDesign, + "energy_full": powerSupply.EnergyFull, + "energy_full_design": powerSupply.EnergyFullDesign, + "energy_watthour": powerSupply.EnergyNow, + "voltage_boot": powerSupply.VoltageBoot, + "voltage_max": powerSupply.VoltageMax, + "voltage_max_design": powerSupply.VoltageMaxDesign, + "voltage_min": powerSupply.VoltageMin, + "voltage_min_design": powerSupply.VoltageMinDesign, + "voltage_volt": powerSupply.VoltageNow, + "voltage_ocv": powerSupply.VoltageOCV, + "charge_control_limit": 
powerSupply.ChargeControlLimit, + "charge_control_limit_max": powerSupply.ChargeControlLimitMax, + "charge_counter": powerSupply.ChargeCounter, + "charge_empty": powerSupply.ChargeEmpty, + "charge_empty_design": powerSupply.ChargeEmptyDesign, + "charge_full": powerSupply.ChargeFull, + "charge_full_design": powerSupply.ChargeFullDesign, + "charge_ampere": powerSupply.ChargeNow, + "charge_term_current": powerSupply.ChargeTermCurrent, + "constant_charge_current": powerSupply.ConstantChargeCurrent, + "constant_charge_current_max": powerSupply.ConstantChargeCurrentMax, + "constant_charge_voltage": powerSupply.ConstantChargeVoltage, + "constant_charge_voltage_max": powerSupply.ConstantChargeVoltageMax, + "precharge_current": powerSupply.PrechargeCurrent, + "input_current_limit": powerSupply.InputCurrentLimit, + "power_watt": powerSupply.PowerNow, + } { + if value != nil { + pushPowerSupplyMetric(ch, c.subsystem, name, float64(*value)/1e6, powerSupply.Name, prometheus.GaugeValue) + } + } + + for name, value := range map[string]*int64{ + "temp_celsius": powerSupply.Temp, + "temp_alert_max_celsius": powerSupply.TempAlertMax, + "temp_alert_min_celsius": powerSupply.TempAlertMin, + "temp_ambient_celsius": powerSupply.TempAmbient, + "temp_ambient_max_celsius": powerSupply.TempAmbientMax, + "temp_ambient_min_celsius": powerSupply.TempAmbientMin, + "temp_max_celsius": powerSupply.TempMax, + "temp_min_celsius": powerSupply.TempMin, + } { + if value != nil { + pushPowerSupplyMetric(ch, c.subsystem, name, float64(*value)/10.0, powerSupply.Name, prometheus.GaugeValue) + } + } + + var ( + keys []string + values []string + ) + for name, value := range map[string]string{ + "power_supply": powerSupply.Name, + "capacity_level": powerSupply.CapacityLevel, + "charge_type": powerSupply.ChargeType, + "health": powerSupply.Health, + "manufacturer": powerSupply.Manufacturer, + "model_name": powerSupply.ModelName, + "serial_number": powerSupply.SerialNumber, + "status": powerSupply.Status, + 
"technology": powerSupply.Technology, + "type": powerSupply.Type, + "usb_type": powerSupply.UsbType, + "scope": powerSupply.Scope, + } { + if value != "" { + keys = append(keys, name) + values = append(values, strings.ToValidUTF8(value, "�")) + } + } + + fieldDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "info"), + "info of /sys/class/power_supply/.", + keys, + nil, + ) + ch <- prometheus.MustNewConstMetric(fieldDesc, prometheus.GaugeValue, 1.0, values...) + + } + + return nil +} + +func pushPowerSupplyMetric(ch chan<- prometheus.Metric, subsystem string, name string, value float64, powerSupplyName string, valueType prometheus.ValueType) { + fieldDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, name), + fmt.Sprintf("%s value of /sys/class/power_supply/.", name), + []string{"power_supply"}, + nil, + ) + + ch <- prometheus.MustNewConstMetric(fieldDesc, valueType, value, powerSupplyName) +} + +func getPowerSupplyClassInfo(ignore *regexp.Regexp) (sysfs.PowerSupplyClass, error) { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, err + } + powerSupplyClass, err := fs.PowerSupplyClass() + + if err != nil { + return powerSupplyClass, fmt.Errorf("error obtaining power_supply class info: %w", err) + } + + for device := range powerSupplyClass { + if ignore.MatchString(device) { + delete(powerSupplyClass, device) + } + } + + return powerSupplyClass, nil +} diff --git a/collector/pressure_linux.go b/collector/pressure_linux.go new file mode 100644 index 0000000000..ceaced7eef --- /dev/null +++ b/collector/pressure_linux.go @@ -0,0 +1,120 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !nopressure
// +build !nopressure

package collector

import (
	"errors"
	"fmt"
	"os"
	"syscall"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/procfs"
)

// psiResources lists the /proc/pressure/<resource> entries this collector scrapes.
var (
	psiResources = []string{"cpu", "io", "memory"}
)

// pressureStatsCollector exposes pressure stall information (PSI) as
// cumulative stall-time counters.
type pressureStatsCollector struct {
	cpu     *prometheus.Desc // "some" stall time for CPU
	io      *prometheus.Desc // "some" stall time for IO
	ioFull  *prometheus.Desc // "full" stall time for IO
	mem     *prometheus.Desc // "some" stall time for memory
	memFull *prometheus.Desc // "full" stall time for memory

	fs procfs.FS

	logger log.Logger
}

func init() {
	registerCollector("pressure", defaultEnabled, NewPressureStatsCollector)
}

// NewPressureStatsCollector returns a Collector exposing pressure stall information
func NewPressureStatsCollector(logger log.Logger) (Collector, error) {
	fs, err := procfs.NewFS(*procPath)
	if err != nil {
		return nil, fmt.Errorf("failed to open procfs: %w", err)
	}

	return &pressureStatsCollector{
		cpu: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, "pressure", "cpu_waiting_seconds_total"),
			"Total time in seconds that processes have waited for CPU time",
			nil, nil,
		),
		io: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, "pressure", "io_waiting_seconds_total"),
			"Total time in seconds that processes have waited due to IO congestion",
			nil, nil,
		),
		ioFull: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, "pressure", "io_stalled_seconds_total"),
			"Total time in seconds no process could make progress due to IO congestion",
			nil, nil,
		),
		mem: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, "pressure", "memory_waiting_seconds_total"),
			"Total time in seconds that processes have waited for memory",
			nil, nil,
		),
		memFull: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, "pressure", "memory_stalled_seconds_total"),
			"Total time in seconds no process could make progress due to memory congestion",
			nil, nil,
		),
		fs:     fs,
		logger: logger,
	}, nil
}

// Update calls procfs.NewPSIStatsForResource for the different resources and updates the values
func (c *pressureStatsCollector) Update(ch chan<- prometheus.Metric) error {
	for _, res := range psiResources {
		level.Debug(c.logger).Log("msg", "collecting statistics for resource", "resource", res)
		vals, err := c.fs.PSIStatsForResource(res)
		if err != nil {
			// Pressure files are absent on kernels without PSI support.
			if errors.Is(err, os.ErrNotExist) {
				level.Debug(c.logger).Log("msg", "pressure information is unavailable, you need a Linux kernel >= 4.20 and/or CONFIG_PSI enabled for your kernel")
				return ErrNoData
			}
			// ENOTSUP: kernel built with PSI but disabled at boot time.
			if errors.Is(err, syscall.ENOTSUP) {
				level.Debug(c.logger).Log("msg", "pressure information is disabled, add psi=1 kernel command line to enable it")
				return ErrNoData
			}
			return fmt.Errorf("failed to retrieve pressure stats: %w", err)
		}
		// Totals are cumulative stall time (microseconds per the PSI
		// interface); divide by 1e6 to report seconds. Only IO and memory
		// export a "full" (all tasks stalled) series here; CPU exports
		// only "some".
		switch res {
		case "cpu":
			ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, float64(vals.Some.Total)/1000.0/1000.0)
		case "io":
			ch <- prometheus.MustNewConstMetric(c.io, prometheus.CounterValue, float64(vals.Some.Total)/1000.0/1000.0)
			ch <- prometheus.MustNewConstMetric(c.ioFull, prometheus.CounterValue, float64(vals.Full.Total)/1000.0/1000.0)
		case "memory":
			ch <- prometheus.MustNewConstMetric(c.mem, prometheus.CounterValue, float64(vals.Some.Total)/1000.0/1000.0)
			ch <- prometheus.MustNewConstMetric(c.memFull, prometheus.CounterValue, float64(vals.Full.Total)/1000.0/1000.0)
		default:
			// Defensive: psiResources is the only source of res, so this
			// branch should be unreachable.
			level.Debug(c.logger).Log("msg", "did not account for resource", "resource", res)
		}
	}

	return nil
}
a/collector/processes_linux.go b/collector/processes_linux.go new file mode 100644 index 0000000000..798aeaeb30 --- /dev/null +++ b/collector/processes_linux.go @@ -0,0 +1,194 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noprocesses +// +build !noprocesses + +package collector + +import ( + "errors" + "fmt" + "os" + "path" + "strconv" + "strings" + "syscall" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +type processCollector struct { + fs procfs.FS + threadAlloc *prometheus.Desc + threadLimit *prometheus.Desc + threadsState *prometheus.Desc + procsState *prometheus.Desc + pidUsed *prometheus.Desc + pidMax *prometheus.Desc + logger log.Logger +} + +func init() { + registerCollector("processes", defaultDisabled, NewProcessStatCollector) +} + +// NewProcessStatCollector returns a new Collector exposing process data read from the proc filesystem. 
+func NewProcessStatCollector(logger log.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + subsystem := "processes" + return &processCollector{ + fs: fs, + threadAlloc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "threads"), + "Allocated threads in system", + nil, nil, + ), + threadLimit: prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "max_threads"), + "Limit of threads in the system", + nil, nil, + ), + threadsState: prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "threads_state"), + "Number of threads in each state.", + []string{"thread_state"}, nil, + ), + procsState: prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "state"), + "Number of processes in each state.", + []string{"state"}, nil, + ), + pidUsed: prometheus.NewDesc(prometheus.BuildFQName(namespace, subsystem, "pids"), + "Number of PIDs", nil, nil, + ), + pidMax: prometheus.NewDesc(prometheus.BuildFQName(namespace, subsystem, "max_processes"), + "Number of max PIDs limit", nil, nil, + ), + logger: logger, + }, nil +} +func (c *processCollector) Update(ch chan<- prometheus.Metric) error { + pids, states, threads, threadStates, err := c.getAllocatedThreads() + if err != nil { + return fmt.Errorf("unable to retrieve number of allocated threads: %w", err) + } + + ch <- prometheus.MustNewConstMetric(c.threadAlloc, prometheus.GaugeValue, float64(threads)) + maxThreads, err := readUintFromFile(procFilePath("sys/kernel/threads-max")) + if err != nil { + return fmt.Errorf("unable to retrieve limit number of threads: %w", err) + } + ch <- prometheus.MustNewConstMetric(c.threadLimit, prometheus.GaugeValue, float64(maxThreads)) + + for state := range states { + ch <- prometheus.MustNewConstMetric(c.procsState, prometheus.GaugeValue, float64(states[state]), state) + } + + for state := range threadStates { + ch <- 
prometheus.MustNewConstMetric(c.threadsState, prometheus.GaugeValue, float64(threadStates[state]), state) + } + + pidM, err := readUintFromFile(procFilePath("sys/kernel/pid_max")) + if err != nil { + return fmt.Errorf("unable to retrieve limit number of maximum pids alloved: %w", err) + } + ch <- prometheus.MustNewConstMetric(c.pidUsed, prometheus.GaugeValue, float64(pids)) + ch <- prometheus.MustNewConstMetric(c.pidMax, prometheus.GaugeValue, float64(pidM)) + + return nil +} + +func (c *processCollector) getAllocatedThreads() (int, map[string]int32, int, map[string]int32, error) { + p, err := c.fs.AllProcs() + if err != nil { + return 0, nil, 0, nil, fmt.Errorf("unable to list all processes: %w", err) + } + pids := 0 + thread := 0 + procStates := make(map[string]int32) + threadStates := make(map[string]int32) + + for _, pid := range p { + stat, err := pid.Stat() + if err != nil { + // PIDs can vanish between getting the list and getting stats. + if c.isIgnoredError(err) { + level.Debug(c.logger).Log("msg", "file not found when retrieving stats for pid", "pid", pid.PID, "err", err) + continue + } + level.Debug(c.logger).Log("msg", "error reading stat for pid", "pid", pid.PID, "err", err) + return 0, nil, 0, nil, fmt.Errorf("error reading stat for pid %d: %w", pid.PID, err) + } + pids++ + procStates[stat.State]++ + thread += stat.NumThreads + err = c.getThreadStates(pid.PID, stat, threadStates) + if err != nil { + return 0, nil, 0, nil, err + } + } + return pids, procStates, thread, threadStates, nil +} + +func (c *processCollector) getThreadStates(pid int, pidStat procfs.ProcStat, threadStates map[string]int32) error { + fs, err := procfs.NewFS(procFilePath(path.Join(strconv.Itoa(pid), "task"))) + if err != nil { + if c.isIgnoredError(err) { + level.Debug(c.logger).Log("msg", "file not found when retrieving tasks for pid", "pid", pid, "err", err) + return nil + } + level.Debug(c.logger).Log("msg", "error reading tasks for pid", "pid", pid, "err", err) + return 
fmt.Errorf("error reading task for pid %d: %w", pid, err) + } + + t, err := fs.AllProcs() + if err != nil { + if c.isIgnoredError(err) { + level.Debug(c.logger).Log("msg", "file not found when retrieving tasks for pid", "pid", pid, "err", err) + return nil + } + return fmt.Errorf("unable to list all threads for pid: %d %w", pid, err) + } + + for _, thread := range t { + if pid == thread.PID { + threadStates[pidStat.State]++ + continue + } + threadStat, err := thread.Stat() + if err != nil { + if c.isIgnoredError(err) { + level.Debug(c.logger).Log("msg", "file not found when retrieving stats for thread", "pid", pid, "threadId", thread.PID, "err", err) + continue + } + level.Debug(c.logger).Log("msg", "error reading stat for thread", "pid", pid, "threadId", thread.PID, "err", err) + return fmt.Errorf("error reading stat for pid:%d thread:%d err:%w", pid, thread.PID, err) + } + threadStates[threadStat.State]++ + } + return nil +} + +func (c *processCollector) isIgnoredError(err error) bool { + if errors.Is(err, os.ErrNotExist) || strings.Contains(err.Error(), syscall.ESRCH.Error()) { + return true + } + return false +} diff --git a/collector/processes_linux_test.go b/collector/processes_linux_test.go new file mode 100644 index 0000000000..9a5c86f5be --- /dev/null +++ b/collector/processes_linux_test.go @@ -0,0 +1,55 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !noprocesses +// +build !noprocesses + +package collector + +import ( + "testing" + + "github.com/go-kit/log" + "github.com/prometheus/procfs" + kingpin "gopkg.in/alecthomas/kingpin.v2" +) + +func TestReadProcessStatus(t *testing.T) { + if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", "fixtures/proc"}); err != nil { + t.Fatal(err) + } + want := 1 + fs, err := procfs.NewFS(*procPath) + if err != nil { + t.Errorf("failed to open procfs: %v", err) + } + c := processCollector{fs: fs, logger: log.NewNopLogger()} + pids, states, threads, _, err := c.getAllocatedThreads() + if err != nil { + t.Fatalf("Cannot retrieve data from procfs getAllocatedThreads function: %v ", err) + } + if threads < want { + t.Fatalf("Current threads: %d Shouldn't be less than wanted %d", threads, want) + } + if states == nil { + + t.Fatalf("Process states cannot be nil %v:", states) + } + maxPid, err := readUintFromFile(procFilePath("sys/kernel/pid_max")) + if err != nil { + t.Fatalf("Unable to retrieve limit number of maximum pids alloved %v\n", err) + } + if uint64(pids) > maxPid || pids == 0 { + t.Fatalf("Total running pids cannot be greater than %d or equals to 0", maxPid) + } +} diff --git a/collector/qdisc_linux.go b/collector/qdisc_linux.go index d96e7bd446..3050318ab4 100644 --- a/collector/qdisc_linux.go +++ b/collector/qdisc_linux.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !noqdisc // +build !noqdisc package collector @@ -21,6 +22,7 @@ import ( "path/filepath" "github.com/ema/qdisc" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "gopkg.in/alecthomas/kingpin.v2" ) @@ -31,6 +33,9 @@ type qdiscStatCollector struct { drops typedDesc requeues typedDesc overlimits typedDesc + qlength typedDesc + backlog typedDesc + logger log.Logger } var ( @@ -41,7 +46,8 @@ func init() { registerCollector("qdisc", defaultDisabled, NewQdiscStatCollector) } -func NewQdiscStatCollector() (Collector, error) { +// NewQdiscStatCollector returns a new Collector exposing queuing discipline statistics. +func NewQdiscStatCollector(logger log.Logger) (Collector, error) { return &qdiscStatCollector{ bytes: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "qdisc", "bytes_total"), @@ -68,6 +74,17 @@ func NewQdiscStatCollector() (Collector, error) { "Number of overlimit packets.", []string{"device", "kind"}, nil, ), prometheus.CounterValue}, + qlength: typedDesc{prometheus.NewDesc( + prometheus.BuildFQName(namespace, "qdisc", "current_queue_length"), + "Number of packets currently in queue to be sent.", + []string{"device", "kind"}, nil, + ), prometheus.GaugeValue}, + backlog: typedDesc{prometheus.NewDesc( + prometheus.BuildFQName(namespace, "qdisc", "backlog"), + "Number of bytes currently in queue to be sent.", + []string{"device", "kind"}, nil, + ), prometheus.GaugeValue}, + logger: logger, }, nil } @@ -110,6 +127,8 @@ func (c *qdiscStatCollector) Update(ch chan<- prometheus.Metric) error { ch <- c.drops.mustNewConstMetric(float64(msg.Drops), msg.IfaceName, msg.Kind) ch <- c.requeues.mustNewConstMetric(float64(msg.Requeues), msg.IfaceName, msg.Kind) ch <- c.overlimits.mustNewConstMetric(float64(msg.Overlimits), msg.IfaceName, msg.Kind) + ch <- c.qlength.mustNewConstMetric(float64(msg.Qlen), msg.IfaceName, msg.Kind) + ch <- c.backlog.mustNewConstMetric(float64(msg.Backlog), msg.IfaceName, msg.Kind) } return nil 
diff --git a/collector/rapl_linux.go b/collector/rapl_linux.go new file mode 100644 index 0000000000..5ed343bbe8 --- /dev/null +++ b/collector/rapl_linux.go @@ -0,0 +1,97 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !norapl +// +build !norapl + +package collector + +import ( + "errors" + "fmt" + "os" + "strconv" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +type raplCollector struct { + fs sysfs.FS + logger log.Logger +} + +func init() { + registerCollector("rapl", defaultEnabled, NewRaplCollector) +} + +// NewRaplCollector returns a new Collector exposing RAPL metrics. +func NewRaplCollector(logger log.Logger) (Collector, error) { + fs, err := sysfs.NewFS(*sysPath) + + if err != nil { + return nil, err + } + + collector := raplCollector{ + fs: fs, + logger: logger, + } + return &collector, nil +} + +// Update implements Collector and exposes RAPL related metrics. +func (c *raplCollector) Update(ch chan<- prometheus.Metric) error { + // nil zones are fine when platform doesn't have powercap files present. 
+ zones, err := sysfs.GetRaplZones(c.fs) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "Platform doesn't have powercap files present", "err", err) + return ErrNoData + } + if errors.Is(err, os.ErrPermission) { + level.Debug(c.logger).Log("msg", "Can't access powercap files", "err", err) + return ErrNoData + } + return fmt.Errorf("failed to retrieve rapl stats: %w", err) + } + + for _, rz := range zones { + newMicrojoules, err := rz.GetEnergyMicrojoules() + if err != nil { + if errors.Is(err, os.ErrPermission) { + level.Debug(c.logger).Log("msg", "Can't access energy_uj file", "zone", rz, "err", err) + return ErrNoData + } + return err + } + index := strconv.Itoa(rz.Index) + + descriptor := prometheus.NewDesc( + prometheus.BuildFQName(namespace, "rapl", rz.Name+"_joules_total"), + "Current RAPL "+rz.Name+" value in joules", + []string{"index", "path"}, nil, + ) + + ch <- prometheus.MustNewConstMetric( + descriptor, + prometheus.CounterValue, + float64(newMicrojoules)/1000000.0, + index, + rz.Path, + ) + } + return nil +} diff --git a/collector/runit.go b/collector/runit.go index 2608e1bbc6..2449691dbb 100644 --- a/collector/runit.go +++ b/collector/runit.go @@ -11,13 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !norunit // +build !norunit package collector import ( + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" "github.com/soundcloud/go-runit/runit" "gopkg.in/alecthomas/kingpin.v2" ) @@ -25,7 +27,11 @@ import ( var runitServiceDir = kingpin.Flag("collector.runit.servicedir", "Path to runit service directory.").Default("/etc/service").String() type runitCollector struct { - state, stateDesired, stateNormal, stateTimestamp typedDesc + state typedDesc + stateDesired typedDesc + stateNormal typedDesc + stateTimestamp typedDesc + logger log.Logger } func init() { @@ -33,7 +39,7 @@ func init() { } // NewRunitCollector returns a new Collector exposing runit statistics. -func NewRunitCollector() (Collector, error) { +func NewRunitCollector(logger log.Logger) (Collector, error) { var ( subsystem = "service" constLabels = prometheus.Labels{"supervisor": "runit"} @@ -61,6 +67,7 @@ func NewRunitCollector() (Collector, error) { "Unix timestamp of the last runit service state change.", labelNames, constLabels, ), prometheus.GaugeValue}, + logger: logger, }, nil } @@ -73,11 +80,11 @@ func (c *runitCollector) Update(ch chan<- prometheus.Metric) error { for _, service := range services { status, err := service.Status() if err != nil { - log.Debugf("Couldn't get status for %s: %s, skipping...", service.Name, err) + level.Debug(c.logger).Log("msg", "Couldn't get status", "service", service.Name, "err", err) continue } - log.Debugf("%s is %d on pid %d for %d seconds", service.Name, status.State, status.Pid, status.Duration) + level.Debug(c.logger).Log("msg", "duration", "service", service.Name, "status", status.State, "pid", status.Pid, "duration_seconds", status.Duration) ch <- c.state.mustNewConstMetric(float64(status.State), service.Name) ch <- c.stateDesired.mustNewConstMetric(float64(status.Want), service.Name) ch <- 
c.stateTimestamp.mustNewConstMetric(float64(status.Timestamp.Unix()), service.Name) diff --git a/collector/schedstat_linux.go b/collector/schedstat_linux.go new file mode 100644 index 0000000000..e5016561da --- /dev/null +++ b/collector/schedstat_linux.go @@ -0,0 +1,108 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noshedstat +// +build !noshedstat + +package collector + +import ( + "errors" + "fmt" + "os" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +const nsPerSec = 1e9 + +var ( + runningSecondsTotal = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "schedstat", "running_seconds_total"), + "Number of seconds CPU spent running a process.", + []string{"cpu"}, + nil, + ) + + waitingSecondsTotal = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "schedstat", "waiting_seconds_total"), + "Number of seconds spent by processing waiting for this CPU.", + []string{"cpu"}, + nil, + ) + + timeslicesTotal = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "schedstat", "timeslices_total"), + "Number of timeslices executed by CPU.", + []string{"cpu"}, + nil, + ) +) + +// NewSchedstatCollector returns a new Collector exposing task scheduler statistics +func NewSchedstatCollector(logger log.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, 
fmt.Errorf("failed to open procfs: %w", err) + } + + return &schedstatCollector{fs, logger}, nil +} + +type schedstatCollector struct { + fs procfs.FS + logger log.Logger +} + +func init() { + registerCollector("schedstat", defaultEnabled, NewSchedstatCollector) +} + +func (c *schedstatCollector) Update(ch chan<- prometheus.Metric) error { + stats, err := c.fs.Schedstat() + if err != nil { + if errors.Is(err, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "schedstat file does not exist") + return ErrNoData + } + return err + } + + for _, cpu := range stats.CPUs { + ch <- prometheus.MustNewConstMetric( + runningSecondsTotal, + prometheus.CounterValue, + float64(cpu.RunningNanoseconds)/nsPerSec, + cpu.CPUNum, + ) + + ch <- prometheus.MustNewConstMetric( + waitingSecondsTotal, + prometheus.CounterValue, + float64(cpu.WaitingNanoseconds)/nsPerSec, + cpu.CPUNum, + ) + + ch <- prometheus.MustNewConstMetric( + timeslicesTotal, + prometheus.CounterValue, + float64(cpu.RunTimeslices), + cpu.CPUNum, + ) + } + + return nil +} diff --git a/collector/sockstat_linux.go b/collector/sockstat_linux.go index 92847be4ec..480c83b1da 100644 --- a/collector/sockstat_linux.go +++ b/collector/sockstat_linux.go @@ -11,19 +11,20 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nosockstat // +build !nosockstat package collector import ( - "bufio" + "errors" "fmt" - "io" "os" - "strconv" - "strings" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" ) const ( @@ -33,90 +34,152 @@ const ( // Used for calculating the total memory bytes on TCP and UDP. var pageSize = os.Getpagesize() -type sockStatCollector struct{} +type sockStatCollector struct { + logger log.Logger +} func init() { registerCollector(sockStatSubsystem, defaultEnabled, NewSockStatCollector) } // NewSockStatCollector returns a new Collector exposing socket stats. 
-func NewSockStatCollector() (Collector, error) { - return &sockStatCollector{}, nil +func NewSockStatCollector(logger log.Logger) (Collector, error) { + return &sockStatCollector{logger}, nil } func (c *sockStatCollector) Update(ch chan<- prometheus.Metric) error { - sockStats, err := getSockStats(procFilePath("net/sockstat")) + fs, err := procfs.NewFS(*procPath) if err != nil { - return fmt.Errorf("couldn't get sockstats: %s", err) + return fmt.Errorf("failed to open procfs: %w", err) } - for protocol, protocolStats := range sockStats { - for name, value := range protocolStats { - v, err := strconv.ParseFloat(value, 64) - if err != nil { - return fmt.Errorf("invalid value %s in sockstats: %s", value, err) - } - ch <- prometheus.MustNewConstMetric( - prometheus.NewDesc( - prometheus.BuildFQName(namespace, sockStatSubsystem, protocol+"_"+name), - fmt.Sprintf("Number of %s sockets in state %s.", protocol, name), - nil, nil, - ), - prometheus.GaugeValue, v, - ) - } + + // If IPv4 and/or IPv6 are disabled on this kernel, handle it gracefully. 
+ stat4, err := fs.NetSockstat() + switch { + case err == nil: + case errors.Is(err, os.ErrNotExist): + level.Debug(c.logger).Log("msg", "IPv4 sockstat statistics not found, skipping") + default: + return fmt.Errorf("failed to get IPv4 sockstat data: %w", err) } - return err -} -func getSockStats(fileName string) (map[string]map[string]string, error) { - file, err := os.Open(fileName) - if err != nil { - return nil, err + stat6, err := fs.NetSockstat6() + switch { + case err == nil: + case errors.Is(err, os.ErrNotExist): + level.Debug(c.logger).Log("msg", "IPv6 sockstat statistics not found, skipping") + default: + return fmt.Errorf("failed to get IPv6 sockstat data: %w", err) + } + + stats := []struct { + isIPv6 bool + stat *procfs.NetSockstat + }{ + { + stat: stat4, + }, + { + isIPv6: true, + stat: stat6, + }, + } + + for _, s := range stats { + c.update(ch, s.isIPv6, s.stat) } - defer file.Close() - return parseSockStats(file, fileName) + return nil } -func parseSockStats(r io.Reader, fileName string) (map[string]map[string]string, error) { - var ( - sockStat = map[string]map[string]string{} - scanner = bufio.NewScanner(r) - ) - - for scanner.Scan() { - line := strings.Split(scanner.Text(), " ") - // Remove trailing ':'. - protocol := line[0][:len(line[0])-1] - sockStat[protocol] = map[string]string{} - - for i := 1; i < len(line) && i+1 < len(line); i++ { - sockStat[protocol][line[i]] = line[i+1] - i++ - } +func (c *sockStatCollector) update(ch chan<- prometheus.Metric, isIPv6 bool, s *procfs.NetSockstat) { + if s == nil { + // IPv6 disabled or similar; nothing to do. + return } - if err := scanner.Err(); err != nil { - return nil, err + + // If sockstat contains the number of used sockets, export it. + if !isIPv6 && s.Used != nil { + // TODO: this must be updated if sockstat6 ever exports this data. 
+ ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, sockStatSubsystem, "sockets_used"), + "Number of IPv4 sockets in use.", + nil, + nil, + ), + prometheus.GaugeValue, + float64(*s.Used), + ) } - // The mem metrics is the count of pages used. Multiply the mem metrics by - // the page size from the kernel to get the number of bytes used. - // - // Update the TCP mem from page count to bytes. - pageCount, err := strconv.Atoi(sockStat["TCP"]["mem"]) - if err != nil { - return nil, fmt.Errorf("invalid value %s in sockstats: %s", sockStat["TCP"]["mem"], err) + // A name and optional value for a sockstat metric. + type ssPair struct { + name string + v *int } - sockStat["TCP"]["mem_bytes"] = strconv.Itoa(pageCount * pageSize) - // Update the UDP mem from page count to bytes. - if udpMem := sockStat["UDP"]["mem"]; udpMem != "" { - pageCount, err = strconv.Atoi(udpMem) - if err != nil { - return nil, fmt.Errorf("invalid value %s in sockstats: %s", sockStat["UDP"]["mem"], err) + // Previously these metric names were generated directly from the file output. + // In order to keep the same level of compatibility, we must map the fields + // to their correct names. + for _, p := range s.Protocols { + pairs := []ssPair{ + { + name: "inuse", + v: &p.InUse, + }, + { + name: "orphan", + v: p.Orphan, + }, + { + name: "tw", + v: p.TW, + }, + { + name: "alloc", + v: p.Alloc, + }, + { + name: "mem", + v: p.Mem, + }, + { + name: "memory", + v: p.Memory, + }, } - sockStat["UDP"]["mem_bytes"] = strconv.Itoa(pageCount * pageSize) - } - return sockStat, nil + // Also export mem_bytes values for sockets which have a mem value + // stored in pages. + if p.Mem != nil { + v := *p.Mem * pageSize + pairs = append(pairs, ssPair{ + name: "mem_bytes", + v: &v, + }) + } + + for _, pair := range pairs { + if pair.v == nil { + // This value is not set for this protocol; nothing to do. 
+ continue + } + + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + sockStatSubsystem, + fmt.Sprintf("%s_%s", p.Protocol, pair.name), + ), + fmt.Sprintf("Number of %s sockets in state %s.", p.Protocol, pair.name), + nil, + nil, + ), + prometheus.GaugeValue, + float64(*pair.v), + ) + } + } } diff --git a/collector/sockstat_linux_test.go b/collector/sockstat_linux_test.go deleted file mode 100644 index 70bedba900..0000000000 --- a/collector/sockstat_linux_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package collector - -import ( - "os" - "strconv" - "testing" -) - -func TestSockStats(t *testing.T) { - testSockStats(t, "fixtures/proc/net/sockstat") - testSockStats(t, "fixtures/proc/net/sockstat_rhe4") -} - -func testSockStats(t *testing.T, fixture string) { - file, err := os.Open(fixture) - if err != nil { - t.Fatal(err) - } - - defer file.Close() - - sockStats, err := parseSockStats(file, fixture) - if err != nil { - t.Fatal(err) - } - - if want, got := "229", sockStats["sockets"]["used"]; want != got { - t.Errorf("want sockstat sockets used %s, got %s", want, got) - } - - if want, got := "4", sockStats["TCP"]["tw"]; want != got { - t.Errorf("want sockstat TCP tw %s, got %s", want, got) - } - - if want, got := "17", sockStats["TCP"]["alloc"]; want != got { - t.Errorf("want sockstat TCP alloc %s, got %s", want, got) - } - - // The test file has 1 for TCP mem, which is one page. So we should get the - // page size in bytes back from sockstat_linux. We get the page size from - // os here because this value can change from system to system. The value is - // 4096 by default from linux 2.4 onward. - if want, got := strconv.Itoa(os.Getpagesize()), sockStats["TCP"]["mem_bytes"]; want != got { - t.Errorf("want sockstat TCP mem_bytes %s, got %s", want, got) - } -} diff --git a/collector/softnet_linux.go b/collector/softnet_linux.go new file mode 100644 index 0000000000..a46b6808be --- /dev/null +++ b/collector/softnet_linux.go @@ -0,0 +1,103 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nosoftnet +// +build !nosoftnet + +package collector + +import ( + "fmt" + "strconv" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +type softnetCollector struct { + fs procfs.FS + processed *prometheus.Desc + dropped *prometheus.Desc + timeSqueezed *prometheus.Desc + logger log.Logger +} + +const ( + softnetSubsystem = "softnet" +) + +func init() { + registerCollector("softnet", defaultEnabled, NewSoftnetCollector) +} + +// NewSoftnetCollector returns a new Collector exposing softnet metrics. +func NewSoftnetCollector(logger log.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + + return &softnetCollector{ + fs: fs, + processed: prometheus.NewDesc( + prometheus.BuildFQName(namespace, softnetSubsystem, "processed_total"), + "Number of processed packets", + []string{"cpu"}, nil, + ), + dropped: prometheus.NewDesc( + prometheus.BuildFQName(namespace, softnetSubsystem, "dropped_total"), + "Number of dropped packets", + []string{"cpu"}, nil, + ), + timeSqueezed: prometheus.NewDesc( + prometheus.BuildFQName(namespace, softnetSubsystem, "times_squeezed_total"), + "Number of times processing packets ran out of quota", + []string{"cpu"}, nil, + ), + logger: logger, + }, nil +} + +// Update gets parsed softnet statistics using procfs. 
+func (c *softnetCollector) Update(ch chan<- prometheus.Metric) error { + stats, err := c.fs.NetSoftnetStat() + if err != nil { + return fmt.Errorf("could not get softnet statistics: %w", err) + } + + for cpuNumber, cpuStats := range stats { + cpu := strconv.Itoa(cpuNumber) + + ch <- prometheus.MustNewConstMetric( + c.processed, + prometheus.CounterValue, + float64(cpuStats.Processed), + cpu, + ) + ch <- prometheus.MustNewConstMetric( + c.dropped, + prometheus.CounterValue, + float64(cpuStats.Dropped), + cpu, + ) + ch <- prometheus.MustNewConstMetric( + c.timeSqueezed, + prometheus.CounterValue, + float64(cpuStats.TimeSqueezed), + cpu, + ) + } + + return nil +} diff --git a/collector/stat_linux.go b/collector/stat_linux.go index 6a0e33f1ce..941c5f233b 100644 --- a/collector/stat_linux.go +++ b/collector/stat_linux.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nostat // +build !nostat package collector @@ -18,19 +19,20 @@ package collector import ( "fmt" - "github.com/prometheus/procfs" - + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" ) type statCollector struct { - cpu *prometheus.Desc + fs procfs.FS intr *prometheus.Desc ctxt *prometheus.Desc forks *prometheus.Desc btime *prometheus.Desc procsRunning *prometheus.Desc procsBlocked *prometheus.Desc + logger log.Logger } func init() { @@ -38,30 +40,30 @@ func init() { } // NewStatCollector returns a new Collector exposing kernel/system statistics. 
-func NewStatCollector() (Collector, error) { +func NewStatCollector(logger log.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } return &statCollector{ - cpu: prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "cpu"), - "Seconds the cpus spent in each mode.", - []string{"cpu", "mode"}, nil, - ), + fs: fs, intr: prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "intr"), + prometheus.BuildFQName(namespace, "", "intr_total"), "Total number of interrupts serviced.", nil, nil, ), ctxt: prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "context_switches"), + prometheus.BuildFQName(namespace, "", "context_switches_total"), "Total number of context switches.", nil, nil, ), forks: prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "forks"), + prometheus.BuildFQName(namespace, "", "forks_total"), "Total number of forks.", nil, nil, ), btime: prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "boot_time"), + prometheus.BuildFQName(namespace, "", "boot_time_seconds"), "Node boot time, in unixtime.", nil, nil, ), @@ -75,16 +77,13 @@ func NewStatCollector() (Collector, error) { "Number of processes blocked waiting for I/O to complete.", nil, nil, ), + logger: logger, }, nil } // Update implements Collector and exposes kernel and system statistics. func (c *statCollector) Update(ch chan<- prometheus.Metric) error { - fs, err := procfs.NewFS(*procPath) - if err != nil { - return fmt.Errorf("failed to open procfs: %v", err) - } - stats, err := fs.NewStat() + stats, err := c.fs.Stat() if err != nil { return err } diff --git a/collector/supervisord.go b/collector/supervisord.go index 2a7aedc4be..b3b0f1372c 100644 --- a/collector/supervisord.go +++ b/collector/supervisord.go @@ -11,27 +11,37 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nosupervisord // +build !nosupervisord package collector import ( - "github.com/kolo/xmlrpc" + "context" + "fmt" + "net" + "net/http" + "net/url" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/mattn/go-xmlrpc" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" "gopkg.in/alecthomas/kingpin.v2" ) var ( - supervisordURL = kingpin.Flag("collector.supervisord.url", "XML RPC endpoint.").Default("http://localhost:9001/RPC2").String() + supervisordURL = kingpin.Flag("collector.supervisord.url", "XML RPC endpoint.").Default("http://localhost:9001/RPC2").Envar("SUPERVISORD_URL").String() + xrpc *xmlrpc.Client ) type supervisordCollector struct { - client *xmlrpc.Client upDesc *prometheus.Desc stateDesc *prometheus.Desc exitStatusDesc *prometheus.Desc - uptimeDesc *prometheus.Desc + startTimeDesc *prometheus.Desc + logger log.Logger } func init() { @@ -39,18 +49,27 @@ func init() { } // NewSupervisordCollector returns a new Collector exposing supervisord statistics. -func NewSupervisordCollector() (Collector, error) { - client, err := xmlrpc.NewClient(*supervisordURL, nil) - if err != nil { - return nil, err - } - +func NewSupervisordCollector(logger log.Logger) (Collector, error) { var ( subsystem = "supervisord" labelNames = []string{"name", "group"} ) + + if u, err := url.Parse(*supervisordURL); err == nil && u.Scheme == "unix" { + // Fake the URI scheme as http, since net/http.*Transport.roundTrip will complain + // about a non-http(s) transport. 
+ xrpc = xmlrpc.NewClient("http://unix/RPC2") + xrpc.HttpClient.Transport = &http.Transport{ + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + d := net.Dialer{Timeout: 10 * time.Second} + return d.DialContext(ctx, "unix", u.Path) + }, + } + } else { + xrpc = xmlrpc.NewClient(*supervisordURL) + } + return &supervisordCollector{ - client: client, upDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "up"), "Process Up", @@ -69,26 +88,27 @@ func NewSupervisordCollector() (Collector, error) { labelNames, nil, ), - uptimeDesc: prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, "uptime"), - "Process Uptime", + startTimeDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "start_time_seconds"), + "Process start time", labelNames, nil, ), + logger: logger, }, nil } func (c *supervisordCollector) isRunning(state int) bool { // http://supervisord.org/subprocess.html#process-states const ( - STOPPED = 0 + // STOPPED = 0 STARTING = 10 RUNNING = 20 - BACKOFF = 30 + // BACKOFF = 30 STOPPING = 40 - EXITED = 100 - FATAL = 200 - UNKNOWN = 1000 + // EXITED = 100 + // FATAL = 200 + // UNKNOWN = 1000 ) switch state { case STARTING, RUNNING, STOPPING: @@ -98,7 +118,7 @@ func (c *supervisordCollector) isRunning(state int) bool { } func (c *supervisordCollector) Update(ch chan<- prometheus.Metric) error { - var infos []struct { + var info struct { Name string `xmlrpc:"name"` Group string `xmlrpc:"group"` Start int `xmlrpc:"start"` @@ -112,23 +132,47 @@ func (c *supervisordCollector) Update(ch chan<- prometheus.Metric) error { StderrLogfile string `xmlrcp:"stderr_logfile"` PID int `xmlrpc:"pid"` } - if err := c.client.Call("supervisor.getAllProcessInfo", nil, &infos); err != nil { - return err + + res, err := xrpc.Call("supervisor.getAllProcessInfo") + if err != nil { + return fmt.Errorf("unable to call supervisord: %w", err) } - for _, info := range infos { - lables := []string{info.Name, info.Group} - ch <- 
prometheus.MustNewConstMetric(c.stateDesc, prometheus.GaugeValue, float64(info.State), lables...) - ch <- prometheus.MustNewConstMetric(c.exitStatusDesc, prometheus.GaugeValue, float64(info.ExitStatus), lables...) + for _, p := range res.(xmlrpc.Array) { + for k, v := range p.(xmlrpc.Struct) { + switch k { + case "name": + info.Name = v.(string) + case "group": + info.Group = v.(string) + case "start": + info.Start = v.(int) + case "stop": + info.Stop = v.(int) + case "now": + info.Now = v.(int) + case "state": + info.State = v.(int) + case "statename": + info.StateName = v.(string) + case "exitstatus": + info.ExitStatus = v.(int) + case "pid": + info.PID = v.(int) + } + } + labels := []string{info.Name, info.Group} + + ch <- prometheus.MustNewConstMetric(c.stateDesc, prometheus.GaugeValue, float64(info.State), labels...) + ch <- prometheus.MustNewConstMetric(c.exitStatusDesc, prometheus.GaugeValue, float64(info.ExitStatus), labels...) if c.isRunning(info.State) { - ch <- prometheus.MustNewConstMetric(c.upDesc, prometheus.GaugeValue, 1, lables...) - ch <- prometheus.MustNewConstMetric(c.uptimeDesc, prometheus.CounterValue, float64(info.Now-info.Start), lables...) + ch <- prometheus.MustNewConstMetric(c.upDesc, prometheus.GaugeValue, 1, labels...) + ch <- prometheus.MustNewConstMetric(c.startTimeDesc, prometheus.CounterValue, float64(info.Start), labels...) } else { - ch <- prometheus.MustNewConstMetric(c.upDesc, prometheus.GaugeValue, 0, lables...) - ch <- prometheus.MustNewConstMetric(c.uptimeDesc, prometheus.CounterValue, 0, lables...) + ch <- prometheus.MustNewConstMetric(c.upDesc, prometheus.GaugeValue, 0, labels...) 
} - log.Debugf("%s:%s is %s on pid %d", info.Group, info.Name, info.StateName, info.PID) + level.Debug(c.logger).Log("msg", "process info", "group", info.Group, "name", info.Name, "state", info.StateName, "pid", info.PID) } return nil diff --git a/collector/sysctl_bsd.go b/collector/sysctl_bsd.go index 1aed553063..2ab248ed2e 100644 --- a/collector/sysctl_bsd.go +++ b/collector/sysctl_bsd.go @@ -11,16 +11,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build freebsd dragonfly -// +build !nomeminfo +//go:build (freebsd || dragonfly || openbsd || netbsd || darwin) && cgo +// +build freebsd dragonfly openbsd netbsd darwin +// +build cgo package collector import ( "fmt" + "unsafe" + "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" - "unsafe" ) // #include @@ -34,10 +36,11 @@ const ( // Default to uint32. bsdSysctlTypeUint32 bsdSysctlType = iota bsdSysctlTypeUint64 - bsdSysctlTypeStructTimeval + bsdSysctlTypeCLong ) -// Contains all the info needed to map a single bsd-sysctl to a prometheus value. +// Contains all the info needed to map a single bsd-sysctl to a prometheus +// value. type bsdSysctl struct { // Prometheus name name string @@ -71,43 +74,8 @@ func (b bsdSysctl) Value() (float64, error) { case bsdSysctlTypeUint64: tmp64, err = unix.SysctlUint64(b.mib) tmpf64 = float64(tmp64) - case bsdSysctlTypeStructTimeval: - raw, err := unix.SysctlRaw(b.mib) - if err != nil { - return 0, err - } - - /* - * From 10.3-RELEASE sources: - * - * /usr/include/sys/_timeval.h:47 - * time_t tv_sec - * suseconds_t tv_usec - * - * /usr/include/sys/_types.h:60 - * long __suseconds_t - * - * ... architecture dependent, via #ifdef: - * typedef __int64_t __time_t; - * typedef __int32_t __time_t; - */ - if len(raw) != (C.sizeof_time_t + C.sizeof_suseconds_t) { - // Shouldn't get here, unless the ABI changes... 
- return 0, fmt.Errorf( - "length of bytes received from sysctl (%d) does not match expected bytes (%d)", - len(raw), - C.sizeof_time_t+C.sizeof_suseconds_t, - ) - } - - secondsUp := unsafe.Pointer(&raw[0]) - susecondsUp := uintptr(secondsUp) + C.sizeof_time_t - unix := float64(*(*C.time_t)(secondsUp)) - usec := float64(*(*C.suseconds_t)(unsafe.Pointer(susecondsUp))) - - // This conversion maintains the usec precision. Using - // the time package did not. - tmpf64 = unix + (usec / float64(1000*1000)) + case bsdSysctlTypeCLong: + tmpf64, err = b.getCLong() } if err != nil { @@ -120,3 +88,30 @@ func (b bsdSysctl) Value() (float64, error) { return tmpf64, nil } + +func (b bsdSysctl) getCLong() (float64, error) { + raw, err := unix.SysctlRaw(b.mib) + if err != nil { + return 0, err + } + + if len(raw) == C.sizeof_long { + return float64(*(*C.long)(unsafe.Pointer(&raw[0]))), nil + } + + if len(raw) == C.sizeof_int { + // This is valid for at least vfs.bufspace, and the default + // long handler - which can clamp longs to 32-bits: + // https://github.com/freebsd/freebsd/blob/releng/10.3/sys/kern/vfs_bio.c#L338 + // https://github.com/freebsd/freebsd/blob/releng/10.3/sys/kern/kern_sysctl.c#L1062 + return float64(*(*C.int)(unsafe.Pointer(&raw[0]))), nil + } + + return 0, fmt.Errorf( + "length of bytes received from sysctl (%d) does not match expected bytes (long: %d), (int: %d)", + len(raw), + C.sizeof_long, + C.sizeof_int, + ) + +} diff --git a/collector/sysctl_openbsd_amd64.go b/collector/sysctl_openbsd_amd64.go new file mode 100644 index 0000000000..3d8051094c --- /dev/null +++ b/collector/sysctl_openbsd_amd64.go @@ -0,0 +1,86 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + "golang.org/x/sys/unix" + "syscall" + "unsafe" +) + +func int8ToString(a []int8) string { + buf := make([]byte, len(a)) + for i, v := range a { + if byte(v) == 0 { + buf = buf[:i] + break + } + buf[i] = byte(v) + } + return string(buf) +} + +// unix._C_int +type _C_int int32 + +var _zero uintptr + +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case unix.EAGAIN: + return syscall.EAGAIN + case unix.EINVAL: + return syscall.EINVAL + case unix.ENOENT: + return syscall.ENOENT + } + return e +} + +func _sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + for { + _, _, e1 := unix.Syscall6(unix.SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + if err != unix.EINTR { + return + } + } + return +} + +func sysctl(mib []_C_int) ([]byte, error) { + n := uintptr(0) + if err := _sysctl(mib, nil, &n, nil, 0); err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + + buf := make([]byte, n) + if err := _sysctl(mib, &buf[0], &n, nil, 0); err != nil { + return nil, err + } + return buf[:n], nil +} diff --git a/collector/systemd_linux.go b/collector/systemd_linux.go index 7817421f94..495aa560d8 100644 --- a/collector/systemd_linux.go +++ b/collector/systemd_linux.go @@ -11,31 +11,71 
@@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nosystemd // +build !nosystemd package collector import ( + "errors" "fmt" + "math" "regexp" + "strconv" + "strings" + "sync" + "time" "github.com/coreos/go-systemd/dbus" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" - "gopkg.in/alecthomas/kingpin.v2" + kingpin "gopkg.in/alecthomas/kingpin.v2" +) + +const ( + // minSystemdVersionSystemState is the minimum SystemD version for availability of + // the 'SystemState' manager property and the timer property 'LastTriggerUSec' + // https://github.com/prometheus/node_exporter/issues/291 + minSystemdVersionSystemState = 212 ) var ( - unitWhitelist = kingpin.Flag("collector.systemd.unit-whitelist", "Regexp of systemd units to whitelist. Units must both match whitelist and not match blacklist to be included.").Default(".+").String() - unitBlacklist = kingpin.Flag("collector.systemd.unit-blacklist", "Regexp of systemd units to blacklist. Units must both match whitelist and not match blacklist to be included.").Default(".+\\.scope").String() - systemdPrivate = kingpin.Flag("collector.systemd.private", "Establish a private, direct connection to systemd without dbus.").Bool() + unitIncludeSet bool + unitInclude = kingpin.Flag("collector.systemd.unit-include", "Regexp of systemd units to include. Units must both match include and not match exclude to be included.").Default(".+").PreAction(func(c *kingpin.ParseContext) error { + unitIncludeSet = true + return nil + }).String() + oldUnitInclude = kingpin.Flag("collector.systemd.unit-whitelist", "DEPRECATED: Use --collector.systemd.unit-include").Hidden().String() + unitExcludeSet bool + unitExclude = kingpin.Flag("collector.systemd.unit-exclude", "Regexp of systemd units to exclude. 
Units must both match include and not match exclude to be included.").Default(".+\\.(automount|device|mount|scope|slice)").PreAction(func(c *kingpin.ParseContext) error { + unitExcludeSet = true + return nil + }).String() + oldUnitExclude = kingpin.Flag("collector.systemd.unit-blacklist", "DEPRECATED: Use collector.systemd.unit-exclude").Hidden().String() + systemdPrivate = kingpin.Flag("collector.systemd.private", "Establish a private, direct connection to systemd without dbus (Strongly discouraged since it requires root. For testing purposes only).").Hidden().Bool() + enableTaskMetrics = kingpin.Flag("collector.systemd.enable-task-metrics", "Enables service unit tasks metrics unit_tasks_current and unit_tasks_max").Bool() + enableRestartsMetrics = kingpin.Flag("collector.systemd.enable-restarts-metrics", "Enables service unit metric service_restart_total").Bool() + enableStartTimeMetrics = kingpin.Flag("collector.systemd.enable-start-time-metrics", "Enables service unit metric unit_start_time_seconds").Bool() ) type systemdCollector struct { - unitDesc *prometheus.Desc - systemRunningDesc *prometheus.Desc - unitWhitelistPattern *regexp.Regexp - unitBlacklistPattern *regexp.Regexp + unitDesc *prometheus.Desc + unitStartTimeDesc *prometheus.Desc + unitTasksCurrentDesc *prometheus.Desc + unitTasksMaxDesc *prometheus.Desc + systemRunningDesc *prometheus.Desc + summaryDesc *prometheus.Desc + nRestartsDesc *prometheus.Desc + timerLastTriggerDesc *prometheus.Desc + socketAcceptedConnectionsDesc *prometheus.Desc + socketCurrentConnectionsDesc *prometheus.Desc + socketRefusedConnectionsDesc *prometheus.Desc + systemdVersionDesc *prometheus.Desc + systemdVersion int + unitIncludePattern *regexp.Regexp + unitExcludePattern *regexp.Regexp + logger log.Logger } var unitStatesName = []string{"active", "activating", "deactivating", "inactive", "failed"} @@ -45,47 +85,203 @@ func init() { } // NewSystemdCollector returns a new Collector exposing systemd statistics. 
-func NewSystemdCollector() (Collector, error) { +func NewSystemdCollector(logger log.Logger) (Collector, error) { const subsystem = "systemd" unitDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "unit_state"), - "Systemd unit", []string{"name", "state"}, nil, + "Systemd unit", []string{"name", "state", "type"}, nil, + ) + unitStartTimeDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "unit_start_time_seconds"), + "Start time of the unit since unix epoch in seconds.", []string{"name"}, nil, + ) + unitTasksCurrentDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "unit_tasks_current"), + "Current number of tasks per Systemd unit", []string{"name"}, nil, + ) + unitTasksMaxDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "unit_tasks_max"), + "Maximum number of tasks per Systemd unit", []string{"name"}, nil, ) systemRunningDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "system_running"), "Whether the system is operational (see 'systemctl is-system-running')", nil, nil, ) - unitWhitelistPattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *unitWhitelist)) - unitBlacklistPattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *unitBlacklist)) + summaryDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "units"), + "Summary of systemd unit states", []string{"state"}, nil) + nRestartsDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "service_restart_total"), + "Service unit count of Restart triggers", []string{"name"}, nil) + timerLastTriggerDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "timer_last_trigger_seconds"), + "Seconds since epoch of last trigger.", []string{"name"}, nil) + socketAcceptedConnectionsDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "socket_accepted_connections_total"), + "Total number of accepted socket connections", 
[]string{"name"}, nil) + socketCurrentConnectionsDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "socket_current_connections"), + "Current number of socket connections", []string{"name"}, nil) + socketRefusedConnectionsDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "socket_refused_connections_total"), + "Total number of refused socket connections", []string{"name"}, nil) + systemdVersionDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "version"), + "Detected systemd version", []string{}, nil) + + if *oldUnitExclude != "" { + if !unitExcludeSet { + level.Warn(logger).Log("msg", "--collector.systemd.unit-blacklist is DEPRECATED and will be removed in 2.0.0, use --collector.systemd.unit-exclude") + *unitExclude = *oldUnitExclude + } else { + return nil, errors.New("--collector.systemd.unit-blacklist and --collector.systemd.unit-exclude are mutually exclusive") + } + } + if *oldUnitInclude != "" { + if !unitIncludeSet { + level.Warn(logger).Log("msg", "--collector.systemd.unit-whitelist is DEPRECATED and will be removed in 2.0.0, use --collector.systemd.unit-include") + *unitInclude = *oldUnitInclude + } else { + return nil, errors.New("--collector.systemd.unit-whitelist and --collector.systemd.unit-include are mutually exclusive") + } + } + level.Info(logger).Log("msg", "Parsed flag --collector.systemd.unit-include", "flag", *unitInclude) + unitIncludePattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *unitInclude)) + level.Info(logger).Log("msg", "Parsed flag --collector.systemd.unit-exclude", "flag", *unitExclude) + unitExcludePattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *unitExclude)) + + systemdVersion := getSystemdVersion(logger) + if systemdVersion < minSystemdVersionSystemState { + level.Warn(logger).Log("msg", "Detected systemd version is lower than minimum", "current", systemdVersion, "minimum", minSystemdVersionSystemState) + level.Warn(logger).Log("msg", "Some 
systemd state and timer metrics will not be available") + } return &systemdCollector{ - unitDesc: unitDesc, - systemRunningDesc: systemRunningDesc, - unitWhitelistPattern: unitWhitelistPattern, - unitBlacklistPattern: unitBlacklistPattern, + unitDesc: unitDesc, + unitStartTimeDesc: unitStartTimeDesc, + unitTasksCurrentDesc: unitTasksCurrentDesc, + unitTasksMaxDesc: unitTasksMaxDesc, + systemRunningDesc: systemRunningDesc, + summaryDesc: summaryDesc, + nRestartsDesc: nRestartsDesc, + timerLastTriggerDesc: timerLastTriggerDesc, + socketAcceptedConnectionsDesc: socketAcceptedConnectionsDesc, + socketCurrentConnectionsDesc: socketCurrentConnectionsDesc, + socketRefusedConnectionsDesc: socketRefusedConnectionsDesc, + systemdVersionDesc: systemdVersionDesc, + systemdVersion: systemdVersion, + unitIncludePattern: unitIncludePattern, + unitExcludePattern: unitExcludePattern, + logger: logger, }, nil } +// Update gathers metrics from systemd. Dbus collection is done in parallel +// to reduce wait time for responses. 
func (c *systemdCollector) Update(ch chan<- prometheus.Metric) error { - units, err := c.listUnits() + begin := time.Now() + conn, err := newSystemdDbusConn() if err != nil { - return fmt.Errorf("couldn't get units states: %s", err) + return fmt.Errorf("couldn't get dbus connection: %w", err) } - c.collectUnitStatusMetrics(ch, units) + defer conn.Close() - systemState, err := c.getSystemState() + allUnits, err := c.getAllUnits(conn) if err != nil { - return fmt.Errorf("couldn't get system state: %s", err) + return fmt.Errorf("couldn't get units: %w", err) } - c.collectSystemState(ch, systemState) + level.Debug(c.logger).Log("msg", "getAllUnits took", "duration_seconds", time.Since(begin).Seconds()) - return nil + begin = time.Now() + summary := summarizeUnits(allUnits) + c.collectSummaryMetrics(ch, summary) + level.Debug(c.logger).Log("msg", "collectSummaryMetrics took", "duration_seconds", time.Since(begin).Seconds()) + + begin = time.Now() + units := filterUnits(allUnits, c.unitIncludePattern, c.unitExcludePattern, c.logger) + level.Debug(c.logger).Log("msg", "filterUnits took", "duration_seconds", time.Since(begin).Seconds()) + + var wg sync.WaitGroup + defer wg.Wait() + + wg.Add(1) + go func() { + defer wg.Done() + begin = time.Now() + c.collectUnitStatusMetrics(conn, ch, units) + level.Debug(c.logger).Log("msg", "collectUnitStatusMetrics took", "duration_seconds", time.Since(begin).Seconds()) + }() + + if *enableStartTimeMetrics { + wg.Add(1) + go func() { + defer wg.Done() + begin = time.Now() + c.collectUnitStartTimeMetrics(conn, ch, units) + level.Debug(c.logger).Log("msg", "collectUnitStartTimeMetrics took", "duration_seconds", time.Since(begin).Seconds()) + }() + } + + if *enableTaskMetrics { + wg.Add(1) + go func() { + defer wg.Done() + begin = time.Now() + c.collectUnitTasksMetrics(conn, ch, units) + level.Debug(c.logger).Log("msg", "collectUnitTasksMetrics took", "duration_seconds", time.Since(begin).Seconds()) + }() + } + + if c.systemdVersion >= 
minSystemdVersionSystemState { + wg.Add(1) + go func() { + defer wg.Done() + begin = time.Now() + c.collectTimers(conn, ch, units) + level.Debug(c.logger).Log("msg", "collectTimers took", "duration_seconds", time.Since(begin).Seconds()) + }() + } + + wg.Add(1) + go func() { + defer wg.Done() + begin = time.Now() + c.collectSockets(conn, ch, units) + level.Debug(c.logger).Log("msg", "collectSockets took", "duration_seconds", time.Since(begin).Seconds()) + }() + + if c.systemdVersion >= minSystemdVersionSystemState { + begin = time.Now() + err = c.collectSystemState(conn, ch) + level.Debug(c.logger).Log("msg", "collectSystemState took", "duration_seconds", time.Since(begin).Seconds()) + } + + ch <- prometheus.MustNewConstMetric( + c.systemdVersionDesc, prometheus.GaugeValue, float64(c.systemdVersion)) + + return err } -func (c *systemdCollector) collectUnitStatusMetrics(ch chan<- prometheus.Metric, units []dbus.UnitStatus) { +func (c *systemdCollector) collectUnitStatusMetrics(conn *dbus.Conn, ch chan<- prometheus.Metric, units []unit) { for _, unit := range units { + serviceType := "" + if strings.HasSuffix(unit.Name, ".service") { + serviceTypeProperty, err := conn.GetUnitTypeProperty(unit.Name, "Service", "Type") + if err != nil { + level.Debug(c.logger).Log("msg", "couldn't get unit type", "unit", unit.Name, "err", err) + } else { + serviceType = serviceTypeProperty.Value.Value().(string) + } + } else if strings.HasSuffix(unit.Name, ".mount") { + serviceTypeProperty, err := conn.GetUnitTypeProperty(unit.Name, "Mount", "Type") + if err != nil { + level.Debug(c.logger).Log("msg", "couldn't get unit type", "unit", unit.Name, "err", err) + } else { + serviceType = serviceTypeProperty.Value.Value().(string) + } + } for _, stateName := range unitStatesName { isActive := 0.0 if stateName == unit.ActiveState { @@ -93,61 +289,223 @@ func (c *systemdCollector) collectUnitStatusMetrics(ch chan<- prometheus.Metric, } ch <- prometheus.MustNewConstMetric( c.unitDesc, 
prometheus.GaugeValue, isActive, - unit.Name, stateName) + unit.Name, stateName, serviceType) + } + if *enableRestartsMetrics && strings.HasSuffix(unit.Name, ".service") { + // NRestarts wasn't added until systemd 235. + restartsCount, err := conn.GetUnitTypeProperty(unit.Name, "Service", "NRestarts") + if err != nil { + level.Debug(c.logger).Log("msg", "couldn't get unit NRestarts", "unit", unit.Name, "err", err) + } else { + ch <- prometheus.MustNewConstMetric( + c.nRestartsDesc, prometheus.CounterValue, + float64(restartsCount.Value.Value().(uint32)), unit.Name) + } + } + } +} + +func (c *systemdCollector) collectSockets(conn *dbus.Conn, ch chan<- prometheus.Metric, units []unit) { + for _, unit := range units { + if !strings.HasSuffix(unit.Name, ".socket") { + continue + } + + acceptedConnectionCount, err := conn.GetUnitTypeProperty(unit.Name, "Socket", "NAccepted") + if err != nil { + level.Debug(c.logger).Log("msg", "couldn't get unit NAccepted", "unit", unit.Name, "err", err) + continue + } + ch <- prometheus.MustNewConstMetric( + c.socketAcceptedConnectionsDesc, prometheus.CounterValue, + float64(acceptedConnectionCount.Value.Value().(uint32)), unit.Name) + + currentConnectionCount, err := conn.GetUnitTypeProperty(unit.Name, "Socket", "NConnections") + if err != nil { + level.Debug(c.logger).Log("msg", "couldn't get unit NConnections", "unit", unit.Name, "err", err) + continue + } + ch <- prometheus.MustNewConstMetric( + c.socketCurrentConnectionsDesc, prometheus.GaugeValue, + float64(currentConnectionCount.Value.Value().(uint32)), unit.Name) + + // NRefused wasn't added until systemd 239. 
+ refusedConnectionCount, err := conn.GetUnitTypeProperty(unit.Name, "Socket", "NRefused") + if err != nil { + // Ignored: NRefused is unavailable on systemd < 239, so a lookup failure just means no metric. + } else { + ch <- prometheus.MustNewConstMetric( + c.socketRefusedConnectionsDesc, prometheus.GaugeValue, + float64(refusedConnectionCount.Value.Value().(uint32)), unit.Name) } } } -func (c *systemdCollector) collectSystemState(ch chan<- prometheus.Metric, systemState string) { +func (c *systemdCollector) collectUnitStartTimeMetrics(conn *dbus.Conn, ch chan<- prometheus.Metric, units []unit) { + var startTimeUsec uint64 + + for _, unit := range units { + if unit.ActiveState != "active" { + startTimeUsec = 0 + } else { + timestampValue, err := conn.GetUnitProperty(unit.Name, "ActiveEnterTimestamp") + if err != nil { + level.Debug(c.logger).Log("msg", "couldn't get unit StartTimeUsec", "unit", unit.Name, "err", err) + continue + } + startTimeUsec = timestampValue.Value.Value().(uint64) + } + + ch <- prometheus.MustNewConstMetric( + c.unitStartTimeDesc, prometheus.GaugeValue, + float64(startTimeUsec)/1e6, unit.Name) + } +} + +func (c *systemdCollector) collectUnitTasksMetrics(conn *dbus.Conn, ch chan<- prometheus.Metric, units []unit) { + var val uint64 + for _, unit := range units { + if strings.HasSuffix(unit.Name, ".service") { + tasksCurrentCount, err := conn.GetUnitTypeProperty(unit.Name, "Service", "TasksCurrent") + if err != nil { + level.Debug(c.logger).Log("msg", "couldn't get unit TasksCurrent", "unit", unit.Name, "err", err) + } else { + val = tasksCurrentCount.Value.Value().(uint64) + // Don't set tasksCurrent if dbus reports MaxUint64. 
+ if val != math.MaxUint64 { + ch <- prometheus.MustNewConstMetric( + c.unitTasksCurrentDesc, prometheus.GaugeValue, + float64(val), unit.Name) + } + } + tasksMaxCount, err := conn.GetUnitTypeProperty(unit.Name, "Service", "TasksMax") + if err != nil { + level.Debug(c.logger).Log("msg", "couldn't get unit TasksMax", "unit", unit.Name, "err", err) + } else { + val = tasksMaxCount.Value.Value().(uint64) + // Don't set tasksMax if dbus reports MaxUint64. + if val != math.MaxUint64 { + ch <- prometheus.MustNewConstMetric( + c.unitTasksMaxDesc, prometheus.GaugeValue, + float64(val), unit.Name) + } + } + } + } +} + +func (c *systemdCollector) collectTimers(conn *dbus.Conn, ch chan<- prometheus.Metric, units []unit) { + for _, unit := range units { + if !strings.HasSuffix(unit.Name, ".timer") { + continue + } + + lastTriggerValue, err := conn.GetUnitTypeProperty(unit.Name, "Timer", "LastTriggerUSec") + if err != nil { + level.Debug(c.logger).Log("msg", "couldn't get unit LastTriggerUSec", "unit", unit.Name, "err", err) + continue + } + + ch <- prometheus.MustNewConstMetric( + c.timerLastTriggerDesc, prometheus.GaugeValue, + float64(lastTriggerValue.Value.Value().(uint64))/1e6, unit.Name) + } +} + +func (c *systemdCollector) collectSummaryMetrics(ch chan<- prometheus.Metric, summary map[string]float64) { + for stateName, count := range summary { + ch <- prometheus.MustNewConstMetric( + c.summaryDesc, prometheus.GaugeValue, count, stateName) + } +} + +func (c *systemdCollector) collectSystemState(conn *dbus.Conn, ch chan<- prometheus.Metric) error { + systemState, err := conn.GetManagerProperty("SystemState") + if err != nil { + return fmt.Errorf("couldn't get system state: %w", err) + } isSystemRunning := 0.0 if systemState == `"running"` { isSystemRunning = 1.0 } ch <- prometheus.MustNewConstMetric(c.systemRunningDesc, prometheus.GaugeValue, isSystemRunning) + return nil } -func (c *systemdCollector) newDbus() (*dbus.Conn, error) { +func newSystemdDbusConn() 
(*dbus.Conn, error) { if *systemdPrivate { return dbus.NewSystemdConnection() } return dbus.New() } -func (c *systemdCollector) listUnits() ([]dbus.UnitStatus, error) { - conn, err := c.newDbus() +type unit struct { + dbus.UnitStatus +} + +func (c *systemdCollector) getAllUnits(conn *dbus.Conn) ([]unit, error) { + allUnits, err := conn.ListUnits() if err != nil { - return nil, fmt.Errorf("couldn't get dbus connection: %s", err) + return nil, err } - allUnits, err := conn.ListUnits() - conn.Close() - if err != nil { - return []dbus.UnitStatus{}, err + result := make([]unit, 0, len(allUnits)) + for _, status := range allUnits { + unit := unit{ + UnitStatus: status, + } + result = append(result, unit) } - units := filterUnits(allUnits, c.unitWhitelistPattern, c.unitBlacklistPattern) - return units, nil + return result, nil } -func filterUnits(units []dbus.UnitStatus, whitelistPattern, blacklistPattern *regexp.Regexp) []dbus.UnitStatus { - filtered := make([]dbus.UnitStatus, 0, len(units)) +func summarizeUnits(units []unit) map[string]float64 { + summarized := make(map[string]float64) + + for _, unitStateName := range unitStatesName { + summarized[unitStateName] = 0.0 + } + for _, unit := range units { - if whitelistPattern.MatchString(unit.Name) && !blacklistPattern.MatchString(unit.Name) { + summarized[unit.ActiveState] += 1.0 + } + + return summarized +} + +func filterUnits(units []unit, includePattern, excludePattern *regexp.Regexp, logger log.Logger) []unit { + filtered := make([]unit, 0, len(units)) + for _, unit := range units { + if includePattern.MatchString(unit.Name) && !excludePattern.MatchString(unit.Name) && unit.LoadState == "loaded" { + level.Debug(logger).Log("msg", "Adding unit", "unit", unit.Name) filtered = append(filtered, unit) } else { - log.Debugf("Ignoring unit: %s", unit.Name) + level.Debug(logger).Log("msg", "Ignoring unit", "unit", unit.Name) } } return filtered } -func (c *systemdCollector) getSystemState() (state string, err error) { - 
conn, err := c.newDbus() +func getSystemdVersion(logger log.Logger) int { + conn, err := newSystemdDbusConn() + if err != nil { + level.Warn(logger).Log("msg", "Unable to get systemd dbus connection, defaulting systemd version to 0", "err", err) + return 0 + } + defer conn.Close() + version, err := conn.GetManagerProperty("Version") + if err != nil { + level.Warn(logger).Log("msg", "Unable to get systemd version property, defaulting to 0") + return 0 + } + re := regexp.MustCompile(`[0-9][0-9][0-9]`) + version = re.FindString(version) + v, err := strconv.Atoi(version) if err != nil { - return "", fmt.Errorf("couldn't get dbus connection: %s", err) + level.Warn(logger).Log("msg", "Got invalid systemd version", "version", version) + return 0 } - state, err = conn.GetManagerProperty("SystemState") - conn.Close() - return state, err + return v } diff --git a/collector/systemd_linux_test.go b/collector/systemd_linux_test.go index 2d9bdf662f..1b55af7c3a 100644 --- a/collector/systemd_linux_test.go +++ b/collector/systemd_linux_test.go @@ -14,110 +14,122 @@ package collector import ( + "github.com/go-kit/log" "regexp" "testing" "github.com/coreos/go-systemd/dbus" - "github.com/prometheus/client_golang/prometheus" ) // Creates mock UnitLists -func getUnitListFixtures() [][]dbus.UnitStatus { - fixture1 := []dbus.UnitStatus{ +func getUnitListFixtures() [][]unit { + fixture1 := []unit{ { - Name: "foo", - Description: "foo desc", - LoadState: "loaded", - ActiveState: "active", - SubState: "running", - Followed: "", - Path: "/org/freedesktop/systemd1/unit/foo", - JobId: 0, - JobType: "", - JobPath: "/", + UnitStatus: dbus.UnitStatus{ + Name: "foo", + Description: "foo desc", + LoadState: "loaded", + ActiveState: "active", + SubState: "running", + Followed: "", + Path: "/org/freedesktop/systemd1/unit/foo", + JobId: 0, + JobType: "", + JobPath: "/", + }, }, { - Name: "bar", - Description: "bar desc", - LoadState: "not-found", - ActiveState: "inactive", - SubState: "dead", - 
Followed: "", - Path: "/org/freedesktop/systemd1/unit/bar", - JobId: 0, - JobType: "", - JobPath: "/", + UnitStatus: dbus.UnitStatus{ + Name: "bar", + Description: "bar desc", + LoadState: "not-found", + ActiveState: "inactive", + SubState: "dead", + Followed: "", + Path: "/org/freedesktop/systemd1/unit/bar", + JobId: 0, + JobType: "", + JobPath: "/", + }, }, { - Name: "foobar", - Description: "bar desc", - LoadState: "not-found", - ActiveState: "inactive", - SubState: "dead", - Followed: "", - Path: "/org/freedesktop/systemd1/unit/bar", - JobId: 0, - JobType: "", - JobPath: "/", + UnitStatus: dbus.UnitStatus{ + Name: "foobar", + Description: "bar desc", + LoadState: "not-found", + ActiveState: "inactive", + SubState: "dead", + Followed: "", + Path: "/org/freedesktop/systemd1/unit/bar", + JobId: 0, + JobType: "", + JobPath: "/", + }, }, { - Name: "baz", - Description: "bar desc", - LoadState: "not-found", - ActiveState: "inactive", - SubState: "dead", - Followed: "", - Path: "/org/freedesktop/systemd1/unit/bar", - JobId: 0, - JobType: "", - JobPath: "/", + UnitStatus: dbus.UnitStatus{ + Name: "baz", + Description: "bar desc", + LoadState: "not-found", + ActiveState: "inactive", + SubState: "dead", + Followed: "", + Path: "/org/freedesktop/systemd1/unit/bar", + JobId: 0, + JobType: "", + JobPath: "/", + }, }, } - fixture2 := []dbus.UnitStatus{} + fixture2 := []unit{} - return [][]dbus.UnitStatus{fixture1, fixture2} -} - -func TestSystemdCollectorDoesntCrash(t *testing.T) { - c, err := NewSystemdCollector() - if err != nil { - t.Fatal(err) - } - sink := make(chan prometheus.Metric) - go func() { - for { - <-sink - } - }() - - fixtures := getUnitListFixtures() - collector := (c).(*systemdCollector) - for _, units := range fixtures { - collector.collectUnitStatusMetrics(sink, units) - } + return [][]unit{fixture1, fixture2} } func TestSystemdIgnoreFilter(t *testing.T) { fixtures := getUnitListFixtures() - whitelistPattern := regexp.MustCompile("foo") - blacklistPattern 
:= regexp.MustCompile("bar") - filtered := filterUnits(fixtures[0], whitelistPattern, blacklistPattern) + includePattern := regexp.MustCompile("^foo$") + excludePattern := regexp.MustCompile("^bar$") + filtered := filterUnits(fixtures[0], includePattern, excludePattern, log.NewNopLogger()) for _, unit := range filtered { - if blacklistPattern.MatchString(unit.Name) || !whitelistPattern.MatchString(unit.Name) { + if excludePattern.MatchString(unit.Name) || !includePattern.MatchString(unit.Name) { t.Error(unit.Name, "should not be in the filtered list") } } } func TestSystemdIgnoreFilterDefaultKeepsAll(t *testing.T) { - c, err := NewSystemdCollector() + logger := log.NewNopLogger() + c, err := NewSystemdCollector(logger) if err != nil { t.Fatal(err) } fixtures := getUnitListFixtures() collector := c.(*systemdCollector) - filtered := filterUnits(fixtures[0], collector.unitWhitelistPattern, collector.unitBlacklistPattern) - if len(filtered) != len(fixtures[0]) { + filtered := filterUnits(fixtures[0], collector.unitIncludePattern, collector.unitExcludePattern, logger) + // Adjust fixtures by 3 "not-found" units. + if len(filtered) != len(fixtures[0])-3 { t.Error("Default filters removed units") } } + +func TestSystemdSummary(t *testing.T) { + fixtures := getUnitListFixtures() + summary := summarizeUnits(fixtures[0]) + + for _, state := range unitStatesName { + if state == "inactive" { + testSummaryHelper(t, state, summary[state], 3.0) + } else if state == "active" { + testSummaryHelper(t, state, summary[state], 1.0) + } else { + testSummaryHelper(t, state, summary[state], 0.0) + } + } +} + +func testSummaryHelper(t *testing.T, state string, actual float64, expected float64) { + if actual != expected { + t.Errorf("Summary mode didn't count %s jobs correctly. 
Actual: %f, expected: %f", state, actual, expected) + } +} diff --git a/collector/tapestats_linux.go b/collector/tapestats_linux.go new file mode 100644 index 0000000000..441ac61073 --- /dev/null +++ b/collector/tapestats_linux.go @@ -0,0 +1,152 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !notapestats +// +build !notapestats + +package collector + +import ( + "fmt" + "os" + "regexp" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" + "gopkg.in/alecthomas/kingpin.v2" +) + +var ( + ignoredTapeDevices = kingpin.Flag("collector.tapestats.ignored-devices", "Regexp of devices to ignore for tapestats.").Default("^$").String() +) + +type tapestatsCollector struct { + ignoredDevicesPattern *regexp.Regexp + ioNow *prometheus.Desc + ioTimeSeconds *prometheus.Desc + othersCompletedTotal *prometheus.Desc + readByteTotal *prometheus.Desc + readsCompletedTotal *prometheus.Desc + readTimeSeconds *prometheus.Desc + writtenByteTotal *prometheus.Desc + writesCompletedTotal *prometheus.Desc + writeTimeSeconds *prometheus.Desc + residualTotal *prometheus.Desc + fs sysfs.FS + logger log.Logger +} + +func init() { + registerCollector("tapestats", defaultEnabled, NewTapestatsCollector) +} + +// NewTapestatsCollector returns a new Collector exposing tape device stats. 
+// Docs from https://www.kernel.org/doc/html/latest/scsi/st.html#sysfs-and-statistics-for-tape-devices +func NewTapestatsCollector(logger log.Logger) (Collector, error) { + var tapeLabelNames = []string{"device"} + + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + + tapeSubsystem := "tape" + + return &tapestatsCollector{ + ignoredDevicesPattern: regexp.MustCompile(*ignoredTapeDevices), + + ioNow: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "io_now"), + "The number of I/Os currently outstanding to this device.", + tapeLabelNames, nil, + ), + ioTimeSeconds: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "io_time_seconds_total"), + "The amount of time spent waiting for all I/O to complete (including read and write). This includes tape movement commands such as seeking between file or set marks and implicit tape movement such as when rewind on close tape devices are used.", + tapeLabelNames, nil, + ), + othersCompletedTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "io_others_total"), + "The number of I/Os issued to the tape drive other than read or write commands. 
The time taken to complete these commands uses the following calculation io_time_seconds_total-read_time_seconds_total-write_time_seconds_total", + tapeLabelNames, nil, + ), + readByteTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "read_bytes_total"), + "The number of bytes read from the tape drive.", + tapeLabelNames, nil, + ), + readsCompletedTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "reads_completed_total"), + "The number of read requests issued to the tape drive.", + tapeLabelNames, nil, + ), + readTimeSeconds: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "read_time_seconds_total"), + "The amount of time spent waiting for read requests to complete.", + tapeLabelNames, nil, + ), + writtenByteTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "written_bytes_total"), + "The number of bytes written to the tape drive.", + tapeLabelNames, nil, + ), + writesCompletedTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "writes_completed_total"), + "The number of write requests issued to the tape drive.", + tapeLabelNames, nil, + ), + writeTimeSeconds: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "write_time_seconds_total"), + "The amount of time spent waiting for write requests to complete.", + tapeLabelNames, nil, + ), + residualTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, tapeSubsystem, "residual_total"), + "The number of times during a read or write we found the residual amount to be non-zero. This should mean that a program is issuing a read larger thean the block size on tape. 
For write not all data made it to tape.", + tapeLabelNames, nil, + ), + logger: logger, + fs: fs, + }, nil +} + +func (c *tapestatsCollector) Update(ch chan<- prometheus.Metric) error { + tapes, err := c.fs.SCSITapeClass() + if err != nil { + if os.IsNotExist(err) { + level.Debug(c.logger).Log("msg", "scsi_tape stats not found, skipping") + return ErrNoData + } + return fmt.Errorf("error obtaining SCSITape class info: %s", err) + } + + for _, tape := range tapes { + if c.ignoredDevicesPattern.MatchString(tape.Name) { + level.Debug(c.logger).Log("msg", "Ignoring device", "device", tape.Name) + continue + } + ch <- prometheus.MustNewConstMetric(c.ioNow, prometheus.GaugeValue, float64(tape.Counters.InFlight), tape.Name) + ch <- prometheus.MustNewConstMetric(c.ioTimeSeconds, prometheus.CounterValue, float64(tape.Counters.IoNs)*0.000000001, tape.Name) + ch <- prometheus.MustNewConstMetric(c.othersCompletedTotal, prometheus.CounterValue, float64(tape.Counters.OtherCnt), tape.Name) + ch <- prometheus.MustNewConstMetric(c.readByteTotal, prometheus.CounterValue, float64(tape.Counters.ReadByteCnt), tape.Name) + ch <- prometheus.MustNewConstMetric(c.readsCompletedTotal, prometheus.CounterValue, float64(tape.Counters.ReadCnt), tape.Name) + ch <- prometheus.MustNewConstMetric(c.readTimeSeconds, prometheus.CounterValue, float64(tape.Counters.ReadNs)*0.000000001, tape.Name) + ch <- prometheus.MustNewConstMetric(c.residualTotal, prometheus.CounterValue, float64(tape.Counters.ResidCnt), tape.Name) + ch <- prometheus.MustNewConstMetric(c.writtenByteTotal, prometheus.CounterValue, float64(tape.Counters.WriteByteCnt), tape.Name) + ch <- prometheus.MustNewConstMetric(c.writesCompletedTotal, prometheus.CounterValue, float64(tape.Counters.WriteCnt), tape.Name) + ch <- prometheus.MustNewConstMetric(c.writeTimeSeconds, prometheus.CounterValue, float64(tape.Counters.WriteNs)*0.000000001, tape.Name) + } + return nil +} diff --git a/collector/tcpstat_linux.go b/collector/tcpstat_linux.go 
index 0a5f59f174..47c3f3e076 100644 --- a/collector/tcpstat_linux.go +++ b/collector/tcpstat_linux.go @@ -11,18 +11,20 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !notcpstat // +build !notcpstat package collector import ( - "bufio" "fmt" "io" + "io/ioutil" "os" "strconv" "strings" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -51,10 +53,15 @@ const ( tcpListen // TCP_CLOSING tcpClosing + // TCP_RX_BUFFER + tcpRxQueuedBytes + // TCP_TX_BUFFER + tcpTxQueuedBytes ) type tcpStatCollector struct { - desc typedDesc + desc typedDesc + logger log.Logger } func init() { @@ -62,20 +69,21 @@ func init() { } // NewTCPStatCollector returns a new Collector exposing network stats. -func NewTCPStatCollector() (Collector, error) { +func NewTCPStatCollector(logger log.Logger) (Collector, error) { return &tcpStatCollector{ desc: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "tcp", "connection_states"), "Number of connection states.", []string{"state"}, nil, ), prometheus.GaugeValue}, + logger: logger, }, nil } func (c *tcpStatCollector) Update(ch chan<- prometheus.Metric) error { tcpStats, err := getTCPStats(procFilePath("net/tcp")) if err != nil { - return fmt.Errorf("couldn't get tcpstats: %s", err) + return fmt.Errorf("couldn't get tcpstats: %w", err) } // if enabled ipv6 system @@ -83,7 +91,7 @@ func (c *tcpStatCollector) Update(ch chan<- prometheus.Metric) error { if _, hasIPv6 := os.Stat(tcp6File); hasIPv6 == nil { tcp6Stats, err := getTCPStats(tcp6File) if err != nil { - return fmt.Errorf("couldn't get tcp6stats: %s", err) + return fmt.Errorf("couldn't get tcp6stats: %w", err) } for st, value := range tcp6Stats { @@ -108,28 +116,48 @@ func getTCPStats(statsFile string) (map[tcpConnectionState]float64, error) { } func parseTCPStats(r io.Reader) (map[tcpConnectionState]float64, error) { - var ( - tcpStats = map[tcpConnectionState]float64{} - scanner = 
bufio.NewScanner(r) - ) + tcpStats := map[tcpConnectionState]float64{} + contents, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } - for scanner.Scan() { - parts := strings.Fields(scanner.Text()) + for _, line := range strings.Split(string(contents), "\n")[1:] { + parts := strings.Fields(line) if len(parts) == 0 { continue } - if strings.HasPrefix(parts[0], "sl") { - continue + if len(parts) < 5 { + return nil, fmt.Errorf("invalid TCP stats line: %q", line) + } + + qu := strings.Split(parts[4], ":") + if len(qu) < 2 { + return nil, fmt.Errorf("cannot parse tx_queues and rx_queues: %q", line) + } + + tx, err := strconv.ParseUint(qu[0], 16, 64) + if err != nil { + return nil, err + } + tcpStats[tcpConnectionState(tcpTxQueuedBytes)] += float64(tx) + + rx, err := strconv.ParseUint(qu[1], 16, 64) + if err != nil { + return nil, err } + tcpStats[tcpConnectionState(tcpRxQueuedBytes)] += float64(rx) + st, err := strconv.ParseInt(parts[3], 16, 8) if err != nil { return nil, err } tcpStats[tcpConnectionState(st)]++ + } - return tcpStats, scanner.Err() + return tcpStats, nil } func (st tcpConnectionState) String() string { @@ -156,6 +184,10 @@ func (st tcpConnectionState) String() string { return "listen" case tcpClosing: return "closing" + case tcpRxQueuedBytes: + return "rx_queued_bytes" + case tcpTxQueuedBytes: + return "tx_queued_bytes" default: return "unknown" } diff --git a/collector/tcpstat_linux_test.go b/collector/tcpstat_linux_test.go index 806ca223a2..b609b84679 100644 --- a/collector/tcpstat_linux_test.go +++ b/collector/tcpstat_linux_test.go @@ -15,10 +15,58 @@ package collector import ( "os" + "strings" "testing" ) +func Test_parseTCPStatsError(t *testing.T) { + tests := []struct { + name string + in string + }{ + { + name: "too few fields", + in: "sl local_address\n 0: 00000000:0016", + }, + { + name: "missing colon in tx-rx field", + in: "sl local_address rem_address st tx_queue rx_queue\n" + + " 1: 0F02000A:0016 0202000A:8B6B 01 
0000000000000001", + }, + { + name: "tx parsing issue", + in: "sl local_address rem_address st tx_queue rx_queue\n" + + " 1: 0F02000A:0016 0202000A:8B6B 01 0000000x:00000001", + }, + { + name: "rx parsing issue", + in: "sl local_address rem_address st tx_queue rx_queue\n" + + " 1: 0F02000A:0016 0202000A:8B6B 01 00000000:0000000x", + }, + { + name: "state parsing issue", + in: "sl local_address rem_address st tx_queue rx_queue\n" + + " 1: 0F02000A:0016 0202000A:8B6B 0H 00000000:00000001", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if _, err := parseTCPStats(strings.NewReader(tt.in)); err == nil { + t.Fatal("expected an error, but none occurred") + } + }) + } +} + func TestTCPStat(t *testing.T) { + + noFile, _ := os.Open("follow the white rabbit") + defer noFile.Close() + + if _, err := parseTCPStats(noFile); err == nil { + t.Fatal("expected an error, but none occurred") + } + file, err := os.Open("fixtures/proc/net/tcpstat") if err != nil { t.Fatal(err) @@ -37,4 +85,39 @@ func TestTCPStat(t *testing.T) { if want, got := 1, int(tcpStats[tcpListen]); want != got { t.Errorf("want tcpstat number of listen state %d, got %d", want, got) } + + if want, got := 42, int(tcpStats[tcpTxQueuedBytes]); want != got { + t.Errorf("want tcpstat number of bytes in tx queue %d, got %d", want, got) + } + if want, got := 1, int(tcpStats[tcpRxQueuedBytes]); want != got { + t.Errorf("want tcpstat number of bytes in rx queue %d, got %d", want, got) + } + +} + +func Test_getTCPStats(t *testing.T) { + type args struct { + statsFile string + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "file not found", + args: args{statsFile: "somewhere over the rainbow"}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := getTCPStats(tt.args.statsFile) + if (err != nil) != tt.wantErr { + t.Errorf("getTCPStats() error = %v, wantErr %v", err, tt.wantErr) + return + } + // other cases 
are covered by TestTCPStat() + }) + } } diff --git a/collector/textfile.go b/collector/textfile.go index 4428479164..71fc0b5e3a 100644 --- a/collector/textfile.go +++ b/collector/textfile.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !notextfile // +build !notextfile package collector @@ -24,20 +25,29 @@ import ( "strings" "time" - "github.com/golang/protobuf/proto" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" - "github.com/prometheus/common/log" - "gopkg.in/alecthomas/kingpin.v2" + kingpin "gopkg.in/alecthomas/kingpin.v2" ) var ( textFileDirectory = kingpin.Flag("collector.textfile.directory", "Directory to read text files with metrics from.").Default("").String() + mtimeDesc = prometheus.NewDesc( + "node_textfile_mtime_seconds", + "Unixtime mtime of textfiles successfully read.", + []string{"file"}, + nil, + ) ) type textFileCollector struct { path string + // Only set for testing to get predictable output. + mtime *float64 + logger log.Logger } func init() { @@ -46,114 +56,256 @@ func init() { // NewTextFileCollector returns a new Collector exposing metrics read from files // in the given textfile directory. -func NewTextFileCollector() (Collector, error) { +func NewTextFileCollector(logger log.Logger) (Collector, error) { c := &textFileCollector{ - path: *textFileDirectory, + path: *textFileDirectory, + logger: logger, } - - if c.path == "" { - // This collector is enabled by default, so do not fail if - // the flag is not passed. 
- log.Infof("No directory specified, see --collector.textfile.directory") - } else { - prometheus.DefaultGatherer = prometheus.Gatherers{ - prometheus.DefaultGatherer, - prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { return c.parseTextFiles(), nil }), - } - } - return c, nil } -// Update implements the Collector interface. -func (c *textFileCollector) Update(ch chan<- prometheus.Metric) error { - return nil +type metricWrapper struct { + prometheus.Metric + ts *int64 +} + +func (mw *metricWrapper) Write(m *dto.Metric) error { + err := mw.Metric.Write(m) + m.TimestampMs = mw.ts + return err } -func (c *textFileCollector) parseTextFiles() []*dto.MetricFamily { - error := 0.0 - var metricFamilies []*dto.MetricFamily - mtimes := map[string]time.Time{} +func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric, logger log.Logger) { + var valType prometheus.ValueType + var val float64 - // Iterate over files and accumulate their metrics. - files, err := ioutil.ReadDir(c.path) - if err != nil && c.path != "" { - log.Errorf("Error reading textfile collector directory %s: %s", c.path, err) - error = 1.0 - } - for _, f := range files { - if !strings.HasSuffix(f.Name(), ".prom") { - continue + allLabelNames := map[string]struct{}{} + for _, metric := range metricFamily.Metric { + labels := metric.GetLabel() + for _, label := range labels { + if _, ok := allLabelNames[label.GetName()]; !ok { + allLabelNames[label.GetName()] = struct{}{} + } } - path := filepath.Join(c.path, f.Name()) - file, err := os.Open(path) - if err != nil { - log.Errorf("Error opening %s: %v", path, err) - error = 1.0 - continue + } + + for _, metric := range metricFamily.Metric { + // if metric.TimestampMs != nil { + // level.Warn(logger).Log("msg", "Ignoring unsupported custom timestamp on textfile collector metric", "metric", metric) + // } + + labels := metric.GetLabel() + var names []string + var values []string + for _, label := range labels { + names = 
append(names, label.GetName()) + values = append(values, label.GetValue()) } - var parser expfmt.TextParser - parsedFamilies, err := parser.TextToMetricFamilies(file) - file.Close() - if err != nil { - log.Errorf("Error parsing %s: %v", path, err) - error = 1.0 - continue + + for k := range allLabelNames { + present := false + for _, name := range names { + if k == name { + present = true + break + } + } + if !present { + names = append(names, k) + values = append(values, "") + } } - // Only set this once it has been parsed, so that - // a failure does not appear fresh. - mtimes[f.Name()] = f.ModTime() - for _, mf := range parsedFamilies { - if mf.Help == nil { - help := fmt.Sprintf("Metric read from %s", path) - mf.Help = &help + + metricType := metricFamily.GetType() + switch metricType { + case dto.MetricType_COUNTER: + valType = prometheus.CounterValue + val = metric.Counter.GetValue() + + case dto.MetricType_GAUGE: + valType = prometheus.GaugeValue + val = metric.Gauge.GetValue() + + case dto.MetricType_UNTYPED: + valType = prometheus.UntypedValue + val = metric.Untyped.GetValue() + + case dto.MetricType_SUMMARY: + quantiles := map[float64]float64{} + for _, q := range metric.Summary.Quantile { + quantiles[q.GetQuantile()] = q.GetValue() + } + wrapper := &metricWrapper{prometheus.MustNewConstSummary( + prometheus.NewDesc( + *metricFamily.Name, + metricFamily.GetHelp(), + names, nil, + ), + metric.Summary.GetSampleCount(), + metric.Summary.GetSampleSum(), + quantiles, values..., + ), metric.TimestampMs} + ch <- wrapper + case dto.MetricType_HISTOGRAM: + buckets := map[float64]uint64{} + for _, b := range metric.Histogram.Bucket { + buckets[b.GetUpperBound()] = b.GetCumulativeCount() } - metricFamilies = append(metricFamilies, mf) + wrapper := &metricWrapper{prometheus.MustNewConstHistogram( + prometheus.NewDesc( + *metricFamily.Name, + metricFamily.GetHelp(), + names, nil, + ), + metric.Histogram.GetSampleCount(), + metric.Histogram.GetSampleSum(), + buckets, 
values..., + ), metric.TimestampMs} + ch <- wrapper + default: + panic("unknown metric type") } + if metricType == dto.MetricType_GAUGE || metricType == dto.MetricType_COUNTER || metricType == dto.MetricType_UNTYPED { + wrapper := &metricWrapper{prometheus.MustNewConstMetric( + prometheus.NewDesc( + *metricFamily.Name, + metricFamily.GetHelp(), + names, nil, + ), + valType, val, values..., + ), metric.TimestampMs} + ch <- wrapper + } + } +} + +func (c *textFileCollector) exportMTimes(mtimes map[string]time.Time, ch chan<- prometheus.Metric) { + if len(mtimes) == 0 { + return } // Export the mtimes of the successful files. - if len(mtimes) > 0 { - mtimeMetricFamily := dto.MetricFamily{ - Name: proto.String("node_textfile_mtime"), - Help: proto.String("Unixtime mtime of textfiles successfully read."), - Type: dto.MetricType_GAUGE.Enum(), - Metric: []*dto.Metric{}, + // Sorting is needed for predictable output comparison in tests. + filepaths := make([]string, 0, len(mtimes)) + for path := range mtimes { + filepaths = append(filepaths, path) + } + sort.Strings(filepaths) + + for _, path := range filepaths { + mtime := float64(mtimes[path].UnixNano() / 1e9) + if c.mtime != nil { + mtime = *c.mtime } + ch <- prometheus.MustNewConstMetric(mtimeDesc, prometheus.GaugeValue, mtime, path) + } +} + +// Update implements the Collector interface. +func (c *textFileCollector) Update(ch chan<- prometheus.Metric) error { + // Iterate over files and accumulate their metrics, but also track any + // parsing errors so an error metric can be reported. + var errored bool + + paths, err := filepath.Glob(c.path) + if err != nil || len(paths) == 0 { + // not glob or not accessible path either way assume single + // directory and let ioutil.ReadDir handle it + paths = []string{c.path} + } - // Sorting is needed for predictable output comparison in tests. 
- filenames := make([]string, 0, len(mtimes)) - for filename := range mtimes { - filenames = append(filenames, filename) + mtimes := make(map[string]time.Time) + for _, path := range paths { + files, err := ioutil.ReadDir(path) + if err != nil && path != "" { + errored = true + level.Error(c.logger).Log("msg", "failed to read textfile collector directory", "path", path, "err", err) } - sort.Strings(filenames) - - for _, filename := range filenames { - mtimeMetricFamily.Metric = append(mtimeMetricFamily.Metric, - &dto.Metric{ - Label: []*dto.LabelPair{ - { - Name: proto.String("file"), - Value: proto.String(filename), - }, - }, - Gauge: &dto.Gauge{Value: proto.Float64(float64(mtimes[filename].UnixNano()) / 1e9)}, - }, - ) + + for _, f := range files { + if !strings.HasSuffix(f.Name(), ".prom") { + continue + } + + mtime, err := c.processFile(path, f.Name(), ch) + if err != nil { + errored = true + level.Error(c.logger).Log("msg", "failed to collect textfile data", "file", f.Name(), "err", err) + continue + } + + mtimes[filepath.Join(path, f.Name())] = *mtime } - metricFamilies = append(metricFamilies, &mtimeMetricFamily) } + c.exportMTimes(mtimes, ch) + // Export if there were errors. - metricFamilies = append(metricFamilies, &dto.MetricFamily{ - Name: proto.String("node_textfile_scrape_error"), - Help: proto.String("1 if there was an error opening or reading a file, 0 otherwise"), - Type: dto.MetricType_GAUGE.Enum(), - Metric: []*dto.Metric{ - { - Gauge: &dto.Gauge{Value: &error}, - }, - }, - }) - - return metricFamilies + var errVal float64 + if errored { + errVal = 1.0 + } + + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + "node_textfile_scrape_error", + "1 if there was an error opening or reading a file, 0 otherwise", + nil, nil, + ), + prometheus.GaugeValue, errVal, + ) + + return nil +} + +// processFile processes a single file, returning its modification time on success. 
+func (c *textFileCollector) processFile(dir, name string, ch chan<- prometheus.Metric) (*time.Time, error) { + path := filepath.Join(dir, name) + f, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open textfile data file %q: %w", path, err) + } + defer f.Close() + + var parser expfmt.TextParser + families, err := parser.TextToMetricFamilies(f) + if err != nil { + return nil, fmt.Errorf("failed to parse textfile data from %q: %w", path, err) + } + + // if hasTimestamps(families) { + // return nil, fmt.Errorf("textfile %q contains unsupported client-side timestamps, skipping entire file", path) + // } + + for _, mf := range families { + if mf.Help == nil { + help := fmt.Sprintf("Metric read from %s", path) + mf.Help = &help + } + } + + for _, mf := range families { + convertMetricFamily(mf, ch, c.logger) + } + + // Only stat the file once it has been parsed and validated, so that + // a failure does not appear fresh. + stat, err := f.Stat() + if err != nil { + return nil, fmt.Errorf("failed to stat %q: %w", path, err) + } + + t := stat.ModTime() + return &t, nil +} + +// hasTimestamps returns true when metrics contain unsupported timestamps. 
+func hasTimestamps(parsedFamilies map[string]*dto.MetricFamily) bool { + for _, mf := range parsedFamilies { + for _, m := range mf.Metric { + if m.TimestampMs != nil { + return true + } + } + } + return false } diff --git a/collector/textfile_test.go b/collector/textfile_test.go index c57446a311..ec3f748d44 100644 --- a/collector/textfile_test.go +++ b/collector/textfile_test.go @@ -14,17 +14,39 @@ package collector import ( + "fmt" "io/ioutil" - "sort" - "strings" + "net/http" + "net/http/httptest" "testing" - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/log" + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/prometheus/common/promlog" + "github.com/prometheus/common/promlog/flag" "gopkg.in/alecthomas/kingpin.v2" ) -func TestParseTextFiles(t *testing.T) { +type collectorAdapter struct { + Collector +} + +// Describe implements the prometheus.Collector interface. +func (a collectorAdapter) Describe(ch chan<- *prometheus.Desc) { + // We have to send *some* metric in Describe, but we don't know which ones + // we're going to get, so just send a dummy metric. + ch <- prometheus.NewDesc("dummy_metric", "Dummy metric.", nil, nil) +} + +// Collect implements the prometheus.Collector interface. 
+func (a collectorAdapter) Collect(ch chan<- prometheus.Metric) { + if err := a.Update(ch); err != nil { + panic(fmt.Sprintf("failed to update collector: %v", err)) + } +} + +func TestTextfileCollector(t *testing.T) { tests := []struct { path string out string @@ -41,32 +63,58 @@ func TestParseTextFiles(t *testing.T) { path: "fixtures/textfile/nonexistent_path", out: "fixtures/textfile/nonexistent_path.out", }, + { + path: "fixtures/textfile/different_metric_types", + out: "fixtures/textfile/different_metric_types.out", + }, + { + path: "fixtures/textfile/inconsistent_metrics", + out: "fixtures/textfile/inconsistent_metrics.out", + }, + { + path: "fixtures/textfile/histogram", + out: "fixtures/textfile/histogram.out", + }, + { + path: "fixtures/textfile/histogram_extra_dimension", + out: "fixtures/textfile/histogram_extra_dimension.out", + }, + { + path: "fixtures/textfile/summary", + out: "fixtures/textfile/summary.out", + }, + { + path: "fixtures/textfile/summary_extra_dimension", + out: "fixtures/textfile/summary_extra_dimension.out", + }, + { + path: "fixtures/textfile/*_extra_dimension", + out: "fixtures/textfile/glob_extra_dimension.out", + }, } for i, test := range tests { - c := textFileCollector{ - path: test.path, + mtime := 1.0 + c := &textFileCollector{ + path: test.path, + mtime: &mtime, + logger: log.NewNopLogger(), } // Suppress a log message about `nonexistent_path` not existing, this is // expected and clutters the test output. 
- log.AddFlags(kingpin.CommandLine) - _, err := kingpin.CommandLine.Parse([]string{"--log.level", "fatal"}) - if err != nil { + promlogConfig := &promlog.Config{} + flag.AddFlags(kingpin.CommandLine, promlogConfig) + if _, err := kingpin.CommandLine.Parse([]string{"--log.level", "debug"}); err != nil { t.Fatal(err) } - mfs := c.parseTextFiles() - textMFs := make([]string, 0, len(mfs)) - for _, mf := range mfs { - if mf.GetName() == "node_textfile_mtime" { - mf.GetMetric()[0].GetGauge().Value = proto.Float64(1) - mf.GetMetric()[1].GetGauge().Value = proto.Float64(2) - } - textMFs = append(textMFs, proto.MarshalTextString(mf)) - } - sort.Strings(textMFs) - got := strings.Join(textMFs, "") + registry := prometheus.NewRegistry() + registry.MustRegister(collectorAdapter{c}) + + rw := httptest.NewRecorder() + promhttp.HandlerFor(registry, promhttp.HandlerOpts{}).ServeHTTP(rw, &http.Request{}) + got := string(rw.Body.String()) want, err := ioutil.ReadFile(test.out) if err != nil { @@ -74,7 +122,7 @@ func TestParseTextFiles(t *testing.T) { } if string(want) != got { - t.Fatalf("%d. want:\n\n%s\n\ngot:\n\n%s", i, string(want), got) + t.Fatalf("%d.%q want:\n\n%s\n\ngot:\n\n%s", i, test.path, string(want), got) } } } diff --git a/collector/thermal_darwin.go b/collector/thermal_darwin.go new file mode 100644 index 0000000000..282ca3f383 --- /dev/null +++ b/collector/thermal_darwin.go @@ -0,0 +1,184 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !notherm +// +build !notherm + +package collector + +/* +#cgo LDFLAGS: -framework IOKit -framework CoreFoundation +#include <stddef.h> +#include <CoreFoundation/CoreFoundation.h> +#include <IOKit/ps/IOPowerSources.h> +#include <IOKit/ps/IOPSKeys.h> +#include <IOKit/pwr_mgt/IOPMLib.h> + +struct ref_with_ret { + CFDictionaryRef ref; + IOReturn ret; +}; + +struct ref_with_ret FetchThermal(); + +struct ref_with_ret FetchThermal() { + CFDictionaryRef ref; + IOReturn ret; + ret = IOPMCopyCPUPowerStatus(&ref); + struct ref_with_ret result = { + ref, + ret, + }; + return result; +} +*/ +import "C" + +import ( + "errors" + "fmt" + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "unsafe" +) + +type thermCollector struct { + cpuSchedulerLimit typedDesc + cpuAvailableCPU typedDesc + cpuSpeedLimit typedDesc + logger log.Logger +} + +const thermal = "thermal" + +func init() { + registerCollector(thermal, defaultEnabled, NewThermCollector) +} + +// NewThermCollector returns a new Collector exposing current CPU power levels. +func NewThermCollector(logger log.Logger) (Collector, error) { + return &thermCollector{ + cpuSchedulerLimit: typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, thermal, "cpu_scheduler_limit_ratio"), + "Represents the percentage (0-100) of CPU time available. 100% at normal operation. The OS may limit this time for a percentage less than 100%.", + nil, + nil), + valueType: prometheus.GaugeValue, + }, + cpuAvailableCPU: typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, thermal, "cpu_available_cpu"), + "Reflects how many, if any, CPUs have been taken offline. Represented as an integer number of CPUs (0 - Max CPUs).", + nil, + nil, + ), + valueType: prometheus.GaugeValue, + }, + cpuSpeedLimit: typedDesc{ + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, thermal, "cpu_speed_limit_ratio"), + "Defines the speed & voltage limits placed on the CPU. 
Represented as a percentage (0-100) of maximum CPU speed.", + nil, + nil, + ), + valueType: prometheus.GaugeValue, + }, + logger: logger, + }, nil +} + +func (c *thermCollector) Update(ch chan<- prometheus.Metric) error { + cpuPowerStatus, err := fetchCPUPowerStatus() + if err != nil { + return err + } + if value, ok := cpuPowerStatus[(string(C.kIOPMCPUPowerLimitSchedulerTimeKey))]; ok { + ch <- c.cpuSchedulerLimit.mustNewConstMetric(float64(value) / 100.0) + } + if value, ok := cpuPowerStatus[(string(C.kIOPMCPUPowerLimitProcessorCountKey))]; ok { + ch <- c.cpuAvailableCPU.mustNewConstMetric(float64(value)) + } + if value, ok := cpuPowerStatus[(string(C.kIOPMCPUPowerLimitProcessorSpeedKey))]; ok { + ch <- c.cpuSpeedLimit.mustNewConstMetric(float64(value) / 100.0) + } + return nil +} + +func fetchCPUPowerStatus() (map[string]int, error) { + cfDictRef, _ := C.FetchThermal() + defer func() { + C.CFRelease(C.CFTypeRef(cfDictRef.ref)) + }() + + if C.kIOReturnNotFound == cfDictRef.ret { + return nil, errors.New("no CPU power status has been recorded") + } + + if C.kIOReturnSuccess != cfDictRef.ret { + return nil, fmt.Errorf("no CPU power status with error code 0x%08x", int(cfDictRef.ret)) + } + + // mapping CFDictionary to map + cfDict := CFDict(cfDictRef.ref) + return mappingCFDictToMap(cfDict), nil +} + +type CFDict uintptr + +func mappingCFDictToMap(dict CFDict) map[string]int { + if C.CFNullRef(dict) == C.kCFNull { + return nil + } + cfDict := C.CFDictionaryRef(dict) + + var result map[string]int + count := C.CFDictionaryGetCount(cfDict) + if count > 0 { + keys := make([]C.CFTypeRef, count) + values := make([]C.CFTypeRef, count) + C.CFDictionaryGetKeysAndValues(cfDict, (*unsafe.Pointer)(unsafe.Pointer(&keys[0])), (*unsafe.Pointer)(unsafe.Pointer(&values[0]))) + result = make(map[string]int, count) + for i := C.CFIndex(0); i < count; i++ { + result[mappingCFStringToString(C.CFStringRef(keys[i]))] = mappingCFNumberLongToInt(C.CFNumberRef(values[i])) + } + } + return 
result +} + +// CFStringToString converts a CFStringRef to a string. +func mappingCFStringToString(s C.CFStringRef) string { + p := C.CFStringGetCStringPtr(s, C.kCFStringEncodingUTF8) + if p != nil { + return C.GoString(p) + } + length := C.CFStringGetLength(s) + if length == 0 { + return "" + } + maxBufLen := C.CFStringGetMaximumSizeForEncoding(length, C.kCFStringEncodingUTF8) + if maxBufLen == 0 { + return "" + } + buf := make([]byte, maxBufLen) + var usedBufLen C.CFIndex + _ = C.CFStringGetBytes(s, C.CFRange{0, length}, C.kCFStringEncodingUTF8, C.UInt8(0), C.false, (*C.UInt8)(&buf[0]), maxBufLen, &usedBufLen) + return string(buf[:usedBufLen]) +} + +func mappingCFNumberLongToInt(n C.CFNumberRef) int { + typ := C.CFNumberGetType(n) + var long C.long + C.CFNumberGetValue(n, typ, unsafe.Pointer(&long)) + return int(long) +} diff --git a/collector/thermal_zone_linux.go b/collector/thermal_zone_linux.go new file mode 100644 index 0000000000..6aedf34796 --- /dev/null +++ b/collector/thermal_zone_linux.go @@ -0,0 +1,110 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !nothermalzone +// +build !nothermalzone + +package collector + +import ( + "fmt" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +const coolingDevice = "cooling_device" +const thermalZone = "thermal_zone" + +type thermalZoneCollector struct { + fs sysfs.FS + coolingDeviceCurState *prometheus.Desc + coolingDeviceMaxState *prometheus.Desc + zoneTemp *prometheus.Desc + logger log.Logger +} + +func init() { + registerCollector("thermal_zone", defaultEnabled, NewThermalZoneCollector) +} + +// NewThermalZoneCollector returns a new Collector exposing kernel/system statistics. +func NewThermalZoneCollector(logger log.Logger) (Collector, error) { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + + return &thermalZoneCollector{ + fs: fs, + zoneTemp: prometheus.NewDesc( + prometheus.BuildFQName(namespace, thermalZone, "temp"), + "Zone temperature in Celsius", + []string{"zone", "type"}, nil, + ), + coolingDeviceCurState: prometheus.NewDesc( + prometheus.BuildFQName(namespace, coolingDevice, "cur_state"), + "Current throttle state of the cooling device", + []string{"name", "type"}, nil, + ), + coolingDeviceMaxState: prometheus.NewDesc( + prometheus.BuildFQName(namespace, coolingDevice, "max_state"), + "Maximum throttle state of the cooling device", + []string{"name", "type"}, nil, + ), + logger: logger, + }, nil +} + +func (c *thermalZoneCollector) Update(ch chan<- prometheus.Metric) error { + thermalZones, err := c.fs.ClassThermalZoneStats() + if err != nil { + return err + } + + for _, stats := range thermalZones { + ch <- prometheus.MustNewConstMetric( + c.zoneTemp, + prometheus.GaugeValue, + float64(stats.Temp)/1000.0, + stats.Name, + stats.Type, + ) + } + + coolingDevices, err := c.fs.ClassCoolingDeviceStats() + if err != nil { + return err + } + + for _, stats := range coolingDevices { + ch <- 
prometheus.MustNewConstMetric( + c.coolingDeviceCurState, + prometheus.GaugeValue, + float64(stats.CurState), + stats.Name, + stats.Type, + ) + + ch <- prometheus.MustNewConstMetric( + c.coolingDeviceMaxState, + prometheus.GaugeValue, + float64(stats.MaxState), + stats.Name, + stats.Type, + ) + } + + return nil +} diff --git a/collector/time.go b/collector/time.go index 86102d7a5d..31a6e74e24 100644 --- a/collector/time.go +++ b/collector/time.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !notime // +build !notime package collector @@ -18,12 +19,17 @@ package collector import ( "time" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" ) type timeCollector struct { - desc *prometheus.Desc + now typedDesc + zone typedDesc + clocksourcesAvailable typedDesc + clocksourceCurrent typedDesc + logger log.Logger } func init() { @@ -32,19 +38,41 @@ func init() { // NewTimeCollector returns a new Collector exposing the current system time in // seconds since epoch. 
-func NewTimeCollector() (Collector, error) { +func NewTimeCollector(logger log.Logger) (Collector, error) { + const subsystem = "time" return &timeCollector{ - desc: prometheus.NewDesc( - namespace+"_time", + now: typedDesc{prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "seconds"), "System time in seconds since epoch (1970).", nil, nil, - ), + ), prometheus.GaugeValue}, + zone: typedDesc{prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "zone_offset_seconds"), + "System time zone offset in seconds.", + []string{"time_zone"}, nil, + ), prometheus.GaugeValue}, + clocksourcesAvailable: typedDesc{prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "clocksource_available_info"), + "Available clocksources read from '/sys/devices/system/clocksource'.", + []string{"device", "clocksource"}, nil, + ), prometheus.GaugeValue}, + clocksourceCurrent: typedDesc{prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "clocksource_current_info"), + "Current clocksource read from '/sys/devices/system/clocksource'.", + []string{"device", "clocksource"}, nil, + ), prometheus.GaugeValue}, + logger: logger, }, nil } func (c *timeCollector) Update(ch chan<- prometheus.Metric) error { - now := float64(time.Now().UnixNano()) / 1e9 - log.Debugf("Return time: %f", now) - ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, now) - return nil + now := time.Now() + nowSec := float64(now.UnixNano()) / 1e9 + zone, zoneOffset := now.Zone() + + level.Debug(c.logger).Log("msg", "Return time", "now", nowSec) + ch <- c.now.mustNewConstMetric(nowSec) + level.Debug(c.logger).Log("msg", "Zone offset", "offset", zoneOffset, "time_zone", zone) + ch <- c.zone.mustNewConstMetric(float64(zoneOffset), zone) + return c.update(ch) } diff --git a/collector/time_linux.go b/collector/time_linux.go new file mode 100644 index 0000000000..a67f5a8f27 --- /dev/null +++ b/collector/time_linux.go @@ -0,0 +1,48 @@ +// Copyright 2021 The 
Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux && !notime +// +build linux,!notime + +package collector + +import ( + "fmt" + "strconv" + + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +func (c *timeCollector) update(ch chan<- prometheus.Metric) error { + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + return fmt.Errorf("failed to open sysfs: %w", err) + } + + clocksources, err := fs.ClockSources() + if err != nil { + return fmt.Errorf("couldn't get clocksources: %w", err) + } + level.Debug(c.logger).Log("msg", "in Update", "clocksources", fmt.Sprintf("%v", clocksources)) + + for i, clocksource := range clocksources { + is := strconv.Itoa(i) + for _, cs := range clocksource.Available { + ch <- c.clocksourcesAvailable.mustNewConstMetric(1.0, is, cs) + } + ch <- c.clocksourceCurrent.mustNewConstMetric(1.0, is, clocksource.Current) + } + return nil +} diff --git a/vendor/github.com/prometheus/common/model/model.go b/collector/time_other.go similarity index 68% rename from vendor/github.com/prometheus/common/model/model.go rename to collector/time_other.go index a7b9691707..8227435bd9 100644 --- a/vendor/github.com/prometheus/common/model/model.go +++ b/collector/time_other.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Prometheus Authors +// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); 
// you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,6 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package model contains common data structures that are shared across -// Prometheus components and libraries. -package model +//go:build !linux && !notime +// +build !linux,!notime + +package collector + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +func (c *timeCollector) update(ch chan<- prometheus.Metric) error { + return nil +} diff --git a/collector/timex.go b/collector/timex.go index 1042b9e6f6..c444d3ea14 100644 --- a/collector/timex.go +++ b/collector/timex.go @@ -11,26 +11,30 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux -// +build !notimex +//go:build linux && !notimex +// +build linux,!notimex package collector -// #include -import "C" - import ( + "errors" "fmt" - "syscall" + "os" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" ) const ( - // The system clock is not synchronized to a reliable server. - timeError = C.TIME_ERROR - // The timex.Status time resolution bit, 0 = microsecond, 1 = nanoseconds. - staNano = C.STA_NANO + // The system clock is not synchronized to a reliable + // server (TIME_ERROR). + timeError = 5 + // The timex.Status time resolution bit (STA_NANO), + // 0 = microsecond, 1 = nanoseconds. + staNano = 0x2000 + // 1 second in nanoSeconds = 1000000000 microSeconds = 1000000 @@ -54,6 +58,7 @@ type timexCollector struct { stbcnt, tai, syncStatus typedDesc + logger log.Logger } func init() { @@ -61,7 +66,7 @@ func init() { } // NewTimexCollector returns a new Collector exposing adjtime(3) stats. 
-func NewTimexCollector() (Collector, error) { +func NewTimexCollector(logger log.Logger) (Collector, error) { const subsystem = "timex" return &timexCollector{ @@ -71,7 +76,7 @@ func NewTimexCollector() (Collector, error) { nil, nil, ), prometheus.GaugeValue}, freq: typedDesc{prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, "frequency_adjustment"), + prometheus.BuildFQName(namespace, subsystem, "frequency_adjustment_ratio"), "Local clock frequency adjustment.", nil, nil, ), prometheus.GaugeValue}, @@ -101,7 +106,7 @@ func NewTimexCollector() (Collector, error) { nil, nil, ), prometheus.GaugeValue}, ppsfreq: typedDesc{prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, "pps_frequency"), + prometheus.BuildFQName(namespace, subsystem, "pps_frequency_hertz"), "Pulse per second frequency.", nil, nil, ), prometheus.GaugeValue}, @@ -116,32 +121,32 @@ func NewTimexCollector() (Collector, error) { nil, nil, ), prometheus.GaugeValue}, stabil: typedDesc{prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, "pps_stability"), - "Pulse per second stability.", + prometheus.BuildFQName(namespace, subsystem, "pps_stability_hertz"), + "Pulse per second stability, average of recent frequency changes.", nil, nil, - ), prometheus.CounterValue}, + ), prometheus.GaugeValue}, jitcnt: typedDesc{prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, "pps_jitter_count"), + prometheus.BuildFQName(namespace, subsystem, "pps_jitter_total"), "Pulse per second count of jitter limit exceeded events.", nil, nil, ), prometheus.CounterValue}, calcnt: typedDesc{prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, "pps_calibration_count"), + prometheus.BuildFQName(namespace, subsystem, "pps_calibration_total"), "Pulse per second count of calibration intervals.", nil, nil, ), prometheus.CounterValue}, errcnt: typedDesc{prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, "pps_error_count"), + 
prometheus.BuildFQName(namespace, subsystem, "pps_error_total"), "Pulse per second count of calibration errors.", nil, nil, ), prometheus.CounterValue}, stbcnt: typedDesc{prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, "pps_stability_exceeded_count"), + prometheus.BuildFQName(namespace, subsystem, "pps_stability_exceeded_total"), "Pulse per second count of stability limit exceeded events.", nil, nil, - ), prometheus.GaugeValue}, + ), prometheus.CounterValue}, tai: typedDesc{prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, "tai_offset"), + prometheus.BuildFQName(namespace, subsystem, "tai_offset_seconds"), "International Atomic Time (TAI) offset.", nil, nil, ), prometheus.GaugeValue}, @@ -150,17 +155,22 @@ func NewTimexCollector() (Collector, error) { "Is clock synchronized to a reliable server (1 = yes, 0 = no).", nil, nil, ), prometheus.GaugeValue}, + logger: logger, }, nil } func (c *timexCollector) Update(ch chan<- prometheus.Metric) error { var syncStatus float64 var divisor float64 - var timex = new(syscall.Timex) + var timex = new(unix.Timex) - status, err := syscall.Adjtimex(timex) + status, err := unix.Adjtimex(timex) if err != nil { - return fmt.Errorf("failed to retrieve adjtimex stats: %v", err) + if errors.Is(err, os.ErrPermission) { + level.Debug(c.logger).Log("msg", "Not collecting timex metrics", "err", err) + return ErrNoData + } + return fmt.Errorf("failed to retrieve adjtimex stats: %w", err) } if status == timeError { @@ -173,18 +183,21 @@ func (c *timexCollector) Update(ch chan<- prometheus.Metric) error { } else { divisor = microSeconds } + // See NOTES in adjtimex(2). 
+ const ppm16frac = 1000000.0 * 65536.0 + ch <- c.syncStatus.mustNewConstMetric(syncStatus) ch <- c.offset.mustNewConstMetric(float64(timex.Offset) / divisor) - ch <- c.freq.mustNewConstMetric(float64(timex.Freq)) + ch <- c.freq.mustNewConstMetric(1 + float64(timex.Freq)/ppm16frac) ch <- c.maxerror.mustNewConstMetric(float64(timex.Maxerror) / microSeconds) ch <- c.esterror.mustNewConstMetric(float64(timex.Esterror) / microSeconds) ch <- c.status.mustNewConstMetric(float64(timex.Status)) ch <- c.constant.mustNewConstMetric(float64(timex.Constant)) ch <- c.tick.mustNewConstMetric(float64(timex.Tick) / microSeconds) - ch <- c.ppsfreq.mustNewConstMetric(float64(timex.Ppsfreq)) + ch <- c.ppsfreq.mustNewConstMetric(float64(timex.Ppsfreq) / ppm16frac) ch <- c.jitter.mustNewConstMetric(float64(timex.Jitter) / divisor) ch <- c.shift.mustNewConstMetric(float64(timex.Shift)) - ch <- c.stabil.mustNewConstMetric(float64(timex.Stabil)) + ch <- c.stabil.mustNewConstMetric(float64(timex.Stabil) / ppm16frac) ch <- c.jitcnt.mustNewConstMetric(float64(timex.Jitcnt)) ch <- c.calcnt.mustNewConstMetric(float64(timex.Calcnt)) ch <- c.errcnt.mustNewConstMetric(float64(timex.Errcnt)) diff --git a/collector/udp_queues_linux.go b/collector/udp_queues_linux.go new file mode 100644 index 0000000000..3fac869198 --- /dev/null +++ b/collector/udp_queues_linux.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !noudp_queues +// +build !noudp_queues + +package collector + +import ( + "errors" + "fmt" + "os" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +type ( + udpQueuesCollector struct { + fs procfs.FS + desc *prometheus.Desc + logger log.Logger + } +) + +func init() { + registerCollector("udp_queues", defaultEnabled, NewUDPqueuesCollector) +} + +// NewUDPqueuesCollector returns a new Collector exposing network udp queued bytes. +func NewUDPqueuesCollector(logger log.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + return &udpQueuesCollector{ + fs: fs, + desc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, "udp", "queues"), + "Number of allocated memory in the kernel for UDP datagrams in bytes.", + []string{"queue", "ip"}, nil, + ), + logger: logger, + }, nil +} + +func (c *udpQueuesCollector) Update(ch chan<- prometheus.Metric) error { + + s4, errIPv4 := c.fs.NetUDPSummary() + if errIPv4 == nil { + ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s4.TxQueueLength), "tx", "v4") + ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s4.RxQueueLength), "rx", "v4") + } else { + if errors.Is(errIPv4, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "not collecting ipv4 based metrics") + } else { + return fmt.Errorf("couldn't get udp queued bytes: %w", errIPv4) + } + } + + s6, errIPv6 := c.fs.NetUDP6Summary() + if errIPv6 == nil { + ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s6.TxQueueLength), "tx", "v6") + ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s6.RxQueueLength), "rx", "v6") + } else { + if errors.Is(errIPv6, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "not collecting ipv6 based metrics") + } else { + return 
fmt.Errorf("couldn't get udp6 queued bytes: %w", errIPv6) + } + } + + if errors.Is(errIPv4, os.ErrNotExist) && errors.Is(errIPv6, os.ErrNotExist) { + return ErrNoData + } + return nil +} diff --git a/collector/uname.go b/collector/uname.go new file mode 100644 index 0000000000..14cfefb324 --- /dev/null +++ b/collector/uname.go @@ -0,0 +1,76 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build (darwin || freebsd || openbsd || linux) && !nouname +// +build darwin freebsd openbsd linux +// +build !nouname + +package collector + +import ( + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" +) + +var unameDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "uname", "info"), + "Labeled system information as provided by the uname system call.", + []string{ + "sysname", + "release", + "version", + "machine", + "nodename", + "domainname", + }, + nil, +) + +type unameCollector struct { + logger log.Logger +} +type uname struct { + SysName string + Release string + Version string + Machine string + NodeName string + DomainName string +} + +func init() { + registerCollector("uname", defaultEnabled, newUnameCollector) +} + +// NewUnameCollector returns new unameCollector. 
+func newUnameCollector(logger log.Logger) (Collector, error) { + return &unameCollector{logger}, nil +} + +func (c *unameCollector) Update(ch chan<- prometheus.Metric) error { + uname, err := getUname() + if err != nil { + return err + } + + ch <- prometheus.MustNewConstMetric(unameDesc, prometheus.GaugeValue, 1, + uname.SysName, + uname.Release, + uname.Version, + uname.Machine, + uname.NodeName, + uname.DomainName, + ) + + return nil +} diff --git a/collector/uname_bsd.go b/collector/uname_bsd.go new file mode 100644 index 0000000000..77d47877be --- /dev/null +++ b/collector/uname_bsd.go @@ -0,0 +1,64 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build (darwin || freebsd || openbsd) && !nouname +// +build darwin freebsd openbsd +// +build !nouname + +package collector + +import ( + "bytes" + "strings" + + "golang.org/x/sys/unix" +) + +func getUname() (uname, error) { + var utsname unix.Utsname + if err := unix.Uname(&utsname); err != nil { + return uname{}, err + } + + nodeName, domainName := parseHostNameAndDomainName(utsname) + + output := uname{ + SysName: string(utsname.Sysname[:bytes.IndexByte(utsname.Sysname[:], 0)]), + Release: string(utsname.Release[:bytes.IndexByte(utsname.Release[:], 0)]), + Version: string(utsname.Version[:bytes.IndexByte(utsname.Version[:], 0)]), + Machine: string(utsname.Machine[:bytes.IndexByte(utsname.Machine[:], 0)]), + NodeName: nodeName, + DomainName: domainName, + } + + return output, nil +} + +// parseHostNameAndDomainName for FreeBSD,OpenBSD,Darwin. +// Attempts to emulate what happens in the Linux uname calls since these OS doesn't have a Domainname. +func parseHostNameAndDomainName(utsname unix.Utsname) (hostname string, domainname string) { + nodename := string(utsname.Nodename[:bytes.IndexByte(utsname.Nodename[:], 0)]) + split := strings.SplitN(nodename, ".", 2) + + // We'll always have at least a single element in the array. We assume this + // is the hostname. + hostname = split[0] + + // If we have more than one element, we assume this is the domainname. + // Otherwise leave it to "(none)" like Linux. + domainname = "(none)" + if len(split) > 1 { + domainname = split[1] + } + return hostname, domainname +} diff --git a/collector/uname_linux.go b/collector/uname_linux.go index c092fab5a5..549c221ab6 100644 --- a/collector/uname_linux.go +++ b/collector/uname_linux.go @@ -11,54 +11,31 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !nouname // +build !nouname package collector import ( - "syscall" + "bytes" - "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sys/unix" ) -var unameDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "uname", "info"), - "Labeled system information as provided by the uname system call.", - []string{ - "sysname", - "release", - "version", - "machine", - "nodename", - "domainname", - }, - nil, -) - -type unameCollector struct{} - -func init() { - registerCollector("uname", defaultEnabled, newUnameCollector) -} - -// NewUnameCollector returns new unameCollector. -func newUnameCollector() (Collector, error) { - return &unameCollector{}, nil -} +func getUname() (uname, error) { + var utsname unix.Utsname + if err := unix.Uname(&utsname); err != nil { + return uname{}, err + } -func (c unameCollector) Update(ch chan<- prometheus.Metric) error { - var uname syscall.Utsname - if err := syscall.Uname(&uname); err != nil { - return err + output := uname{ + SysName: string(utsname.Sysname[:bytes.IndexByte(utsname.Sysname[:], 0)]), + Release: string(utsname.Release[:bytes.IndexByte(utsname.Release[:], 0)]), + Version: string(utsname.Version[:bytes.IndexByte(utsname.Version[:], 0)]), + Machine: string(utsname.Machine[:bytes.IndexByte(utsname.Machine[:], 0)]), + NodeName: string(utsname.Nodename[:bytes.IndexByte(utsname.Nodename[:], 0)]), + DomainName: string(utsname.Domainname[:bytes.IndexByte(utsname.Domainname[:], 0)]), } - ch <- prometheus.MustNewConstMetric(unameDesc, prometheus.GaugeValue, 1, - unameToString(uname.Sysname), - unameToString(uname.Release), - unameToString(uname.Version), - unameToString(uname.Machine), - unameToString(uname.Nodename), - unameToString(uname.Domainname), - ) - return nil + return output, nil } diff --git a/collector/uname_linux_int8.go b/collector/uname_linux_int8.go deleted file mode 100644 index e0a89596c1..0000000000 --- a/collector/uname_linux_int8.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 
2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build 386 amd64 arm64 mips64 mips64le mips mipsle -// +build linux -// +build !nouname - -package collector - -func unameToString(input [65]int8) string { - var str string - for _, a := range input { - if a == 0 { - break - } - str += string(a) - } - return str -} diff --git a/collector/vmstat_linux.go b/collector/vmstat_linux.go index 0b9b2b53cc..a59916573b 100644 --- a/collector/vmstat_linux.go +++ b/collector/vmstat_linux.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !novmstat // +build !novmstat package collector @@ -19,25 +20,39 @@ import ( "bufio" "fmt" "os" + "regexp" "strconv" "strings" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" + "gopkg.in/alecthomas/kingpin.v2" ) const ( vmStatSubsystem = "vmstat" ) -type vmStatCollector struct{} +var ( + vmStatFields = kingpin.Flag("collector.vmstat.fields", "Regexp of fields to return for vmstat collector.").Default("^(oom_kill|pgpg|pswp|pg.*fault).*").String() +) + +type vmStatCollector struct { + fieldPattern *regexp.Regexp + logger log.Logger +} func init() { registerCollector("vmstat", defaultEnabled, NewvmStatCollector) } // NewvmStatCollector returns a new Collector exposing vmstat stats. 
-func NewvmStatCollector() (Collector, error) { - return &vmStatCollector{}, nil +func NewvmStatCollector(logger log.Logger) (Collector, error) { + pattern := regexp.MustCompile(*vmStatFields) + return &vmStatCollector{ + fieldPattern: pattern, + logger: logger, + }, nil } func (c *vmStatCollector) Update(ch chan<- prometheus.Metric) error { @@ -54,6 +69,9 @@ func (c *vmStatCollector) Update(ch chan<- prometheus.Metric) error { if err != nil { return err } + if !c.fieldPattern.MatchString(parts[0]) { + continue + } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( diff --git a/collector/wifi_linux.go b/collector/wifi_linux.go index b3a1cf493b..4b293ea379 100644 --- a/collector/wifi_linux.go +++ b/collector/wifi_linux.go @@ -11,18 +11,23 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nowifi +// +build !nowifi + package collector import ( "encoding/json" + "errors" "fmt" "io/ioutil" "os" "path/filepath" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/mdlayher/wifi" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" "gopkg.in/alecthomas/kingpin.v2" ) @@ -34,10 +39,14 @@ type wifiCollector struct { stationInactiveSeconds *prometheus.Desc stationReceiveBitsPerSecond *prometheus.Desc stationTransmitBitsPerSecond *prometheus.Desc + stationReceiveBytesTotal *prometheus.Desc + stationTransmitBytesTotal *prometheus.Desc stationSignalDBM *prometheus.Desc stationTransmitRetriesTotal *prometheus.Desc stationTransmitFailedTotal *prometheus.Desc stationBeaconLossTotal *prometheus.Desc + + logger log.Logger } var ( @@ -45,7 +54,7 @@ var ( ) func init() { - registerCollector("wifi", defaultEnabled, NewWifiCollector) + registerCollector("wifi", defaultDisabled, NewWifiCollector) } var _ wifiStater = &wifi.Client{} @@ -55,24 +64,24 @@ type wifiStater interface { BSS(ifi *wifi.Interface) (*wifi.BSS, error) Close() error Interfaces() 
([]*wifi.Interface, error) - StationInfo(ifi *wifi.Interface) (*wifi.StationInfo, error) + StationInfo(ifi *wifi.Interface) ([]*wifi.StationInfo, error) } // NewWifiCollector returns a new Collector exposing Wifi statistics. -func NewWifiCollector() (Collector, error) { +func NewWifiCollector(logger log.Logger) (Collector, error) { const ( subsystem = "wifi" ) var ( - labels = []string{"device"} + labels = []string{"device", "mac_address"} ) return &wifiCollector{ interfaceFrequencyHertz: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "interface_frequency_hertz"), "The current frequency a WiFi interface is operating at, in hertz.", - labels, + []string{"device"}, nil, ), @@ -111,6 +120,20 @@ func NewWifiCollector() (Collector, error) { nil, ), + stationReceiveBytesTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "station_receive_bytes_total"), + "The total number of bytes received by a WiFi station.", + labels, + nil, + ), + + stationTransmitBytesTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, "station_transmit_bytes_total"), + "The total number of bytes transmitted by a WiFi station.", + labels, + nil, + ), + stationSignalDBM: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "station_signal_dbm"), "The current WiFi signal strength, in decibel-milliwatts (dBm).", @@ -138,6 +161,7 @@ func NewWifiCollector() (Collector, error) { labels, nil, ), + logger: logger, }, nil } @@ -145,22 +169,22 @@ func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error { stat, err := newWifiStater(*collectorWifi) if err != nil { // Cannot access wifi metrics, report no error. 
- if os.IsNotExist(err) { - log.Debug("wifi collector metrics are not available for this system") - return nil + if errors.Is(err, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "wifi collector metrics are not available for this system") + return ErrNoData } - if os.IsPermission(err) { - log.Debug("wifi collector got permission denied when accessing metrics") - return nil + if errors.Is(err, os.ErrPermission) { + level.Debug(c.logger).Log("msg", "wifi collector got permission denied when accessing metrics") + return ErrNoData } - return fmt.Errorf("failed to access wifi data: %v", err) + return fmt.Errorf("failed to access wifi data: %w", err) } defer stat.Close() ifis, err := stat.Interfaces() if err != nil { - return fmt.Errorf("failed to retrieve wifi interfaces: %v", err) + return fmt.Errorf("failed to retrieve wifi interfaces: %w", err) } for _, ifi := range ifis { @@ -169,7 +193,7 @@ func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error { continue } - log.Debugf("probing wifi device %q with type %q", ifi.Name, ifi.Type) + level.Debug(c.logger).Log("msg", "probing wifi device with type", "wifi", ifi.Name, "type", ifi.Type) ch <- prometheus.MustNewConstMetric( c.interfaceFrequencyHertz, @@ -179,26 +203,28 @@ func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error { ) // When a statistic is not available for a given interface, package wifi - // returns an error compatible with os.IsNotExist. We leverage this to - // only export metrics which are actually valid for given interface types. + // returns a os.ErrNotExist error. We leverage this to only export + // metrics which are actually valid for given interface types. 
bss, err := stat.BSS(ifi) switch { case err == nil: c.updateBSSStats(ch, ifi.Name, bss) - case os.IsNotExist(err): - log.Debugf("BSS information not found for wifi device %q", ifi.Name) + case errors.Is(err, os.ErrNotExist): + level.Debug(c.logger).Log("msg", "BSS information not found for wifi device", "name", ifi.Name) default: return fmt.Errorf("failed to retrieve BSS for device %s: %v", ifi.Name, err) } - info, err := stat.StationInfo(ifi) + stations, err := stat.StationInfo(ifi) switch { case err == nil: - c.updateStationStats(ch, ifi.Name, info) - case os.IsNotExist(err): - log.Debugf("station information not found for wifi device %q", ifi.Name) + for _, station := range stations { + c.updateStationStats(ch, ifi.Name, station) + } + case errors.Is(err, os.ErrNotExist): + level.Debug(c.logger).Log("msg", "station information not found for wifi device", "name", ifi.Name) default: return fmt.Errorf("failed to retrieve station info for device %q: %v", ifi.Name, err) @@ -227,6 +253,7 @@ func (c *wifiCollector) updateStationStats(ch chan<- prometheus.Metric, device s prometheus.CounterValue, info.Connected.Seconds(), device, + info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( @@ -234,6 +261,7 @@ func (c *wifiCollector) updateStationStats(ch chan<- prometheus.Metric, device s prometheus.GaugeValue, info.Inactive.Seconds(), device, + info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( @@ -241,6 +269,7 @@ func (c *wifiCollector) updateStationStats(ch chan<- prometheus.Metric, device s prometheus.GaugeValue, float64(info.ReceiveBitrate), device, + info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( @@ -248,6 +277,23 @@ func (c *wifiCollector) updateStationStats(ch chan<- prometheus.Metric, device s prometheus.GaugeValue, float64(info.TransmitBitrate), device, + info.HardwareAddr.String(), + ) + + ch <- prometheus.MustNewConstMetric( + c.stationReceiveBytesTotal, + prometheus.CounterValue, + float64(info.ReceivedBytes), + 
device, + info.HardwareAddr.String(), + ) + + ch <- prometheus.MustNewConstMetric( + c.stationTransmitBytesTotal, + prometheus.CounterValue, + float64(info.TransmittedBytes), + device, + info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( @@ -255,6 +301,7 @@ func (c *wifiCollector) updateStationStats(ch chan<- prometheus.Metric, device s prometheus.GaugeValue, float64(info.Signal), device, + info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( @@ -262,6 +309,7 @@ func (c *wifiCollector) updateStationStats(ch chan<- prometheus.Metric, device s prometheus.CounterValue, float64(info.TransmitRetries), device, + info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( @@ -269,6 +317,7 @@ func (c *wifiCollector) updateStationStats(ch chan<- prometheus.Metric, device s prometheus.CounterValue, float64(info.TransmitFailed), device, + info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( @@ -276,6 +325,7 @@ func (c *wifiCollector) updateStationStats(ch chan<- prometheus.Metric, device s prometheus.CounterValue, float64(info.BeaconLoss), device, + info.HardwareAddr.String(), ) } @@ -346,13 +396,13 @@ func (s *mockWifiStater) Interfaces() ([]*wifi.Interface, error) { return ifis, nil } -func (s *mockWifiStater) StationInfo(ifi *wifi.Interface) (*wifi.StationInfo, error) { +func (s *mockWifiStater) StationInfo(ifi *wifi.Interface) ([]*wifi.StationInfo, error) { p := filepath.Join(ifi.Name, "stationinfo.json") - var info wifi.StationInfo - if err := s.unmarshalJSONFile(p, &info); err != nil { + var stations []*wifi.StationInfo + if err := s.unmarshalJSONFile(p, &stations); err != nil { return nil, err } - return &info, nil + return stations, nil } diff --git a/collector/xfs_linux.go b/collector/xfs_linux.go index 8129de70c2..bb25acab9f 100644 --- a/collector/xfs_linux.go +++ b/collector/xfs_linux.go @@ -11,19 +11,23 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !noxfs +// +build !noxfs + package collector import ( "fmt" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/procfs/sysfs" "github.com/prometheus/procfs/xfs" ) // An xfsCollector is a Collector which gathers metrics from XFS filesystems. type xfsCollector struct { - fs sysfs.FS + fs xfs.FS + logger log.Logger } func init() { @@ -31,22 +35,23 @@ func init() { } // NewXFSCollector returns a new Collector exposing XFS statistics. -func NewXFSCollector() (Collector, error) { - fs, err := sysfs.NewFS(*sysPath) +func NewXFSCollector(logger log.Logger) (Collector, error) { + fs, err := xfs.NewFS(*procPath, *sysPath) if err != nil { - return nil, fmt.Errorf("failed to open sysfs: %v", err) + return nil, fmt.Errorf("failed to open sysfs: %w", err) } return &xfsCollector{ - fs: fs, + fs: fs, + logger: logger, }, nil } // Update implements Collector. func (c *xfsCollector) Update(ch chan<- prometheus.Metric) error { - stats, err := c.fs.XFSStats() + stats, err := c.fs.SysStats() if err != nil { - return fmt.Errorf("failed to retrieve XFS stats: %v", err) + return fmt.Errorf("failed to retrieve XFS stats: %w", err) } for _, s := range stats { @@ -156,24 +161,124 @@ func (c *xfsCollector) updateXFSStats(ch chan<- prometheus.Metric, s *xfs.Stats) value: float64(s.BlockMapping.ExtentListCompares), }, { - name: "block_mapping_btree_lookups_total", + name: "block_map_btree_lookups_total", desc: "Number of block map B-tree lookups for a filesystem.", - value: float64(s.AllocationBTree.Lookups), + value: float64(s.BlockMapBTree.Lookups), }, { - name: "block_mapping_btree_compares_total", + name: "block_map_btree_compares_total", desc: "Number of block map B-tree compares for a filesystem.", - value: float64(s.AllocationBTree.Compares), + value: float64(s.BlockMapBTree.Compares), }, { - name: "block_mapping_btree_records_inserted_total", + name: "block_map_btree_records_inserted_total", desc: "Number of block map B-tree 
records inserted for a filesystem.", - value: float64(s.AllocationBTree.RecordsInserted), + value: float64(s.BlockMapBTree.RecordsInserted), }, { - name: "block_mapping_btree_records_deleted_total", + name: "block_map_btree_records_deleted_total", desc: "Number of block map B-tree records deleted for a filesystem.", - value: float64(s.AllocationBTree.RecordsDeleted), + value: float64(s.BlockMapBTree.RecordsDeleted), + }, + { + name: "directory_operation_lookup_total", + desc: "Number of file name directory lookups which miss the operating systems directory name lookup cache.", + value: float64(s.DirectoryOperation.Lookups), + }, + { + name: "directory_operation_create_total", + desc: "Number of times a new directory entry was created for a filesystem.", + value: float64(s.DirectoryOperation.Creates), + }, + { + name: "directory_operation_remove_total", + desc: "Number of times an existing directory entry was created for a filesystem.", + value: float64(s.DirectoryOperation.Removes), + }, + { + name: "directory_operation_getdents_total", + desc: "Number of times the directory getdents operation was performed for a filesystem.", + value: float64(s.DirectoryOperation.Getdents), + }, + { + name: "inode_operation_attempts_total", + desc: "Number of times the OS looked for an XFS inode in the inode cache.", + value: float64(s.InodeOperation.Attempts), + }, + { + name: "inode_operation_found_total", + desc: "Number of times the OS looked for and found an XFS inode in the inode cache.", + value: float64(s.InodeOperation.Found), + }, + { + name: "inode_operation_recycled_total", + desc: "Number of times the OS found an XFS inode in the cache, but could not use it as it was being recycled.", + value: float64(s.InodeOperation.Recycle), + }, + { + name: "inode_operation_missed_total", + desc: "Number of times the OS looked for an XFS inode in the cache, but did not find it.", + value: float64(s.InodeOperation.Missed), + }, + { + name: "inode_operation_duplicates_total", + 
desc: "Number of times the OS tried to add a missing XFS inode to the inode cache, but found it had already been added by another process.", + value: float64(s.InodeOperation.Duplicate), + }, + { + name: "inode_operation_reclaims_total", + desc: "Number of times the OS reclaimed an XFS inode from the inode cache to free memory for another purpose.", + value: float64(s.InodeOperation.Reclaims), + }, + { + name: "inode_operation_attribute_changes_total", + desc: "Number of times the OS explicitly changed the attributes of an XFS inode.", + value: float64(s.InodeOperation.AttributeChange), + }, + { + name: "read_calls_total", + desc: "Number of read(2) system calls made to files in a filesystem.", + value: float64(s.ReadWrite.Read), + }, + { + name: "write_calls_total", + desc: "Number of write(2) system calls made to files in a filesystem.", + value: float64(s.ReadWrite.Write), + }, + { + name: "vnode_active_total", + desc: "Number of vnodes not on free lists for a filesystem.", + value: float64(s.Vnode.Active), + }, + { + name: "vnode_allocate_total", + desc: "Number of times vn_alloc called for a filesystem.", + value: float64(s.Vnode.Allocate), + }, + { + name: "vnode_get_total", + desc: "Number of times vn_get called for a filesystem.", + value: float64(s.Vnode.Get), + }, + { + name: "vnode_hold_total", + desc: "Number of times vn_hold called for a filesystem.", + value: float64(s.Vnode.Hold), + }, + { + name: "vnode_release_total", + desc: "Number of times vn_rele called for a filesystem.", + value: float64(s.Vnode.Release), + }, + { + name: "vnode_reclaim_total", + desc: "Number of times vn_reclaim called for a filesystem.", + value: float64(s.Vnode.Reclaim), + }, + { + name: "vnode_remove_total", + desc: "Number of times vn_remove called for a filesystem.", + value: float64(s.Vnode.Remove), }, } diff --git a/collector/zfs.go b/collector/zfs.go index aed8d3ce64..8ddbe3c278 100644 --- a/collector/zfs.go +++ b/collector/zfs.go @@ -11,8 +11,8 @@ // See the License 
for the specific language governing permissions and // limitations under the License. -// +build linux -// +build !nozfs +//go:build linux && !nozfs +// +build linux,!nozfs package collector @@ -20,8 +20,9 @@ import ( "errors" "strings" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" ) var errZFSNotAvailable = errors.New("ZFS / ZFS statistics are not available") @@ -33,34 +34,53 @@ func init() { } type zfsCollector struct { - linuxProcpathBase string - linuxZpoolIoPath string - linuxPathMap map[string]string + linuxProcpathBase string + linuxZpoolIoPath string + linuxZpoolObjsetPath string + linuxZpoolStatePath string + linuxPathMap map[string]string + logger log.Logger } // NewZFSCollector returns a new Collector exposing ZFS statistics. -func NewZFSCollector() (Collector, error) { +func NewZFSCollector(logger log.Logger) (Collector, error) { return &zfsCollector{ - linuxProcpathBase: "spl/kstat/zfs", - linuxZpoolIoPath: "/*/io", + linuxProcpathBase: "spl/kstat/zfs", + linuxZpoolIoPath: "/*/io", + linuxZpoolObjsetPath: "/*/objset-*", + linuxZpoolStatePath: "/*/state", linuxPathMap: map[string]string{ - "zfs_arc": "arcstats", - "zfs_dmu_tx": "dmu_tx", - "zfs_fm": "fm", - "zfs_zfetch": "zfetchstats", - "zfs_vdev_cache": "vdev_cache_stats", - "zfs_xuio": "xuio_stats", - "zfs_zil": "zil", + "zfs_abd": "abdstats", + "zfs_arc": "arcstats", + "zfs_dbuf": "dbuf_stats", + "zfs_dmu_tx": "dmu_tx", + "zfs_dnode": "dnodestats", + "zfs_fm": "fm", + "zfs_vdev_cache": "vdev_cache_stats", // vdev_cache is deprecated + "zfs_vdev_mirror": "vdev_mirror_stats", + "zfs_xuio": "xuio_stats", // no known consumers of the XUIO interface on Linux exist + "zfs_zfetch": "zfetchstats", + "zfs_zil": "zil", }, + logger: logger, }, nil } func (c *zfsCollector) Update(ch chan<- prometheus.Metric) error { + + if _, err := c.openProcFile(c.linuxProcpathBase); err != nil { + if err == errZFSNotAvailable { 
+ level.Debug(c.logger).Log("err", err) + return ErrNoData + } + } + for subsystem := range c.linuxPathMap { if err := c.updateZfsStats(subsystem, ch); err != nil { if err == errZFSNotAvailable { - log.Debug(err) - return nil + level.Debug(c.logger).Log("err", err) + // ZFS /proc files are added as new features to ZFS arrive, it is ok to continue + continue } return err } @@ -75,7 +95,7 @@ func (s zfsSysctl) metricName() string { return strings.Replace(parts[len(parts)-1], "-", "_", -1) } -func (c *zfsCollector) constSysctlMetric(subsystem string, sysctl zfsSysctl, value int64) prometheus.Metric { +func (c *zfsCollector) constSysctlMetric(subsystem string, sysctl zfsSysctl, value uint64) prometheus.Metric { metricName := sysctl.metricName() return prometheus.MustNewConstMetric( @@ -90,7 +110,7 @@ func (c *zfsCollector) constSysctlMetric(subsystem string, sysctl zfsSysctl, val ) } -func (c *zfsCollector) constPoolMetric(poolName string, sysctl zfsSysctl, value int64) prometheus.Metric { +func (c *zfsCollector) constPoolMetric(poolName string, sysctl zfsSysctl, value uint64) prometheus.Metric { metricName := sysctl.metricName() return prometheus.MustNewConstMetric( @@ -105,3 +125,35 @@ func (c *zfsCollector) constPoolMetric(poolName string, sysctl zfsSysctl, value poolName, ) } + +func (c *zfsCollector) constPoolObjsetMetric(poolName string, datasetName string, sysctl zfsSysctl, value uint64) prometheus.Metric { + metricName := sysctl.metricName() + + return prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, "zfs_zpool_dataset", metricName), + string(sysctl), + []string{"zpool", "dataset"}, + nil, + ), + prometheus.UntypedValue, + float64(value), + poolName, + datasetName, + ) +} + +func (c *zfsCollector) constPoolStateMetric(poolName string, stateName string, isActive uint64) prometheus.Metric { + return prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, "zfs_zpool", "state"), + 
"kstat.zfs.misc.state", + []string{"zpool", "state"}, + nil, + ), + prometheus.GaugeValue, + float64(isActive), + poolName, + stateName, + ) +} diff --git a/collector/zfs_freebsd.go b/collector/zfs_freebsd.go new file mode 100644 index 0000000000..2418b229d0 --- /dev/null +++ b/collector/zfs_freebsd.go @@ -0,0 +1,266 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !nozfs +// +build !nozfs + +package collector + +import ( + "fmt" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" +) + +type zfsCollector struct { + sysctls []bsdSysctl + logger log.Logger +} + +const ( + zfsCollectorSubsystem = "zfs" +) + +func init() { + registerCollector("zfs", defaultEnabled, NewZfsCollector) +} + +func NewZfsCollector(logger log.Logger) (Collector, error) { + return &zfsCollector{ + sysctls: []bsdSysctl{ + { + name: "abdstats_linear_count_total", + description: "ZFS ARC buffer data linear count", + mib: "kstat.zfs.misc.abdstats.linear_cnt", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.CounterValue, + }, + { + name: "abdstats_linear_data_bytes", + description: "ZFS ARC buffer data linear data size", + mib: "kstat.zfs.misc.abdstats.linear_data_size", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "abdstats_scatter_chunk_waste_bytes", + description: "ZFS ARC buffer data scatter chunk waste", + mib: "kstat.zfs.misc.abdstats.scatter_chunk_waste", 
+ dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "abdstats_scatter_count_total", + description: "ZFS ARC buffer data scatter count", + mib: "kstat.zfs.misc.abdstats.scatter_cnt", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.CounterValue, + }, + { + name: "abdstats_scatter_data_bytes", + description: "ZFS ARC buffer data scatter data size", + mib: "kstat.zfs.misc.abdstats.scatter_data_size", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "abdstats_struct_bytes", + description: "ZFS ARC buffer data struct size", + mib: "kstat.zfs.misc.abdstats.struct_size", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_anon_bytes", + description: "ZFS ARC anon size", + mib: "kstat.zfs.misc.arcstats.anon_size", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_c_bytes", + description: "ZFS ARC target size", + mib: "kstat.zfs.misc.arcstats.c", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_c_max_bytes", + description: "ZFS ARC maximum size", + mib: "kstat.zfs.misc.arcstats.c_max", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_c_min_bytes", + description: "ZFS ARC minimum size", + mib: "kstat.zfs.misc.arcstats.c_min", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_data_bytes", + description: "ZFS ARC data size", + mib: "kstat.zfs.misc.arcstats.data_size", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_demand_data_hits_total", + description: "ZFS ARC demand data hits", + mib: "kstat.zfs.misc.arcstats.demand_data_hits", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.CounterValue, + }, + { + name: "arcstats_demand_data_misses_total", + description: "ZFS ARC demand data misses", + mib: 
"kstat.zfs.misc.arcstats.demand_data_misses", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.CounterValue, + }, + { + name: "arcstats_demand_metadata_hits_total", + description: "ZFS ARC demand metadata hits", + mib: "kstat.zfs.misc.arcstats.demand_metadata_hits", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.CounterValue, + }, + { + name: "arcstats_demand_metadata_misses_total", + description: "ZFS ARC demand metadata misses", + mib: "kstat.zfs.misc.arcstats.demand_metadata_misses", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.CounterValue, + }, + { + name: "arcstats_hdr_bytes", + description: "ZFS ARC header size", + mib: "kstat.zfs.misc.arcstats.hdr_size", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_hits_total", + description: "ZFS ARC hits", + mib: "kstat.zfs.misc.arcstats.hits", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.CounterValue, + }, + { + name: "arcstats_misses_total", + description: "ZFS ARC misses", + mib: "kstat.zfs.misc.arcstats.misses", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.CounterValue, + }, + { + name: "arcstats_mfu_ghost_hits_total", + description: "ZFS ARC MFU ghost hits", + mib: "kstat.zfs.misc.arcstats.mfu_ghost_hits", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.CounterValue, + }, + { + name: "arcstats_mfu_ghost_size", + description: "ZFS ARC MFU ghost size", + mib: "kstat.zfs.misc.arcstats.mfu_ghost_size", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_mfu_bytes", + description: "ZFS ARC MFU size", + mib: "kstat.zfs.misc.arcstats.mfu_size", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_mru_ghost_hits_total", + description: "ZFS ARC MRU ghost hits", + mib: "kstat.zfs.misc.arcstats.mru_ghost_hits", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.CounterValue, + }, + { + name: "arcstats_mru_ghost_bytes", + 
description: "ZFS ARC MRU ghost size", + mib: "kstat.zfs.misc.arcstats.mru_ghost_size", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_mru_bytes", + description: "ZFS ARC MRU size", + mib: "kstat.zfs.misc.arcstats.mru_size", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_other_bytes", + description: "ZFS ARC other size", + mib: "kstat.zfs.misc.arcstats.other_size", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_p_bytes", + description: "ZFS ARC MRU target size", + mib: "kstat.zfs.misc.arcstats.p", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "arcstats_size_bytes", + description: "ZFS ARC size", + mib: "kstat.zfs.misc.arcstats.size", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.GaugeValue, + }, + { + name: "zfetchstats_hits_total", + description: "ZFS cache fetch hits", + mib: "kstat.zfs.misc.zfetchstats.hits", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.CounterValue, + }, + { + name: "zfetchstats_misses_total", + description: "ZFS cache fetch misses", + mib: "kstat.zfs.misc.zfetchstats.misses", + dataType: bsdSysctlTypeUint64, + valueType: prometheus.CounterValue, + }, + }, + logger: logger, + }, nil +} + +func (c *zfsCollector) Update(ch chan<- prometheus.Metric) error { + for _, m := range c.sysctls { + v, err := m.Value() + if err != nil { + return fmt.Errorf("couldn't get sysctl: %w", err) + } + + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, m.name), + m.description, + nil, nil, + ), m.valueType, v) + } + + return nil +} diff --git a/collector/zfs_linux.go b/collector/zfs_linux.go index ead94967f0..735e2154c9 100644 --- a/collector/zfs_linux.go +++ b/collector/zfs_linux.go @@ -11,6 +11,9 @@ // See the License for the specific language governing permissions and // limitations under 
the License. +//go:build !nozfs +// +build !nozfs + package collector import ( @@ -22,14 +25,32 @@ import ( "strconv" "strings" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" ) +// constants from https://github.com/zfsonlinux/zfs/blob/master/lib/libspl/include/sys/kstat.h +// kept as strings for comparison thus avoiding conversion to int +const ( + // kstatDataChar = "0" + // kstatDataInt32 = "1" + // kstatDataUint32 = "2" + // kstatDataInt64 = "3" + kstatDataUint64 = "4" + // kstatDataLong = "5" + // kstatDataUlong = "6" + // kstatDataString = "7" +) + +var zfsPoolStatesName = []string{"online", "degraded", "faulted", "offline", "removed", "unavail"} + func (c *zfsCollector) openProcFile(path string) (*os.File, error) { file, err := os.Open(procFilePath(path)) if err != nil { - log.Debugf("Cannot open %q for reading. Is the kernel module loaded?", procFilePath(path)) + // file not found error can occur if: + // 1. zfs module is not loaded + // 2. zfs version does not have the feature with metrics -- ok to ignore + level.Debug(c.logger).Log("msg", "Cannot open file for reading", "path", procFilePath(path)) return nil, errZFSNotAvailable } return file, nil @@ -42,7 +63,7 @@ func (c *zfsCollector) updateZfsStats(subsystem string, ch chan<- prometheus.Met } defer file.Close() - return c.parseProcfsFile(file, c.linuxPathMap[subsystem], func(s zfsSysctl, v int64) { + return c.parseProcfsFile(file, c.linuxPathMap[subsystem], func(s zfsSysctl, v uint64) { ch <- c.constSysctlMetric(subsystem, s, v) }) } @@ -60,11 +81,12 @@ func (c *zfsCollector) updatePoolStats(ch chan<- prometheus.Metric) error { for _, zpoolPath := range zpoolPaths { file, err := os.Open(zpoolPath) if err != nil { - log.Debugf("Cannot open %q for reading. 
Is the kernel module loaded?", zpoolPath) + // this file should exist, but there is a race where an exporting pool can remove the files -- ok to ignore + level.Debug(c.logger).Log("msg", "Cannot open file for reading", "path", zpoolPath) return errZFSNotAvailable } - err = c.parsePoolProcfsFile(file, zpoolPath, func(poolName string, s zfsSysctl, v int64) { + err = c.parsePoolProcfsFile(file, zpoolPath, func(poolName string, s zfsSysctl, v uint64) { ch <- c.constPoolMetric(poolName, s, v) }) file.Close() @@ -73,10 +95,60 @@ func (c *zfsCollector) updatePoolStats(ch chan<- prometheus.Metric) error { } } + zpoolObjsetPaths, err := filepath.Glob(procFilePath(filepath.Join(c.linuxProcpathBase, c.linuxZpoolObjsetPath))) + if err != nil { + return err + } + + for _, zpoolPath := range zpoolObjsetPaths { + file, err := os.Open(zpoolPath) + if err != nil { + // This file should exist, but there is a race where an exporting pool can remove the files. Ok to ignore. + level.Debug(c.logger).Log("msg", "Cannot open file for reading", "path", zpoolPath) + return errZFSNotAvailable + } + + err = c.parsePoolObjsetFile(file, zpoolPath, func(poolName string, datasetName string, s zfsSysctl, v uint64) { + ch <- c.constPoolObjsetMetric(poolName, datasetName, s, v) + }) + file.Close() + if err != nil { + return err + } + } + + zpoolStatePaths, err := filepath.Glob(procFilePath(filepath.Join(c.linuxProcpathBase, c.linuxZpoolStatePath))) + if err != nil { + return err + } + + if zpoolStatePaths == nil { + level.Debug(c.logger).Log("msg", "No pool state files found") + return nil + } + + for _, zpoolPath := range zpoolStatePaths { + file, err := os.Open(zpoolPath) + if err != nil { + // This file should exist, but there is a race where an exporting pool can remove the files. Ok to ignore. 
+ level.Debug(c.logger).Log("msg", "Cannot open file for reading", "path", zpoolPath) + return errZFSNotAvailable + } + + err = c.parsePoolStateFile(file, zpoolPath, func(poolName string, stateName string, isActive uint64) { + ch <- c.constPoolStateMetric(poolName, stateName, isActive) + }) + + file.Close() + if err != nil { + return err + } + } + return nil } -func (c *zfsCollector) parseProcfsFile(reader io.Reader, fmtExt string, handler func(zfsSysctl, int64)) error { +func (c *zfsCollector) parseProcfsFile(reader io.Reader, fmtExt string, handler func(zfsSysctl, uint64)) error { scanner := bufio.NewScanner(reader) parseLine := false @@ -93,14 +165,16 @@ func (c *zfsCollector) parseProcfsFile(reader io.Reader, fmtExt string, handler continue } - key := fmt.Sprintf("kstat.zfs.misc.%s.%s", fmtExt, parts[0]) - - value, err := strconv.ParseInt(parts[2], 10, 64) - if err != nil { - return fmt.Errorf("could not parse expected integer value for %q", key) + // kstat data type (column 2) should be KSTAT_DATA_UINT64, otherwise ignore + // TODO: when other KSTAT_DATA_* types arrive, much of this will need to be restructured + if parts[1] == kstatDataUint64 { + key := fmt.Sprintf("kstat.zfs.misc.%s.%s", fmtExt, parts[0]) + value, err := strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return fmt.Errorf("could not parse expected integer value for %q", key) + } + handler(zfsSysctl(key), value) } - handler(zfsSysctl(key), value) - } if !parseLine { return fmt.Errorf("did not parse a single %q metric", fmtExt) @@ -109,7 +183,7 @@ func (c *zfsCollector) parseProcfsFile(reader io.Reader, fmtExt string, handler return scanner.Err() } -func (c *zfsCollector) parsePoolProcfsFile(reader io.Reader, zpoolPath string, handler func(string, zfsSysctl, int64)) error { +func (c *zfsCollector) parsePoolProcfsFile(reader io.Reader, zpoolPath string, handler func(string, zfsSysctl, uint64)) error { scanner := bufio.NewScanner(reader) parseLine := false @@ -139,9 +213,9 @@ func (c 
*zfsCollector) parsePoolProcfsFile(reader io.Reader, zpoolPath string, h for i, field := range fields { key := fmt.Sprintf("kstat.zfs.misc.%s.%s", zpoolFile, field) - value, err := strconv.ParseInt(line[i], 10, 64) + value, err := strconv.ParseUint(line[i], 10, 64) if err != nil { - return fmt.Errorf("could not parse expected integer value for %q: %v", key, err) + return fmt.Errorf("could not parse expected integer value for %q: %w", key, err) } handler(zpoolName, zfsSysctl(key), value) } @@ -149,3 +223,75 @@ func (c *zfsCollector) parsePoolProcfsFile(reader io.Reader, zpoolPath string, h return scanner.Err() } + +func (c *zfsCollector) parsePoolObjsetFile(reader io.Reader, zpoolPath string, handler func(string, string, zfsSysctl, uint64)) error { + scanner := bufio.NewScanner(reader) + + parseLine := false + var zpoolName, datasetName string + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + + if !parseLine && len(parts) == 3 && parts[0] == "name" && parts[1] == "type" && parts[2] == "data" { + parseLine = true + continue + } + + if !parseLine || len(parts) < 3 { + continue + } + if parts[0] == "dataset_name" { + zpoolPathElements := strings.Split(zpoolPath, "/") + pathLen := len(zpoolPathElements) + zpoolName = zpoolPathElements[pathLen-2] + datasetName = parts[2] + continue + } + + if parts[1] == kstatDataUint64 { + key := fmt.Sprintf("kstat.zfs.misc.objset.%s", parts[0]) + value, err := strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return fmt.Errorf("could not parse expected integer value for %q", key) + } + handler(zpoolName, datasetName, zfsSysctl(key), value) + } + } + if !parseLine { + return fmt.Errorf("did not parse a single %s %s metric", zpoolName, datasetName) + } + + return scanner.Err() +} + +func (c *zfsCollector) parsePoolStateFile(reader io.Reader, zpoolPath string, handler func(string, string, uint64)) error { + scanner := bufio.NewScanner(reader) + scanner.Scan() + + actualStateName, err := scanner.Text(), scanner.Err() 
+ if err != nil { + return err + } + + actualStateName = strings.ToLower(actualStateName) + + zpoolPathElements := strings.Split(zpoolPath, "/") + pathLen := len(zpoolPathElements) + if pathLen < 2 { + return fmt.Errorf("zpool path did not return at least two elements") + } + + zpoolName := zpoolPathElements[pathLen-2] + + for _, stateName := range zfsPoolStatesName { + isActive := uint64(0) + + if actualStateName == stateName { + isActive = 1 + } + + handler(zpoolName, stateName, isActive) + } + + return nil +} diff --git a/collector/zfs_linux_test.go b/collector/zfs_linux_test.go index 879eecde15..b9aec14480 100644 --- a/collector/zfs_linux_test.go +++ b/collector/zfs_linux_test.go @@ -11,6 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !nozfs +// +build !nozfs + package collector import ( @@ -32,7 +35,7 @@ func TestArcstatsParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(arcstatsFile, "arcstats", func(s zfsSysctl, v int64) { + err = c.parseProcfsFile(arcstatsFile, "arcstats", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.arcstats.hits") { return @@ -40,7 +43,7 @@ func TestArcstatsParsing(t *testing.T) { handlerCalled = true - if v != int64(8772612) { + if v != uint64(8772612) { t.Fatalf("Incorrect value parsed from procfs data") } @@ -68,7 +71,7 @@ func TestZfetchstatsParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(zfetchstatsFile, "zfetchstats", func(s zfsSysctl, v int64) { + err = c.parseProcfsFile(zfetchstatsFile, "zfetchstats", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.zfetchstats.hits") { return @@ -76,7 +79,7 @@ func TestZfetchstatsParsing(t *testing.T) { handlerCalled = true - if v != int64(7067992) { + if v != uint64(7067992) { t.Fatalf("Incorrect value parsed from procfs data") } @@ -104,7 +107,7 @@ func TestZilParsing(t *testing.T) { } handlerCalled := false - err = 
c.parseProcfsFile(zilFile, "zil", func(s zfsSysctl, v int64) { + err = c.parseProcfsFile(zilFile, "zil", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.zil.zil_commit_count") { return @@ -112,7 +115,7 @@ func TestZilParsing(t *testing.T) { handlerCalled = true - if v != int64(10) { + if v != uint64(10) { t.Fatalf("Incorrect value parsed from procfs data") } @@ -140,7 +143,7 @@ func TestVdevCacheStatsParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(vdevCacheStatsFile, "vdev_cache_stats", func(s zfsSysctl, v int64) { + err = c.parseProcfsFile(vdevCacheStatsFile, "vdev_cache_stats", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.vdev_cache_stats.delegations") { return @@ -148,7 +151,7 @@ func TestVdevCacheStatsParsing(t *testing.T) { handlerCalled = true - if v != int64(40) { + if v != uint64(40) { t.Fatalf("Incorrect value parsed from procfs data") } @@ -176,7 +179,7 @@ func TestXuioStatsParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(xuioStatsFile, "xuio_stats", func(s zfsSysctl, v int64) { + err = c.parseProcfsFile(xuioStatsFile, "xuio_stats", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.xuio_stats.onloan_read_buf") { return @@ -184,7 +187,7 @@ func TestXuioStatsParsing(t *testing.T) { handlerCalled = true - if v != int64(32) { + if v != uint64(32) { t.Fatalf("Incorrect value parsed from procfs data") } @@ -212,7 +215,7 @@ func TestFmParsing(t *testing.T) { } handlerCalled := false - err = c.parseProcfsFile(fmFile, "fm", func(s zfsSysctl, v int64) { + err = c.parseProcfsFile(fmFile, "fm", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.fm.erpt-dropped") { return @@ -220,7 +223,7 @@ func TestFmParsing(t *testing.T) { handlerCalled = true - if v != int64(18) { + if v != uint64(18) { t.Fatalf("Incorrect value parsed from procfs data") } @@ -248,7 +251,7 @@ func TestDmuTxParsing(t *testing.T) { } handlerCalled := false - err = 
c.parseProcfsFile(dmuTxFile, "dmu_tx", func(s zfsSysctl, v int64) { + err = c.parseProcfsFile(dmuTxFile, "dmu_tx", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.dmu_tx.dmu_tx_assigned") { return @@ -256,7 +259,7 @@ func TestDmuTxParsing(t *testing.T) { handlerCalled = true - if v != int64(3532844) { + if v != uint64(3532844) { t.Fatalf("Incorrect value parsed from procfs data") } @@ -289,14 +292,14 @@ func TestZpoolParsing(t *testing.T) { t.Fatal(err) } - err = c.parsePoolProcfsFile(file, zpoolPath, func(poolName string, s zfsSysctl, v int64) { + err = c.parsePoolProcfsFile(file, zpoolPath, func(poolName string, s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.io.nread") { return } handlerCalled = true - if v != int64(1884160) && v != int64(2826240) { + if v != uint64(1884160) && v != uint64(2826240) { t.Fatalf("Incorrect value parsed from procfs data %v", v) } @@ -310,3 +313,236 @@ func TestZpoolParsing(t *testing.T) { t.Fatal("Zpool parsing handler was not called for some expected sysctls") } } + +func TestZpoolObjsetParsing(t *testing.T) { + zpoolPaths, err := filepath.Glob("fixtures/proc/spl/kstat/zfs/*/objset-*") + if err != nil { + t.Fatal(err) + } + + c := zfsCollector{} + if err != nil { + t.Fatal(err) + } + + handlerCalled := false + for _, zpoolPath := range zpoolPaths { + file, err := os.Open(zpoolPath) + if err != nil { + t.Fatal(err) + } + + err = c.parsePoolObjsetFile(file, zpoolPath, func(poolName string, datasetName string, s zfsSysctl, v uint64) { + if s != zfsSysctl("kstat.zfs.misc.objset.writes") { + return + } + + handlerCalled = true + + if v != uint64(0) && v != uint64(4) && v != uint64(10) { + t.Fatalf("Incorrect value parsed from procfs data %v", v) + } + + }) + file.Close() + if err != nil { + t.Fatal(err) + } + } + if !handlerCalled { + t.Fatal("Zpool parsing handler was not called for some expected sysctls") + } +} + +func TestAbdstatsParsing(t *testing.T) { + abdstatsFile, err := 
os.Open("fixtures/proc/spl/kstat/zfs/abdstats") + if err != nil { + t.Fatal(err) + } + defer abdstatsFile.Close() + + c := zfsCollector{} + if err != nil { + t.Fatal(err) + } + + handlerCalled := false + err = c.parseProcfsFile(abdstatsFile, "abdstats", func(s zfsSysctl, v uint64) { + + if s != zfsSysctl("kstat.zfs.misc.abdstats.linear_data_size") { + return + } + + handlerCalled = true + + if v != uint64(223232) { + t.Fatalf("Incorrect value parsed from procfs abdstats data") + } + + }) + + if err != nil { + t.Fatal(err) + } + + if !handlerCalled { + t.Fatal("ABDStats parsing handler was not called for some expected sysctls") + } +} + +func TestDbufstatsParsing(t *testing.T) { + dbufstatsFile, err := os.Open("fixtures/proc/spl/kstat/zfs/dbuf_stats") + if err != nil { + t.Fatal(err) + } + defer dbufstatsFile.Close() + + c := zfsCollector{} + if err != nil { + t.Fatal(err) + } + + handlerCalled := false + err = c.parseProcfsFile(dbufstatsFile, "dbufstats", func(s zfsSysctl, v uint64) { + + if s != zfsSysctl("kstat.zfs.misc.dbufstats.hash_hits") { + return + } + + handlerCalled = true + + if v != uint64(108807) { + t.Fatalf("Incorrect value parsed from procfs dbufstats data") + } + + }) + + if err != nil { + t.Fatal(err) + } + + if !handlerCalled { + t.Fatal("DbufStats parsing handler was not called for some expected sysctls") + } +} + +func TestDnodestatsParsing(t *testing.T) { + dnodestatsFile, err := os.Open("fixtures/proc/spl/kstat/zfs/dnodestats") + if err != nil { + t.Fatal(err) + } + defer dnodestatsFile.Close() + + c := zfsCollector{} + if err != nil { + t.Fatal(err) + } + + handlerCalled := false + err = c.parseProcfsFile(dnodestatsFile, "dnodestats", func(s zfsSysctl, v uint64) { + + if s != zfsSysctl("kstat.zfs.misc.dnodestats.dnode_hold_alloc_hits") { + return + } + + handlerCalled = true + + if v != uint64(37617) { + t.Fatalf("Incorrect value parsed from procfs dnodestats data") + } + + }) + + if err != nil { + t.Fatal(err) + } + + if !handlerCalled { + 
t.Fatal("Dnodestats parsing handler was not called for some expected sysctls") + } +} + +func TestVdevMirrorstatsParsing(t *testing.T) { + vdevMirrorStatsFile, err := os.Open("fixtures/proc/spl/kstat/zfs/vdev_mirror_stats") + if err != nil { + t.Fatal(err) + } + defer vdevMirrorStatsFile.Close() + + c := zfsCollector{} + if err != nil { + t.Fatal(err) + } + + handlerCalled := false + err = c.parseProcfsFile(vdevMirrorStatsFile, "vdev_mirror_stats", func(s zfsSysctl, v uint64) { + + if s != zfsSysctl("kstat.zfs.misc.vdev_mirror_stats.preferred_not_found") { + return + } + + handlerCalled = true + + if v != uint64(94) { + t.Fatalf("Incorrect value parsed from procfs vdev_mirror_stats data") + } + + }) + + if err != nil { + t.Fatal(err) + } + + if !handlerCalled { + t.Fatal("VdevMirrorStats parsing handler was not called for some expected sysctls") + } +} + +func TestPoolStateParsing(t *testing.T) { + zpoolPaths, err := filepath.Glob("fixtures/proc/spl/kstat/zfs/*/state") + if err != nil { + t.Fatal(err) + } + + c := zfsCollector{} + if err != nil { + t.Fatal(err) + } + + handlerCalled := false + for _, zpoolPath := range zpoolPaths { + file, err := os.Open(zpoolPath) + if err != nil { + t.Fatal(err) + } + + err = c.parsePoolStateFile(file, zpoolPath, func(poolName string, stateName string, isActive uint64) { + handlerCalled = true + + if poolName == "pool1" { + if isActive != uint64(1) && stateName == "online" { + t.Fatalf("Incorrect parsed value for online state") + } + if isActive != uint64(0) && stateName != "online" { + t.Fatalf("Incorrect parsed value for online state") + } + } + if poolName == "poolz1" { + if isActive != uint64(1) && stateName == "degraded" { + t.Fatalf("Incorrect parsed value for degraded state") + } + if isActive != uint64(0) && stateName != "degraded" { + t.Fatalf("Incorrect parsed value for degraded state") + } + } + }) + file.Close() + if err != nil { + t.Fatal(err) + } + } + if !handlerCalled { + t.Fatal("Zpool parsing handler was not 
called for some expected sysctls") + } + +} diff --git a/collector/zfs_solaris.go b/collector/zfs_solaris.go new file mode 100644 index 0000000000..040f31dbbd --- /dev/null +++ b/collector/zfs_solaris.go @@ -0,0 +1,336 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build solaris && !nozfs +// +build solaris,!nozfs + +package collector + +import ( + "strings" + + "github.com/go-kit/log" + "github.com/illumos/go-kstat" + "github.com/prometheus/client_golang/prometheus" +) + +type zfsCollector struct { + abdstatsLinearCount *prometheus.Desc + abdstatsLinearDataSize *prometheus.Desc + abdstatsScatterChunkWaste *prometheus.Desc + abdstatsScatterCount *prometheus.Desc + abdstatsScatterDataSize *prometheus.Desc + abdstatsStructSize *prometheus.Desc + arcstatsAnonSize *prometheus.Desc + arcstatsC *prometheus.Desc + arcstatsCMax *prometheus.Desc + arcstatsCMin *prometheus.Desc + arcstatsDataSize *prometheus.Desc + arcstatsDemandDataHits *prometheus.Desc + arcstatsDemandDataMisses *prometheus.Desc + arcstatsDemandMetadataHits *prometheus.Desc + arcstatsDemandMetadataMisses *prometheus.Desc + arcstatsHeaderSize *prometheus.Desc + arcstatsHits *prometheus.Desc + arcstatsMisses *prometheus.Desc + arcstatsMFUGhostHits *prometheus.Desc + arcstatsMFUGhostSize *prometheus.Desc + arcstatsMFUSize *prometheus.Desc + arcstatsMRUGhostHits *prometheus.Desc + arcstatsMRUGhostSize *prometheus.Desc + arcstatsMRUSize *prometheus.Desc 
+ arcstatsOtherSize *prometheus.Desc + arcstatsP *prometheus.Desc + arcstatsSize *prometheus.Desc + zfetchstatsHits *prometheus.Desc + zfetchstatsMisses *prometheus.Desc + logger log.Logger +} + +const ( + zfsCollectorSubsystem = "zfs" +) + +func init() { + registerCollector("zfs", defaultEnabled, NewZfsCollector) +} + +func NewZfsCollector(logger log.Logger) (Collector, error) { + return &zfsCollector{ + abdstatsLinearCount: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "abdstats_linear_count_total"), + "ZFS ARC buffer data linear count", nil, nil, + ), + abdstatsLinearDataSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "abdstats_linear_data_bytes"), + "ZFS ARC buffer data linear data size", nil, nil, + ), + abdstatsScatterChunkWaste: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "abdstats_scatter_chunk_waste_bytes"), + "ZFS ARC buffer data scatter chunk waste", nil, nil, + ), + abdstatsScatterCount: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "abdstats_scatter_count_total"), + "ZFS ARC buffer data scatter count", nil, nil, + ), + abdstatsScatterDataSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "abdstats_scatter_data_bytes"), + "ZFS ARC buffer data scatter data size", nil, nil, + ), + abdstatsStructSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "abdstats_struct_bytes"), + "ZFS ARC buffer data struct size", nil, nil, + ), + arcstatsAnonSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_anon_bytes"), + "ZFS ARC anon size", nil, nil, + ), + arcstatsC: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_c_bytes"), + "ZFS ARC target size", nil, nil, + ), + arcstatsCMax: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_c_max_bytes"), + "ZFS 
ARC maximum size", nil, nil, + ), + arcstatsCMin: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_c_min_bytes"), + "ZFS ARC minimum size", nil, nil, + ), + arcstatsDataSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_data_bytes"), + "ZFS ARC data size", nil, nil, + ), + arcstatsDemandDataHits: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_demand_data_hits_total"), + "ZFS ARC demand data hits", nil, nil, + ), + arcstatsDemandDataMisses: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_demand_data_misses_total"), + "ZFS ARC demand data misses", nil, nil, + ), + arcstatsDemandMetadataHits: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_demand_metadata_hits_total"), + "ZFS ARC demand metadata hits", nil, nil, + ), + arcstatsDemandMetadataMisses: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_demand_metadata_misses_total"), + "ZFS ARC demand metadata misses", nil, nil, + ), + arcstatsHeaderSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_hdr_bytes"), + "ZFS ARC header size", nil, nil, + ), + arcstatsHits: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_hits_total"), + "ZFS ARC hits", nil, nil, + ), + arcstatsMisses: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_misses_total"), + "ZFS ARC misses", nil, nil, + ), + arcstatsMFUGhostHits: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_mfu_ghost_hits_total"), + "ZFS ARC MFU ghost hits", nil, nil, + ), + arcstatsMFUGhostSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_mfu_ghost_size"), + "ZFS ARC MFU ghost size", nil, nil, + ), + arcstatsMFUSize: 
prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_mfu_bytes"), + "ZFS ARC MFU size", nil, nil, + ), + arcstatsMRUGhostHits: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_mru_ghost_hits_total"), + "ZFS ARC MRU ghost hits", nil, nil, + ), + arcstatsMRUGhostSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_mru_ghost_bytes"), + "ZFS ARC MRU ghost size", nil, nil, + ), + arcstatsMRUSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_mru_bytes"), + "ZFS ARC MRU size", nil, nil, + ), + arcstatsOtherSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_other_bytes"), + "ZFS ARC other size", nil, nil, + ), + arcstatsP: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_p_bytes"), + "ZFS ARC MRU target size", nil, nil, + ), + arcstatsSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_size_bytes"), + "ZFS ARC size", nil, nil, + ), + zfetchstatsHits: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "zfetchstats_hits_total"), + "ZFS cache fetch hits", nil, nil, + ), + zfetchstatsMisses: prometheus.NewDesc( + prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "zfetchstats_misses_total"), + "ZFS cache fetch misses", nil, nil, + ), + logger: logger, + }, nil +} + +func (c *zfsCollector) updateZfsAbdStats(ch chan<- prometheus.Metric) error { + var metricType prometheus.ValueType + + tok, err := kstat.Open() + if err != nil { + return err + } + + defer tok.Close() + + ksZFSInfo, err := tok.Lookup("zfs", 0, "abdstats") + if err != nil { + return err + } + + for k, v := range map[string]*prometheus.Desc{ + "linear_cnt": c.abdstatsLinearCount, + "linear_data_size": c.abdstatsLinearDataSize, + "scatter_chunk_waste": c.abdstatsScatterChunkWaste, + "scatter_cnt": 
c.abdstatsScatterCount, + "scatter_data_size": c.abdstatsScatterDataSize, + "struct_size": c.abdstatsStructSize, + } { + ksZFSInfoValue, err := ksZFSInfo.GetNamed(k) + if err != nil { + return err + } + + if strings.HasSuffix(k, "_cnt") { + metricType = prometheus.CounterValue + } else { + metricType = prometheus.GaugeValue + } + + ch <- prometheus.MustNewConstMetric( + v, + metricType, + float64(ksZFSInfoValue.UintVal), + ) + } + + return nil +} + +func (c *zfsCollector) updateZfsArcStats(ch chan<- prometheus.Metric) error { + var metricType prometheus.ValueType + + tok, err := kstat.Open() + if err != nil { + return err + } + + defer tok.Close() + + ksZFSInfo, err := tok.Lookup("zfs", 0, "arcstats") + if err != nil { + return err + } + + for k, v := range map[string]*prometheus.Desc{ + "anon_size": c.arcstatsAnonSize, + "c": c.arcstatsC, + "c_max": c.arcstatsCMax, + "c_min": c.arcstatsCMin, + "data_size": c.arcstatsDataSize, + "demand_data_hits": c.arcstatsDemandDataHits, + "demand_data_misses": c.arcstatsDemandDataMisses, + "demand_metadata_hits": c.arcstatsDemandMetadataHits, + "demand_metadata_misses": c.arcstatsDemandMetadataMisses, + "hdr_size": c.arcstatsHeaderSize, + "hits": c.arcstatsHits, + "misses": c.arcstatsMisses, + "mfu_ghost_hits": c.arcstatsMFUGhostHits, + "mfu_ghost_size": c.arcstatsMFUGhostSize, + "mfu_size": c.arcstatsMFUSize, + "mru_ghost_hits": c.arcstatsMRUGhostHits, + "mru_ghost_size": c.arcstatsMRUGhostSize, + "mru_size": c.arcstatsMRUSize, + "other_size": c.arcstatsOtherSize, + "p": c.arcstatsP, + "size": c.arcstatsSize, + } { + ksZFSInfoValue, err := ksZFSInfo.GetNamed(k) + if err != nil { + return err + } + + if strings.HasSuffix(k, "_hits") || strings.HasSuffix(k, "_misses") { + metricType = prometheus.CounterValue + } else { + metricType = prometheus.GaugeValue + } + + ch <- prometheus.MustNewConstMetric( + v, + metricType, + float64(ksZFSInfoValue.UintVal), + ) + } + + return nil +} + +func (c *zfsCollector) updateZfsFetchStats(ch 
chan<- prometheus.Metric) error { + tok, err := kstat.Open() + if err != nil { + return err + } + + defer tok.Close() + + ksZFSInfo, err := tok.Lookup("zfs", 0, "zfetchstats") + if err != nil { + return err + } + + for k, v := range map[string]*prometheus.Desc{ + "hits": c.zfetchstatsHits, + "misses": c.zfetchstatsMisses, + } { + ksZFSInfoValue, err := ksZFSInfo.GetNamed(k) + if err != nil { + return err + } + + ch <- prometheus.MustNewConstMetric( + v, + prometheus.CounterValue, + float64(ksZFSInfoValue.UintVal), + ) + } + + return nil +} + +func (c *zfsCollector) Update(ch chan<- prometheus.Metric) error { + if err := c.updateZfsAbdStats(ch); err != nil { + return err + } + if err := c.updateZfsArcStats(ch); err != nil { + return err + } + if err := c.updateZfsFetchStats(ch); err != nil { + return err + } + return nil +} diff --git a/collector/zoneinfo_linux.go b/collector/zoneinfo_linux.go new file mode 100644 index 0000000000..8f7e35d9f1 --- /dev/null +++ b/collector/zoneinfo_linux.go @@ -0,0 +1,240 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "fmt" + "reflect" + + "github.com/go-kit/log" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" +) + +const zoneinfoSubsystem = "zoneinfo" + +type zoneinfoCollector struct { + gaugeMetricDescs map[string]*prometheus.Desc + counterMetricDescs map[string]*prometheus.Desc + logger log.Logger + fs procfs.FS +} + +func init() { + registerCollector("zoneinfo", defaultDisabled, NewZoneinfoCollector) +} + +// NewZoneinfoCollector returns a new Collector exposing zone stats. +func NewZoneinfoCollector(logger log.Logger) (Collector, error) { + fs, err := procfs.NewFS(*procPath) + if err != nil { + return nil, fmt.Errorf("failed to open procfs: %w", err) + } + return &zoneinfoCollector{ + gaugeMetricDescs: createGaugeMetricDescriptions(), + counterMetricDescs: createCounterMetricDescriptions(), + logger: logger, + fs: fs, + }, nil +} + +func (c *zoneinfoCollector) Update(ch chan<- prometheus.Metric) error { + metrics, err := c.fs.Zoneinfo() + if err != nil { + return fmt.Errorf("couldn't get zoneinfo: %w", err) + } + for _, metric := range metrics { + node := metric.Node + zone := metric.Zone + metricStruct := reflect.ValueOf(metric) + typeOfMetricStruct := metricStruct.Type() + for i := 0; i < metricStruct.NumField(); i++ { + value := reflect.Indirect(metricStruct.Field(i)) + if value.Kind() != reflect.Int64 { + continue + } + metricName := typeOfMetricStruct.Field(i).Name + desc, ok := c.gaugeMetricDescs[metricName] + metricType := prometheus.GaugeValue + if !ok { + desc = c.counterMetricDescs[metricName] + metricType = prometheus.CounterValue + } + ch <- prometheus.MustNewConstMetric(desc, metricType, + float64(reflect.Indirect(metricStruct.Field(i)).Int()), + node, zone) + } + for i, value := range metric.Protection { + metricName := fmt.Sprintf("protection_%d", i) + desc, ok := c.gaugeMetricDescs[metricName] + if !ok { + desc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, 
metricName), + fmt.Sprintf("Protection array %d. field", i), + []string{"node", "zone"}, nil) + c.gaugeMetricDescs[metricName] = desc + } + ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, + float64(*value), node, zone) + } + + } + return nil +} +func createGaugeMetricDescriptions() map[string]*prometheus.Desc { + return map[string]*prometheus.Desc{ + "NrFreePages": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_free_pages"), + "Total number of free pages in the zone", + []string{"node", "zone"}, nil), + "Min": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "min_pages"), + "Zone watermark pages_min", + []string{"node", "zone"}, nil), + "Low": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "low_pages"), + "Zone watermark pages_low", + []string{"node", "zone"}, nil), + "High": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "high_pages"), + "Zone watermark pages_high", + []string{"node", "zone"}, nil), + "Scanned": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "scanned_pages"), + "Pages scanned since last reclaim", + []string{"node", "zone"}, nil), + "Spanned": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "spanned_pages"), + "Total pages spanned by the zone, including holes", + []string{"node", "zone"}, nil), + "Present": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "present_pages"), + "Physical pages existing within the zone", + []string{"node", "zone"}, nil), + "Managed": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "managed_pages"), + "Present pages managed by the buddy system", + []string{"node", "zone"}, nil), + "NrActiveAnon": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_active_anon_pages"), + "Number of anonymous pages recently more used", + []string{"node", "zone"}, nil), + 
"NrInactiveAnon": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_inactive_anon_pages"), + "Number of anonymous pages recently less used", + []string{"node", "zone"}, nil), + "NrIsolatedAnon": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_isolated_anon_pages"), + "Temporary isolated pages from anon lru", + []string{"node", "zone"}, nil), + "NrAnonPages": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_anon_pages"), + "Number of anonymous pages currently used by the system", + []string{"node", "zone"}, nil), + "NrAnonTransparentHugepages": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_anon_transparent_hugepages"), + "Number of anonymous transparent huge pages currently used by the system", + []string{"node", "zone"}, nil), + "NrActiveFile": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_active_file_pages"), + "Number of active pages with file-backing", + []string{"node", "zone"}, nil), + "NrInactiveFile": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_inactive_file_pages"), + "Number of inactive pages with file-backing", + []string{"node", "zone"}, nil), + "NrIsolatedFile": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_isolated_file_pages"), + "Temporary isolated pages from file lru", + []string{"node", "zone"}, nil), + "NrFilePages": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_file_pages"), + "Number of file pages", + []string{"node", "zone"}, nil), + "NrSlabReclaimable": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_slab_reclaimable_pages"), + "Number of reclaimable slab pages", + []string{"node", "zone"}, nil), + "NrSlabUnreclaimable": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_slab_unreclaimable_pages"), + "Number of unreclaimable slab 
pages", + []string{"node", "zone"}, nil), + "NrMlockStack": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_mlock_stack_pages"), + "mlock()ed pages found and moved off LRU", + []string{"node", "zone"}, nil), + "NrKernelStack": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_kernel_stacks"), + "Number of kernel stacks", + []string{"node", "zone"}, nil), + "NrMapped": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_mapped_pages"), + "Number of mapped pages", + []string{"node", "zone"}, nil), + "NrDirty": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_dirty_pages"), + "Number of dirty pages", + []string{"node", "zone"}, nil), + "NrWriteback": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_writeback_pages"), + "Number of writeback pages", + []string{"node", "zone"}, nil), + "NrUnevictable": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_unevictable_pages"), + "Number of unevictable pages", + []string{"node", "zone"}, nil), + "NrShmem": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_shmem_pages"), + "Number of shmem pages (included tmpfs/GEM pages)", + []string{"node", "zone"}, nil), + } + +} +func createCounterMetricDescriptions() map[string]*prometheus.Desc { + return map[string]*prometheus.Desc{ + "NrDirtied": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_dirtied_total"), + "Page dirtyings since bootup", + []string{"node", "zone"}, nil), + "NrWritten": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_written_total"), + "Page writings since bootup", + []string{"node", "zone"}, nil), + "NumaHit": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_hit_total"), + "Allocated in intended node", + []string{"node", "zone"}, nil), + "NumaMiss": prometheus.NewDesc( + 
prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_miss_total"), + "Allocated in non intended node", + []string{"node", "zone"}, nil), + "NumaForeign": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_foreign_total"), + "Was intended here, hit elsewhere", + []string{"node", "zone"}, nil), + "NumaInterleave": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_interleave_total"), + "Interleaver preferred this zone", + []string{"node", "zone"}, nil), + "NumaLocal": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_local_total"), + "Allocation from local node", + []string{"node", "zone"}, nil), + "NumaOther": prometheus.NewDesc( + prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_other_total"), + "Allocation from other node", + []string{"node", "zone"}, nil), + } +} diff --git a/docs/TIME.md b/docs/TIME.md index 0f28718f30..340c72d65c 100644 --- a/docs/TIME.md +++ b/docs/TIME.md @@ -2,15 +2,15 @@ ## `ntp` collector -This collector is intended for usage with local NTPD like [ntp.org](http://ntp.org/), [chrony](https://chrony.tuxfamily.org/comparison.html) or [OpenNTPD](http://www.openntpd.org/). +This collector is intended for usage with local NTP daemons including [ntp.org](http://ntp.org/), [chrony](https://chrony.tuxfamily.org/comparison.html), and [OpenNTPD](http://www.openntpd.org/). -Note, some chrony packages have `local stratum 10` configuration value making chrony a valid server when it it is unsynchronised. This configuration makes one of `node_ntp_sanity` heuristics unreliable. +Note, some chrony packages have `local stratum 10` configuration value making chrony a valid server when it is unsynchronised. This configuration makes one of the heuristics that derive `node_ntp_sanity` unreliable. -Note, OpenNTPD does not listen for SNTP queries by default, you should add `listen on 127.0.0.1` configuration line to use this collector with OpenNTPD. 
+Note, OpenNTPD does not listen for SNTP queries by default. Add `listen on 127.0.0.1` to the OpenNTPD configuration when using this collector with that package. ### `node_ntp_stratum` -This metric shows [stratum](https://en.wikipedia.org/wiki/Network_Time_Protocol#Clock_strata) of local NTPD. +This metric shows the [stratum](https://en.wikipedia.org/wiki/Network_Time_Protocol#Clock_strata) of the local NTP daemon. Stratum `16` means that clock are unsynchronised. See also aforementioned note about default local stratum in chrony. @@ -41,7 +41,7 @@ implementation details vary from "**local** wall-clock time" to "Reference Time field in incoming SNTP packet". `time() - node_ntp_reference_timestamp_seconds` and -`node_time - node_ntp_reference_timestamp_seconds` represent some estimate of +`node_time_seconds - node_ntp_reference_timestamp_seconds` represent some estimate of "freshness" of synchronization. ### `node_ntp_root_delay` and `node_ntp_root_dispersion` diff --git a/docs/V0_16_UPGRADE_GUIDE.md b/docs/V0_16_UPGRADE_GUIDE.md new file mode 100644 index 0000000000..0a8bf24ee6 --- /dev/null +++ b/docs/V0_16_UPGRADE_GUIDE.md @@ -0,0 +1,21 @@ +# Version 0.16.0 Upgrade Guide + +The `node_exporter` 0.16.0 and newer renamed many metrics in order to conform with Prometheus [naming best practices]. + +In order to allow easy upgrades, there are several options. + +## Update dashboards + +Grafana users can add multiple queries in order to display both the old and new data simultaneously. + +## Use recording rules + +We have provided a [sample recording rule set that translates old metrics to new ones] and the [one that translates new metrics format to old one] to create duplicate metrics under the other naming scheme. This has a minor disadvantage that it creates a lot of extra data, and re-aligns the timestamps of the data. + +## Run both old and new versions simultaneously. 
+ +It's possible to run both the old and new exporter on different ports, and include an additional scrape job in Prometheus. It's recommended to enable only the collectors that have name changes that you care about. + +[naming best practices]: https://prometheus.io/docs/practices/naming/ +[sample recording rule set that translates old metrics to new ones]: example-16-compatibility-rules.yml +[one that translates new metrics format to old one]: example-16-compatibility-rules-new-to-old.yml diff --git a/docs/example-16-compatibility-rules-new-to-old.yml b/docs/example-16-compatibility-rules-new-to-old.yml new file mode 100644 index 0000000000..1d94672cc9 --- /dev/null +++ b/docs/example-16-compatibility-rules-new-to-old.yml @@ -0,0 +1,201 @@ +groups: + - name: node_exporter-16-bcache + rules: + - expr: node_bcache_cache_read_races + record: node_bcache_cache_read_races_total + - name: node_exporter-16-buddyinfo + rules: + - expr: node_buddyinfo_blocks + record: node_buddyinfo_count + - name: node_exporter-16-stat + rules: + - expr: node_boot_time_seconds + record: node_boot_time + - expr: node_time_seconds + record: node_time + - expr: node_context_switches_total + record: node_context_switches + - expr: node_forks_total + record: node_forks + - expr: node_intr_total + record: node_intr + - name: node_exporter-16-cpu + rules: + - expr: label_replace(node_cpu_seconds_total, "cpu", "$1", "cpu", "cpu(.+)") + record: node_cpu + - name: node_exporter-16-diskstats + rules: + - expr: node_disk_read_bytes_total + record: node_disk_bytes_read + - expr: node_disk_written_bytes_total + record: node_disk_bytes_written + - expr: node_disk_io_time_seconds_total * 1000 + record: node_disk_io_time_ms + - expr: node_disk_io_time_weighted_seconds_total + record: node_disk_io_time_weighted + - expr: node_disk_reads_completed_total + record: node_disk_reads_completed + - expr: node_disk_reads_merged_total + record: node_disk_reads_merged + - expr: node_disk_read_time_seconds_total * 
1000 + record: node_disk_read_time_ms + - expr: node_disk_writes_completed_total + record: node_disk_writes_completed + - expr: node_disk_writes_merged_total + record: node_disk_writes_merged + - expr: node_disk_write_time_seconds_total * 1000 + record: node_disk_write_time_ms + - name: node_exporter-16-filesystem + rules: + - expr: node_filesystem_free_bytes + record: node_filesystem_free + - expr: node_filesystem_avail_bytes + record: node_filesystem_avail + - expr: node_filesystem_size_bytes + record: node_filesystem_size + - name: node_exporter-16-infiniband + rules: + - expr: node_infiniband_port_data_received_bytes_total + record: node_infiniband_port_data_received_bytes + - expr: node_infiniband_port_data_transmitted_bytes_total + record: node_infiniband_port_data_transmitted_bytes + - name: node_exporter-16-interrupts + rules: + - expr: node_interrupts_total + record: node_interrupts + - name: node_exporter-16-memory + rules: + - expr: node_memory_Active_bytes + record: node_memory_Active + - expr: node_memory_Active_anon_bytes + record: node_memory_Active_anon + - expr: node_memory_Active_file_bytes + record: node_memory_Active_file + - expr: node_memory_AnonHugePages_bytes + record: node_memory_AnonHugePages + - expr: node_memory_AnonPages_bytes + record: node_memory_AnonPages + - expr: node_memory_Bounce_bytes + record: node_memory_Bounce + - expr: node_memory_Buffers_bytes + record: node_memory_Buffers + - expr: node_memory_Cached_bytes + record: node_memory_Cached + - expr: node_memory_CommitLimit_bytes + record: node_memory_CommitLimit + - expr: node_memory_Committed_AS_bytes + record: node_memory_Committed_AS + - expr: node_memory_DirectMap2M_bytes + record: node_memory_DirectMap2M + - expr: node_memory_DirectMap4k_bytes + record: node_memory_DirectMap4k + - expr: node_memory_Dirty_bytes + record: node_memory_Dirty + - expr: node_memory_HardwareCorrupted_bytes + record: node_memory_HardwareCorrupted + - expr: node_memory_Hugepagesize_bytes + record: 
node_memory_Hugepagesize + - expr: node_memory_Inactive_bytes + record: node_memory_Inactive + - expr: node_memory_Inactive_anon_bytes + record: node_memory_Inactive_anon + - expr: node_memory_Inactive_file_bytes + record: node_memory_Inactive_file + - expr: node_memory_KernelStack_bytes + record: node_memory_KernelStack + - expr: node_memory_Mapped_bytes + record: node_memory_Mapped + - expr: node_memory_MemAvailable_bytes + record: node_memory_MemAvailable + - expr: node_memory_MemFree_bytes + record: node_memory_MemFree + - expr: node_memory_MemTotal_bytes + record: node_memory_MemTotal + - expr: node_memory_Mlocked_bytes + record: node_memory_Mlocked + - expr: node_memory_NFS_Unstable_bytes + record: node_memory_NFS_Unstable + - expr: node_memory_PageTables_bytes + record: node_memory_PageTables + - expr: node_memory_Shmem_bytes + record: node_memory_Shmem + - expr: node_memory_ShmemHugePages_bytes + record: node_memory_ShmemHugePages + - expr: node_memory_ShmemPmdMapped_bytes + record: node_memory_ShmemPmdMapped + - expr: node_memory_Slab_bytes + record: node_memory_Slab + - expr: node_memory_SReclaimable_bytes + record: node_memory_SReclaimable + - expr: node_memory_SUnreclaim_bytes + record: node_memory_SUnreclaim + - expr: node_memory_SwapCached_bytes + record: node_memory_SwapCached + - expr: node_memory_SwapFree_bytes + record: node_memory_SwapFree + - expr: node_memory_SwapTotal_bytes + record: node_memory_SwapTotal + - expr: node_memory_Unevictable_bytes + record: node_memory_Unevictable + - expr: node_memory_VmallocChunk_bytes + record: node_memory_VmallocChunk + - expr: node_memory_VmallocTotal_bytes + record: node_memory_VmallocTotal + - expr: node_memory_VmallocUsed_bytes + record: node_memory_VmallocUsed + - expr: node_memory_Writeback_bytes + record: node_memory_Writeback + - expr: node_memory_WritebackTmp_bytes + record: node_memory_WritebackTmp + - name: node_exporter-16-network + rules: + - expr: node_network_receive_bytes_total + record: 
node_network_receive_bytes + - expr: node_network_receive_compressed_total + record: node_network_receive_compressed + - expr: node_network_receive_drop_total + record: node_network_receive_drop + - expr: node_network_receive_errs_total + record: node_network_receive_errs + - expr: node_network_receive_fifo_total + record: node_network_receive_fifo + - expr: node_network_receive_frame_total + record: node_network_receive_frame + - expr: node_network_receive_multicast_total + record: node_network_receive_multicast + - expr: node_network_receive_packets_total + record: node_network_receive_packets + - expr: node_network_transmit_bytes_total + record: node_network_transmit_bytes + - expr: node_network_transmit_compressed_total + record: node_network_transmit_compressed + - expr: node_network_transmit_drop_total + record: node_network_transmit_drop + - expr: node_network_transmit_errs_total + record: node_network_transmit_errs + - expr: node_network_transmit_fifo_total + record: node_network_transmit_fifo + - expr: node_network_transmit_frame_total + record: node_network_transmit_frame + - expr: node_network_transmit_multicast_total + record: node_network_transmit_multicast + - expr: node_network_transmit_packets_total + record: node_network_transmit_packets + - name: node_exporter-16-nfs + rules: + - expr: node_nfs_connections_total + record: node_nfs_net_connections + - expr: node_nfs_packets_total + record: node_nfs_net_reads + - expr: label_replace(label_replace(node_nfs_requests_total, "proto", "$1", "version", "(.+)"), "method", "$1", "procedure", "(.+)") + record: node_nfs_procedures + - expr: node_nfs_rpc_authentication_refreshes_total + record: node_nfs_rpc_authentication_refreshes + - expr: node_nfs_rpcs_total + record: node_nfs_rpc_operations + - expr: node_nfs_rpc_retransmissions_total + record: node_nfs_rpc_retransmissions + - name: node_exporter-16-textfile + rules: + - expr: node_textfile_mtime_seconds + record: node_textfile_mtime diff --git 
a/docs/example-16-compatibility-rules.yml b/docs/example-16-compatibility-rules.yml new file mode 100644 index 0000000000..f79ebb5040 --- /dev/null +++ b/docs/example-16-compatibility-rules.yml @@ -0,0 +1,201 @@ +groups: + - name: node_exporter-16-bcache + rules: + - record: node_bcache_cache_read_races + expr: node_bcache_cache_read_races_total + - name: node_exporter-16-buddyinfo + rules: + - record: node_buddyinfo_blocks + expr: node_buddyinfo_count + - name: node_exporter-16-stat + rules: + - record: node_boot_time_seconds + expr: node_boot_time + - record: node_time_seconds + expr: node_time + - record: node_context_switches_total + expr: node_context_switches + - record: node_forks_total + expr: node_forks + - record: node_intr_total + expr: node_intr + - name: node_exporter-16-cpu + rules: + - record: node_cpu_seconds_total + expr: label_replace(node_cpu, "cpu", "$1", "cpu", "cpu(.+)") + - name: node_exporter-16-diskstats + rules: + - record: node_disk_read_bytes_total + expr: node_disk_bytes_read + - record: node_disk_written_bytes_total + expr: node_disk_bytes_written + - record: node_disk_io_time_seconds_total + expr: node_disk_io_time_ms / 1000 + - record: node_disk_io_time_weighted_seconds_total + expr: node_disk_io_time_weighted + - record: node_disk_reads_completed_total + expr: node_disk_reads_completed + - record: node_disk_reads_merged_total + expr: node_disk_reads_merged + - record: node_disk_read_time_seconds_total + expr: node_disk_read_time_ms / 1000 + - record: node_disk_writes_completed_total + expr: node_disk_writes_completed + - record: node_disk_writes_merged_total + expr: node_disk_writes_merged + - record: node_disk_write_time_seconds_total + expr: node_disk_write_time_ms / 1000 + - name: node_exporter-16-filesystem + rules: + - record: node_filesystem_free_bytes + expr: node_filesystem_free + - record: node_filesystem_avail_bytes + expr: node_filesystem_avail + - record: node_filesystem_size_bytes + expr: node_filesystem_size + - name: 
node_exporter-16-infiniband + rules: + - record: node_infiniband_port_data_received_bytes_total + expr: node_infiniband_port_data_received_bytes + - record: node_infiniband_port_data_transmitted_bytes_total + expr: node_infiniband_port_data_transmitted_bytes + - name: node_exporter-16-interrupts + rules: + - record: node_interrupts_total + expr: node_interrupts + - name: node_exporter-16-memory + rules: + - record: node_memory_Active_bytes + expr: node_memory_Active + - record: node_memory_Active_anon_bytes + expr: node_memory_Active_anon + - record: node_memory_Active_file_bytes + expr: node_memory_Active_file + - record: node_memory_AnonHugePages_bytes + expr: node_memory_AnonHugePages + - record: node_memory_AnonPages_bytes + expr: node_memory_AnonPages + - record: node_memory_Bounce_bytes + expr: node_memory_Bounce + - record: node_memory_Buffers_bytes + expr: node_memory_Buffers + - record: node_memory_Cached_bytes + expr: node_memory_Cached + - record: node_memory_CommitLimit_bytes + expr: node_memory_CommitLimit + - record: node_memory_Committed_AS_bytes + expr: node_memory_Committed_AS + - record: node_memory_DirectMap2M_bytes + expr: node_memory_DirectMap2M + - record: node_memory_DirectMap4k_bytes + expr: node_memory_DirectMap4k + - record: node_memory_Dirty_bytes + expr: node_memory_Dirty + - record: node_memory_HardwareCorrupted_bytes + expr: node_memory_HardwareCorrupted + - record: node_memory_Hugepagesize_bytes + expr: node_memory_Hugepagesize + - record: node_memory_Inactive_bytes + expr: node_memory_Inactive + - record: node_memory_Inactive_anon_bytes + expr: node_memory_Inactive_anon + - record: node_memory_Inactive_file_bytes + expr: node_memory_Inactive_file + - record: node_memory_KernelStack_bytes + expr: node_memory_KernelStack + - record: node_memory_Mapped_bytes + expr: node_memory_Mapped + - record: node_memory_MemAvailable_bytes + expr: node_memory_MemAvailable + - record: node_memory_MemFree_bytes + expr: node_memory_MemFree + - record: 
node_memory_MemTotal_bytes + expr: node_memory_MemTotal + - record: node_memory_Mlocked_bytes + expr: node_memory_Mlocked + - record: node_memory_NFS_Unstable_bytes + expr: node_memory_NFS_Unstable + - record: node_memory_PageTables_bytes + expr: node_memory_PageTables + - record: node_memory_Shmem_bytes + expr: node_memory_Shmem + - record: node_memory_ShmemHugePages_bytes + expr: node_memory_ShmemHugePages + - record: node_memory_ShmemPmdMapped_bytes + expr: node_memory_ShmemPmdMapped + - record: node_memory_Slab_bytes + expr: node_memory_Slab + - record: node_memory_SReclaimable_bytes + expr: node_memory_SReclaimable + - record: node_memory_SUnreclaim_bytes + expr: node_memory_SUnreclaim + - record: node_memory_SwapCached_bytes + expr: node_memory_SwapCached + - record: node_memory_SwapFree_bytes + expr: node_memory_SwapFree + - record: node_memory_SwapTotal_bytes + expr: node_memory_SwapTotal + - record: node_memory_Unevictable_bytes + expr: node_memory_Unevictable + - record: node_memory_VmallocChunk_bytes + expr: node_memory_VmallocChunk + - record: node_memory_VmallocTotal_bytes + expr: node_memory_VmallocTotal + - record: node_memory_VmallocUsed_bytes + expr: node_memory_VmallocUsed + - record: node_memory_Writeback_bytes + expr: node_memory_Writeback + - record: node_memory_WritebackTmp_bytes + expr: node_memory_WritebackTmp + - name: node_exporter-16-network + rules: + - record: node_network_receive_bytes_total + expr: node_network_receive_bytes + - record: node_network_receive_compressed_total + expr: node_network_receive_compressed + - record: node_network_receive_drop_total + expr: node_network_receive_drop + - record: node_network_receive_errs_total + expr: node_network_receive_errs + - record: node_network_receive_fifo_total + expr: node_network_receive_fifo + - record: node_network_receive_frame_total + expr: node_network_receive_frame + - record: node_network_receive_multicast_total + expr: node_network_receive_multicast + - record: 
node_network_receive_packets_total + expr: node_network_receive_packets + - record: node_network_transmit_bytes_total + expr: node_network_transmit_bytes + - record: node_network_transmit_compressed_total + expr: node_network_transmit_compressed + - record: node_network_transmit_drop_total + expr: node_network_transmit_drop + - record: node_network_transmit_errs_total + expr: node_network_transmit_errs + - record: node_network_transmit_fifo_total + expr: node_network_transmit_fifo + - record: node_network_transmit_frame_total + expr: node_network_transmit_frame + - record: node_network_transmit_multicast_total + expr: node_network_transmit_multicast + - record: node_network_transmit_packets_total + expr: node_network_transmit_packets + - name: node_exporter-16-nfs + rules: + - record: node_nfs_connections_total + expr: node_nfs_net_connections + - record: node_nfs_packets_total + expr: node_nfs_net_reads + - record: node_nfs_requests_total + expr: label_replace(label_replace(node_nfs_procedures, "proto", "$1", "version", "(.+)"), "method", "$1", "procedure", "(.+)") + - record: node_nfs_rpc_authentication_refreshes_total + expr: node_nfs_rpc_authentication_refreshes + - record: node_nfs_rpcs_total + expr: node_nfs_rpc_operations + - record: node_nfs_rpc_retransmissions_total + expr: node_nfs_rpc_retransmissions + - name: node_exporter-16-textfile + rules: + - record: node_textfile_mtime_seconds + expr: node_textfile_mtime diff --git a/docs/example-17-compatibility-rules-new-to-old.yml b/docs/example-17-compatibility-rules-new-to-old.yml new file mode 100644 index 0000000000..da1520c9f4 --- /dev/null +++ b/docs/example-17-compatibility-rules-new-to-old.yml @@ -0,0 +1,5 @@ +groups: + - name: node_exporter-17-supervisord + rules: + - record: node_supervisord_start_time_seconds + expr: node_supervisord_uptime + time() diff --git a/docs/example-17-compatibility-rules.yml b/docs/example-17-compatibility-rules.yml new file mode 100644 index 0000000000..e3d4d6626d --- 
/dev/null +++ b/docs/example-17-compatibility-rules.yml @@ -0,0 +1,5 @@ +groups: + - name: node_exporter-17-supervisord + rules: + - record: node_supervisord_uptime + expr: time() - node_supervisord_start_time_seconds diff --git a/docs/node-mixin/.gitignore b/docs/node-mixin/.gitignore new file mode 100644 index 0000000000..522b99f0bd --- /dev/null +++ b/docs/node-mixin/.gitignore @@ -0,0 +1,4 @@ +jsonnetfile.lock.json +vendor +*.yaml +dashboards_out diff --git a/docs/node-mixin/Makefile b/docs/node-mixin/Makefile new file mode 100644 index 0000000000..d04b37d009 --- /dev/null +++ b/docs/node-mixin/Makefile @@ -0,0 +1,32 @@ +JSONNET_FMT := jsonnetfmt -n 2 --max-blank-lines 2 --string-style s --comment-style s + +all: fmt node_alerts.yaml node_rules.yaml dashboards_out lint + +fmt: + find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ + xargs -n 1 -- $(JSONNET_FMT) -i + +node_alerts.yaml: mixin.libsonnet config.libsonnet $(wildcard alerts/*) + jsonnet -S alerts.jsonnet > $@ + +node_rules.yaml: mixin.libsonnet config.libsonnet $(wildcard rules/*) + jsonnet -S rules.jsonnet > $@ + +dashboards_out: mixin.libsonnet config.libsonnet $(wildcard dashboards/*) + @mkdir -p dashboards_out + jsonnet -J vendor -m dashboards_out dashboards.jsonnet + +lint: node_alerts.yaml node_rules.yaml + find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ + while read f; do \ + $(JSONNET_FMT) "$$f" | diff -u "$$f" -; \ + done + + promtool check rules node_alerts.yaml node_rules.yaml + +.PHONY: jb_install +jb_install: + jb install + +clean: + rm -rf dashboards_out node_alerts.yaml node_rules.yaml diff --git a/docs/node-mixin/README.md b/docs/node-mixin/README.md new file mode 100644 index 0000000000..492409c3c8 --- /dev/null +++ b/docs/node-mixin/README.md @@ -0,0 +1,42 @@ +# Node Mixin + +_This is a work in progress. 
We aim for it to become a good role model for alerts +and dashboards eventually, but it is not quite there yet._ + +The Node Mixin is a set of configurable, reusable, and extensible alerts and +dashboards based on the metrics exported by the Node Exporter. The mixin creates +recording and alerting rules for Prometheus and suitable dashboard descriptions +for Grafana. + +To use them, you need to have `jsonnet` (v0.16+) and `jb` installed. If you +have a working Go development environment, it's easiest to run the following: +```bash +$ go install github.com/google/go-jsonnet/cmd/jsonnet@latest +$ go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest +$ go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest +``` + +Next, install the dependencies by running the following command in this +directory: +```bash +$ jb install +``` + +You can then build the Prometheus rules files `node_alerts.yaml` and +`node_rules.yaml`: +```bash +$ make node_alerts.yaml node_rules.yaml +``` + +You can also build a directory `dashboard_out` with the JSON dashboard files +for Grafana: +```bash +$ make dashboards_out +``` + +Note that some of the generated dashboards require recording rules specified in +the previously generated `node_rules.yaml`. + +For more advanced uses of mixins, see +https://github.com/monitoring-mixins/docs. 
+ diff --git a/docs/node-mixin/alerts.jsonnet b/docs/node-mixin/alerts.jsonnet new file mode 100644 index 0000000000..75e7c1b297 --- /dev/null +++ b/docs/node-mixin/alerts.jsonnet @@ -0,0 +1 @@ +std.manifestYamlDoc((import 'mixin.libsonnet').prometheusAlerts) diff --git a/docs/node-mixin/alerts/alerts.libsonnet b/docs/node-mixin/alerts/alerts.libsonnet new file mode 100644 index 0000000000..1d81136a81 --- /dev/null +++ b/docs/node-mixin/alerts/alerts.libsonnet @@ -0,0 +1,316 @@ +{ + prometheusAlerts+:: { + groups+: [ + { + name: 'node-exporter', + rules: [ + { + alert: 'NodeFilesystemSpaceFillingUp', + expr: ||| + ( + node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < %(fsSpaceFillingUpWarningThreshold)d + and + predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s}[6h], 24*60*60) < 0 + and + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + ) + ||| % $._config, + 'for': '1h', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Filesystem is predicted to run out of space within the next 24 hours.', + description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up.', + }, + }, + { + alert: 'NodeFilesystemSpaceFillingUp', + expr: ||| + ( + node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < %(fsSpaceFillingUpCriticalThreshold)d + and + predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s}[6h], 4*60*60) < 0 + and + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + ) + ||| % $._config, + 'for': '1h', + labels: { + severity: '%(nodeCriticalSeverity)s' % $._config, + }, + annotations: { + summary: 'Filesystem is predicted to run out of space within the next 4 hours.', + 
description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up fast.', + }, + }, + { + alert: 'NodeFilesystemAlmostOutOfSpace', + expr: ||| + ( + node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < %(fsSpaceAvailableCriticalThreshold)d + and + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + ) + ||| % $._config, + 'for': '30m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Filesystem has less than %(fsSpaceAvailableCriticalThreshold)d%% space left.' % $._config, + description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left.', + }, + }, + { + alert: 'NodeFilesystemAlmostOutOfSpace', + expr: ||| + ( + node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < %(fsSpaceAvailableWarningThreshold)d + and + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + ) + ||| % $._config, + 'for': '30m', + labels: { + severity: '%(nodeCriticalSeverity)s' % $._config, + }, + annotations: { + summary: 'Filesystem has less than %(fsSpaceAvailableWarningThreshold)d%% space left.' 
% $._config, + description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left.', + }, + }, + { + alert: 'NodeFilesystemFilesFillingUp', + expr: ||| + ( + node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < 40 + and + predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s}[6h], 24*60*60) < 0 + and + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + ) + ||| % $._config, + 'for': '1h', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Filesystem is predicted to run out of inodes within the next 24 hours.', + description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up.', + }, + }, + { + alert: 'NodeFilesystemFilesFillingUp', + expr: ||| + ( + node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < 20 + and + predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s}[6h], 4*60*60) < 0 + and + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + ) + ||| % $._config, + 'for': '1h', + labels: { + severity: '%(nodeCriticalSeverity)s' % $._config, + }, + annotations: { + summary: 'Filesystem is predicted to run out of inodes within the next 4 hours.', + description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up fast.', + }, + }, + { + alert: 'NodeFilesystemAlmostOutOfFiles', + expr: ||| + ( + node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < 5 + and + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + ) + ||| % $._config, + 'for': 
'1h', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Filesystem has less than 5% inodes left.', + description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left.', + }, + }, + { + alert: 'NodeFilesystemAlmostOutOfFiles', + expr: ||| + ( + node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s} * 100 < 3 + and + node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s} == 0 + ) + ||| % $._config, + 'for': '1h', + labels: { + severity: '%(nodeCriticalSeverity)s' % $._config, + }, + annotations: { + summary: 'Filesystem has less than 3% inodes left.', + description: 'Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left.', + }, + }, + { + alert: 'NodeNetworkReceiveErrs', + expr: ||| + rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01 + ||| % $._config, + 'for': '1h', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Network interface is reporting many receive errors.', + description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes.', + }, + }, + { + alert: 'NodeNetworkTransmitErrs', + expr: ||| + rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01 + ||| % $._config, + 'for': '1h', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Network interface is reporting many transmit errors.', + description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes.', + }, + }, + { + alert: 'NodeHighNumberConntrackEntriesUsed', + expr: ||| + (node_nf_conntrack_entries / node_nf_conntrack_entries_limit) > 0.75 + ||| % $._config, + annotations: { 
+ summary: 'Number of conntrack are getting close to the limit.', + description: '{{ $value | humanizePercentage }} of conntrack entries are used.', + }, + labels: { + severity: 'warning', + }, + }, + { + alert: 'NodeTextFileCollectorScrapeError', + expr: ||| + node_textfile_scrape_error{%(nodeExporterSelector)s} == 1 + ||| % $._config, + annotations: { + summary: 'Node Exporter text file collector failed to scrape.', + description: 'Node Exporter text file collector failed to scrape.', + }, + labels: { + severity: 'warning', + }, + }, + { + alert: 'NodeClockSkewDetected', + expr: ||| + ( + node_timex_offset_seconds > 0.05 + and + deriv(node_timex_offset_seconds[5m]) >= 0 + ) + or + ( + node_timex_offset_seconds < -0.05 + and + deriv(node_timex_offset_seconds[5m]) <= 0 + ) + ||| % $._config, + 'for': '10m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Clock skew detected.', + description: 'Clock on {{ $labels.instance }} is out of sync by more than 300s. Ensure NTP is configured correctly on this host.', + }, + }, + { + alert: 'NodeClockNotSynchronising', + expr: ||| + min_over_time(node_timex_sync_status[5m]) == 0 + and + node_timex_maxerror_seconds >= 16 + ||| % $._config, + 'for': '10m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Clock not synchronising.', + description: 'Clock on {{ $labels.instance }} is not synchronising. Ensure NTP is configured on this host.', + }, + }, + { + alert: 'NodeRAIDDegraded', + expr: ||| + node_md_disks_required - ignoring (state) (node_md_disks{state="active"}) > 0 + ||| % $._config, + 'for': '15m', + labels: { + severity: 'critical', + }, + annotations: { + summary: 'RAID Array is degraded', + description: "RAID array '{{ $labels.device }}' on {{ $labels.instance }} is in degraded state due to one or more disks failures. 
Number of spare drives is insufficient to fix issue automatically.", + }, + }, + { + alert: 'NodeRAIDDiskFailure', + expr: ||| + node_md_disks{state="failed"} > 0 + ||| % $._config, + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Failed device in RAID array', + description: "At least one device in RAID array on {{ $labels.instance }} failed. Array '{{ $labels.device }}' needs attention and possibly a disk swap.", + }, + }, + { + alert: 'NodeFileDescriptorLimit', + expr: ||| + ( + node_filefd_allocated{job="node-exporter"} * 100 / node_filefd_maximum{job="node-exporter"} > 70 + ) + ||| % $._config, + 'for': '15m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Kernel is predicted to exhaust file descriptors limit soon.', + description: 'File descriptors limit at {{ $labels.instance }} is currently at {{ printf "%.2f" $value }}%.', + }, + }, + { + alert: 'NodeFileDescriptorLimit', + expr: ||| + ( + node_filefd_allocated{job="node-exporter"} * 100 / node_filefd_maximum{job="node-exporter"} > 90 + ) + ||| % $._config, + 'for': '15m', + labels: { + severity: 'critical', + }, + annotations: { + summary: 'Kernel is predicted to exhaust file descriptors limit soon.', + description: 'File descriptors limit at {{ $labels.instance }} is currently at {{ printf "%.2f" $value }}%.', + }, + }, + ], + }, + ], + }, +} diff --git a/docs/node-mixin/config.libsonnet b/docs/node-mixin/config.libsonnet new file mode 100644 index 0000000000..eb5ab330b3 --- /dev/null +++ b/docs/node-mixin/config.libsonnet @@ -0,0 +1,63 @@ +{ + _config+:: { + // Selectors are inserted between {} in Prometheus queries. + + // Select the metrics coming from the node exporter. Note that all + // the selected metrics are shown stacked on top of each other in + // the 'USE Method / Cluster' dashboard. Consider disabling that + // dashboard if mixing up all those metrics in the same dashboard + // doesn't make sense (e.g. 
because they are coming from different
+    // clusters).
+    nodeExporterSelector: 'job="node"',
+
+    // Select the fstype for filesystem-related queries. If left
+    // empty, all filesystems are selected. If you have unusual
+    // filesystem you don't want to include in dashboards and
+    // alerting, you can exclude them here, e.g. 'fstype!="tmpfs"'.
+    fsSelector: 'fstype!=""',
+
+    // Select the device for disk-related queries. If left empty, all
+    // devices are selected. If you have unusual devices you don't
+    // want to include in dashboards and alerting, you can exclude
+    // them here, e.g. 'device!="tmpfs"'.
+    diskDeviceSelector: 'device!=""',
+
+    // Some of the alerts are meant to fire if a critical failure of a
+    // node is imminent (e.g. the disk is about to run full). In a
+    // true “cloud native” setup, failures of a single node should be
+    // tolerated. Hence, even imminent failure of a single node is no
+    // reason to create a paging alert. However, in practice there are
+    // still many situations where operators like to get paged in time
+    // before a node runs out of disk space. nodeCriticalSeverity can
+    // be set to the desired severity for this kind of alerts. This
+    // can even be templated to depend on labels of the node, e.g. you
+    // could make this critical for traditional database masters but
+    // just a warning for K8s nodes.
+    nodeCriticalSeverity: 'critical',
+
+    // Available disk space (%) thresholds on which to trigger the
+    // 'NodeFilesystemSpaceFillingUp' alerts. These alerts fire if the disk
+    // usage grows in a way that it is predicted to run out in 4h or 1d
+    // and if the provided thresholds have been reached right now.
+    // In some cases you'll want to adjust these, e.g. by default Kubernetes
+    // runs the image garbage collection when the disk usage reaches 85%
+    // of its available space. 
In that case, you'll want to reduce the + // critical threshold below to something like 14 or 15, otherwise + // the alert could fire under normal node usage. + fsSpaceFillingUpWarningThreshold: 40, + fsSpaceFillingUpCriticalThreshold: 20, + + // Available disk space (%) thresholds on which to trigger the + // 'NodeFilesystemAlmostOutOfSpace' alerts. + fsSpaceAvailableCriticalThreshold: 5, + fsSpaceAvailableWarningThreshold: 3, + + rateInterval: '5m', + // Opt-in for multi-cluster support. + showMultiCluster: false, + clusterLabel: 'cluster', + + dashboardNamePrefix: 'Node Exporter / ', + dashboardTags: ['node-exporter-mixin'], + }, +} diff --git a/docs/node-mixin/dashboards.jsonnet b/docs/node-mixin/dashboards.jsonnet new file mode 100644 index 0000000000..9d913ed3f1 --- /dev/null +++ b/docs/node-mixin/dashboards.jsonnet @@ -0,0 +1,6 @@ +local dashboards = (import 'mixin.libsonnet').grafanaDashboards; + +{ + [name]: dashboards[name] + for name in std.objectFields(dashboards) +} diff --git a/docs/node-mixin/dashboards/dashboards.libsonnet b/docs/node-mixin/dashboards/dashboards.libsonnet new file mode 100644 index 0000000000..e6adbd4fa0 --- /dev/null +++ b/docs/node-mixin/dashboards/dashboards.libsonnet @@ -0,0 +1,2 @@ +(import 'node.libsonnet') + +(import 'use.libsonnet') diff --git a/docs/node-mixin/dashboards/node.libsonnet b/docs/node-mixin/dashboards/node.libsonnet new file mode 100644 index 0000000000..ef8d3ae243 --- /dev/null +++ b/docs/node-mixin/dashboards/node.libsonnet @@ -0,0 +1,256 @@ +local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet'; +local dashboard = grafana.dashboard; +local row = grafana.row; +local prometheus = grafana.prometheus; +local template = grafana.template; +local graphPanel = grafana.graphPanel; +local promgrafonnet = import 'github.com/kubernetes-monitoring/kubernetes-mixin/lib/promgrafonnet/promgrafonnet.libsonnet'; +local gauge = promgrafonnet.gauge; + +{ + grafanaDashboards+:: { + 'nodes.json': + 
local idleCPU = + graphPanel.new( + 'CPU Usage', + datasource='$datasource', + span=6, + format='percentunit', + max=1, + min=0, + stack=true, + ) + .addTarget(prometheus.target( + ||| + ( + (1 - sum without (mode) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode=~"idle|iowait|steal", instance="$instance"}[$__rate_interval]))) + / ignoring(cpu) group_left + count without (cpu, mode) (node_cpu_seconds_total{%(nodeExporterSelector)s, mode="idle", instance="$instance"}) + ) + ||| % $._config, + legendFormat='{{cpu}}', + intervalFactor=5, + )); + + local systemLoad = + graphPanel.new( + 'Load Average', + datasource='$datasource', + span=6, + format='short', + min=0, + fill=0, + ) + .addTarget(prometheus.target('node_load1{%(nodeExporterSelector)s, instance="$instance"}' % $._config, legendFormat='1m load average')) + .addTarget(prometheus.target('node_load5{%(nodeExporterSelector)s, instance="$instance"}' % $._config, legendFormat='5m load average')) + .addTarget(prometheus.target('node_load15{%(nodeExporterSelector)s, instance="$instance"}' % $._config, legendFormat='15m load average')) + .addTarget(prometheus.target('count(node_cpu_seconds_total{%(nodeExporterSelector)s, instance="$instance", mode="idle"})' % $._config, legendFormat='logical cores')); + + local memoryGraph = + graphPanel.new( + 'Memory Usage', + datasource='$datasource', + span=9, + format='bytes', + stack=true, + min=0, + ) + .addTarget(prometheus.target( + ||| + ( + node_memory_MemTotal_bytes{%(nodeExporterSelector)s, instance="$instance"} + - + node_memory_MemFree_bytes{%(nodeExporterSelector)s, instance="$instance"} + - + node_memory_Buffers_bytes{%(nodeExporterSelector)s, instance="$instance"} + - + node_memory_Cached_bytes{%(nodeExporterSelector)s, instance="$instance"} + ) + ||| % $._config, legendFormat='memory used' + )) + .addTarget(prometheus.target('node_memory_Buffers_bytes{%(nodeExporterSelector)s, instance="$instance"}' % $._config, legendFormat='memory buffers')) + 
.addTarget(prometheus.target('node_memory_Cached_bytes{%(nodeExporterSelector)s, instance="$instance"}' % $._config, legendFormat='memory cached')) + .addTarget(prometheus.target('node_memory_MemFree_bytes{%(nodeExporterSelector)s, instance="$instance"}' % $._config, legendFormat='memory free')); + + // TODO: It would be nicer to have a gauge that gets a 0-1 range and displays it as a percentage 0%-100%. + // This needs to be added upstream in the promgrafonnet library and then changed here. + // NOTE: avg() is used to circumvent a label change caused by a node_exporter rollout. + local memoryGauge = gauge.new( + 'Memory Usage', + ||| + 100 - + ( + avg(node_memory_MemAvailable_bytes{%(nodeExporterSelector)s, instance="$instance"}) + / + avg(node_memory_MemTotal_bytes{%(nodeExporterSelector)s, instance="$instance"}) + * 100 + ) + ||| % $._config, + ).withLowerBeingBetter(); + + local diskIO = + graphPanel.new( + 'Disk I/O', + datasource='$datasource', + span=6, + min=0, + fill=0, + ) + // TODO: Does it make sense to have those three in the same panel? + .addTarget(prometheus.target( + 'rate(node_disk_read_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(diskDeviceSelector)s}[$__rate_interval])' % $._config, + legendFormat='{{device}} read', + )) + .addTarget(prometheus.target( + 'rate(node_disk_written_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(diskDeviceSelector)s}[$__rate_interval])' % $._config, + legendFormat='{{device}} written', + )) + .addTarget(prometheus.target( + 'rate(node_disk_io_time_seconds_total{%(nodeExporterSelector)s, instance="$instance", %(diskDeviceSelector)s}[$__rate_interval])' % $._config, + legendFormat='{{device}} io time', + )) + + { + seriesOverrides: [ + { + alias: '/ read| written/', + yaxis: 1, + }, + { + alias: '/ io time/', + yaxis: 2, + }, + ], + yaxes: [ + self.yaxe(format='bytes'), + self.yaxe(format='s'), + ], + }; + + // TODO: Somehow partition this by device while excluding read-only devices. 
+ local diskSpaceUsage = + graphPanel.new( + 'Disk Space Usage', + datasource='$datasource', + span=6, + format='bytes', + min=0, + fill=1, + stack=true, + ) + .addTarget(prometheus.target( + ||| + sum( + max by (device) ( + node_filesystem_size_bytes{%(nodeExporterSelector)s, instance="$instance", %(fsSelector)s} + - + node_filesystem_avail_bytes{%(nodeExporterSelector)s, instance="$instance", %(fsSelector)s} + ) + ) + ||| % $._config, + legendFormat='used', + )) + .addTarget(prometheus.target( + ||| + sum( + max by (device) ( + node_filesystem_avail_bytes{%(nodeExporterSelector)s, instance="$instance", %(fsSelector)s} + ) + ) + ||| % $._config, + legendFormat='available', + )) + + { + seriesOverrides: [ + { + alias: 'used', + color: '#E0B400', + }, + { + alias: 'available', + color: '#73BF69', + }, + ], + }; + + local networkReceived = + graphPanel.new( + 'Network Received', + datasource='$datasource', + span=6, + format='bytes', + min=0, + fill=0, + ) + .addTarget(prometheus.target( + 'rate(node_network_receive_bytes_total{%(nodeExporterSelector)s, instance="$instance", device!="lo"}[$__rate_interval])' % $._config, + legendFormat='{{device}}', + )); + + local networkTransmitted = + graphPanel.new( + 'Network Transmitted', + datasource='$datasource', + span=6, + format='bytes', + min=0, + fill=0, + ) + .addTarget(prometheus.target( + 'rate(node_network_transmit_bytes_total{%(nodeExporterSelector)s, instance="$instance", device!="lo"}[$__rate_interval])' % $._config, + legendFormat='{{device}}', + )); + + dashboard.new( + '%sNodes' % $._config.dashboardNamePrefix, + time_from='now-1h', + tags=($._config.dashboardTags), + timezone='utc', + refresh='30s', + graphTooltip='shared_crosshair' + ) + .addTemplate( + { + current: { + text: 'Prometheus', + value: 'Prometheus', + }, + hide: 0, + label: 'Data Source', + name: 'datasource', + options: [], + query: 'prometheus', + refresh: 1, + regex: '', + type: 'datasource', + }, + ) + .addTemplate( + template.new( + 
'instance', + '$datasource', + 'label_values(node_exporter_build_info{%(nodeExporterSelector)s}, instance)' % $._config, + refresh='time', + ) + ) + .addRow( + row.new() + .addPanel(idleCPU) + .addPanel(systemLoad) + ) + .addRow( + row.new() + .addPanel(memoryGraph) + .addPanel(memoryGauge) + ) + .addRow( + row.new() + .addPanel(diskIO) + .addPanel(diskSpaceUsage) + ) + .addRow( + row.new() + .addPanel(networkReceived) + .addPanel(networkTransmitted) + ), + }, +} diff --git a/docs/node-mixin/dashboards/use.libsonnet b/docs/node-mixin/dashboards/use.libsonnet new file mode 100644 index 0000000000..e830d7b555 --- /dev/null +++ b/docs/node-mixin/dashboards/use.libsonnet @@ -0,0 +1,466 @@ +local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet'; +local dashboard = grafana.dashboard; +local row = grafana.row; +local prometheus = grafana.prometheus; +local template = grafana.template; +local graphPanel = grafana.graphPanel; + +local c = import '../config.libsonnet'; + +local datasourceTemplate = { + current: { + text: 'Prometheus', + value: 'Prometheus', + }, + hide: 0, + label: 'Data Source', + name: 'datasource', + options: [], + query: 'prometheus', + refresh: 1, + regex: '', + type: 'datasource', +}; + +local CPUUtilisation = + graphPanel.new( + 'CPU Utilisation', + datasource='$datasource', + span=6, + format='percentunit', + stack=true, + fill=10, + legend_show=false, + ) { tooltip+: { sort: 2 } }; + +local CPUSaturation = + // TODO: Is this a useful panel? At least there should be some explanation how load + // average relates to the "CPU saturation" in the title. 
+ graphPanel.new( + 'CPU Saturation (Load1 per CPU)', + datasource='$datasource', + span=6, + format='percentunit', + stack=true, + fill=10, + legend_show=false, + ) { tooltip+: { sort: 2 } }; + +local memoryUtilisation = + graphPanel.new( + 'Memory Utilisation', + datasource='$datasource', + span=6, + format='percentunit', + stack=true, + fill=10, + legend_show=false, + ) { tooltip+: { sort: 2 } }; + +local memorySaturation = + graphPanel.new( + 'Memory Saturation (Major Page Faults)', + datasource='$datasource', + span=6, + format='rds', + stack=true, + fill=10, + legend_show=false, + ) { tooltip+: { sort: 2 } }; + +local networkUtilisation = + graphPanel.new( + 'Network Utilisation (Bytes Receive/Transmit)', + datasource='$datasource', + span=6, + format='Bps', + stack=true, + fill=10, + legend_show=false, + ) + .addSeriesOverride({ alias: '/Receive/', stack: 'A' }) + .addSeriesOverride({ alias: '/Transmit/', stack: 'B', transform: 'negative-Y' }) + { tooltip+: { sort: 2 } }; + +local networkSaturation = + graphPanel.new( + 'Network Saturation (Drops Receive/Transmit)', + datasource='$datasource', + span=6, + format='Bps', + stack=true, + fill=10, + legend_show=false, + ) + .addSeriesOverride({ alias: '/ Receive/', stack: 'A' }) + .addSeriesOverride({ alias: '/ Transmit/', stack: 'B', transform: 'negative-Y' }) + { tooltip+: { sort: 2 } }; + +local diskIOUtilisation = + graphPanel.new( + 'Disk IO Utilisation', + datasource='$datasource', + span=6, + format='percentunit', + stack=true, + fill=10, + legend_show=false, + ) { tooltip+: { sort: 2 } }; + +local diskIOSaturation = + graphPanel.new( + 'Disk IO Saturation', + datasource='$datasource', + span=6, + format='percentunit', + stack=true, + fill=10, + legend_show=false, + ) { tooltip+: { sort: 2 } }; + +local diskSpaceUtilisation = + graphPanel.new( + 'Disk Space Utilisation', + datasource='$datasource', + span=12, + format='percentunit', + stack=true, + fill=10, + legend_show=false, + ) { tooltip+: { sort: 2 } 
}; + +{ + _clusterTemplate:: template.new( + name='cluster', + datasource='$datasource', + query='label_values(node_time_seconds, %s)' % $._config.clusterLabel, + current='', + hide=if $._config.showMultiCluster then '' else '2', + refresh=2, + includeAll=false, + sort=1 + ), + + grafanaDashboards+:: { + 'node-rsrc-use.json': + + dashboard.new( + '%sUSE Method / Node' % $._config.dashboardNamePrefix, + time_from='now-1h', + tags=($._config.dashboardTags), + timezone='utc', + refresh='30s', + graphTooltip='shared_crosshair' + ) + .addTemplate(datasourceTemplate) + .addTemplate($._clusterTemplate) + .addTemplate( + template.new( + 'instance', + '$datasource', + 'label_values(node_exporter_build_info{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}, instance)' % $._config, + refresh='time', + sort=1 + ) + ) + .addRow( + row.new('CPU') + .addPanel(CPUUtilisation.addTarget(prometheus.target('instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Utilisation'))) + .addPanel(CPUSaturation.addTarget(prometheus.target('instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Saturation'))) + ) + .addRow( + row.new('Memory') + .addPanel(memoryUtilisation.addTarget(prometheus.target('instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Utilisation'))) + .addPanel(memorySaturation.addTarget(prometheus.target('instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Major page Faults'))) + ) + .addRow( + row.new('Network') + .addPanel( + networkUtilisation + .addTarget(prometheus.target('instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, 
instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Receive')) + .addTarget(prometheus.target('instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Transmit')) + ) + .addPanel( + networkSaturation + .addTarget(prometheus.target('instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Receive')) + .addTarget(prometheus.target('instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Transmit')) + ) + ) + .addRow( + row.new('Disk IO') + .addPanel(diskIOUtilisation.addTarget(prometheus.target('instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{device}}'))) + .addPanel(diskIOSaturation.addTarget(prometheus.target('instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{device}}'))) + ) + .addRow( + row.new('Disk Space') + .addPanel( + diskSpaceUtilisation.addTarget(prometheus.target( + ||| + sort_desc(1 - + ( + max without (mountpoint, fstype) (node_filesystem_avail_bytes{%(nodeExporterSelector)s, fstype!="", instance="$instance", %(clusterLabel)s="$cluster"}) + / + max without (mountpoint, fstype) (node_filesystem_size_bytes{%(nodeExporterSelector)s, fstype!="", instance="$instance", %(clusterLabel)s="$cluster"}) + ) != 0 + ) + ||| % $._config, legendFormat='{{device}}' + )) + ) + ), + + 'node-cluster-rsrc-use.json': + dashboard.new( + '%sUSE Method / Cluster' % $._config.dashboardNamePrefix, + time_from='now-1h', + 
tags=($._config.dashboardTags), + timezone='utc', + refresh='30s', + graphTooltip='shared_crosshair' + ) + .addTemplate(datasourceTemplate) + .addTemplate($._clusterTemplate) + .addRow( + row.new('CPU') + .addPanel( + CPUUtilisation + .addTarget(prometheus.target( + ||| + (( + instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} + * + instance:node_num_cpu:sum{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} + ) != 0 ) + / scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) + ||| % $._config, legendFormat='{{ instance }}' + )) + ) + .addPanel( + CPUSaturation + .addTarget(prometheus.target( + ||| + ( + instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} + / scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) + ) != 0 + ||| % $._config, legendFormat='{{instance}}' + )) + ) + ) + .addRow( + row.new('Memory') + .addPanel( + memoryUtilisation + .addTarget(prometheus.target( + ||| + ( + instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} + / scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) + ) != 0 + ||| % $._config, legendFormat='{{instance}}', + )) + ) + .addPanel(memorySaturation.addTarget(prometheus.target('instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}' % $._config, legendFormat='{{instance}}'))) + ) + .addRow( + row.new('Network') + .addPanel( + networkUtilisation + .addTarget(prometheus.target('instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Receive')) + .addTarget(prometheus.target('instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, 
%(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Transmit')) + ) + .addPanel( + networkSaturation + .addTarget(prometheus.target('instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Receive')) + .addTarget(prometheus.target('instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Transmit')) + ) + ) + .addRow( + row.new('Disk IO') + .addPanel( + diskIOUtilisation + .addTarget(prometheus.target( + ||| + ( + instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} + / scalar(count(instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) + ) != 0 + ||| % $._config, legendFormat='{{instance}} {{device}}' + )) + ) + .addPanel( + diskIOSaturation + .addTarget(prometheus.target( + ||| + ( + instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} + / scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) + ) != 0 + ||| % $._config, legendFormat='{{instance}} {{device}}' + )) + ) + ) + .addRow( + row.new('Disk Space') + .addPanel( + diskSpaceUtilisation + .addTarget(prometheus.target( + ||| + sum without (device) ( + max without (fstype, mountpoint) (( + node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(clusterLabel)s="$cluster"} + - + node_filesystem_avail_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(clusterLabel)s="$cluster"} + ) != 0) + ) + / scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(clusterLabel)s="$cluster"}))) + ||| % $._config, 
legendFormat='{{instance}}' + )) + ) + ), + } + + if $._config.showMultiCluster then { + 'node-multicluster-rsrc-use.json': + dashboard.new( + '%sUSE Method / Multi-cluster' % $._config.dashboardNamePrefix, + time_from='now-1h', + tags=($._config.dashboardTags), + timezone='utc', + refresh='30s', + graphTooltip='shared_crosshair' + ) + .addTemplate(datasourceTemplate) + .addRow( + row.new('CPU') + .addPanel( + CPUUtilisation + .addTarget(prometheus.target( + ||| + sum( + (( + instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s} + * + instance:node_num_cpu:sum{%(nodeExporterSelector)s} + ) != 0) + / scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s})) + ) by (%(clusterLabel)s) + ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config + )) + ) + .addPanel( + CPUSaturation + .addTarget(prometheus.target( + ||| + sum(( + instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s} + / scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s})) + ) != 0) by (%(clusterLabel)s) + ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config + )) + ) + ) + .addRow( + row.new('Memory') + .addPanel( + memoryUtilisation + .addTarget(prometheus.target( + ||| + sum(( + instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s} + / scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s})) + ) != 0) by (%(clusterLabel)s) + ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config + )) + ) + .addPanel( + memorySaturation + .addTarget(prometheus.target( + ||| + sum(( + instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s} + ) != 0) by (%(clusterLabel)s) + ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config + )) + ) + ) + .addRow( + row.new('Network') + .addPanel( + networkUtilisation + .addTarget(prometheus.target( + ||| + sum(( + instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s} + ) != 0) by 
(%(clusterLabel)s) + ||| % $._config, legendFormat='{{%(clusterLabel)s}} Receive' % $._config + )) + .addTarget(prometheus.target( + ||| + sum(( + instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s} + ) != 0) by (%(clusterLabel)s) + ||| % $._config, legendFormat='{{%(clusterLabel)s}} Transmit' % $._config + )) + ) + .addPanel( + networkSaturation + .addTarget(prometheus.target( + ||| + sum(( + instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s} + ) != 0) by (%(clusterLabel)s) + ||| % $._config, legendFormat='{{%(clusterLabel)s}} Receive' % $._config + )) + .addTarget(prometheus.target( + ||| + sum(( + instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s} + ) != 0) by (%(clusterLabel)s) + ||| % $._config, legendFormat='{{%(clusterLabel)s}} Transmit' % $._config + )) + ) + ) + .addRow( + row.new('Disk IO') + .addPanel( + diskIOUtilisation + .addTarget(prometheus.target( + ||| + sum(( + instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s} + / scalar(count(instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s})) + ) != 0) by (%(clusterLabel)s, device) + ||| % $._config, legendFormat='{{%(clusterLabel)s}} {{device}}' % $._config + )) + ) + .addPanel( + diskIOSaturation + .addTarget(prometheus.target( + ||| + sum(( + instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s} + / scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s})) + ) != 0) by (%(clusterLabel)s, device) + ||| % $._config, legendFormat='{{%(clusterLabel)s}} {{device}}' % $._config + )) + ) + ) + .addRow( + row.new('Disk Space') + .addPanel( + diskSpaceUtilisation + .addTarget(prometheus.target( + ||| + sum ( + sum without (device) ( + max without (fstype, mountpoint, instance, pod) (( + 
node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s} - node_filesystem_avail_bytes{%(nodeExporterSelector)s, %(fsSelector)s} + ) != 0) + ) + / scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s}))) + ) by (%(clusterLabel)s) + ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config + )) + ) + ), + } else {}, +} diff --git a/docs/node-mixin/jsonnetfile.json b/docs/node-mixin/jsonnetfile.json new file mode 100644 index 0000000000..46ebffe47c --- /dev/null +++ b/docs/node-mixin/jsonnetfile.json @@ -0,0 +1,33 @@ +{ + "version": 1, + "dependencies": [ + { + "source": { + "git": { + "remote": "https://github.com/grafana/grafonnet-lib.git", + "subdir": "grafonnet" + } + }, + "version": "master" + }, + { + "source": { + "git": { + "remote": "https://github.com/grafana/jsonnet-libs.git", + "subdir": "grafana-builder" + } + }, + "version": "master" + }, + { + "source": { + "git": { + "remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin.git", + "subdir": "lib/promgrafonnet" + } + }, + "version": "master" + } + ], + "legacyImports": false +} diff --git a/docs/node-mixin/mixin.libsonnet b/docs/node-mixin/mixin.libsonnet new file mode 100644 index 0000000000..b9831f9380 --- /dev/null +++ b/docs/node-mixin/mixin.libsonnet @@ -0,0 +1,4 @@ +(import 'config.libsonnet') + +(import 'alerts/alerts.libsonnet') + +(import 'dashboards/dashboards.libsonnet') + +(import 'rules/rules.libsonnet') diff --git a/docs/node-mixin/rules.jsonnet b/docs/node-mixin/rules.jsonnet new file mode 100644 index 0000000000..dbe13f417b --- /dev/null +++ b/docs/node-mixin/rules.jsonnet @@ -0,0 +1 @@ +std.manifestYamlDoc((import 'mixin.libsonnet').prometheusRules) diff --git a/docs/node-mixin/rules/rules.libsonnet b/docs/node-mixin/rules/rules.libsonnet new file mode 100644 index 0000000000..9c8eb90dd1 --- /dev/null +++ b/docs/node-mixin/rules/rules.libsonnet @@ -0,0 +1,119 @@ +{ + prometheusRules+:: { + 
groups+: [ + { + name: 'node-exporter.rules', + rules: [ + { + // This rule gives the number of CPUs per node. + record: 'instance:node_num_cpu:sum', + expr: ||| + count without (cpu, mode) ( + node_cpu_seconds_total{%(nodeExporterSelector)s,mode="idle"} + ) + ||| % $._config, + }, + { + // CPU utilisation is % CPU without {idle,iowait,steal}. + record: 'instance:node_cpu_utilisation:rate%(rateInterval)s' % $._config, + expr: ||| + 1 - avg without (cpu) ( + sum without (mode) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode=~"idle|iowait|steal"}[%(rateInterval)s])) + ) + ||| % $._config, + }, + { + // This is CPU saturation: 1min avg run queue length / number of CPUs. + // Can go over 1. + // TODO: There are situation where a run queue >1/core is just normal and fine. + // We need to clarify how to read this metric and if its usage is helpful at all. + record: 'instance:node_load1_per_cpu:ratio', + expr: ||| + ( + node_load1{%(nodeExporterSelector)s} + / + instance:node_num_cpu:sum{%(nodeExporterSelector)s} + ) + ||| % $._config, + }, + { + // Memory utilisation (ratio of used memory per instance). + record: 'instance:node_memory_utilisation:ratio', + expr: ||| + 1 - ( + ( + node_memory_MemAvailable_bytes{%(nodeExporterSelector)s} + or + ( + node_memory_Buffers_bytes{%(nodeExporterSelector)s} + + + node_memory_Cached_bytes{%(nodeExporterSelector)s} + + + node_memory_MemFree_bytes{%(nodeExporterSelector)s} + + + node_memory_Slab_bytes{%(nodeExporterSelector)s} + ) + ) + / + node_memory_MemTotal_bytes{%(nodeExporterSelector)s} + ) + ||| % $._config, + }, + { + record: 'instance:node_vmstat_pgmajfault:rate%(rateInterval)s' % $._config, + expr: ||| + rate(node_vmstat_pgmajfault{%(nodeExporterSelector)s}[%(rateInterval)s]) + ||| % $._config, + }, + { + // Disk utilisation (seconds spent, 1 second rate). 
+ record: 'instance_device:node_disk_io_time_seconds:rate%(rateInterval)s' % $._config, + expr: ||| + rate(node_disk_io_time_seconds_total{%(nodeExporterSelector)s, %(diskDeviceSelector)s}[%(rateInterval)s]) + ||| % $._config, + }, + { + // Disk saturation (weighted seconds spent, 1 second rate). + record: 'instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s' % $._config, + expr: ||| + rate(node_disk_io_time_weighted_seconds_total{%(nodeExporterSelector)s, %(diskDeviceSelector)s}[%(rateInterval)s]) + ||| % $._config, + }, + { + record: 'instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s' % $._config, + expr: ||| + sum without (device) ( + rate(node_network_receive_bytes_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s]) + ) + ||| % $._config, + }, + { + record: 'instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s' % $._config, + expr: ||| + sum without (device) ( + rate(node_network_transmit_bytes_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s]) + ) + ||| % $._config, + }, + // TODO: Find out if those drops ever happen on modern switched networks. 
+ { + record: 'instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s' % $._config, + expr: ||| + sum without (device) ( + rate(node_network_receive_drop_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s]) + ) + ||| % $._config, + }, + { + record: 'instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s' % $._config, + expr: ||| + sum without (device) ( + rate(node_network_transmit_drop_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s]) + ) + ||| % $._config, + }, + ], + }, + ], + }, +} diff --git a/end-to-end-test.sh b/end-to-end-test.sh index c1e45f6729..f024890de1 100755 --- a/end-to-end-test.sh +++ b/end-to-end-test.sh @@ -5,18 +5,23 @@ set -euf -o pipefail enabled_collectors=$(cat << COLLECTORS arp bcache + btrfs buddyinfo conntrack cpu + cpufreq diskstats drbd edac entropy + fibrechannel filefd hwmon infiniband + interrupts ipvs ksmd + lnstat loadavg mdadm meminfo @@ -25,23 +30,29 @@ enabled_collectors=$(cat << COLLECTORS netdev netstat nfs + nfsd + pressure qdisc + rapl + schedstat sockstat stat + thermal_zone textfile bonding - megacli + udp_queues + vmstat wifi xfs zfs + processes + zoneinfo COLLECTORS ) disabled_collectors=$(cat << COLLECTORS filesystem - time timex uname - vmstat COLLECTORS ) cd "$(dirname $0)" @@ -49,7 +60,14 @@ cd "$(dirname $0)" port="$((10000 + (RANDOM % 10000)))" tmpdir=$(mktemp -d /tmp/node_exporter_e2e_test.XXXXXX) -skip_re="^(go_|node_exporter_build_info|node_scrape_collector_duration_seconds|process_|node_textfile_mtime)" +skip_re="^(go_|node_exporter_build_info|node_scrape_collector_duration_seconds|process_|node_textfile_mtime_seconds|node_time_(zone|seconds))" + +arch="$(uname -m)" + +case "${arch}" in + aarch64|ppc64le) fixture='collector/fixtures/e2e-64k-page-output.txt' ;; + *) fixture='collector/fixtures/e2e-output.txt' ;; +esac keep=0; update=0; verbose=0 while getopts 'hkuv' opt @@ -82,14 +100,20 @@ then fi ./node_exporter \ + --path.rootfs="collector/fixtures" \ 
--path.procfs="collector/fixtures/proc" \ --path.sysfs="collector/fixtures/sys" \ $(for c in ${enabled_collectors}; do echo --collector.${c} ; done) \ $(for c in ${disabled_collectors}; do echo --no-collector.${c} ; done) \ --collector.textfile.directory="collector/fixtures/textfile/two_metric_files/" \ - --collector.megacli.command="collector/fixtures/megacli" \ --collector.wifi.fixtures="collector/fixtures/wifi" \ --collector.qdisc.fixtures="collector/fixtures/qdisc/" \ + --collector.netclass.ignored-devices="(dmz|int)" \ + --collector.netclass.ignore-invalid-speed \ + --collector.bcache.priorityStats \ + --collector.cpu.info \ + --collector.cpu.info.flags-include="^(aes|avx.?|constant_tsc)$" \ + --collector.cpu.info.bugs-include="^(cpu_meltdown|spectre_.*|mds)$" \ --web.listen-address "127.0.0.1:${port}" \ --log.level="debug" > "${tmpdir}/node_exporter.log" 2>&1 & @@ -107,7 +131,7 @@ EOF if [ ${update} -ne 0 ] then - cp "${tmpdir}/e2e-output.txt" "collector/fixtures/e2e-output.txt" + cp "${tmpdir}/e2e-output.txt" "${fixture}" fi if [ ${keep} -eq 0 ] @@ -140,5 +164,5 @@ sleep 1 get "127.0.0.1:${port}/metrics" | grep -E -v "${skip_re}" > "${tmpdir}/e2e-output.txt" diff -u \ - "collector/fixtures/e2e-output.txt" \ + "${fixture}" \ "${tmpdir}/e2e-output.txt" diff --git a/example-rules.yml b/example-rules.yml new file mode 100644 index 0000000000..d9c125dab3 --- /dev/null +++ b/example-rules.yml @@ -0,0 +1,18 @@ +groups: + - name: example-node-exporter-rules + rules: + # The count of CPUs per node, useful for getting CPU time as a percent of total. + - record: instance:node_cpus:count + expr: count(node_cpu_seconds_total{mode="idle"}) without (cpu,mode) + + # CPU in use by CPU. + - record: instance_cpu:node_cpu_seconds_not_idle:rate5m + expr: sum(rate(node_cpu_seconds_total{mode!="idle"}[5m])) without (mode) + + # CPU in use by mode. + - record: instance_mode:node_cpu_seconds:rate5m + expr: sum(rate(node_cpu_seconds_total[5m])) without (cpu) + + # CPU in use ratio. 
+ - record: instance:node_cpu_utilization:ratio + expr: sum(instance_mode:node_cpu_seconds:rate5m{mode!="idle"}) without (mode) / instance:node_cpus:count diff --git a/example.rules b/example.rules deleted file mode 100644 index a44fecf47b..0000000000 --- a/example.rules +++ /dev/null @@ -1,8 +0,0 @@ -# The count of CPUs per node, useful for getting CPU time as a percent of total. -instance:node_cpus:count = count(node_cpu{mode="idle"}) without (cpu,mode) - -# CPU in use by CPU. -instance_cpu:node_cpu_not_idle:rate5m = sum(rate(node_cpu{mode!="idle"}[5m])) without (mode) - -# CPU in use by mode. -instance_mode:node_cpu:rate5m = sum(rate(node_cpu[5m])) without (cpu) diff --git a/examples/init.d/node_exporter b/examples/init.d/node_exporter new file mode 100755 index 0000000000..32c2deefdf --- /dev/null +++ b/examples/init.d/node_exporter @@ -0,0 +1,63 @@ +#!/bin/bash + +RETVAL=0 +PROG="node_exporter" +EXEC="/etc/node_exporter/node_exporter" +LOCKFILE="/var/lock/subsys/$PROG" +OPTIONS="--web.listen-address=:9100" + +# Source function library. +if [ -f /etc/rc.d/init.d/functions ]; then + . /etc/rc.d/init.d/functions +else + echo "/etc/rc.d/init.d/functions does not exist" + exit 0 +fi + +start() { + if [ -f $LOCKFILE ] + then + echo "$PROG is already running!" + else + echo -n "Starting $PROG: " + nohup $EXEC $OPTIONS >/dev/null 2>&1 & + RETVAL=$? + [ $RETVAL -eq 0 ] && touch $LOCKFILE && success || failure + echo + return $RETVAL + fi +} + +stop() { + echo -n "Stopping $PROG: " + killproc $EXEC + RETVAL=$? 
+ [ $RETVAL -eq 0 ] && rm -r $LOCKFILE && success || failure + echo +} + +restart () +{ + stop + sleep 1 + start +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + status) + status $PROG + ;; + restart) + restart + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 +esac +exit $RETVAL diff --git a/examples/launchctl/README.md b/examples/launchctl/README.md new file mode 100644 index 0000000000..bc390f2543 --- /dev/null +++ b/examples/launchctl/README.md @@ -0,0 +1,25 @@ +# MacOS LaunchDaemon + +If you're installing through a package manager, you probably don't need to deal +with this file. + +The `plist` file should be put in `/Library/LaunchDaemons/` (user defined daemons), and the binary installed at +`/usr/local/bin/node_exporter`. + +Ex. install globally by + + sudo cp -n node_exporter /usr/local/bin/ + sudo cp -n examples/launchctl/io.prometheus.node_exporter.plist /Library/LaunchDaemons/ + sudo launchctl bootstrap system/ /Library/LaunchDaemons/io.prometheus.node_exporter.plist + + # Optionally configure by dropping CLI arguments in a file + echo -- '--web.listen-address=:9101' | sudo tee /usr/local/etc/node_exporter.args + + # Check it's running + sudo launchctl list | grep node_exporter + + # See full process state + sudo launchctl print system/io.prometheus.node_exporter + + # View logs + sudo tail /tmp/node_exporter.log diff --git a/examples/launchctl/io.prometheus.node_exporter.plist b/examples/launchctl/io.prometheus.node_exporter.plist new file mode 100644 index 0000000000..f32ffb8835 --- /dev/null +++ b/examples/launchctl/io.prometheus.node_exporter.plist @@ -0,0 +1,38 @@ + + + + + Label + io.prometheus.node_exporter + ProgramArguments + + sh + -c + /usr/local/bin/node_exporter $(< /usr/local/etc/node_exporter.args) + + UserName + nobody + GroupName + nobody + RunAtLoad + + KeepAlive + + WorkingDirectory + /usr/local + StandardErrorPath + /tmp/node_exporter.log + StandardOutPath + /tmp/node_exporter.log + HardResourceLimits + 
+ NumberOfFiles + 4096 + + SoftResourceLimits + + NumberOfFiles + 4096 + + + diff --git a/examples/openbsd-rc.d/node_exporter b/examples/openbsd-rc.d/node_exporter new file mode 100755 index 0000000000..e0a73830d1 --- /dev/null +++ b/examples/openbsd-rc.d/node_exporter @@ -0,0 +1,12 @@ +#!/bin/ksh +# Shawn Craver, 2019-04-02 + + +daemon="/usr/local/bin/node_exporter" + +. /etc/rc.d/rc.subr + +rc_bg=YES + +rc_cmd $1 + diff --git a/examples/openwrt-init.d/node_exporter b/examples/openwrt-init.d/node_exporter new file mode 100755 index 0000000000..1aed38becf --- /dev/null +++ b/examples/openwrt-init.d/node_exporter @@ -0,0 +1,13 @@ +#!/bin/sh /etc/rc.common + +START=99 + +USE_PROCD=1 +PROG="/usr/bin/node_exporter" +OPTIONS="--web.listen-address=:9100" + +start_service() { + procd_open_instance + procd_set_param command "$PROG" "${OPTIONS}" + procd_close_instance +} diff --git a/examples/systemd/README.md b/examples/systemd/README.md index c1f011fbf4..67039bed13 100644 --- a/examples/systemd/README.md +++ b/examples/systemd/README.md @@ -5,4 +5,5 @@ If you are using distribution packages or the copr repository, you don't need to The unit file in this directory is to be put into `/etc/systemd/system`. It needs a user named `node_exporter`, whose shell should be `/sbin/nologin` and should not have any special privileges. It needs a sysconfig file in `/etc/sysconfig/node_exporter`. +It needs a directory named `/var/lib/node_exporter/textfile_collector`, whose owner should be `node_exporter`:`node_exporter`. A sample file can be found in `sysconfig.node_exporter`. 
diff --git a/go.mod b/go.mod new file mode 100644 index 0000000000..eabe178025 --- /dev/null +++ b/go.mod @@ -0,0 +1,28 @@ +module github.com/prometheus/node_exporter + +require ( + github.com/beevik/ntp v0.3.0 + github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf + github.com/ema/qdisc v0.0.0-20200603082823-62d0308e3e00 + github.com/go-kit/log v0.2.0 + github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968 + github.com/hashicorp/go-envparse v0.0.0-20200406174449-d9cfd743a15e + github.com/hodgesds/perf-utils v0.4.0 + github.com/illumos/go-kstat v0.0.0-20210513183136-173c9b0a9973 + github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786 + github.com/lufia/iostat v1.2.0 + github.com/mattn/go-xmlrpc v0.0.3 + github.com/mdlayher/wifi v0.0.0-20200527114002-84f0b9457fdd + github.com/prometheus/client_golang v1.11.0 + github.com/prometheus/client_model v0.2.0 + github.com/prometheus/common v0.32.1 + github.com/prometheus/exporter-toolkit v0.7.0 + github.com/prometheus/procfs v0.7.4-0.20211011103944-1a7a2bd3279f + github.com/safchain/ethtool v0.1.0 + github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973 // indirect + github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a + golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1 + gopkg.in/alecthomas/kingpin.v2 v2.2.6 +) + +go 1.14 diff --git a/go.sum b/go.sum new file mode 100644 index 0000000000..84ae5c12d5 --- /dev/null +++ b/go.sum @@ -0,0 +1,577 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go 
v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod 
h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/beevik/ntp v0.3.0 h1:xzVrPrE4ziasFXgBVBZJDP0Wg/KpMwk2KHJ4Ba8GrDw= +github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod 
h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/ema/qdisc v0.0.0-20200603082823-62d0308e3e00 h1:0GHzegkDz/zSrt+Zph1OueNImPdUxoToypnkhhRYTjI= +github.com/ema/qdisc v0.0.0-20200603082823-62d0308e3e00/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane 
v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968 h1:s+PDl6lozQ+dEUtUtQnO7+A2iPG3sK1pI4liU+jxn90= +github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod 
h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf 
v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/go-envparse v0.0.0-20200406174449-d9cfd743a15e h1:v1d9+AJMP6i4p8BSKNU0InuvmIAdZjQLNN19V86AG4Q= +github.com/hashicorp/go-envparse v0.0.0-20200406174449-d9cfd743a15e/go.mod h1:/NlxCzN2D4C4L2uDE6ux/h6jM+n98VFQM14nnCIfHJU= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hodgesds/perf-utils v0.4.0 h1:onWrAGy6RYr7938qNXtSsTr54K4BLx8Hh3EXAr+xy+U= +github.com/hodgesds/perf-utils v0.4.0/go.mod h1:wpXb8IDP3gn1iCsHuql0e9fyARRjRPvxN7lRPDihOds= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/illumos/go-kstat v0.0.0-20210513183136-173c9b0a9973 h1:hk4LPqXIY/c9XzRbe7dA6qQxaT6Axcbny0L/G5a4owQ= +github.com/illumos/go-kstat v0.0.0-20210513183136-173c9b0a9973/go.mod h1:PoK3ejP3LJkGTzKqRlpvCIFas3ncU02v8zzWDW+g0FY= +github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 h1:uhL5Gw7BINiiPAo24A2sxkcDI0Jt/sqp1v5xQCniEFA= +github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= +github.com/jsimonetti/rtnetlink v0.0.0-20201216134343-bde56ed16391/go.mod h1:cR77jAZG3Y3bsb8hF6fHJbFoyFukLFOkQ98S0pQz3xw= +github.com/jsimonetti/rtnetlink v0.0.0-20201220180245-69540ac93943/go.mod h1:z4c53zj6Eex712ROyh8WI0ihysb5j2ROyV42iNogmAs= +github.com/jsimonetti/rtnetlink v0.0.0-20210122163228-8d122574c736/go.mod h1:ZXpIyOK59ZnN7J0BV99cZUPmsqDRZ3eq5X+st7u/oSA= +github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9Rh8m+aHZIG69YPGGem1i5VzoyRC8nw2kA8B+ik5U= +github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmKSdU4VGSiv1bMsdqNALI4RSvvjtz65tTMCnD05qLo= +github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786 h1:N527AHMa793TP5z5GNAn/VLPzlc0ewzWdeP/25gDfgQ= +github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786/go.mod 
h1:v4hqbTdfQngbVSZJVWUhGE/lbTFf9jb+ygmNUDQMuOs= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lufia/iostat v1.2.0 h1:Botv3++V0FnQyhRlSt82DHUBv7XlxFtaNInpLq1jrAU= +github.com/lufia/iostat v1.2.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= +github.com/mattn/go-xmlrpc v0.0.3 h1:Y6WEMLEsqs3RviBrAa1/7qmbGB7DVD3brZIbqMbQdGY= +github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= 
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43 h1:WgyLFv10Ov49JAQI/ZLUkCZ7VJS3r74hwFIGXJsgZlY= +github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo= +github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= +github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= +github.com/mdlayher/netlink v1.2.0/go.mod h1:kwVW1io0AZy9A1E2YYgaD4Cj+C+GPkU6klXCMzIJ9p8= +github.com/mdlayher/netlink v1.2.1/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.2.2-0.20210123213345-5cc92139ae3e/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.3.0/go.mod h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuriDdoPSWys= +github.com/mdlayher/netlink v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8= +github.com/mdlayher/netlink v1.4.1 h1:I154BCU+mKlIf7BgcAJB2r7QjveNPty6uNY1g9ChVfI= +github.com/mdlayher/netlink v1.4.1/go.mod h1:e4/KuJ+s8UhfUpO9z00/fDZZmhSrs+oxyqAS9cNgn6Q= +github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00 h1:qEtkL8n1DAHpi5/AOgAckwGQUlMe4+jhL/GMt+GKIks= +github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00/go.mod 
h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= +github.com/mdlayher/wifi v0.0.0-20200527114002-84f0b9457fdd h1:50p1vPNK43pzCVX10+5MmiOerbBzC1vR6+sLB3FZewE= +github.com/mdlayher/wifi v0.0.0-20200527114002-84f0b9457fdd/go.mod h1:Evt/EIne46u9PtQbeTx2NTcqURpr5K4SvKtGmBuDPN8= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= 
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/exporter-toolkit v0.7.0 h1:XtYeVeeC5daG4txbc9+mieKq+/AK4gtIBLl9Mulrjnk= +github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.4-0.20211011103944-1a7a2bd3279f h1:ncXqc93eJV1Ncr3f6GA3MrIDNkNHvcPonRC2QgZaVkQ= +github.com/prometheus/procfs v0.7.4-0.20211011103944-1a7a2bd3279f/go.mod 
h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/safchain/ethtool v0.1.0 h1:SsRnt87qssm3RltLJze6kM+4fs32twq6mZEcBxbDMVg= +github.com/safchain/ethtool v0.1.0/go.mod h1:WkKB1DnNtvsMlDmQ50sgwowDJV/hGbJSOvJoEXs1AJQ= +github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973 h1:GfSdC6wKfTGcgCS7BtzF5694Amne1pGCSTY252WhlEY= +github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a h1:os5OBNhwOwybXZMNLqT96XqtjdTtwRFw2w08uluvNeI= +github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod 
h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod 
h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod 
h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
+golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210110051926-789bb1bd4061/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211023085530-d6a326fbbf70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1 h1:kwrAHlwJ0DUBZwQ238v+Uod/3eZ8B2K5rYsUHBQvzmI= +golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= 
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1 h1:7QnIQpGRHE5RnLKnESfDoxm2dTapTZua5a0kS0A+VXQ= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools 
v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/node_exporter.go b/node_exporter.go index 107718825c..9a0e3d09f1 100644 --- a/node_exporter.go +++ b/node_exporter.go @@ -14,55 +14,178 @@ package main import ( + "fmt" + stdlog "log" "net/http" _ "net/http/pprof" + "os" + "os/user" + "sort" + "github.com/prometheus/common/promlog" + "github.com/prometheus/common/promlog/flag" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + promcollectors "github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/prometheus/common/log" "github.com/prometheus/common/version" + "github.com/prometheus/exporter-toolkit/web" "github.com/prometheus/node_exporter/collector" - "gopkg.in/alecthomas/kingpin.v2" + kingpin "gopkg.in/alecthomas/kingpin.v2" ) -func init() { - prometheus.MustRegister(version.NewCollector("node_exporter")) +// handler wraps an unfiltered http.Handler but uses a filtered handler, +// created on the fly, if filtering is requested. Create instances with +// newHandler. +type handler struct { + unfilteredHandler http.Handler + // exporterMetricsRegistry is a separate registry for the metrics about + // the exporter itself. 
+ exporterMetricsRegistry *prometheus.Registry + includeExporterMetrics bool + maxRequests int + logger log.Logger +} + +func newHandler(includeExporterMetrics bool, maxRequests int, logger log.Logger) *handler { + h := &handler{ + exporterMetricsRegistry: prometheus.NewRegistry(), + includeExporterMetrics: includeExporterMetrics, + maxRequests: maxRequests, + logger: logger, + } + if h.includeExporterMetrics { + h.exporterMetricsRegistry.MustRegister( + promcollectors.NewProcessCollector(promcollectors.ProcessCollectorOpts{}), + promcollectors.NewGoCollector(), + ) + } + if innerHandler, err := h.innerHandler(); err != nil { + panic(fmt.Sprintf("Couldn't create metrics handler: %s", err)) + } else { + h.unfilteredHandler = innerHandler + } + return h +} + +// ServeHTTP implements http.Handler. +func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + filters := r.URL.Query()["collect[]"] + level.Debug(h.logger).Log("msg", "collect query:", "filters", filters) + + if len(filters) == 0 { + // No filters, use the prepared unfiltered handler. + h.unfilteredHandler.ServeHTTP(w, r) + return + } + // To serve filtered metrics, we create a filtering handler on the fly. + filteredHandler, err := h.innerHandler(filters...) + if err != nil { + level.Warn(h.logger).Log("msg", "Couldn't create filtered metrics handler:", "err", err) + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(fmt.Sprintf("Couldn't create filtered metrics handler: %s", err))) + return + } + filteredHandler.ServeHTTP(w, r) +} + +// innerHandler is used to create both the one unfiltered http.Handler to be +// wrapped by the outer handler and also the filtered handlers created on the +// fly. The former is accomplished by calling innerHandler without any arguments +// (in which case it will log all the collectors enabled via command-line +// flags). +func (h *handler) innerHandler(filters ...string) (http.Handler, error) { + nc, err := collector.NewNodeCollector(h.logger, filters...) 
+ if err != nil { + return nil, fmt.Errorf("couldn't create collector: %s", err) + } + + // Only log the creation of an unfiltered handler, which should happen + // only once upon startup. + if len(filters) == 0 { + level.Info(h.logger).Log("msg", "Enabled collectors") + collectors := []string{} + for n := range nc.Collectors { + collectors = append(collectors, n) + } + sort.Strings(collectors) + for _, c := range collectors { + level.Info(h.logger).Log("collector", c) + } + } + + r := prometheus.NewRegistry() + r.MustRegister(version.NewCollector("node_exporter")) + if err := r.Register(nc); err != nil { + return nil, fmt.Errorf("couldn't register node collector: %s", err) + } + handler := promhttp.HandlerFor( + prometheus.Gatherers{h.exporterMetricsRegistry, r}, + promhttp.HandlerOpts{ + ErrorLog: stdlog.New(log.NewStdlibAdapter(level.Error(h.logger)), "", 0), + ErrorHandling: promhttp.ContinueOnError, + MaxRequestsInFlight: h.maxRequests, + Registry: h.exporterMetricsRegistry, + }, + ) + if h.includeExporterMetrics { + // Note that we have to use h.exporterMetricsRegistry here to + // use the same promhttp metrics for all expositions. 
+ handler = promhttp.InstrumentMetricHandler( + h.exporterMetricsRegistry, handler, + ) + } + return handler, nil } func main() { var ( - listenAddress = kingpin.Flag("web.listen-address", "Address on which to expose metrics and web interface.").Default(":9100").String() - metricsPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").String() + listenAddress = kingpin.Flag( + "web.listen-address", + "Address on which to expose metrics and web interface.", + ).Default(":9100").String() + metricsPath = kingpin.Flag( + "web.telemetry-path", + "Path under which to expose metrics.", + ).Default("/metrics").String() + disableExporterMetrics = kingpin.Flag( + "web.disable-exporter-metrics", + "Exclude metrics about the exporter itself (promhttp_*, process_*, go_*).", + ).Bool() + maxRequests = kingpin.Flag( + "web.max-requests", + "Maximum number of parallel scrape requests. Use 0 to disable.", + ).Default("40").Int() + disableDefaultCollectors = kingpin.Flag( + "collector.disable-defaults", + "Set all collectors to disabled by default.", + ).Default("false").Bool() + configFile = kingpin.Flag( + "web.config", + "[EXPERIMENTAL] Path to config yaml file that can enable TLS or authentication.", + ).Default("").String() ) - log.AddFlags(kingpin.CommandLine) + promlogConfig := &promlog.Config{} + flag.AddFlags(kingpin.CommandLine, promlogConfig) kingpin.Version(version.Print("node_exporter")) + kingpin.CommandLine.UsageWriter(os.Stdout) kingpin.HelpFlag.Short('h') kingpin.Parse() + logger := promlog.New(promlogConfig) - log.Infoln("Starting node_exporter", version.Info()) - log.Infoln("Build context", version.BuildContext()) - - nc, err := collector.NewNodeCollector() - if err != nil { - log.Fatalf("Couldn't create collector: %s", err) + if *disableDefaultCollectors { + collector.DisableDefaultCollectors() } - log.Infof("Enabled collectors:") - for n := range nc.Collectors { - log.Infof(" - %s", n) + level.Info(logger).Log("msg", 
"Starting node_exporter", "version", version.Info()) + level.Info(logger).Log("msg", "Build context", "build_context", version.BuildContext()) + if user, err := user.Current(); err == nil && user.Uid == "0" { + level.Warn(logger).Log("msg", "Node Exporter is running as root user. This exporter is designed to run as unpriviledged user, root is not required.") } - if err := prometheus.Register(nc); err != nil { - log.Fatalf("Couldn't register collector: %s", err) - } - handler := promhttp.HandlerFor(prometheus.DefaultGatherer, - promhttp.HandlerOpts{ - ErrorLog: log.NewErrorLogger(), - ErrorHandling: promhttp.ContinueOnError, - }) - - // TODO(ts): Remove deprecated and problematic InstrumentHandler usage. - http.Handle(*metricsPath, prometheus.InstrumentHandler("prometheus", handler)) + http.Handle(*metricsPath, newHandler(!*disableExporterMetrics, *maxRequests, logger)) http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(` Node Exporter @@ -73,9 +196,10 @@ func main() { `)) }) - log.Infoln("Listening on", *listenAddress) - err = http.ListenAndServe(*listenAddress, nil) - if err != nil { - log.Fatal(err) + level.Info(logger).Log("msg", "Listening on", "address", *listenAddress) + server := &http.Server{Addr: *listenAddress} + if err := web.ListenAndServe(server, *configFile, logger); err != nil { + level.Error(logger).Log("err", err) + os.Exit(1) } } diff --git a/node_exporter_test.go b/node_exporter_test.go index a1a214c987..f10db7c661 100644 --- a/node_exporter_test.go +++ b/node_exporter_test.go @@ -1,3 +1,16 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package main import ( @@ -25,9 +38,13 @@ func TestFileDescriptorLeak(t *testing.T) { if _, err := os.Stat(binary); err != nil { t.Skipf("node_exporter binary not available, try to run `make build` first: %s", err) } - if _, err := procfs.NewStat(); err != nil { + fs, err := procfs.NewDefaultFS() + if err != nil { t.Skipf("proc filesystem is not available, but currently required to read number of open file descriptors: %s", err) } + if _, err := fs.Stat(); err != nil { + t.Errorf("unable to read process stats: %s", err) + } exporter := exec.Command(binary, "--web.listen-address", address) test := func(pid int) error { @@ -129,14 +146,9 @@ func runCommandAndTests(cmd *exec.Cmd, address string, fn func(pid int) error) e errc <- fn(pid) }(cmd.Process.Pid) - select { - case err := <-errc: - if cmd.Process != nil { - cmd.Process.Kill() - } - if err != nil { - return err - } + err := <-errc + if cmd.Process != nil { + cmd.Process.Kill() } - return nil + return err } diff --git a/scripts/errcheck_excludes.txt b/scripts/errcheck_excludes.txt new file mode 100644 index 0000000000..14b824f85f --- /dev/null +++ b/scripts/errcheck_excludes.txt @@ -0,0 +1,4 @@ +// Used in HTTP handlers, any error is handled by the server itself. +(net/http.ResponseWriter).Write +// Never check for logger errors. 
+(github.com/go-kit/log.Logger).Log diff --git a/staticcheck.conf b/staticcheck.conf new file mode 100644 index 0000000000..82a0d9b058 --- /dev/null +++ b/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all", "ST1003"] diff --git a/text_collector_examples/README.md b/text_collector_examples/README.md index 540c3ddf4c..3794261b0a 100644 --- a/text_collector_examples/README.md +++ b/text_collector_examples/README.md @@ -1,7 +1,4 @@ # Text collector example scripts -These scripts are examples to be used with the Node Exporter Textfile -Collector. - -For more information see: -https://github.com/prometheus/node_exporter#textfile-collector +The scripts have been moved to +https://github.com/prometheus-community/node-exporter-textfile-collector-scripts diff --git a/text_collector_examples/apt.sh b/text_collector_examples/apt.sh deleted file mode 100755 index 3919b7c547..0000000000 --- a/text_collector_examples/apt.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# -# Description: Expose metrics from apt updates. -# -# Author: Ben Kochie - -upgrades="$(/usr/bin/apt-get --just-print upgrade \ - | /usr/bin/awk '/^Inst/ {print $5, $6}' \ - | /usr/bin/sort \ - | /usr/bin/uniq -c \ - | awk '{ gsub(/\\\\/, "\\\\", $2); gsub(/\"/, "\\\"", $2); - gsub(/\[/, "", $3); gsub(/\]/, "", $3); - print "apt_upgrades_pending{origin=\"" $2 "\",arch=\"" $3 "\"} " $1}' -)" - -echo '# HELP apt_upgrades_pending Apt package pending updates by origin.' -echo '# TYPE apt_upgrades_pending gauge' -if [[ -n "${upgrades}" ]] ; then - echo "${upgrades}" -else - echo 'apt_upgrades_pending{origin="",arch=""} 0' -fi - -echo '# HELP node_reboot_required Node reboot is required for software updates.' 
-echo '# TYPE node_reboot_required gauge' -if [[ -f '/run/reboot-required' ]] ; then - echo 'node_reboot_required 1' -else - echo 'node_reboot_required 0' -fi diff --git a/text_collector_examples/ntpd_metrics.py b/text_collector_examples/ntpd_metrics.py deleted file mode 100755 index ab55a130eb..0000000000 --- a/text_collector_examples/ntpd_metrics.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python3 -# -# Description: Extract NTPd metrics from ntpq -np. -# Author: Ben Kochie - -import re -import subprocess -import sys - -# NTP peers status, with no DNS lookups. -ntpq_cmd = ['ntpq', '-np'] -ntpq_rv_cmd = ['ntpq', '-c', 'rv 0 offset,sys_jitter,rootdisp,rootdelay'] - -# Regex to match all of the fields in the output of ntpq -np -metrics_fields = [ - '^(?P.)(?P[\w\.]+)', - '(?P[\w\.]+)', - '(?P\d+)', - '(?P\w)', - '(?P\d+)', - '(?P\d+)', - '(?P\d+)', - '(?P\d+\.\d+)', - '(?P-?\d+\.\d+)', - '(?P\d+\.\d+)', -] -metrics_re = '\s+'.join(metrics_fields) - -# Remote types -# http://support.ntp.org/bin/view/Support/TroubleshootingNTP -remote_types = { - 'l': 'local', - 'u': 'unicast', - 'm': 'multicast', - 'b': 'broadcast', - '-': 'netaddr', -} - -# Status codes: -# http://www.eecis.udel.edu/~mills/ntp/html/decode.html#peer -status_types = { - ' ': 0, - 'x': 1, - '.': 2, - '-': 3, - '+': 4, - '#': 5, - '*': 6, - 'o': 7, -} - - -# Run the ntpq command. -def get_output(command): - try: - output = subprocess.check_output(command, stderr=subprocess.DEVNULL) - except subprocess.CalledProcessError as e: - return None - return output.decode() - - -# Print metrics in Prometheus format. -def print_prometheus(metric, values): - print("# HELP ntpd_%s NTPd metric for %s" % (metric, metric)) - print("# TYPE ntpd_%s gauge" % (metric)) - for labels in values: - if labels is None: - print("ntpd_%s %f" % (metric, values[labels])) - else: - print("ntpd_%s{%s} %f" % (metric, labels, values[labels])) - - -# Parse raw ntpq lines. 
-def parse_line(line): - if re.match('\s+remote\s+refid', line): - return None - if re.match('=+', line): - return None - if re.match('.+\.(LOCL|POOL)\.', line): - return None - if re.match('^$', line): - return None - return re.match(metrics_re, line) - - -# Main function -def main(argv): - ntpq = get_output(ntpq_cmd) - peer_status_metrics = {} - delay_metrics = {} - offset_metrics = {} - jitter_metrics = {} - for line in ntpq.split('\n'): - metric_match = parse_line(line) - if metric_match is None: - continue - remote = metric_match.group('remote') - refid = metric_match.group('refid') - stratum = metric_match.group('stratum') - remote_type = remote_types[metric_match.group('type')] - common_labels = "remote=\"%s\",reference=\"%s\"" % (remote, refid) - peer_labels = "%s,stratum=\"%s\",type=\"%s\"" % (common_labels, stratum, remote_type) - - peer_status_metrics[peer_labels] = float(status_types[metric_match.group('status')]) - delay_metrics[common_labels] = float(metric_match.group('delay')) - offset_metrics[common_labels] = float(metric_match.group('offset')) - jitter_metrics[common_labels] = float(metric_match.group('jitter')) - - print_prometheus('peer_status', peer_status_metrics) - print_prometheus('delay_milliseconds', delay_metrics) - print_prometheus('offset_milliseconds', offset_metrics) - print_prometheus('jitter_milliseconds', jitter_metrics) - - ntpq_rv = get_output(ntpq_rv_cmd) - for metric in ntpq_rv.split(','): - metric_name, metric_value = metric.strip().split('=') - print_prometheus(metric_name, {None: float(metric_value)}) - - -# Go go go! -if __name__ == "__main__": - main(sys.argv[1:]) diff --git a/text_collector_examples/smartmon.sh b/text_collector_examples/smartmon.sh deleted file mode 100755 index 5c8077f523..0000000000 --- a/text_collector_examples/smartmon.sh +++ /dev/null @@ -1,149 +0,0 @@ -#!/bin/bash -# Script informed by the collectd monitoring script for smartmontools (using smartctl) -# by Samuel B. 
(c) 2012 -# source at: http://devel.dob.sk/collectd-scripts/ - -# TODO: This probably needs to be a little more complex. The raw numbers can have more -# data in them than you'd think. -# http://arstechnica.com/civis/viewtopic.php?p=22062211 - -disks="$(/usr/sbin/smartctl --scan | awk '{print $1 "|" $3}')" - -parse_smartctl_attributes_awk="$(cat << 'SMARTCTLAWK' -$1 ~ /^[0-9]+$/ && $2 ~ /^[a-zA-Z0-9_-]+$/ { - gsub(/-/, "_"); - printf "%s_value{%s,smart_id=\"%s\"} %d\n", $2, labels, $1, $4 - printf "%s_worst{%s,smart_id=\"%s\"} %d\n", $2, labels, $1, $5 - printf "%s_threshold{%s,smart_id=\"%s\"} %d\n", $2, labels, $1, $6 - printf "%s_raw_value{%s,smart_id=\"%s\"} %e\n", $2, labels, $1, $10 -} -SMARTCTLAWK -)" - -smartmon_attrs="$(cat << 'SMARTMONATTRS' -airflow_temperature_cel -command_timeout -current_pending_sector -end_to_end_error -erase_fail_count -g_sense_error_rate -hardware_ecc_recovered -host_reads_mib -host_reads_32mib -host_writes_mib -host_writes_32mib -load_cycle_count -media_wearout_indicator -nand_writes_1gib -offline_uncorrectable -power_cycle_count -power_on_hours -program_fail_count -raw_read_error_rate -reallocated_sector_ct -reported_uncorrect -sata_downshift_count -spin_retry_count -spin_up_time -start_stop_count -temperature_celsius -total_lbas_read -total_lbas_written -udma_crc_error_count -unsafe_shutdown_count -workld_host_reads_perc -workld_media_wear_indic -workload_minutes -SMARTMONATTRS -)" -smartmon_attrs="$(echo ${smartmon_attrs} | xargs | tr ' ' '|')" - -parse_smartctl_attributes() { - local disk="$1" - local disk_type="$2" - local labels="disk=\"${disk}\",type=\"${disk_type}\"" - local vars="$(echo "${smartmon_attrs}" | xargs | tr ' ' '|')" - sed 's/^ \+//g' \ - | awk -v labels="${labels}" "${parse_smartctl_attributes_awk}" 2>/dev/null \ - | tr A-Z a-z \ - | grep -E "(${smartmon_attrs})" -} - -parse_smartctl_info() { - local -i smart_available=0 smart_enabled=0 smart_healthy=0 - local disk="$1" disk_type="$2" - while read line ; do - 
info_type="$(echo "${line}" | cut -f1 -d: | tr ' ' '_')" - info_value="$(echo "${line}" | cut -f2- -d: | sed 's/^ \+//g')" - case "${info_type}" in - Model_Family) model_family="${info_value}" ;; - Device_Model) device_model="${info_value}" ;; - Serial_Number) serial_number="${info_value}" ;; - Firmware_Version) fw_version="${info_value}" ;; - Vendor) vendor="${info_value}" ;; - Product) product="${info_value}" ;; - Revision) revision="${info_value}" ;; - Logical_Unit_id) lun_id="${info_value}" ;; - esac - if [[ "${info_type}" == 'SMART_support_is' ]] ; then - case "${info_value:0:7}" in - Enabled) smart_enabled=1 ;; - Availab) smart_available=1 ;; - Unavail) smart_available=0 ;; - esac - fi - if [[ "${info_type}" == 'SMART_overall-health_self-assessment_test_result' ]] ; then - case "${info_value:0:6}" in - PASSED) smart_healthy=1 ;; - esac - elif [[ "${info_type}" == 'SMART_Health_Status' ]] ; then - case "${info_value:0:2}" in - OK) smart_healthy=1 ;; - esac - fi - done - if [[ -n "${vendor}" ]] ; then - echo "device_info{disk=\"${disk}\",type=\"${disk_type}\",vendor=\"${vendor}\",product=\"${product}\",revision=\"${revision}\",lun_id=\"${lun_id}\"} 1" - else - echo "device_info{disk=\"${disk}\",type=\"${disk_type}\",model_family=\"${model_family}\",device_model=\"${device_model}\",serial_number=\"${serial_number}\",firmware_version=\"${fw_version}\"} 1" - fi - echo "device_smart_available{disk=\"${disk}\",type=\"${disk_type}\"} ${smart_available}" - echo "device_smart_enabled{disk=\"${disk}\",type=\"${disk_type}\"} ${smart_enabled}" - echo "device_smart_healthy{disk=\"${disk}\",type=\"${disk_type}\"} ${smart_healthy}" -} - -output_format_awk="$(cat << 'OUTPUTAWK' -BEGIN { v = "" } -v != $1 { - print "# HELP smartmon_" $1 " SMART metric " $1; - print "# TYPE smartmon_" $1 " gauge"; - v = $1 -} -{print "smartmon_" $0} -OUTPUTAWK -)" - -format_output() { - sort \ - | awk -F'{' "${output_format_awk}" -} - -smartctl_version="$(/usr/sbin/smartctl -V | head -n1 | awk 
'$1 == "smartctl" {print $2}')" - -echo "smartctl_version{version=\"${smartctl_version}\"} 1" | format_output - -if [[ "$(expr "${smartctl_version}" : '\([0-9]*\)\..*')" -lt 6 ]] ; then - exit -fi - -device_list="$(/usr/sbin/smartctl --scan-open | awk '{print $1 "|" $3}')" - -for device in ${device_list}; do - disk="$(echo ${device} | cut -f1 -d'|')" - type="$(echo ${device} | cut -f2 -d'|')" - echo "smartctl_run{disk=\"${disk}\",type=\"${type}\"}" $(TZ=UTC date '+%s') - # Get the SMART information and health - /usr/sbin/smartctl -i -H -d "${type}" "${disk}" | parse_smartctl_info "${disk}" "${type}" - # Get the SMART attributes - /usr/sbin/smartctl -A -d "${type}" "${disk}" | parse_smartctl_attributes "${disk}" "${type}" -done | format_output diff --git a/text_collector_examples/storcli.py b/text_collector_examples/storcli.py deleted file mode 100755 index f1a8a60251..0000000000 --- a/text_collector_examples/storcli.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python - -# Script to parse StorCLI's JSON output and expose -# MegaRAID health as Prometheus metrics. -# -# Tested against StorCLI 'Ver 1.14.12 Nov 25, 2014'. -# -# StorCLI reference manual: -# http://docs.avagotech.com/docs/12352476 -# -# Advanced Software Options (ASO) not exposed as metrics currently. -# -# JSON key abbreviations used by StorCLI are documented in the standard command -# output, i.e. when you omit the trailing 'J' from the command. 
- -import argparse -import json -import subprocess - -DESCRIPTION = """Parses StorCLI's JSON output and exposes MegaRAID health as - Prometheus metrics.""" -VERSION = '0.0.1' - -METRIC_PREFIX = 'megaraid_' -METRIC_CONTROLLER_LABELS = '{{controller="{}", model="{}"}}' - - -def main(args): - data = json.loads(get_storcli_json(args.storcli_path)) - - # It appears that the data we need will always be present in the first - # item in the Controllers array - status = data['Controllers'][0] - - metrics = { - 'status_code': status['Command Status']['Status Code'], - 'controllers': status['Response Data']['Number of Controllers'], - } - - for name, value in metrics.iteritems(): - print("{}{} {}".format(METRIC_PREFIX, name, value)) - - controller_info = [] - controller_metrics = {} - overview = [] - - try: - overview = status['Response Data']['System Overview'] - except KeyError: - pass - - for controller in overview: - controller_index = controller['Ctl'] - model = controller['Model'] - controller_info.append(METRIC_CONTROLLER_LABELS.format(controller_index, model)) - - controller_metrics = { - # FIXME: Parse dimmer switch options - # 'dimmer_switch': controller['DS'], - - 'battery_backup_healthy': int(controller['BBU'] == 'Opt'), - 'degraded': int(controller['Hlth'] == 'Dgd'), - 'drive_groups': controller['DGs'], - 'emergency_hot_spare': int(controller['EHS'] == 'Y'), - 'failed': int(controller['Hlth'] == 'Fld'), - 'healthy': int(controller['Hlth'] == 'Opt'), - 'physical_drives': controller['PDs'], - 'ports': controller['Ports'], - 'scheduled_patrol_read': int(controller['sPR'] == 'On'), - 'virtual_drives': controller['VDs'], - - # Reverse StorCLI's logic to make metrics consistent - 'drive_groups_optimal': int(controller['DNOpt'] == 0), - 'virtual_drives_optimal': int(controller['VNOpt'] == 0), - } - - for name, value in controller_metrics.iteritems(): - print('{}{}{{controller="{}"}} {}'.format(METRIC_PREFIX, name, controller_index, value)) - - for labels in 
controller_info: - print('{}{}{} {}'.format(METRIC_PREFIX, 'controller_info', labels, 1)) - - -def get_storcli_json(storcli_path): - storcli_cmd = [storcli_path, 'show', 'all', 'J'] - proc = subprocess.Popen(storcli_cmd, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - return proc.communicate()[0] - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description=DESCRIPTION, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('--storcli_path', - default='/opt/MegaRAID/storcli/storcli64', - help='path to StorCLi binary') - parser.add_argument('--version', - action='version', - version='%(prog)s {}'.format(VERSION)) - args = parser.parse_args() - - main(args) diff --git a/tls_config_noAuth.bad.yml b/tls_config_noAuth.bad.yml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ttar b/ttar index c3472c1b79..b0171a12b5 100755 --- a/ttar +++ b/ttar @@ -1,11 +1,26 @@ #!/usr/bin/env bash + # Purpose: plain text tar format # Limitations: - only suitable for text files, directories, and symlinks # - stores only filename, content, and mode # - not designed for untrusted input - +# # Note: must work with bash version 3.2 (macOS) +# Copyright 2017 Roger Luethi +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ set -o errexit -o nounset # Sanitize environment (for instance, standard sorting of glob matches) @@ -13,7 +28,55 @@ export LC_ALL=C path="" CMD="" -ARG_STRING="$@" +ARG_STRING="$*" + +#------------------------------------------------------------------------------ +# Not all sed implementations can work on null bytes. In order to make ttar +# work out of the box on macOS, use Python as a stream editor. + +USE_PYTHON=0 + +PYTHON_CREATE_FILTER=$(cat << 'PCF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'EOF', r'\EOF', line) + line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) + line = re.sub('\x00', r'NULLBYTE', line) + sys.stdout.write(line) +PCF +) + +PYTHON_EXTRACT_FILTER=$(cat << 'PEF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'(?/dev/null; then + echo "ERROR Python not found. Aborting." + exit 2 + fi + USE_PYTHON=1 + fi +} + +#------------------------------------------------------------------------------ function usage { bname=$(basename "$0") @@ -24,6 +87,7 @@ Usage: $bname [-C ] -c -f (create archive) Options: -C (change directory) + -v (verbose) Example: Change to sysfs directory, create ttar file from fixtures directory $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ @@ -46,6 +110,8 @@ function set_cmd { CMD=$1 } +unset VERBOSE + while getopts :cf:htxvC: opt; do case $opt in c) @@ -143,8 +209,37 @@ function extract { fi while IFS= read -r line; do line_no=$(( line_no + 1 )) + local eof_without_newline if [ "$size" -gt 0 ]; then - echo "$line" >> "$path" + if [[ "$line" =~ [^\\]EOF ]]; then + # An EOF not preceeded by a backslash indicates that the line + # does not end with a newline + eof_without_newline=1 + else + eof_without_newline=0 + fi + # Replace NULLBYTE with null byte if at beginning of line + # Replace NULLBYTE with null byte unless preceeded by backslash + # Remove one backslash in front of NULLBYTE (if any) + # Remove EOF unless preceeded by 
backslash + # Remove one backslash in front of EOF + if [ $USE_PYTHON -eq 1 ]; then + echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" + else + # The repeated pattern makes up for sed's lack of negative + # lookbehind assertions (for consecutive null bytes). + echo -n "$line" | \ + sed -e 's/^NULLBYTE/\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\\NULLBYTE/NULLBYTE/g; + s/\([^\\]\)EOF/\1/g; + s/\\EOF/EOF/g; + ' >> "$path" + fi + if [[ "$eof_without_newline" -eq 0 ]]; then + echo >> "$path" + fi size=$(( size - 1 )) continue fi @@ -188,11 +283,14 @@ function get_mode { local mfile=$1 if [ -z "${STAT_OPTION:-}" ]; then if stat -c '%a' "$mfile" >/dev/null 2>&1; then + # GNU stat STAT_OPTION='-c' STAT_FORMAT='%a' else + # BSD stat STAT_OPTION='-f' - STAT_FORMAT='%A' + # Octal output, user/group/other (omit file type, sticky bit) + STAT_FORMAT='%OLp' fi fi stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" @@ -201,6 +299,7 @@ function get_mode { function _create { shopt -s nullglob local mode + local eof_without_newline while (( "$#" )); do file=$1 if [ -L "$file" ]; then @@ -224,8 +323,30 @@ function _create { elif [ -f "$file" ]; then echo "Path: $file" lines=$(wc -l "$file"|awk '{print $1}') + eof_without_newline=0 + if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ + [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then + eof_without_newline=1 + lines=$((lines+1)) + fi echo "Lines: $lines" - cat "$file" + # Add backslash in front of EOF + # Add backslash in front of NULLBYTE + # Replace null byte with NULLBYTE + if [ $USE_PYTHON -eq 1 ]; then + < "$file" python -c "$PYTHON_CREATE_FILTER" + else + < "$file" \ + sed 's/EOF/\\EOF/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; + ' + fi + if [[ "$eof_without_newline" -eq 1 ]]; then + # Finish line with EOF to indicate that the original line did + # not end with a linefeed + echo "EOF" + fi mode=$(get_mode "$file") echo "Mode: $mode" vecho "$mode $file" @@ -254,6 +375,8 @@ 
function create { _create "$@" } +test_environment + if [ -n "${CDIR:-}" ]; then if [[ "$ARCHIVE" != /* ]]; then # Relative path: preserve the archive's location before changing diff --git a/vendor/github.com/alecthomas/template/LICENSE b/vendor/github.com/alecthomas/template/LICENSE deleted file mode 100644 index 7448756763..0000000000 --- a/vendor/github.com/alecthomas/template/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/alecthomas/template/README.md b/vendor/github.com/alecthomas/template/README.md deleted file mode 100644 index ef6a8ee303..0000000000 --- a/vendor/github.com/alecthomas/template/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Go's `text/template` package with newline elision - -This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline. - -eg. - -``` -{{if true}}\ -hello -{{end}}\ -``` - -Will result in: - -``` -hello\n -``` - -Rather than: - -``` -\n -hello\n -\n -``` diff --git a/vendor/github.com/alecthomas/template/doc.go b/vendor/github.com/alecthomas/template/doc.go deleted file mode 100644 index 223c595c25..0000000000 --- a/vendor/github.com/alecthomas/template/doc.go +++ /dev/null @@ -1,406 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package template implements data-driven templates for generating textual output. - -To generate HTML output, see package html/template, which has the same interface -as this package but automatically secures HTML output against certain attacks. - -Templates are executed by applying them to a data structure. Annotations in the -template refer to elements of the data structure (typically a field of a struct -or a key in a map) to control execution and derive values to be displayed. -Execution of the template walks the structure and sets the cursor, represented -by a period '.' and called "dot", to the value at the current location in the -structure as execution proceeds. - -The input text for a template is UTF-8-encoded text in any format. -"Actions"--data evaluations or control structures--are delimited by -"{{" and "}}"; all text outside actions is copied to the output unchanged. 
-Actions may not span newlines, although comments can. - -Once parsed, a template may be executed safely in parallel. - -Here is a trivial example that prints "17 items are made of wool". - - type Inventory struct { - Material string - Count uint - } - sweaters := Inventory{"wool", 17} - tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}") - if err != nil { panic(err) } - err = tmpl.Execute(os.Stdout, sweaters) - if err != nil { panic(err) } - -More intricate examples appear below. - -Actions - -Here is the list of actions. "Arguments" and "pipelines" are evaluations of -data, defined in detail below. - -*/ -// {{/* a comment */}} -// A comment; discarded. May contain newlines. -// Comments do not nest and must start and end at the -// delimiters, as shown here. -/* - - {{pipeline}} - The default textual representation of the value of the pipeline - is copied to the output. - - {{if pipeline}} T1 {{end}} - If the value of the pipeline is empty, no output is generated; - otherwise, T1 is executed. The empty values are false, 0, any - nil pointer or interface value, and any array, slice, map, or - string of length zero. - Dot is unaffected. - - {{if pipeline}} T1 {{else}} T0 {{end}} - If the value of the pipeline is empty, T0 is executed; - otherwise, T1 is executed. Dot is unaffected. - - {{if pipeline}} T1 {{else if pipeline}} T0 {{end}} - To simplify the appearance of if-else chains, the else action - of an if may include another if directly; the effect is exactly - the same as writing - {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}} - - {{range pipeline}} T1 {{end}} - The value of the pipeline must be an array, slice, map, or channel. - If the value of the pipeline has length zero, nothing is output; - otherwise, dot is set to the successive elements of the array, - slice, or map and T1 is executed. 
If the value is a map and the - keys are of basic type with a defined order ("comparable"), the - elements will be visited in sorted key order. - - {{range pipeline}} T1 {{else}} T0 {{end}} - The value of the pipeline must be an array, slice, map, or channel. - If the value of the pipeline has length zero, dot is unaffected and - T0 is executed; otherwise, dot is set to the successive elements - of the array, slice, or map and T1 is executed. - - {{template "name"}} - The template with the specified name is executed with nil data. - - {{template "name" pipeline}} - The template with the specified name is executed with dot set - to the value of the pipeline. - - {{with pipeline}} T1 {{end}} - If the value of the pipeline is empty, no output is generated; - otherwise, dot is set to the value of the pipeline and T1 is - executed. - - {{with pipeline}} T1 {{else}} T0 {{end}} - If the value of the pipeline is empty, dot is unaffected and T0 - is executed; otherwise, dot is set to the value of the pipeline - and T1 is executed. - -Arguments - -An argument is a simple value, denoted by one of the following. - - - A boolean, string, character, integer, floating-point, imaginary - or complex constant in Go syntax. These behave like Go's untyped - constants, although raw strings may not span newlines. - - The keyword nil, representing an untyped Go nil. - - The character '.' (period): - . - The result is the value of dot. - - A variable name, which is a (possibly empty) alphanumeric string - preceded by a dollar sign, such as - $piOver2 - or - $ - The result is the value of the variable. - Variables are described below. - - The name of a field of the data, which must be a struct, preceded - by a period, such as - .Field - The result is the value of the field. 
Field invocations may be - chained: - .Field1.Field2 - Fields can also be evaluated on variables, including chaining: - $x.Field1.Field2 - - The name of a key of the data, which must be a map, preceded - by a period, such as - .Key - The result is the map element value indexed by the key. - Key invocations may be chained and combined with fields to any - depth: - .Field1.Key1.Field2.Key2 - Although the key must be an alphanumeric identifier, unlike with - field names they do not need to start with an upper case letter. - Keys can also be evaluated on variables, including chaining: - $x.key1.key2 - - The name of a niladic method of the data, preceded by a period, - such as - .Method - The result is the value of invoking the method with dot as the - receiver, dot.Method(). Such a method must have one return value (of - any type) or two return values, the second of which is an error. - If it has two and the returned error is non-nil, execution terminates - and an error is returned to the caller as the value of Execute. - Method invocations may be chained and combined with fields and keys - to any depth: - .Field1.Key1.Method1.Field2.Key2.Method2 - Methods can also be evaluated on variables, including chaining: - $x.Method1.Field - - The name of a niladic function, such as - fun - The result is the value of invoking the function, fun(). The return - types and values behave as in methods. Functions and function - names are described below. - - A parenthesized instance of one the above, for grouping. The result - may be accessed by a field or map key invocation. - print (.F1 arg1) (.F2 arg2) - (.StructValuedMethod "arg").Field - -Arguments may evaluate to any type; if they are pointers the implementation -automatically indirects to the base type when required. -If an evaluation yields a function value, such as a function-valued -field of a struct, the function is not invoked automatically, but it -can be used as a truth value for an if action and the like. 
To invoke -it, use the call function, defined below. - -A pipeline is a possibly chained sequence of "commands". A command is a simple -value (argument) or a function or method call, possibly with multiple arguments: - - Argument - The result is the value of evaluating the argument. - .Method [Argument...] - The method can be alone or the last element of a chain but, - unlike methods in the middle of a chain, it can take arguments. - The result is the value of calling the method with the - arguments: - dot.Method(Argument1, etc.) - functionName [Argument...] - The result is the value of calling the function associated - with the name: - function(Argument1, etc.) - Functions and function names are described below. - -Pipelines - -A pipeline may be "chained" by separating a sequence of commands with pipeline -characters '|'. In a chained pipeline, the result of the each command is -passed as the last argument of the following command. The output of the final -command in the pipeline is the value of the pipeline. - -The output of a command will be either one value or two values, the second of -which has type error. If that second value is present and evaluates to -non-nil, execution terminates and the error is returned to the caller of -Execute. - -Variables - -A pipeline inside an action may initialize a variable to capture the result. -The initialization has syntax - - $variable := pipeline - -where $variable is the name of the variable. An action that declares a -variable produces no output. - -If a "range" action initializes a variable, the variable is set to the -successive elements of the iteration. Also, a "range" may declare two -variables, separated by a comma: - - range $index, $element := pipeline - -in which case $index and $element are set to the successive values of the -array/slice index or map key and element, respectively. Note that if there is -only one variable, it is assigned the element; this is opposite to the -convention in Go range clauses. 
- -A variable's scope extends to the "end" action of the control structure ("if", -"with", or "range") in which it is declared, or to the end of the template if -there is no such control structure. A template invocation does not inherit -variables from the point of its invocation. - -When execution begins, $ is set to the data argument passed to Execute, that is, -to the starting value of dot. - -Examples - -Here are some example one-line templates demonstrating pipelines and variables. -All produce the quoted word "output": - - {{"\"output\""}} - A string constant. - {{`"output"`}} - A raw string constant. - {{printf "%q" "output"}} - A function call. - {{"output" | printf "%q"}} - A function call whose final argument comes from the previous - command. - {{printf "%q" (print "out" "put")}} - A parenthesized argument. - {{"put" | printf "%s%s" "out" | printf "%q"}} - A more elaborate call. - {{"output" | printf "%s" | printf "%q"}} - A longer chain. - {{with "output"}}{{printf "%q" .}}{{end}} - A with action using dot. - {{with $x := "output" | printf "%q"}}{{$x}}{{end}} - A with action that creates and uses a variable. - {{with $x := "output"}}{{printf "%q" $x}}{{end}} - A with action that uses the variable in another action. - {{with $x := "output"}}{{$x | printf "%q"}}{{end}} - The same, but pipelined. - -Functions - -During execution functions are found in two function maps: first in the -template, then in the global function map. By default, no functions are defined -in the template but the Funcs method can be used to add them. - -Predefined global functions are named as follows. - - and - Returns the boolean AND of its arguments by returning the - first empty argument or the last argument, that is, - "and x y" behaves as "if x then y else x". All the - arguments are evaluated. - call - Returns the result of calling the first argument, which - must be a function, with the remaining arguments as parameters. 
- Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where - Y is a func-valued field, map entry, or the like. - The first argument must be the result of an evaluation - that yields a value of function type (as distinct from - a predefined function such as print). The function must - return either one or two result values, the second of which - is of type error. If the arguments don't match the function - or the returned error value is non-nil, execution stops. - html - Returns the escaped HTML equivalent of the textual - representation of its arguments. - index - Returns the result of indexing its first argument by the - following arguments. Thus "index x 1 2 3" is, in Go syntax, - x[1][2][3]. Each indexed item must be a map, slice, or array. - js - Returns the escaped JavaScript equivalent of the textual - representation of its arguments. - len - Returns the integer length of its argument. - not - Returns the boolean negation of its single argument. - or - Returns the boolean OR of its arguments by returning the - first non-empty argument or the last argument, that is, - "or x y" behaves as "if x then x else y". All the - arguments are evaluated. - print - An alias for fmt.Sprint - printf - An alias for fmt.Sprintf - println - An alias for fmt.Sprintln - urlquery - Returns the escaped value of the textual representation of - its arguments in a form suitable for embedding in a URL query. - -The boolean functions take any zero value to be false and a non-zero -value to be true. 
- -There is also a set of binary comparison operators defined as -functions: - - eq - Returns the boolean truth of arg1 == arg2 - ne - Returns the boolean truth of arg1 != arg2 - lt - Returns the boolean truth of arg1 < arg2 - le - Returns the boolean truth of arg1 <= arg2 - gt - Returns the boolean truth of arg1 > arg2 - ge - Returns the boolean truth of arg1 >= arg2 - -For simpler multi-way equality tests, eq (only) accepts two or more -arguments and compares the second and subsequent to the first, -returning in effect - - arg1==arg2 || arg1==arg3 || arg1==arg4 ... - -(Unlike with || in Go, however, eq is a function call and all the -arguments will be evaluated.) - -The comparison functions work on basic types only (or named basic -types, such as "type Celsius float32"). They implement the Go rules -for comparison of values, except that size and exact type are -ignored, so any integer value, signed or unsigned, may be compared -with any other integer value. (The arithmetic value is compared, -not the bit pattern, so all negative integers are less than all -unsigned integers.) However, as usual, one may not compare an int -with a float32 and so on. - -Associated templates - -Each template is named by a string specified when it is created. Also, each -template is associated with zero or more other templates that it may invoke by -name; such associations are transitive and form a name space of templates. - -A template may use a template invocation to instantiate another associated -template; see the explanation of the "template" action above. The name must be -that of a template associated with the template that contains the invocation. - -Nested template definitions - -When parsing a template, another template may be defined and associated with the -template being parsed. Template definitions must appear at the top level of the -template, much like global variables in a Go program. 
- -The syntax of such definitions is to surround each template declaration with a -"define" and "end" action. - -The define action names the template being created by providing a string -constant. Here is a simple example: - - `{{define "T1"}}ONE{{end}} - {{define "T2"}}TWO{{end}} - {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}} - {{template "T3"}}` - -This defines two templates, T1 and T2, and a third T3 that invokes the other two -when it is executed. Finally it invokes T3. If executed this template will -produce the text - - ONE TWO - -By construction, a template may reside in only one association. If it's -necessary to have a template addressable from multiple associations, the -template definition must be parsed multiple times to create distinct *Template -values, or must be copied with the Clone or AddParseTree method. - -Parse may be called multiple times to assemble the various associated templates; -see the ParseFiles and ParseGlob functions and methods for simple ways to parse -related templates stored in files. - -A template may be executed directly or through ExecuteTemplate, which executes -an associated template identified by name. To invoke our example above, we -might write, - - err := tmpl.Execute(os.Stdout, "no data needed") - if err != nil { - log.Fatalf("execution failed: %s", err) - } - -or to invoke a particular template explicitly by name, - - err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed") - if err != nil { - log.Fatalf("execution failed: %s", err) - } - -*/ -package template diff --git a/vendor/github.com/alecthomas/template/exec.go b/vendor/github.com/alecthomas/template/exec.go deleted file mode 100644 index c3078e5d0c..0000000000 --- a/vendor/github.com/alecthomas/template/exec.go +++ /dev/null @@ -1,845 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package template - -import ( - "bytes" - "fmt" - "io" - "reflect" - "runtime" - "sort" - "strings" - - "github.com/alecthomas/template/parse" -) - -// state represents the state of an execution. It's not part of the -// template so that multiple executions of the same template -// can execute in parallel. -type state struct { - tmpl *Template - wr io.Writer - node parse.Node // current node, for errors - vars []variable // push-down stack of variable values. -} - -// variable holds the dynamic value of a variable such as $, $x etc. -type variable struct { - name string - value reflect.Value -} - -// push pushes a new variable on the stack. -func (s *state) push(name string, value reflect.Value) { - s.vars = append(s.vars, variable{name, value}) -} - -// mark returns the length of the variable stack. -func (s *state) mark() int { - return len(s.vars) -} - -// pop pops the variable stack up to the mark. -func (s *state) pop(mark int) { - s.vars = s.vars[0:mark] -} - -// setVar overwrites the top-nth variable on the stack. Used by range iterations. -func (s *state) setVar(n int, value reflect.Value) { - s.vars[len(s.vars)-n].value = value -} - -// varValue returns the value of the named variable. -func (s *state) varValue(name string) reflect.Value { - for i := s.mark() - 1; i >= 0; i-- { - if s.vars[i].name == name { - return s.vars[i].value - } - } - s.errorf("undefined variable: %s", name) - return zero -} - -var zero reflect.Value - -// at marks the state to be on node n, for error reporting. -func (s *state) at(node parse.Node) { - s.node = node -} - -// doublePercent returns the string with %'s replaced by %%, if necessary, -// so it can be used safely inside a Printf format string. -func doublePercent(str string) string { - if strings.Contains(str, "%") { - str = strings.Replace(str, "%", "%%", -1) - } - return str -} - -// errorf formats the error and terminates processing. 
-func (s *state) errorf(format string, args ...interface{}) { - name := doublePercent(s.tmpl.Name()) - if s.node == nil { - format = fmt.Sprintf("template: %s: %s", name, format) - } else { - location, context := s.tmpl.ErrorContext(s.node) - format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format) - } - panic(fmt.Errorf(format, args...)) -} - -// errRecover is the handler that turns panics into returns from the top -// level of Parse. -func errRecover(errp *error) { - e := recover() - if e != nil { - switch err := e.(type) { - case runtime.Error: - panic(e) - case error: - *errp = err - default: - panic(e) - } - } -} - -// ExecuteTemplate applies the template associated with t that has the given name -// to the specified data object and writes the output to wr. -// If an error occurs executing the template or writing its output, -// execution stops, but partial results may already have been written to -// the output writer. -// A template may be executed safely in parallel. -func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error { - tmpl := t.tmpl[name] - if tmpl == nil { - return fmt.Errorf("template: no template %q associated with template %q", name, t.name) - } - return tmpl.Execute(wr, data) -} - -// Execute applies a parsed template to the specified data object, -// and writes the output to wr. -// If an error occurs executing the template or writing its output, -// execution stops, but partial results may already have been written to -// the output writer. -// A template may be executed safely in parallel. 
-func (t *Template) Execute(wr io.Writer, data interface{}) (err error) { - defer errRecover(&err) - value := reflect.ValueOf(data) - state := &state{ - tmpl: t, - wr: wr, - vars: []variable{{"$", value}}, - } - t.init() - if t.Tree == nil || t.Root == nil { - var b bytes.Buffer - for name, tmpl := range t.tmpl { - if tmpl.Tree == nil || tmpl.Root == nil { - continue - } - if b.Len() > 0 { - b.WriteString(", ") - } - fmt.Fprintf(&b, "%q", name) - } - var s string - if b.Len() > 0 { - s = "; defined templates are: " + b.String() - } - state.errorf("%q is an incomplete or empty template%s", t.Name(), s) - } - state.walk(value, t.Root) - return -} - -// Walk functions step through the major pieces of the template structure, -// generating output as they go. -func (s *state) walk(dot reflect.Value, node parse.Node) { - s.at(node) - switch node := node.(type) { - case *parse.ActionNode: - // Do not pop variables so they persist until next end. - // Also, if the action declares variables, don't print the result. - val := s.evalPipeline(dot, node.Pipe) - if len(node.Pipe.Decl) == 0 { - s.printValue(node, val) - } - case *parse.IfNode: - s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList) - case *parse.ListNode: - for _, node := range node.Nodes { - s.walk(dot, node) - } - case *parse.RangeNode: - s.walkRange(dot, node) - case *parse.TemplateNode: - s.walkTemplate(dot, node) - case *parse.TextNode: - if _, err := s.wr.Write(node.Text); err != nil { - s.errorf("%s", err) - } - case *parse.WithNode: - s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList) - default: - s.errorf("unknown node: %s", node) - } -} - -// walkIfOrWith walks an 'if' or 'with' node. The two control structures -// are identical in behavior except that 'with' sets dot. 
-func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) { - defer s.pop(s.mark()) - val := s.evalPipeline(dot, pipe) - truth, ok := isTrue(val) - if !ok { - s.errorf("if/with can't use %v", val) - } - if truth { - if typ == parse.NodeWith { - s.walk(val, list) - } else { - s.walk(dot, list) - } - } else if elseList != nil { - s.walk(dot, elseList) - } -} - -// isTrue reports whether the value is 'true', in the sense of not the zero of its type, -// and whether the value has a meaningful truth value. -func isTrue(val reflect.Value) (truth, ok bool) { - if !val.IsValid() { - // Something like var x interface{}, never set. It's a form of nil. - return false, true - } - switch val.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - truth = val.Len() > 0 - case reflect.Bool: - truth = val.Bool() - case reflect.Complex64, reflect.Complex128: - truth = val.Complex() != 0 - case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: - truth = !val.IsNil() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - truth = val.Int() != 0 - case reflect.Float32, reflect.Float64: - truth = val.Float() != 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - truth = val.Uint() != 0 - case reflect.Struct: - truth = true // Struct values are always true. - default: - return - } - return truth, true -} - -func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) { - s.at(r) - defer s.pop(s.mark()) - val, _ := indirect(s.evalPipeline(dot, r.Pipe)) - // mark top of stack before any variables in the body are pushed. - mark := s.mark() - oneIteration := func(index, elem reflect.Value) { - // Set top var (lexically the second if there are two) to the element. - if len(r.Pipe.Decl) > 0 { - s.setVar(1, elem) - } - // Set next var (lexically the first if there are two) to the index. 
- if len(r.Pipe.Decl) > 1 { - s.setVar(2, index) - } - s.walk(elem, r.List) - s.pop(mark) - } - switch val.Kind() { - case reflect.Array, reflect.Slice: - if val.Len() == 0 { - break - } - for i := 0; i < val.Len(); i++ { - oneIteration(reflect.ValueOf(i), val.Index(i)) - } - return - case reflect.Map: - if val.Len() == 0 { - break - } - for _, key := range sortKeys(val.MapKeys()) { - oneIteration(key, val.MapIndex(key)) - } - return - case reflect.Chan: - if val.IsNil() { - break - } - i := 0 - for ; ; i++ { - elem, ok := val.Recv() - if !ok { - break - } - oneIteration(reflect.ValueOf(i), elem) - } - if i == 0 { - break - } - return - case reflect.Invalid: - break // An invalid value is likely a nil map, etc. and acts like an empty map. - default: - s.errorf("range can't iterate over %v", val) - } - if r.ElseList != nil { - s.walk(dot, r.ElseList) - } -} - -func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) { - s.at(t) - tmpl := s.tmpl.tmpl[t.Name] - if tmpl == nil { - s.errorf("template %q not defined", t.Name) - } - // Variables declared by the pipeline persist. - dot = s.evalPipeline(dot, t.Pipe) - newState := *s - newState.tmpl = tmpl - // No dynamic scoping: template invocations inherit no variables. - newState.vars = []variable{{"$", dot}} - newState.walk(dot, tmpl.Root) -} - -// Eval functions evaluate pipelines, commands, and their elements and extract -// values from the data structure by examining fields, calling methods, and so on. -// The printing of those values happens only through walk functions. - -// evalPipeline returns the value acquired by evaluating a pipeline. If the -// pipeline has a variable declaration, the variable will be pushed on the -// stack. Callers should therefore pop the stack after they are finished -// executing commands depending on the pipeline value. 
-func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) { - if pipe == nil { - return - } - s.at(pipe) - for _, cmd := range pipe.Cmds { - value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg. - // If the object has type interface{}, dig down one level to the thing inside. - if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 { - value = reflect.ValueOf(value.Interface()) // lovely! - } - } - for _, variable := range pipe.Decl { - s.push(variable.Ident[0], value) - } - return value -} - -func (s *state) notAFunction(args []parse.Node, final reflect.Value) { - if len(args) > 1 || final.IsValid() { - s.errorf("can't give argument to non-function %s", args[0]) - } -} - -func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value { - firstWord := cmd.Args[0] - switch n := firstWord.(type) { - case *parse.FieldNode: - return s.evalFieldNode(dot, n, cmd.Args, final) - case *parse.ChainNode: - return s.evalChainNode(dot, n, cmd.Args, final) - case *parse.IdentifierNode: - // Must be a function. - return s.evalFunction(dot, n, cmd, cmd.Args, final) - case *parse.PipeNode: - // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored. - return s.evalPipeline(dot, n) - case *parse.VariableNode: - return s.evalVariableNode(dot, n, cmd.Args, final) - } - s.at(firstWord) - s.notAFunction(cmd.Args, final) - switch word := firstWord.(type) { - case *parse.BoolNode: - return reflect.ValueOf(word.True) - case *parse.DotNode: - return dot - case *parse.NilNode: - s.errorf("nil is not a command") - case *parse.NumberNode: - return s.idealConstant(word) - case *parse.StringNode: - return reflect.ValueOf(word.Text) - } - s.errorf("can't evaluate command %q", firstWord) - panic("not reached") -} - -// idealConstant is called to return the value of a number in a context where -// we don't know the type. 
In that case, the syntax of the number tells us -// its type, and we use Go rules to resolve. Note there is no such thing as -// a uint ideal constant in this situation - the value must be of int type. -func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value { - // These are ideal constants but we don't know the type - // and we have no context. (If it was a method argument, - // we'd know what we need.) The syntax guides us to some extent. - s.at(constant) - switch { - case constant.IsComplex: - return reflect.ValueOf(constant.Complex128) // incontrovertible. - case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0: - return reflect.ValueOf(constant.Float64) - case constant.IsInt: - n := int(constant.Int64) - if int64(n) != constant.Int64 { - s.errorf("%s overflows int", constant.Text) - } - return reflect.ValueOf(n) - case constant.IsUint: - s.errorf("%s overflows int", constant.Text) - } - return zero -} - -func isHexConstant(s string) bool { - return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') -} - -func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value { - s.at(field) - return s.evalFieldChain(dot, dot, field, field.Ident, args, final) -} - -func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value { - s.at(chain) - // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields. - pipe := s.evalArg(dot, nil, chain.Node) - if len(chain.Field) == 0 { - s.errorf("internal error: no fields in evalChainNode") - } - return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final) -} - -func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value { - // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields. 
- s.at(variable) - value := s.varValue(variable.Ident[0]) - if len(variable.Ident) == 1 { - s.notAFunction(args, final) - return value - } - return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final) -} - -// evalFieldChain evaluates .X.Y.Z possibly followed by arguments. -// dot is the environment in which to evaluate arguments, while -// receiver is the value being walked along the chain. -func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value { - n := len(ident) - for i := 0; i < n-1; i++ { - receiver = s.evalField(dot, ident[i], node, nil, zero, receiver) - } - // Now if it's a method, it gets the arguments. - return s.evalField(dot, ident[n-1], node, args, final, receiver) -} - -func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value { - s.at(node) - name := node.Ident - function, ok := findFunction(name, s.tmpl) - if !ok { - s.errorf("%q is not a defined function", name) - } - return s.evalCall(dot, function, cmd, name, args, final) -} - -// evalField evaluates an expression like (.Field) or (.Field arg1 arg2). -// The 'final' argument represents the return value from the preceding -// value of the pipeline, if any. -func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value { - if !receiver.IsValid() { - return zero - } - typ := receiver.Type() - receiver, _ = indirect(receiver) - // Unless it's an interface, need to get to a value of type *T to guarantee - // we see all methods of T and *T. 
- ptr := receiver - if ptr.Kind() != reflect.Interface && ptr.CanAddr() { - ptr = ptr.Addr() - } - if method := ptr.MethodByName(fieldName); method.IsValid() { - return s.evalCall(dot, method, node, fieldName, args, final) - } - hasArgs := len(args) > 1 || final.IsValid() - // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil. - receiver, isNil := indirect(receiver) - if isNil { - s.errorf("nil pointer evaluating %s.%s", typ, fieldName) - } - switch receiver.Kind() { - case reflect.Struct: - tField, ok := receiver.Type().FieldByName(fieldName) - if ok { - field := receiver.FieldByIndex(tField.Index) - if tField.PkgPath != "" { // field is unexported - s.errorf("%s is an unexported field of struct type %s", fieldName, typ) - } - // If it's a function, we must call it. - if hasArgs { - s.errorf("%s has arguments but cannot be invoked as function", fieldName) - } - return field - } - s.errorf("%s is not a field of struct type %s", fieldName, typ) - case reflect.Map: - // If it's a map, attempt to use the field name as a key. - nameVal := reflect.ValueOf(fieldName) - if nameVal.Type().AssignableTo(receiver.Type().Key()) { - if hasArgs { - s.errorf("%s is not a method but has arguments", fieldName) - } - return receiver.MapIndex(nameVal) - } - } - s.errorf("can't evaluate field %s in type %s", fieldName, typ) - panic("not reached") -} - -var ( - errorType = reflect.TypeOf((*error)(nil)).Elem() - fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() -) - -// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so -// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0] -// as the function itself. 
-func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value { - if args != nil { - args = args[1:] // Zeroth arg is function name/node; not passed to function. - } - typ := fun.Type() - numIn := len(args) - if final.IsValid() { - numIn++ - } - numFixed := len(args) - if typ.IsVariadic() { - numFixed = typ.NumIn() - 1 // last arg is the variadic one. - if numIn < numFixed { - s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args)) - } - } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() { - s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args)) - } - if !goodFunc(typ) { - // TODO: This could still be a confusing error; maybe goodFunc should provide info. - s.errorf("can't call method/function %q with %d results", name, typ.NumOut()) - } - // Build the arg list. - argv := make([]reflect.Value, numIn) - // Args must be evaluated. Fixed args first. - i := 0 - for ; i < numFixed && i < len(args); i++ { - argv[i] = s.evalArg(dot, typ.In(i), args[i]) - } - // Now the ... args. - if typ.IsVariadic() { - argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice. - for ; i < len(args); i++ { - argv[i] = s.evalArg(dot, argType, args[i]) - } - } - // Add final value if necessary. - if final.IsValid() { - t := typ.In(typ.NumIn() - 1) - if typ.IsVariadic() { - t = t.Elem() - } - argv[i] = s.validateType(final, t) - } - result := fun.Call(argv) - // If we have an error that is not nil, stop execution and return that error to the caller. - if len(result) == 2 && !result[1].IsNil() { - s.at(node) - s.errorf("error calling %s: %s", name, result[1].Interface().(error)) - } - return result[0] -} - -// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. 
-func canBeNil(typ reflect.Type) bool { - switch typ.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return true - } - return false -} - -// validateType guarantees that the value is valid and assignable to the type. -func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value { - if !value.IsValid() { - if typ == nil || canBeNil(typ) { - // An untyped nil interface{}. Accept as a proper nil value. - return reflect.Zero(typ) - } - s.errorf("invalid value; expected %s", typ) - } - if typ != nil && !value.Type().AssignableTo(typ) { - if value.Kind() == reflect.Interface && !value.IsNil() { - value = value.Elem() - if value.Type().AssignableTo(typ) { - return value - } - // fallthrough - } - // Does one dereference or indirection work? We could do more, as we - // do with method receivers, but that gets messy and method receivers - // are much more constrained, so it makes more sense there than here. - // Besides, one is almost always all you need. 
- switch { - case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ): - value = value.Elem() - if !value.IsValid() { - s.errorf("dereference of nil pointer of type %s", typ) - } - case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr(): - value = value.Addr() - default: - s.errorf("wrong type for value; expected %s; got %s", typ, value.Type()) - } - } - return value -} - -func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - switch arg := n.(type) { - case *parse.DotNode: - return s.validateType(dot, typ) - case *parse.NilNode: - if canBeNil(typ) { - return reflect.Zero(typ) - } - s.errorf("cannot assign nil to %s", typ) - case *parse.FieldNode: - return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ) - case *parse.VariableNode: - return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ) - case *parse.PipeNode: - return s.validateType(s.evalPipeline(dot, arg), typ) - case *parse.IdentifierNode: - return s.evalFunction(dot, arg, arg, nil, zero) - case *parse.ChainNode: - return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ) - } - switch typ.Kind() { - case reflect.Bool: - return s.evalBool(typ, n) - case reflect.Complex64, reflect.Complex128: - return s.evalComplex(typ, n) - case reflect.Float32, reflect.Float64: - return s.evalFloat(typ, n) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return s.evalInteger(typ, n) - case reflect.Interface: - if typ.NumMethod() == 0 { - return s.evalEmptyInterface(dot, n) - } - case reflect.String: - return s.evalString(typ, n) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return s.evalUnsignedInteger(typ, n) - } - s.errorf("can't handle %s for arg of type %s", n, typ) - panic("not reached") -} - -func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.BoolNode); ok { - 
value := reflect.New(typ).Elem() - value.SetBool(n.True) - return value - } - s.errorf("expected bool; found %s", n) - panic("not reached") -} - -func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.StringNode); ok { - value := reflect.New(typ).Elem() - value.SetString(n.Text) - return value - } - s.errorf("expected string; found %s", n) - panic("not reached") -} - -func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsInt { - value := reflect.New(typ).Elem() - value.SetInt(n.Int64) - return value - } - s.errorf("expected integer; found %s", n) - panic("not reached") -} - -func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsUint { - value := reflect.New(typ).Elem() - value.SetUint(n.Uint64) - return value - } - s.errorf("expected unsigned integer; found %s", n) - panic("not reached") -} - -func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsFloat { - value := reflect.New(typ).Elem() - value.SetFloat(n.Float64) - return value - } - s.errorf("expected float; found %s", n) - panic("not reached") -} - -func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value { - if n, ok := n.(*parse.NumberNode); ok && n.IsComplex { - value := reflect.New(typ).Elem() - value.SetComplex(n.Complex128) - return value - } - s.errorf("expected complex; found %s", n) - panic("not reached") -} - -func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value { - s.at(n) - switch n := n.(type) { - case *parse.BoolNode: - return reflect.ValueOf(n.True) - case *parse.DotNode: - return dot - case *parse.FieldNode: - return s.evalFieldNode(dot, n, nil, zero) - case *parse.IdentifierNode: - return s.evalFunction(dot, n, n, nil, zero) - case *parse.NilNode: - // 
NilNode is handled in evalArg, the only place that calls here. - s.errorf("evalEmptyInterface: nil (can't happen)") - case *parse.NumberNode: - return s.idealConstant(n) - case *parse.StringNode: - return reflect.ValueOf(n.Text) - case *parse.VariableNode: - return s.evalVariableNode(dot, n, nil, zero) - case *parse.PipeNode: - return s.evalPipeline(dot, n) - } - s.errorf("can't handle assignment of %s to empty interface argument", n) - panic("not reached") -} - -// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. -// We indirect through pointers and empty interfaces (only) because -// non-empty interfaces have methods we might need. -func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { - for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { - if v.IsNil() { - return v, true - } - if v.Kind() == reflect.Interface && v.NumMethod() > 0 { - break - } - } - return v, false -} - -// printValue writes the textual representation of the value to the output of -// the template. -func (s *state) printValue(n parse.Node, v reflect.Value) { - s.at(n) - iface, ok := printableValue(v) - if !ok { - s.errorf("can't print %s of type %s", n, v.Type()) - } - fmt.Fprint(s.wr, iface) -} - -// printableValue returns the, possibly indirected, interface value inside v that -// is best for a call to formatted printer. -func printableValue(v reflect.Value) (interface{}, bool) { - if v.Kind() == reflect.Ptr { - v, _ = indirect(v) // fmt.Fprint handles nil. - } - if !v.IsValid() { - return "", true - } - - if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { - if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { - v = v.Addr() - } else { - switch v.Kind() { - case reflect.Chan, reflect.Func: - return nil, false - } - } - } - return v.Interface(), true -} - -// Types to help sort the keys in a map for reproducible output. 
- -type rvs []reflect.Value - -func (x rvs) Len() int { return len(x) } -func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -type rvInts struct{ rvs } - -func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() } - -type rvUints struct{ rvs } - -func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() } - -type rvFloats struct{ rvs } - -func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() } - -type rvStrings struct{ rvs } - -func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() } - -// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys. -func sortKeys(v []reflect.Value) []reflect.Value { - if len(v) <= 1 { - return v - } - switch v[0].Kind() { - case reflect.Float32, reflect.Float64: - sort.Sort(rvFloats{v}) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - sort.Sort(rvInts{v}) - case reflect.String: - sort.Sort(rvStrings{v}) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - sort.Sort(rvUints{v}) - } - return v -} diff --git a/vendor/github.com/alecthomas/template/funcs.go b/vendor/github.com/alecthomas/template/funcs.go deleted file mode 100644 index 39ee5ed68f..0000000000 --- a/vendor/github.com/alecthomas/template/funcs.go +++ /dev/null @@ -1,598 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "bytes" - "errors" - "fmt" - "io" - "net/url" - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -// FuncMap is the type of the map defining the mapping from names to functions. -// Each function must have either a single return value, or two return values of -// which the second has type error. 
In that case, if the second (error) -// return value evaluates to non-nil during execution, execution terminates and -// Execute returns that error. -type FuncMap map[string]interface{} - -var builtins = FuncMap{ - "and": and, - "call": call, - "html": HTMLEscaper, - "index": index, - "js": JSEscaper, - "len": length, - "not": not, - "or": or, - "print": fmt.Sprint, - "printf": fmt.Sprintf, - "println": fmt.Sprintln, - "urlquery": URLQueryEscaper, - - // Comparisons - "eq": eq, // == - "ge": ge, // >= - "gt": gt, // > - "le": le, // <= - "lt": lt, // < - "ne": ne, // != -} - -var builtinFuncs = createValueFuncs(builtins) - -// createValueFuncs turns a FuncMap into a map[string]reflect.Value -func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { - m := make(map[string]reflect.Value) - addValueFuncs(m, funcMap) - return m -} - -// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. -func addValueFuncs(out map[string]reflect.Value, in FuncMap) { - for name, fn := range in { - v := reflect.ValueOf(fn) - if v.Kind() != reflect.Func { - panic("value for " + name + " not a function") - } - if !goodFunc(v.Type()) { - panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) - } - out[name] = v - } -} - -// addFuncs adds to values the functions in funcs. It does no checking of the input - -// call addValueFuncs first. -func addFuncs(out, in FuncMap) { - for name, fn := range in { - out[name] = fn - } -} - -// goodFunc checks that the function or method has the right result signature. -func goodFunc(typ reflect.Type) bool { - // We allow functions with 1 result or 2 results where the second is an error. - switch { - case typ.NumOut() == 1: - return true - case typ.NumOut() == 2 && typ.Out(1) == errorType: - return true - } - return false -} - -// findFunction looks for a function in the template, and global map. 
-func findFunction(name string, tmpl *Template) (reflect.Value, bool) { - if tmpl != nil && tmpl.common != nil { - if fn := tmpl.execFuncs[name]; fn.IsValid() { - return fn, true - } - } - if fn := builtinFuncs[name]; fn.IsValid() { - return fn, true - } - return reflect.Value{}, false -} - -// Indexing. - -// index returns the result of indexing its first argument by the following -// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each -// indexed item must be a map, slice, or array. -func index(item interface{}, indices ...interface{}) (interface{}, error) { - v := reflect.ValueOf(item) - for _, i := range indices { - index := reflect.ValueOf(i) - var isNil bool - if v, isNil = indirect(v); isNil { - return nil, fmt.Errorf("index of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.String: - var x int64 - switch index.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x = index.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x = int64(index.Uint()) - default: - return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) - } - if x < 0 || x >= int64(v.Len()) { - return nil, fmt.Errorf("index out of range: %d", x) - } - v = v.Index(int(x)) - case reflect.Map: - if !index.IsValid() { - index = reflect.Zero(v.Type().Key()) - } - if !index.Type().AssignableTo(v.Type().Key()) { - return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) - } - if x := v.MapIndex(index); x.IsValid() { - v = x - } else { - v = reflect.Zero(v.Type().Elem()) - } - default: - return nil, fmt.Errorf("can't index item of type %s", v.Type()) - } - } - return v.Interface(), nil -} - -// Length - -// length returns the length of the item, with an error if it has no defined length. 
-func length(item interface{}) (int, error) { - v, isNil := indirect(reflect.ValueOf(item)) - if isNil { - return 0, fmt.Errorf("len of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: - return v.Len(), nil - } - return 0, fmt.Errorf("len of type %s", v.Type()) -} - -// Function invocation - -// call returns the result of evaluating the first argument as a function. -// The function must return 1 result, or 2 results, the second of which is an error. -func call(fn interface{}, args ...interface{}) (interface{}, error) { - v := reflect.ValueOf(fn) - typ := v.Type() - if typ.Kind() != reflect.Func { - return nil, fmt.Errorf("non-function of type %s", typ) - } - if !goodFunc(typ) { - return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) - } - numIn := typ.NumIn() - var dddType reflect.Type - if typ.IsVariadic() { - if len(args) < numIn-1 { - return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) - } - dddType = typ.In(numIn - 1).Elem() - } else { - if len(args) != numIn { - return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) - } - } - argv := make([]reflect.Value, len(args)) - for i, arg := range args { - value := reflect.ValueOf(arg) - // Compute the expected type. Clumsy because of variadics. - var argType reflect.Type - if !typ.IsVariadic() || i < numIn-1 { - argType = typ.In(i) - } else { - argType = dddType - } - if !value.IsValid() && canBeNil(argType) { - value = reflect.Zero(argType) - } - if !value.Type().AssignableTo(argType) { - return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) - } - argv[i] = value - } - result := v.Call(argv) - if len(result) == 2 && !result[1].IsNil() { - return result[0].Interface(), result[1].Interface().(error) - } - return result[0].Interface(), nil -} - -// Boolean logic. 
- -func truth(a interface{}) bool { - t, _ := isTrue(reflect.ValueOf(a)) - return t -} - -// and computes the Boolean AND of its arguments, returning -// the first false argument it encounters, or the last argument. -func and(arg0 interface{}, args ...interface{}) interface{} { - if !truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if !truth(arg0) { - break - } - } - return arg0 -} - -// or computes the Boolean OR of its arguments, returning -// the first true argument it encounters, or the last argument. -func or(arg0 interface{}, args ...interface{}) interface{} { - if truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if truth(arg0) { - break - } - } - return arg0 -} - -// not returns the Boolean negation of its argument. -func not(arg interface{}) (truth bool) { - truth, _ = isTrue(reflect.ValueOf(arg)) - return !truth -} - -// Comparison. - -// TODO: Perhaps allow comparison between signed and unsigned integers. - -var ( - errBadComparisonType = errors.New("invalid type for comparison") - errBadComparison = errors.New("incompatible types for comparison") - errNoComparison = errors.New("missing argument for comparison") -) - -type kind int - -const ( - invalidKind kind = iota - boolKind - complexKind - intKind - floatKind - integerKind - stringKind - uintKind -) - -func basicKind(v reflect.Value) (kind, error) { - switch v.Kind() { - case reflect.Bool: - return boolKind, nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intKind, nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintKind, nil - case reflect.Float32, reflect.Float64: - return floatKind, nil - case reflect.Complex64, reflect.Complex128: - return complexKind, nil - case reflect.String: - return stringKind, nil - } - return invalidKind, errBadComparisonType -} - -// eq evaluates the comparison a == b || a == c || ... 
-func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - if len(arg2) == 0 { - return false, errNoComparison - } - for _, arg := range arg2 { - v2 := reflect.ValueOf(arg) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. - switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind: - truth = v1.Bool() == v2.Bool() - case complexKind: - truth = v1.Complex() == v2.Complex() - case floatKind: - truth = v1.Float() == v2.Float() - case intKind: - truth = v1.Int() == v2.Int() - case stringKind: - truth = v1.String() == v2.String() - case uintKind: - truth = v1.Uint() == v2.Uint() - default: - panic("invalid kind") - } - } - if truth { - return true, nil - } - } - return false, nil -} - -// ne evaluates the comparison a != b. -func ne(arg1, arg2 interface{}) (bool, error) { - // != is the inverse of ==. - equal, err := eq(arg1, arg2) - return !equal, err -} - -// lt evaluates the comparison a < b. -func lt(arg1, arg2 interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - v2 := reflect.ValueOf(arg2) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. 
- switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind, complexKind: - return false, errBadComparisonType - case floatKind: - truth = v1.Float() < v2.Float() - case intKind: - truth = v1.Int() < v2.Int() - case stringKind: - truth = v1.String() < v2.String() - case uintKind: - truth = v1.Uint() < v2.Uint() - default: - panic("invalid kind") - } - } - return truth, nil -} - -// le evaluates the comparison <= b. -func le(arg1, arg2 interface{}) (bool, error) { - // <= is < or ==. - lessThan, err := lt(arg1, arg2) - if lessThan || err != nil { - return lessThan, err - } - return eq(arg1, arg2) -} - -// gt evaluates the comparison a > b. -func gt(arg1, arg2 interface{}) (bool, error) { - // > is the inverse of <=. - lessOrEqual, err := le(arg1, arg2) - if err != nil { - return false, err - } - return !lessOrEqual, nil -} - -// ge evaluates the comparison a >= b. -func ge(arg1, arg2 interface{}) (bool, error) { - // >= is the inverse of <. - lessThan, err := lt(arg1, arg2) - if err != nil { - return false, err - } - return !lessThan, nil -} - -// HTML escaping. - -var ( - htmlQuot = []byte(""") // shorter than """ - htmlApos = []byte("'") // shorter than "'" and apos was not in HTML until HTML5 - htmlAmp = []byte("&") - htmlLt = []byte("<") - htmlGt = []byte(">") -) - -// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b. 
-func HTMLEscape(w io.Writer, b []byte) { - last := 0 - for i, c := range b { - var html []byte - switch c { - case '"': - html = htmlQuot - case '\'': - html = htmlApos - case '&': - html = htmlAmp - case '<': - html = htmlLt - case '>': - html = htmlGt - default: - continue - } - w.Write(b[last:i]) - w.Write(html) - last = i + 1 - } - w.Write(b[last:]) -} - -// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s. -func HTMLEscapeString(s string) string { - // Avoid allocation if we can. - if strings.IndexAny(s, `'"&<>`) < 0 { - return s - } - var b bytes.Buffer - HTMLEscape(&b, []byte(s)) - return b.String() -} - -// HTMLEscaper returns the escaped HTML equivalent of the textual -// representation of its arguments. -func HTMLEscaper(args ...interface{}) string { - return HTMLEscapeString(evalArgs(args)) -} - -// JavaScript escaping. - -var ( - jsLowUni = []byte(`\u00`) - hex = []byte("0123456789ABCDEF") - - jsBackslash = []byte(`\\`) - jsApos = []byte(`\'`) - jsQuot = []byte(`\"`) - jsLt = []byte(`\x3C`) - jsGt = []byte(`\x3E`) -) - -// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b. -func JSEscape(w io.Writer, b []byte) { - last := 0 - for i := 0; i < len(b); i++ { - c := b[i] - - if !jsIsSpecial(rune(c)) { - // fast path: nothing to do - continue - } - w.Write(b[last:i]) - - if c < utf8.RuneSelf { - // Quotes, slashes and angle brackets get quoted. - // Control characters get written as \u00XX. - switch c { - case '\\': - w.Write(jsBackslash) - case '\'': - w.Write(jsApos) - case '"': - w.Write(jsQuot) - case '<': - w.Write(jsLt) - case '>': - w.Write(jsGt) - default: - w.Write(jsLowUni) - t, b := c>>4, c&0x0f - w.Write(hex[t : t+1]) - w.Write(hex[b : b+1]) - } - } else { - // Unicode rune. 
- r, size := utf8.DecodeRune(b[i:]) - if unicode.IsPrint(r) { - w.Write(b[i : i+size]) - } else { - fmt.Fprintf(w, "\\u%04X", r) - } - i += size - 1 - } - last = i + 1 - } - w.Write(b[last:]) -} - -// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s. -func JSEscapeString(s string) string { - // Avoid allocation if we can. - if strings.IndexFunc(s, jsIsSpecial) < 0 { - return s - } - var b bytes.Buffer - JSEscape(&b, []byte(s)) - return b.String() -} - -func jsIsSpecial(r rune) bool { - switch r { - case '\\', '\'', '"', '<', '>': - return true - } - return r < ' ' || utf8.RuneSelf <= r -} - -// JSEscaper returns the escaped JavaScript equivalent of the textual -// representation of its arguments. -func JSEscaper(args ...interface{}) string { - return JSEscapeString(evalArgs(args)) -} - -// URLQueryEscaper returns the escaped value of the textual representation of -// its arguments in a form suitable for embedding in a URL query. -func URLQueryEscaper(args ...interface{}) string { - return url.QueryEscape(evalArgs(args)) -} - -// evalArgs formats the list of arguments into a string. It is therefore equivalent to -// fmt.Sprint(args...) -// except that each argument is indirected (if a pointer), as required, -// using the same rules as the default string evaluation during template -// execution. -func evalArgs(args []interface{}) string { - ok := false - var s string - // Fast path for simple common case. - if len(args) == 1 { - s, ok = args[0].(string) - } - if !ok { - for i, arg := range args { - a, ok := printableValue(reflect.ValueOf(arg)) - if ok { - args[i] = a - } // else left fmt do its thing - } - s = fmt.Sprint(args...) - } - return s -} diff --git a/vendor/github.com/alecthomas/template/helper.go b/vendor/github.com/alecthomas/template/helper.go deleted file mode 100644 index 3636fb54d6..0000000000 --- a/vendor/github.com/alecthomas/template/helper.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2011 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Helper functions to make constructing templates easier. - -package template - -import ( - "fmt" - "io/ioutil" - "path/filepath" -) - -// Functions and methods to parse templates. - -// Must is a helper that wraps a call to a function returning (*Template, error) -// and panics if the error is non-nil. It is intended for use in variable -// initializations such as -// var t = template.Must(template.New("name").Parse("text")) -func Must(t *Template, err error) *Template { - if err != nil { - panic(err) - } - return t -} - -// ParseFiles creates a new Template and parses the template definitions from -// the named files. The returned template's name will have the (base) name and -// (parsed) contents of the first file. There must be at least one file. -// If an error occurs, parsing stops and the returned *Template is nil. -func ParseFiles(filenames ...string) (*Template, error) { - return parseFiles(nil, filenames...) -} - -// ParseFiles parses the named files and associates the resulting templates with -// t. If an error occurs, parsing stops and the returned template is nil; -// otherwise it is t. There must be at least one file. -func (t *Template) ParseFiles(filenames ...string) (*Template, error) { - return parseFiles(t, filenames...) -} - -// parseFiles is the helper for the method and function. If the argument -// template is nil, it is created from the first file. -func parseFiles(t *Template, filenames ...string) (*Template, error) { - if len(filenames) == 0 { - // Not really a problem, but be consistent. 
- return nil, fmt.Errorf("template: no files named in call to ParseFiles") - } - for _, filename := range filenames { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - s := string(b) - name := filepath.Base(filename) - // First template becomes return value if not already defined, - // and we use that one for subsequent New calls to associate - // all the templates together. Also, if this file has the same name - // as t, this file becomes the contents of t, so - // t, err := New(name).Funcs(xxx).ParseFiles(name) - // works. Otherwise we create a new template associated with t. - var tmpl *Template - if t == nil { - t = New(name) - } - if name == t.Name() { - tmpl = t - } else { - tmpl = t.New(name) - } - _, err = tmpl.Parse(s) - if err != nil { - return nil, err - } - } - return t, nil -} - -// ParseGlob creates a new Template and parses the template definitions from the -// files identified by the pattern, which must match at least one file. The -// returned template will have the (base) name and (parsed) contents of the -// first file matched by the pattern. ParseGlob is equivalent to calling -// ParseFiles with the list of files matched by the pattern. -func ParseGlob(pattern string) (*Template, error) { - return parseGlob(nil, pattern) -} - -// ParseGlob parses the template definitions in the files identified by the -// pattern and associates the resulting templates with t. The pattern is -// processed by filepath.Glob and must match at least one file. ParseGlob is -// equivalent to calling t.ParseFiles with the list of files matched by the -// pattern. -func (t *Template) ParseGlob(pattern string) (*Template, error) { - return parseGlob(t, pattern) -} - -// parseGlob is the implementation of the function and method ParseGlob. 
-func parseGlob(t *Template, pattern string) (*Template, error) { - filenames, err := filepath.Glob(pattern) - if err != nil { - return nil, err - } - if len(filenames) == 0 { - return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) - } - return parseFiles(t, filenames...) -} diff --git a/vendor/github.com/alecthomas/template/parse/lex.go b/vendor/github.com/alecthomas/template/parse/lex.go deleted file mode 100644 index 55f1c051e8..0000000000 --- a/vendor/github.com/alecthomas/template/parse/lex.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package parse - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -// item represents a token or text string returned from the scanner. -type item struct { - typ itemType // The type of this item. - pos Pos // The starting position, in bytes, of this item in the input string. - val string // The value of this item. -} - -func (i item) String() string { - switch { - case i.typ == itemEOF: - return "EOF" - case i.typ == itemError: - return i.val - case i.typ > itemKeyword: - return fmt.Sprintf("<%s>", i.val) - case len(i.val) > 10: - return fmt.Sprintf("%.10q...", i.val) - } - return fmt.Sprintf("%q", i.val) -} - -// itemType identifies the type of lex items. -type itemType int - -const ( - itemError itemType = iota // error occurred; value is text of error - itemBool // boolean constant - itemChar // printable ASCII character; grab bag for comma etc. - itemCharConstant // character constant - itemComplex // complex constant (1+2i); imaginary is just a number - itemColonEquals // colon-equals (':=') introducing a declaration - itemEOF - itemField // alphanumeric identifier starting with '.' - itemIdentifier // alphanumeric identifier not starting with '.' 
- itemLeftDelim // left action delimiter - itemLeftParen // '(' inside action - itemNumber // simple number, including imaginary - itemPipe // pipe symbol - itemRawString // raw quoted string (includes quotes) - itemRightDelim // right action delimiter - itemElideNewline // elide newline after right delim - itemRightParen // ')' inside action - itemSpace // run of spaces separating arguments - itemString // quoted string (includes quotes) - itemText // plain text - itemVariable // variable starting with '$', such as '$' or '$1' or '$hello' - // Keywords appear after all the rest. - itemKeyword // used only to delimit the keywords - itemDot // the cursor, spelled '.' - itemDefine // define keyword - itemElse // else keyword - itemEnd // end keyword - itemIf // if keyword - itemNil // the untyped nil constant, easiest to treat as a keyword - itemRange // range keyword - itemTemplate // template keyword - itemWith // with keyword -) - -var key = map[string]itemType{ - ".": itemDot, - "define": itemDefine, - "else": itemElse, - "end": itemEnd, - "if": itemIf, - "range": itemRange, - "nil": itemNil, - "template": itemTemplate, - "with": itemWith, -} - -const eof = -1 - -// stateFn represents the state of the scanner as a function that returns the next state. -type stateFn func(*lexer) stateFn - -// lexer holds the state of the scanner. -type lexer struct { - name string // the name of the input; used only for error reports - input string // the string being scanned - leftDelim string // start of action - rightDelim string // end of action - state stateFn // the next lexing function to enter - pos Pos // current position in the input - start Pos // start position of this item - width Pos // width of last rune read from input - lastPos Pos // position of most recent item returned by nextItem - items chan item // channel of scanned items - parenDepth int // nesting depth of ( ) exprs -} - -// next returns the next rune in the input. 
-func (l *lexer) next() rune { - if int(l.pos) >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = Pos(w) - l.pos += l.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (l *lexer) backup() { - l.pos -= l.width -} - -// emit passes an item back to the client. -func (l *lexer) emit(t itemType) { - l.items <- item{t, l.start, l.input[l.start:l.pos]} - l.start = l.pos -} - -// ignore skips over the pending input before this point. -func (l *lexer) ignore() { - l.start = l.pos -} - -// accept consumes the next rune if it's from the valid set. -func (l *lexer) accept(valid string) bool { - if strings.IndexRune(valid, l.next()) >= 0 { - return true - } - l.backup() - return false -} - -// acceptRun consumes a run of runes from the valid set. -func (l *lexer) acceptRun(valid string) { - for strings.IndexRune(valid, l.next()) >= 0 { - } - l.backup() -} - -// lineNumber reports which line we're on, based on the position of -// the previous item returned by nextItem. Doing it this way -// means we don't have to worry about peek double counting. -func (l *lexer) lineNumber() int { - return 1 + strings.Count(l.input[:l.lastPos], "\n") -} - -// errorf returns an error token and terminates the scan by passing -// back a nil pointer that will be the next state, terminating l.nextItem. -func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} - return nil -} - -// nextItem returns the next item from the input. -func (l *lexer) nextItem() item { - item := <-l.items - l.lastPos = item.pos - return item -} - -// lex creates a new scanner for the input string. 
-func lex(name, input, left, right string) *lexer { - if left == "" { - left = leftDelim - } - if right == "" { - right = rightDelim - } - l := &lexer{ - name: name, - input: input, - leftDelim: left, - rightDelim: right, - items: make(chan item), - } - go l.run() - return l -} - -// run runs the state machine for the lexer. -func (l *lexer) run() { - for l.state = lexText; l.state != nil; { - l.state = l.state(l) - } -} - -// state functions - -const ( - leftDelim = "{{" - rightDelim = "}}" - leftComment = "/*" - rightComment = "*/" -) - -// lexText scans until an opening action delimiter, "{{". -func lexText(l *lexer) stateFn { - for { - if strings.HasPrefix(l.input[l.pos:], l.leftDelim) { - if l.pos > l.start { - l.emit(itemText) - } - return lexLeftDelim - } - if l.next() == eof { - break - } - } - // Correctly reached EOF. - if l.pos > l.start { - l.emit(itemText) - } - l.emit(itemEOF) - return nil -} - -// lexLeftDelim scans the left delimiter, which is known to be present. -func lexLeftDelim(l *lexer) stateFn { - l.pos += Pos(len(l.leftDelim)) - if strings.HasPrefix(l.input[l.pos:], leftComment) { - return lexComment - } - l.emit(itemLeftDelim) - l.parenDepth = 0 - return lexInsideAction -} - -// lexComment scans a comment. The left comment marker is known to be present. -func lexComment(l *lexer) stateFn { - l.pos += Pos(len(leftComment)) - i := strings.Index(l.input[l.pos:], rightComment) - if i < 0 { - return l.errorf("unclosed comment") - } - l.pos += Pos(i + len(rightComment)) - if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) { - return l.errorf("comment ends before closing delimiter") - - } - l.pos += Pos(len(l.rightDelim)) - l.ignore() - return lexText -} - -// lexRightDelim scans the right delimiter, which is known to be present. 
-func lexRightDelim(l *lexer) stateFn { - l.pos += Pos(len(l.rightDelim)) - l.emit(itemRightDelim) - if l.peek() == '\\' { - l.pos++ - l.emit(itemElideNewline) - } - return lexText -} - -// lexInsideAction scans the elements inside action delimiters. -func lexInsideAction(l *lexer) stateFn { - // Either number, quoted string, or identifier. - // Spaces separate arguments; runs of spaces turn into itemSpace. - // Pipe symbols separate and are emitted. - if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) { - if l.parenDepth == 0 { - return lexRightDelim - } - return l.errorf("unclosed left paren") - } - switch r := l.next(); { - case r == eof || isEndOfLine(r): - return l.errorf("unclosed action") - case isSpace(r): - return lexSpace - case r == ':': - if l.next() != '=' { - return l.errorf("expected :=") - } - l.emit(itemColonEquals) - case r == '|': - l.emit(itemPipe) - case r == '"': - return lexQuote - case r == '`': - return lexRawQuote - case r == '$': - return lexVariable - case r == '\'': - return lexChar - case r == '.': - // special look-ahead for ".field" so we don't break l.backup(). - if l.pos < Pos(len(l.input)) { - r := l.input[l.pos] - if r < '0' || '9' < r { - return lexField - } - } - fallthrough // '.' can start a number. - case r == '+' || r == '-' || ('0' <= r && r <= '9'): - l.backup() - return lexNumber - case isAlphaNumeric(r): - l.backup() - return lexIdentifier - case r == '(': - l.emit(itemLeftParen) - l.parenDepth++ - return lexInsideAction - case r == ')': - l.emit(itemRightParen) - l.parenDepth-- - if l.parenDepth < 0 { - return l.errorf("unexpected right paren %#U", r) - } - return lexInsideAction - case r <= unicode.MaxASCII && unicode.IsPrint(r): - l.emit(itemChar) - return lexInsideAction - default: - return l.errorf("unrecognized character in action: %#U", r) - } - return lexInsideAction -} - -// lexSpace scans a run of space characters. -// One space has already been seen. 
-func lexSpace(l *lexer) stateFn { - for isSpace(l.peek()) { - l.next() - } - l.emit(itemSpace) - return lexInsideAction -} - -// lexIdentifier scans an alphanumeric. -func lexIdentifier(l *lexer) stateFn { -Loop: - for { - switch r := l.next(); { - case isAlphaNumeric(r): - // absorb. - default: - l.backup() - word := l.input[l.start:l.pos] - if !l.atTerminator() { - return l.errorf("bad character %#U", r) - } - switch { - case key[word] > itemKeyword: - l.emit(key[word]) - case word[0] == '.': - l.emit(itemField) - case word == "true", word == "false": - l.emit(itemBool) - default: - l.emit(itemIdentifier) - } - break Loop - } - } - return lexInsideAction -} - -// lexField scans a field: .Alphanumeric. -// The . has been scanned. -func lexField(l *lexer) stateFn { - return lexFieldOrVariable(l, itemField) -} - -// lexVariable scans a Variable: $Alphanumeric. -// The $ has been scanned. -func lexVariable(l *lexer) stateFn { - if l.atTerminator() { // Nothing interesting follows -> "$". - l.emit(itemVariable) - return lexInsideAction - } - return lexFieldOrVariable(l, itemVariable) -} - -// lexVariable scans a field or variable: [.$]Alphanumeric. -// The . or $ has been scanned. -func lexFieldOrVariable(l *lexer, typ itemType) stateFn { - if l.atTerminator() { // Nothing interesting follows -> "." or "$". - if typ == itemVariable { - l.emit(itemVariable) - } else { - l.emit(itemDot) - } - return lexInsideAction - } - var r rune - for { - r = l.next() - if !isAlphaNumeric(r) { - l.backup() - break - } - } - if !l.atTerminator() { - return l.errorf("bad character %#U", r) - } - l.emit(typ) - return lexInsideAction -} - -// atTerminator reports whether the input is at valid termination character to -// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases -// like "$x+2" not being acceptable without a space, in case we decide one -// day to implement arithmetic. 
-func (l *lexer) atTerminator() bool { - r := l.peek() - if isSpace(r) || isEndOfLine(r) { - return true - } - switch r { - case eof, '.', ',', '|', ':', ')', '(': - return true - } - // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will - // succeed but should fail) but only in extremely rare cases caused by willfully - // bad choice of delimiter. - if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r { - return true - } - return false -} - -// lexChar scans a character constant. The initial quote is already -// scanned. Syntax checking is done by the parser. -func lexChar(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case '\\': - if r := l.next(); r != eof && r != '\n' { - break - } - fallthrough - case eof, '\n': - return l.errorf("unterminated character constant") - case '\'': - break Loop - } - } - l.emit(itemCharConstant) - return lexInsideAction -} - -// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This -// isn't a perfect number scanner - for instance it accepts "." and "0x0.2" -// and "089" - but when it's wrong the input is invalid and the parser (via -// strconv) will notice. -func lexNumber(l *lexer) stateFn { - if !l.scanNumber() { - return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) - } - if sign := l.peek(); sign == '+' || sign == '-' { - // Complex: 1+2i. No spaces, must end in 'i'. - if !l.scanNumber() || l.input[l.pos-1] != 'i' { - return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) - } - l.emit(itemComplex) - } else { - l.emit(itemNumber) - } - return lexInsideAction -} - -func (l *lexer) scanNumber() bool { - // Optional leading sign. - l.accept("+-") - // Is it hex? - digits := "0123456789" - if l.accept("0") && l.accept("xX") { - digits = "0123456789abcdefABCDEF" - } - l.acceptRun(digits) - if l.accept(".") { - l.acceptRun(digits) - } - if l.accept("eE") { - l.accept("+-") - l.acceptRun("0123456789") - } - // Is it imaginary? 
- l.accept("i") - // Next thing mustn't be alphanumeric. - if isAlphaNumeric(l.peek()) { - l.next() - return false - } - return true -} - -// lexQuote scans a quoted string. -func lexQuote(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case '\\': - if r := l.next(); r != eof && r != '\n' { - break - } - fallthrough - case eof, '\n': - return l.errorf("unterminated quoted string") - case '"': - break Loop - } - } - l.emit(itemString) - return lexInsideAction -} - -// lexRawQuote scans a raw quoted string. -func lexRawQuote(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case eof, '\n': - return l.errorf("unterminated raw quoted string") - case '`': - break Loop - } - } - l.emit(itemRawString) - return lexInsideAction -} - -// isSpace reports whether r is a space character. -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -// isEndOfLine reports whether r is an end-of-line character. -func isEndOfLine(r rune) bool { - return r == '\r' || r == '\n' -} - -// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. -func isAlphaNumeric(r rune) bool { - return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) -} diff --git a/vendor/github.com/alecthomas/template/parse/node.go b/vendor/github.com/alecthomas/template/parse/node.go deleted file mode 100644 index 55c37f6dba..0000000000 --- a/vendor/github.com/alecthomas/template/parse/node.go +++ /dev/null @@ -1,834 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Parse nodes. - -package parse - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -var textFormat = "%s" // Changed to "%q" in tests for better error messages. - -// A Node is an element in the parse tree. The interface is trivial. -// The interface contains an unexported method so that only -// types local to this package can satisfy it. 
-type Node interface { - Type() NodeType - String() string - // Copy does a deep copy of the Node and all its components. - // To avoid type assertions, some XxxNodes also have specialized - // CopyXxx methods that return *XxxNode. - Copy() Node - Position() Pos // byte position of start of node in full original input string - // tree returns the containing *Tree. - // It is unexported so all implementations of Node are in this package. - tree() *Tree -} - -// NodeType identifies the type of a parse tree node. -type NodeType int - -// Pos represents a byte position in the original input text from which -// this template was parsed. -type Pos int - -func (p Pos) Position() Pos { - return p -} - -// Type returns itself and provides an easy default implementation -// for embedding in a Node. Embedded in all non-trivial Nodes. -func (t NodeType) Type() NodeType { - return t -} - -const ( - NodeText NodeType = iota // Plain text. - NodeAction // A non-control action such as a field evaluation. - NodeBool // A boolean constant. - NodeChain // A sequence of field accesses. - NodeCommand // An element of a pipeline. - NodeDot // The cursor, dot. - nodeElse // An else action. Not added to tree. - nodeEnd // An end action. Not added to tree. - NodeField // A field or method name. - NodeIdentifier // An identifier; always a function name. - NodeIf // An if action. - NodeList // A list of Nodes. - NodeNil // An untyped nil constant. - NodeNumber // A numerical constant. - NodePipe // A pipeline of commands. - NodeRange // A range action. - NodeString // A string constant. - NodeTemplate // A template invocation action. - NodeVariable // A $ variable. - NodeWith // A with action. -) - -// Nodes. - -// ListNode holds a sequence of nodes. -type ListNode struct { - NodeType - Pos - tr *Tree - Nodes []Node // The element nodes in lexical order. 
-} - -func (t *Tree) newList(pos Pos) *ListNode { - return &ListNode{tr: t, NodeType: NodeList, Pos: pos} -} - -func (l *ListNode) append(n Node) { - l.Nodes = append(l.Nodes, n) -} - -func (l *ListNode) tree() *Tree { - return l.tr -} - -func (l *ListNode) String() string { - b := new(bytes.Buffer) - for _, n := range l.Nodes { - fmt.Fprint(b, n) - } - return b.String() -} - -func (l *ListNode) CopyList() *ListNode { - if l == nil { - return l - } - n := l.tr.newList(l.Pos) - for _, elem := range l.Nodes { - n.append(elem.Copy()) - } - return n -} - -func (l *ListNode) Copy() Node { - return l.CopyList() -} - -// TextNode holds plain text. -type TextNode struct { - NodeType - Pos - tr *Tree - Text []byte // The text; may span newlines. -} - -func (t *Tree) newText(pos Pos, text string) *TextNode { - return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)} -} - -func (t *TextNode) String() string { - return fmt.Sprintf(textFormat, t.Text) -} - -func (t *TextNode) tree() *Tree { - return t.tr -} - -func (t *TextNode) Copy() Node { - return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)} -} - -// PipeNode holds a pipeline with optional declaration -type PipeNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Decl []*VariableNode // Variable declarations in lexical order. - Cmds []*CommandNode // The commands in lexical order. 
-} - -func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode { - return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl} -} - -func (p *PipeNode) append(command *CommandNode) { - p.Cmds = append(p.Cmds, command) -} - -func (p *PipeNode) String() string { - s := "" - if len(p.Decl) > 0 { - for i, v := range p.Decl { - if i > 0 { - s += ", " - } - s += v.String() - } - s += " := " - } - for i, c := range p.Cmds { - if i > 0 { - s += " | " - } - s += c.String() - } - return s -} - -func (p *PipeNode) tree() *Tree { - return p.tr -} - -func (p *PipeNode) CopyPipe() *PipeNode { - if p == nil { - return p - } - var decl []*VariableNode - for _, d := range p.Decl { - decl = append(decl, d.Copy().(*VariableNode)) - } - n := p.tr.newPipeline(p.Pos, p.Line, decl) - for _, c := range p.Cmds { - n.append(c.Copy().(*CommandNode)) - } - return n -} - -func (p *PipeNode) Copy() Node { - return p.CopyPipe() -} - -// ActionNode holds an action (something bounded by delimiters). -// Control actions have their own nodes; ActionNode represents simple -// ones such as field evaluations and parenthesized pipelines. -type ActionNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Pipe *PipeNode // The pipeline in the action. -} - -func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode { - return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe} -} - -func (a *ActionNode) String() string { - return fmt.Sprintf("{{%s}}", a.Pipe) - -} - -func (a *ActionNode) tree() *Tree { - return a.tr -} - -func (a *ActionNode) Copy() Node { - return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe()) - -} - -// CommandNode holds a command (a pipeline inside an evaluating action). -type CommandNode struct { - NodeType - Pos - tr *Tree - Args []Node // Arguments in lexical order: Identifier, field, or constant. 
-} - -func (t *Tree) newCommand(pos Pos) *CommandNode { - return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos} -} - -func (c *CommandNode) append(arg Node) { - c.Args = append(c.Args, arg) -} - -func (c *CommandNode) String() string { - s := "" - for i, arg := range c.Args { - if i > 0 { - s += " " - } - if arg, ok := arg.(*PipeNode); ok { - s += "(" + arg.String() + ")" - continue - } - s += arg.String() - } - return s -} - -func (c *CommandNode) tree() *Tree { - return c.tr -} - -func (c *CommandNode) Copy() Node { - if c == nil { - return c - } - n := c.tr.newCommand(c.Pos) - for _, c := range c.Args { - n.append(c.Copy()) - } - return n -} - -// IdentifierNode holds an identifier. -type IdentifierNode struct { - NodeType - Pos - tr *Tree - Ident string // The identifier's name. -} - -// NewIdentifier returns a new IdentifierNode with the given identifier name. -func NewIdentifier(ident string) *IdentifierNode { - return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident} -} - -// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature. -// Chained for convenience. -// TODO: fix one day? -func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode { - i.Pos = pos - return i -} - -// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature. -// Chained for convenience. -// TODO: fix one day? -func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode { - i.tr = t - return i -} - -func (i *IdentifierNode) String() string { - return i.Ident -} - -func (i *IdentifierNode) tree() *Tree { - return i.tr -} - -func (i *IdentifierNode) Copy() Node { - return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos) -} - -// VariableNode holds a list of variable names, possibly with chained field -// accesses. The dollar sign is part of the (first) name. -type VariableNode struct { - NodeType - Pos - tr *Tree - Ident []string // Variable name and fields in lexical order. 
-} - -func (t *Tree) newVariable(pos Pos, ident string) *VariableNode { - return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")} -} - -func (v *VariableNode) String() string { - s := "" - for i, id := range v.Ident { - if i > 0 { - s += "." - } - s += id - } - return s -} - -func (v *VariableNode) tree() *Tree { - return v.tr -} - -func (v *VariableNode) Copy() Node { - return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)} -} - -// DotNode holds the special identifier '.'. -type DotNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newDot(pos Pos) *DotNode { - return &DotNode{tr: t, NodeType: NodeDot, Pos: pos} -} - -func (d *DotNode) Type() NodeType { - // Override method on embedded NodeType for API compatibility. - // TODO: Not really a problem; could change API without effect but - // api tool complains. - return NodeDot -} - -func (d *DotNode) String() string { - return "." -} - -func (d *DotNode) tree() *Tree { - return d.tr -} - -func (d *DotNode) Copy() Node { - return d.tr.newDot(d.Pos) -} - -// NilNode holds the special identifier 'nil' representing an untyped nil constant. -type NilNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newNil(pos Pos) *NilNode { - return &NilNode{tr: t, NodeType: NodeNil, Pos: pos} -} - -func (n *NilNode) Type() NodeType { - // Override method on embedded NodeType for API compatibility. - // TODO: Not really a problem; could change API without effect but - // api tool complains. - return NodeNil -} - -func (n *NilNode) String() string { - return "nil" -} - -func (n *NilNode) tree() *Tree { - return n.tr -} - -func (n *NilNode) Copy() Node { - return n.tr.newNil(n.Pos) -} - -// FieldNode holds a field (identifier starting with '.'). -// The names may be chained ('.x.y'). -// The period is dropped from each ident. 
-type FieldNode struct { - NodeType - Pos - tr *Tree - Ident []string // The identifiers in lexical order. -} - -func (t *Tree) newField(pos Pos, ident string) *FieldNode { - return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period -} - -func (f *FieldNode) String() string { - s := "" - for _, id := range f.Ident { - s += "." + id - } - return s -} - -func (f *FieldNode) tree() *Tree { - return f.tr -} - -func (f *FieldNode) Copy() Node { - return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)} -} - -// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.'). -// The names may be chained ('.x.y'). -// The periods are dropped from each ident. -type ChainNode struct { - NodeType - Pos - tr *Tree - Node Node - Field []string // The identifiers in lexical order. -} - -func (t *Tree) newChain(pos Pos, node Node) *ChainNode { - return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node} -} - -// Add adds the named field (which should start with a period) to the end of the chain. -func (c *ChainNode) Add(field string) { - if len(field) == 0 || field[0] != '.' { - panic("no dot in field") - } - field = field[1:] // Remove leading dot. - if field == "" { - panic("empty field") - } - c.Field = append(c.Field, field) -} - -func (c *ChainNode) String() string { - s := c.Node.String() - if _, ok := c.Node.(*PipeNode); ok { - s = "(" + s + ")" - } - for _, field := range c.Field { - s += "." + field - } - return s -} - -func (c *ChainNode) tree() *Tree { - return c.tr -} - -func (c *ChainNode) Copy() Node { - return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)} -} - -// BoolNode holds a boolean constant. -type BoolNode struct { - NodeType - Pos - tr *Tree - True bool // The value of the boolean constant. 
-} - -func (t *Tree) newBool(pos Pos, true bool) *BoolNode { - return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true} -} - -func (b *BoolNode) String() string { - if b.True { - return "true" - } - return "false" -} - -func (b *BoolNode) tree() *Tree { - return b.tr -} - -func (b *BoolNode) Copy() Node { - return b.tr.newBool(b.Pos, b.True) -} - -// NumberNode holds a number: signed or unsigned integer, float, or complex. -// The value is parsed and stored under all the types that can represent the value. -// This simulates in a small amount of code the behavior of Go's ideal constants. -type NumberNode struct { - NodeType - Pos - tr *Tree - IsInt bool // Number has an integral value. - IsUint bool // Number has an unsigned integral value. - IsFloat bool // Number has a floating-point value. - IsComplex bool // Number is complex. - Int64 int64 // The signed integer value. - Uint64 uint64 // The unsigned integer value. - Float64 float64 // The floating-point value. - Complex128 complex128 // The complex value. - Text string // The original textual representation from the input. -} - -func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) { - n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text} - switch typ { - case itemCharConstant: - rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0]) - if err != nil { - return nil, err - } - if tail != "'" { - return nil, fmt.Errorf("malformed character constant: %s", text) - } - n.Int64 = int64(rune) - n.IsInt = true - n.Uint64 = uint64(rune) - n.IsUint = true - n.Float64 = float64(rune) // odd but those are the rules. - n.IsFloat = true - return n, nil - case itemComplex: - // fmt.Sscan can parse the pair, so let it do the work. - if _, err := fmt.Sscan(text, &n.Complex128); err != nil { - return nil, err - } - n.IsComplex = true - n.simplifyComplex() - return n, nil - } - // Imaginary constants can only be complex unless they are zero. 
- if len(text) > 0 && text[len(text)-1] == 'i' { - f, err := strconv.ParseFloat(text[:len(text)-1], 64) - if err == nil { - n.IsComplex = true - n.Complex128 = complex(0, f) - n.simplifyComplex() - return n, nil - } - } - // Do integer test first so we get 0x123 etc. - u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below. - if err == nil { - n.IsUint = true - n.Uint64 = u - } - i, err := strconv.ParseInt(text, 0, 64) - if err == nil { - n.IsInt = true - n.Int64 = i - if i == 0 { - n.IsUint = true // in case of -0. - n.Uint64 = u - } - } - // If an integer extraction succeeded, promote the float. - if n.IsInt { - n.IsFloat = true - n.Float64 = float64(n.Int64) - } else if n.IsUint { - n.IsFloat = true - n.Float64 = float64(n.Uint64) - } else { - f, err := strconv.ParseFloat(text, 64) - if err == nil { - n.IsFloat = true - n.Float64 = f - // If a floating-point extraction succeeded, extract the int if needed. - if !n.IsInt && float64(int64(f)) == f { - n.IsInt = true - n.Int64 = int64(f) - } - if !n.IsUint && float64(uint64(f)) == f { - n.IsUint = true - n.Uint64 = uint64(f) - } - } - } - if !n.IsInt && !n.IsUint && !n.IsFloat { - return nil, fmt.Errorf("illegal number syntax: %q", text) - } - return n, nil -} - -// simplifyComplex pulls out any other types that are represented by the complex number. -// These all require that the imaginary part be zero. -func (n *NumberNode) simplifyComplex() { - n.IsFloat = imag(n.Complex128) == 0 - if n.IsFloat { - n.Float64 = real(n.Complex128) - n.IsInt = float64(int64(n.Float64)) == n.Float64 - if n.IsInt { - n.Int64 = int64(n.Float64) - } - n.IsUint = float64(uint64(n.Float64)) == n.Float64 - if n.IsUint { - n.Uint64 = uint64(n.Float64) - } - } -} - -func (n *NumberNode) String() string { - return n.Text -} - -func (n *NumberNode) tree() *Tree { - return n.tr -} - -func (n *NumberNode) Copy() Node { - nn := new(NumberNode) - *nn = *n // Easy, fast, correct. 
- return nn -} - -// StringNode holds a string constant. The value has been "unquoted". -type StringNode struct { - NodeType - Pos - tr *Tree - Quoted string // The original text of the string, with quotes. - Text string // The string, after quote processing. -} - -func (t *Tree) newString(pos Pos, orig, text string) *StringNode { - return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text} -} - -func (s *StringNode) String() string { - return s.Quoted -} - -func (s *StringNode) tree() *Tree { - return s.tr -} - -func (s *StringNode) Copy() Node { - return s.tr.newString(s.Pos, s.Quoted, s.Text) -} - -// endNode represents an {{end}} action. -// It does not appear in the final parse tree. -type endNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newEnd(pos Pos) *endNode { - return &endNode{tr: t, NodeType: nodeEnd, Pos: pos} -} - -func (e *endNode) String() string { - return "{{end}}" -} - -func (e *endNode) tree() *Tree { - return e.tr -} - -func (e *endNode) Copy() Node { - return e.tr.newEnd(e.Pos) -} - -// elseNode represents an {{else}} action. Does not appear in the final tree. -type elseNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) -} - -func (t *Tree) newElse(pos Pos, line int) *elseNode { - return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line} -} - -func (e *elseNode) Type() NodeType { - return nodeElse -} - -func (e *elseNode) String() string { - return "{{else}}" -} - -func (e *elseNode) tree() *Tree { - return e.tr -} - -func (e *elseNode) Copy() Node { - return e.tr.newElse(e.Pos, e.Line) -} - -// BranchNode is the common representation of if, range, and with. -type BranchNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Pipe *PipeNode // The pipeline to be evaluated. - List *ListNode // What to execute if the value is non-empty. 
- ElseList *ListNode // What to execute if the value is empty (nil if absent). -} - -func (b *BranchNode) String() string { - name := "" - switch b.NodeType { - case NodeIf: - name = "if" - case NodeRange: - name = "range" - case NodeWith: - name = "with" - default: - panic("unknown branch type") - } - if b.ElseList != nil { - return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList) - } - return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List) -} - -func (b *BranchNode) tree() *Tree { - return b.tr -} - -func (b *BranchNode) Copy() Node { - switch b.NodeType { - case NodeIf: - return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - case NodeRange: - return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - case NodeWith: - return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - default: - panic("unknown branch type") - } -} - -// IfNode represents an {{if}} action and its commands. -type IfNode struct { - BranchNode -} - -func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode { - return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (i *IfNode) Copy() Node { - return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList()) -} - -// RangeNode represents a {{range}} action and its commands. -type RangeNode struct { - BranchNode -} - -func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode { - return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (r *RangeNode) Copy() Node { - return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList()) -} - -// WithNode represents a {{with}} action and its commands. 
-type WithNode struct { - BranchNode -} - -func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode { - return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (w *WithNode) Copy() Node { - return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList()) -} - -// TemplateNode represents a {{template}} action. -type TemplateNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Name string // The name of the template (unquoted). - Pipe *PipeNode // The command to evaluate as dot for the template. -} - -func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode { - return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe} -} - -func (t *TemplateNode) String() string { - if t.Pipe == nil { - return fmt.Sprintf("{{template %q}}", t.Name) - } - return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe) -} - -func (t *TemplateNode) tree() *Tree { - return t.tr -} - -func (t *TemplateNode) Copy() Node { - return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe()) -} diff --git a/vendor/github.com/alecthomas/template/parse/parse.go b/vendor/github.com/alecthomas/template/parse/parse.go deleted file mode 100644 index 0d77ade871..0000000000 --- a/vendor/github.com/alecthomas/template/parse/parse.go +++ /dev/null @@ -1,700 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package parse builds parse trees for templates as defined by text/template -// and html/template. Clients should use those packages to construct templates -// rather than this one, which provides shared internal data structures not -// intended for general use. 
-package parse - -import ( - "bytes" - "fmt" - "runtime" - "strconv" - "strings" -) - -// Tree is the representation of a single parsed template. -type Tree struct { - Name string // name of the template represented by the tree. - ParseName string // name of the top-level template during parsing, for error messages. - Root *ListNode // top-level root of the tree. - text string // text parsed to create the template (or its parent) - // Parsing only; cleared after parse. - funcs []map[string]interface{} - lex *lexer - token [3]item // three-token lookahead for parser. - peekCount int - vars []string // variables defined at the moment. -} - -// Copy returns a copy of the Tree. Any parsing state is discarded. -func (t *Tree) Copy() *Tree { - if t == nil { - return nil - } - return &Tree{ - Name: t.Name, - ParseName: t.ParseName, - Root: t.Root.CopyList(), - text: t.text, - } -} - -// Parse returns a map from template name to parse.Tree, created by parsing the -// templates described in the argument string. The top-level template will be -// given the specified name. If an error is encountered, parsing stops and an -// empty map is returned with the error. -func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) { - treeSet = make(map[string]*Tree) - t := New(name) - t.text = text - _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...) - return -} - -// next returns the next token. -func (t *Tree) next() item { - if t.peekCount > 0 { - t.peekCount-- - } else { - t.token[0] = t.lex.nextItem() - } - return t.token[t.peekCount] -} - -// backup backs the input stream up one token. -func (t *Tree) backup() { - t.peekCount++ -} - -// backup2 backs the input stream up two tokens. -// The zeroth token is already there. -func (t *Tree) backup2(t1 item) { - t.token[1] = t1 - t.peekCount = 2 -} - -// backup3 backs the input stream up three tokens -// The zeroth token is already there. 
-func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back. - t.token[1] = t1 - t.token[2] = t2 - t.peekCount = 3 -} - -// peek returns but does not consume the next token. -func (t *Tree) peek() item { - if t.peekCount > 0 { - return t.token[t.peekCount-1] - } - t.peekCount = 1 - t.token[0] = t.lex.nextItem() - return t.token[0] -} - -// nextNonSpace returns the next non-space token. -func (t *Tree) nextNonSpace() (token item) { - for { - token = t.next() - if token.typ != itemSpace { - break - } - } - return token -} - -// peekNonSpace returns but does not consume the next non-space token. -func (t *Tree) peekNonSpace() (token item) { - for { - token = t.next() - if token.typ != itemSpace { - break - } - } - t.backup() - return token -} - -// Parsing. - -// New allocates a new parse tree with the given name. -func New(name string, funcs ...map[string]interface{}) *Tree { - return &Tree{ - Name: name, - funcs: funcs, - } -} - -// ErrorContext returns a textual representation of the location of the node in the input text. -// The receiver is only used when the node does not have a pointer to the tree inside, -// which can occur in old code. -func (t *Tree) ErrorContext(n Node) (location, context string) { - pos := int(n.Position()) - tree := n.tree() - if tree == nil { - tree = t - } - text := tree.text[:pos] - byteNum := strings.LastIndex(text, "\n") - if byteNum == -1 { - byteNum = pos // On first line. - } else { - byteNum++ // After the newline. - byteNum = pos - byteNum - } - lineNum := 1 + strings.Count(text, "\n") - context = n.String() - if len(context) > 20 { - context = fmt.Sprintf("%.20s...", context) - } - return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context -} - -// errorf formats the error and terminates processing. 
-func (t *Tree) errorf(format string, args ...interface{}) { - t.Root = nil - format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format) - panic(fmt.Errorf(format, args...)) -} - -// error terminates processing. -func (t *Tree) error(err error) { - t.errorf("%s", err) -} - -// expect consumes the next token and guarantees it has the required type. -func (t *Tree) expect(expected itemType, context string) item { - token := t.nextNonSpace() - if token.typ != expected { - t.unexpected(token, context) - } - return token -} - -// expectOneOf consumes the next token and guarantees it has one of the required types. -func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item { - token := t.nextNonSpace() - if token.typ != expected1 && token.typ != expected2 { - t.unexpected(token, context) - } - return token -} - -// unexpected complains about the token and terminates processing. -func (t *Tree) unexpected(token item, context string) { - t.errorf("unexpected %s in %s", token, context) -} - -// recover is the handler that turns panics into returns from the top level of Parse. -func (t *Tree) recover(errp *error) { - e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - panic(e) - } - if t != nil { - t.stopParse() - } - *errp = e.(error) - } - return -} - -// startParse initializes the parser, using the lexer. -func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) { - t.Root = nil - t.lex = lex - t.vars = []string{"$"} - t.funcs = funcs -} - -// stopParse terminates parsing. -func (t *Tree) stopParse() { - t.lex = nil - t.vars = nil - t.funcs = nil -} - -// Parse parses the template definition string to construct a representation of -// the template for execution. If either action delimiter string is empty, the -// default ("{{" or "}}") is used. Embedded template definitions are added to -// the treeSet map. 
-func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) { - defer t.recover(&err) - t.ParseName = t.Name - t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim)) - t.text = text - t.parse(treeSet) - t.add(treeSet) - t.stopParse() - return t, nil -} - -// add adds tree to the treeSet. -func (t *Tree) add(treeSet map[string]*Tree) { - tree := treeSet[t.Name] - if tree == nil || IsEmptyTree(tree.Root) { - treeSet[t.Name] = t - return - } - if !IsEmptyTree(t.Root) { - t.errorf("template: multiple definition of template %q", t.Name) - } -} - -// IsEmptyTree reports whether this tree (node) is empty of everything but space. -func IsEmptyTree(n Node) bool { - switch n := n.(type) { - case nil: - return true - case *ActionNode: - case *IfNode: - case *ListNode: - for _, node := range n.Nodes { - if !IsEmptyTree(node) { - return false - } - } - return true - case *RangeNode: - case *TemplateNode: - case *TextNode: - return len(bytes.TrimSpace(n.Text)) == 0 - case *WithNode: - default: - panic("unknown node: " + n.String()) - } - return false -} - -// parse is the top-level parser for a template, essentially the same -// as itemList except it also parses {{define}} actions. -// It runs to EOF. -func (t *Tree) parse(treeSet map[string]*Tree) (next Node) { - t.Root = t.newList(t.peek().pos) - for t.peek().typ != itemEOF { - if t.peek().typ == itemLeftDelim { - delim := t.next() - if t.nextNonSpace().typ == itemDefine { - newT := New("definition") // name will be updated once we know it. - newT.text = t.text - newT.ParseName = t.ParseName - newT.startParse(t.funcs, t.lex) - newT.parseDefinition(treeSet) - continue - } - t.backup2(delim) - } - n := t.textOrAction() - if n.Type() == nodeEnd { - t.errorf("unexpected %s", n) - } - t.Root.append(n) - } - return nil -} - -// parseDefinition parses a {{define}} ... {{end}} template definition and -// installs the definition in the treeSet map. 
The "define" keyword has already -// been scanned. -func (t *Tree) parseDefinition(treeSet map[string]*Tree) { - const context = "define clause" - name := t.expectOneOf(itemString, itemRawString, context) - var err error - t.Name, err = strconv.Unquote(name.val) - if err != nil { - t.error(err) - } - t.expect(itemRightDelim, context) - var end Node - t.Root, end = t.itemList() - if end.Type() != nodeEnd { - t.errorf("unexpected %s in %s", end, context) - } - t.add(treeSet) - t.stopParse() -} - -// itemList: -// textOrAction* -// Terminates at {{end}} or {{else}}, returned separately. -func (t *Tree) itemList() (list *ListNode, next Node) { - list = t.newList(t.peekNonSpace().pos) - for t.peekNonSpace().typ != itemEOF { - n := t.textOrAction() - switch n.Type() { - case nodeEnd, nodeElse: - return list, n - } - list.append(n) - } - t.errorf("unexpected EOF") - return -} - -// textOrAction: -// text | action -func (t *Tree) textOrAction() Node { - switch token := t.nextNonSpace(); token.typ { - case itemElideNewline: - return t.elideNewline() - case itemText: - return t.newText(token.pos, token.val) - case itemLeftDelim: - return t.action() - default: - t.unexpected(token, "input") - } - return nil -} - -// elideNewline: -// Remove newlines trailing rightDelim if \\ is present. -func (t *Tree) elideNewline() Node { - token := t.peek() - if token.typ != itemText { - t.unexpected(token, "input") - return nil - } - - t.next() - stripped := strings.TrimLeft(token.val, "\n\r") - diff := len(token.val) - len(stripped) - if diff > 0 { - // This is a bit nasty. We mutate the token in-place to remove - // preceding newlines. - token.pos += Pos(diff) - token.val = stripped - } - return t.newText(token.pos, token.val) -} - -// Action: -// control -// command ("|" command)* -// Left delim is past. Now get actions. -// First word could be a keyword such as range. 
-func (t *Tree) action() (n Node) { - switch token := t.nextNonSpace(); token.typ { - case itemElse: - return t.elseControl() - case itemEnd: - return t.endControl() - case itemIf: - return t.ifControl() - case itemRange: - return t.rangeControl() - case itemTemplate: - return t.templateControl() - case itemWith: - return t.withControl() - } - t.backup() - // Do not pop variables; they persist until "end". - return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command")) -} - -// Pipeline: -// declarations? command ('|' command)* -func (t *Tree) pipeline(context string) (pipe *PipeNode) { - var decl []*VariableNode - pos := t.peekNonSpace().pos - // Are there declarations? - for { - if v := t.peekNonSpace(); v.typ == itemVariable { - t.next() - // Since space is a token, we need 3-token look-ahead here in the worst case: - // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an - // argument variable rather than a declaration. So remember the token - // adjacent to the variable so we can push it back if necessary. 
- tokenAfterVariable := t.peek() - if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") { - t.nextNonSpace() - variable := t.newVariable(v.pos, v.val) - decl = append(decl, variable) - t.vars = append(t.vars, v.val) - if next.typ == itemChar && next.val == "," { - if context == "range" && len(decl) < 2 { - continue - } - t.errorf("too many declarations in %s", context) - } - } else if tokenAfterVariable.typ == itemSpace { - t.backup3(v, tokenAfterVariable) - } else { - t.backup2(v) - } - } - break - } - pipe = t.newPipeline(pos, t.lex.lineNumber(), decl) - for { - switch token := t.nextNonSpace(); token.typ { - case itemRightDelim, itemRightParen: - if len(pipe.Cmds) == 0 { - t.errorf("missing value for %s", context) - } - if token.typ == itemRightParen { - t.backup() - } - return - case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier, - itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen: - t.backup() - pipe.append(t.command()) - default: - t.unexpected(token, context) - } - } -} - -func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) { - defer t.popVars(len(t.vars)) - line = t.lex.lineNumber() - pipe = t.pipeline(context) - var next Node - list, next = t.itemList() - switch next.Type() { - case nodeEnd: //done - case nodeElse: - if allowElseIf { - // Special case for "else if". If the "else" is followed immediately by an "if", - // the elseControl will have left the "if" token pending. Treat - // {{if a}}_{{else if b}}_{{end}} - // as - // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}. - // To do this, parse the if as usual and stop at it {{end}}; the subsequent{{end}} - // is assumed. This technique works even for long if-else-if chains. - // TODO: Should we allow else-if in with and range? - if t.peek().typ == itemIf { - t.next() // Consume the "if" token. 
- elseList = t.newList(next.Position()) - elseList.append(t.ifControl()) - // Do not consume the next item - only one {{end}} required. - break - } - } - elseList, next = t.itemList() - if next.Type() != nodeEnd { - t.errorf("expected end; found %s", next) - } - } - return pipe.Position(), line, pipe, list, elseList -} - -// If: -// {{if pipeline}} itemList {{end}} -// {{if pipeline}} itemList {{else}} itemList {{end}} -// If keyword is past. -func (t *Tree) ifControl() Node { - return t.newIf(t.parseControl(true, "if")) -} - -// Range: -// {{range pipeline}} itemList {{end}} -// {{range pipeline}} itemList {{else}} itemList {{end}} -// Range keyword is past. -func (t *Tree) rangeControl() Node { - return t.newRange(t.parseControl(false, "range")) -} - -// With: -// {{with pipeline}} itemList {{end}} -// {{with pipeline}} itemList {{else}} itemList {{end}} -// If keyword is past. -func (t *Tree) withControl() Node { - return t.newWith(t.parseControl(false, "with")) -} - -// End: -// {{end}} -// End keyword is past. -func (t *Tree) endControl() Node { - return t.newEnd(t.expect(itemRightDelim, "end").pos) -} - -// Else: -// {{else}} -// Else keyword is past. -func (t *Tree) elseControl() Node { - // Special case for "else if". - peek := t.peekNonSpace() - if peek.typ == itemIf { - // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ". - return t.newElse(peek.pos, t.lex.lineNumber()) - } - return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber()) -} - -// Template: -// {{template stringValue pipeline}} -// Template keyword is past. The name must be something that can evaluate -// to a string. 
-func (t *Tree) templateControl() Node { - var name string - token := t.nextNonSpace() - switch token.typ { - case itemString, itemRawString: - s, err := strconv.Unquote(token.val) - if err != nil { - t.error(err) - } - name = s - default: - t.unexpected(token, "template invocation") - } - var pipe *PipeNode - if t.nextNonSpace().typ != itemRightDelim { - t.backup() - // Do not pop variables; they persist until "end". - pipe = t.pipeline("template") - } - return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe) -} - -// command: -// operand (space operand)* -// space-separated arguments up to a pipeline character or right delimiter. -// we consume the pipe character but leave the right delim to terminate the action. -func (t *Tree) command() *CommandNode { - cmd := t.newCommand(t.peekNonSpace().pos) - for { - t.peekNonSpace() // skip leading spaces. - operand := t.operand() - if operand != nil { - cmd.append(operand) - } - switch token := t.next(); token.typ { - case itemSpace: - continue - case itemError: - t.errorf("%s", token.val) - case itemRightDelim, itemRightParen: - t.backup() - case itemPipe: - default: - t.errorf("unexpected %s in operand; missing space?", token) - } - break - } - if len(cmd.Args) == 0 { - t.errorf("empty command") - } - return cmd -} - -// operand: -// term .Field* -// An operand is a space-separated component of a command, -// a term possibly followed by field accesses. -// A nil return means the next item is not an operand. -func (t *Tree) operand() Node { - node := t.term() - if node == nil { - return nil - } - if t.peek().typ == itemField { - chain := t.newChain(t.peek().pos, node) - for t.peek().typ == itemField { - chain.Add(t.next().val) - } - // Compatibility with original API: If the term is of type NodeField - // or NodeVariable, just put more fields on the original. - // Otherwise, keep the Chain node. - // TODO: Switch to Chains always when we can. 
- switch node.Type() { - case NodeField: - node = t.newField(chain.Position(), chain.String()) - case NodeVariable: - node = t.newVariable(chain.Position(), chain.String()) - default: - node = chain - } - } - return node -} - -// term: -// literal (number, string, nil, boolean) -// function (identifier) -// . -// .Field -// $ -// '(' pipeline ')' -// A term is a simple "expression". -// A nil return means the next item is not a term. -func (t *Tree) term() Node { - switch token := t.nextNonSpace(); token.typ { - case itemError: - t.errorf("%s", token.val) - case itemIdentifier: - if !t.hasFunction(token.val) { - t.errorf("function %q not defined", token.val) - } - return NewIdentifier(token.val).SetTree(t).SetPos(token.pos) - case itemDot: - return t.newDot(token.pos) - case itemNil: - return t.newNil(token.pos) - case itemVariable: - return t.useVar(token.pos, token.val) - case itemField: - return t.newField(token.pos, token.val) - case itemBool: - return t.newBool(token.pos, token.val == "true") - case itemCharConstant, itemComplex, itemNumber: - number, err := t.newNumber(token.pos, token.val, token.typ) - if err != nil { - t.error(err) - } - return number - case itemLeftParen: - pipe := t.pipeline("parenthesized pipeline") - if token := t.next(); token.typ != itemRightParen { - t.errorf("unclosed right paren: unexpected %s", token) - } - return pipe - case itemString, itemRawString: - s, err := strconv.Unquote(token.val) - if err != nil { - t.error(err) - } - return t.newString(token.pos, token.val, s) - } - t.backup() - return nil -} - -// hasFunction reports if a function name exists in the Tree's maps. -func (t *Tree) hasFunction(name string) bool { - for _, funcMap := range t.funcs { - if funcMap == nil { - continue - } - if funcMap[name] != nil { - return true - } - } - return false -} - -// popVars trims the variable list to the specified length -func (t *Tree) popVars(n int) { - t.vars = t.vars[:n] -} - -// useVar returns a node for a variable reference. 
It errors if the -// variable is not defined. -func (t *Tree) useVar(pos Pos, name string) Node { - v := t.newVariable(pos, name) - for _, varName := range t.vars { - if varName == v.Ident[0] { - return v - } - } - t.errorf("undefined variable %q", v.Ident[0]) - return nil -} diff --git a/vendor/github.com/alecthomas/template/template.go b/vendor/github.com/alecthomas/template/template.go deleted file mode 100644 index 447ed2abae..0000000000 --- a/vendor/github.com/alecthomas/template/template.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "fmt" - "reflect" - - "github.com/alecthomas/template/parse" -) - -// common holds the information shared by related templates. -type common struct { - tmpl map[string]*Template - // We use two maps, one for parsing and one for execution. - // This separation makes the API cleaner since it doesn't - // expose reflection to the client. - parseFuncs FuncMap - execFuncs map[string]reflect.Value -} - -// Template is the representation of a parsed template. The *parse.Tree -// field is exported only for use by html/template and should be treated -// as unexported by all other clients. -type Template struct { - name string - *parse.Tree - *common - leftDelim string - rightDelim string -} - -// New allocates a new template with the given name. -func New(name string) *Template { - return &Template{ - name: name, - } -} - -// Name returns the name of the template. -func (t *Template) Name() string { - return t.name -} - -// New allocates a new template associated with the given one and with the same -// delimiters. The association, which is transitive, allows one template to -// invoke another with a {{template}} action. 
-func (t *Template) New(name string) *Template { - t.init() - return &Template{ - name: name, - common: t.common, - leftDelim: t.leftDelim, - rightDelim: t.rightDelim, - } -} - -func (t *Template) init() { - if t.common == nil { - t.common = new(common) - t.tmpl = make(map[string]*Template) - t.parseFuncs = make(FuncMap) - t.execFuncs = make(map[string]reflect.Value) - } -} - -// Clone returns a duplicate of the template, including all associated -// templates. The actual representation is not copied, but the name space of -// associated templates is, so further calls to Parse in the copy will add -// templates to the copy but not to the original. Clone can be used to prepare -// common templates and use them with variant definitions for other templates -// by adding the variants after the clone is made. -func (t *Template) Clone() (*Template, error) { - nt := t.copy(nil) - nt.init() - nt.tmpl[t.name] = nt - for k, v := range t.tmpl { - if k == t.name { // Already installed. - continue - } - // The associated templates share nt's common structure. - tmpl := v.copy(nt.common) - nt.tmpl[k] = tmpl - } - for k, v := range t.parseFuncs { - nt.parseFuncs[k] = v - } - for k, v := range t.execFuncs { - nt.execFuncs[k] = v - } - return nt, nil -} - -// copy returns a shallow copy of t, with common set to the argument. -func (t *Template) copy(c *common) *Template { - nt := New(t.name) - nt.Tree = t.Tree - nt.common = c - nt.leftDelim = t.leftDelim - nt.rightDelim = t.rightDelim - return nt -} - -// AddParseTree creates a new template with the name and parse tree -// and associates it with t. -func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) { - if t.common != nil && t.tmpl[name] != nil { - return nil, fmt.Errorf("template: redefinition of template %q", name) - } - nt := t.New(name) - nt.Tree = tree - t.tmpl[name] = nt - return nt, nil -} - -// Templates returns a slice of the templates associated with t, including t -// itself. 
-func (t *Template) Templates() []*Template { - if t.common == nil { - return nil - } - // Return a slice so we don't expose the map. - m := make([]*Template, 0, len(t.tmpl)) - for _, v := range t.tmpl { - m = append(m, v) - } - return m -} - -// Delims sets the action delimiters to the specified strings, to be used in -// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template -// definitions will inherit the settings. An empty delimiter stands for the -// corresponding default: {{ or }}. -// The return value is the template, so calls can be chained. -func (t *Template) Delims(left, right string) *Template { - t.leftDelim = left - t.rightDelim = right - return t -} - -// Funcs adds the elements of the argument map to the template's function map. -// It panics if a value in the map is not a function with appropriate return -// type. However, it is legal to overwrite elements of the map. The return -// value is the template, so calls can be chained. -func (t *Template) Funcs(funcMap FuncMap) *Template { - t.init() - addValueFuncs(t.execFuncs, funcMap) - addFuncs(t.parseFuncs, funcMap) - return t -} - -// Lookup returns the template with the given name that is associated with t, -// or nil if there is no such template. -func (t *Template) Lookup(name string) *Template { - if t.common == nil { - return nil - } - return t.tmpl[name] -} - -// Parse parses a string into a template. Nested template definitions will be -// associated with the top-level template t. Parse may be called multiple times -// to parse definitions of templates to associate with t. It is an error if a -// resulting template is non-empty (contains content other than template -// definitions) and would replace a non-empty template with the same name. -// (In multiple calls to Parse with the same receiver template, only one call -// can contain text other than space, comments, and template definitions.) 
-func (t *Template) Parse(text string) (*Template, error) { - t.init() - trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins) - if err != nil { - return nil, err - } - // Add the newly parsed trees, including the one for t, into our common structure. - for name, tree := range trees { - // If the name we parsed is the name of this template, overwrite this template. - // The associate method checks it's not a redefinition. - tmpl := t - if name != t.name { - tmpl = t.New(name) - } - // Even if t == tmpl, we need to install it in the common.tmpl map. - if replace, err := t.associate(tmpl, tree); err != nil { - return nil, err - } else if replace { - tmpl.Tree = tree - } - tmpl.leftDelim = t.leftDelim - tmpl.rightDelim = t.rightDelim - } - return t, nil -} - -// associate installs the new template into the group of templates associated -// with t. It is an error to reuse a name except to overwrite an empty -// template. The two are already known to share the common structure. -// The boolean return value reports wither to store this tree as t.Tree. -func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) { - if new.common != t.common { - panic("internal error: associate not common") - } - name := new.name - if old := t.tmpl[name]; old != nil { - oldIsEmpty := parse.IsEmptyTree(old.Root) - newIsEmpty := parse.IsEmptyTree(tree.Root) - if newIsEmpty { - // Whether old is empty or not, new is empty; no reason to replace old. 
- return false, nil - } - if !oldIsEmpty { - return false, fmt.Errorf("template: redefinition of template %q", name) - } - } - t.tmpl[name] = new - return true, nil -} diff --git a/vendor/github.com/alecthomas/units/COPYING b/vendor/github.com/alecthomas/units/COPYING deleted file mode 100644 index 2993ec085d..0000000000 --- a/vendor/github.com/alecthomas/units/COPYING +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2014 Alec Thomas - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/alecthomas/units/README.md b/vendor/github.com/alecthomas/units/README.md deleted file mode 100644 index bee884e3c1..0000000000 --- a/vendor/github.com/alecthomas/units/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Units - Helpful unit multipliers and functions for Go - -The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package. 
- -It allows for code like this: - -```go -n, err := ParseBase2Bytes("1KB") -// n == 1024 -n = units.Mebibyte * 512 -``` diff --git a/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/alecthomas/units/bytes.go deleted file mode 100644 index eaadeb8005..0000000000 --- a/vendor/github.com/alecthomas/units/bytes.go +++ /dev/null @@ -1,83 +0,0 @@ -package units - -// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte, -// etc.). -type Base2Bytes int64 - -// Base-2 byte units. -const ( - Kibibyte Base2Bytes = 1024 - KiB = Kibibyte - Mebibyte = Kibibyte * 1024 - MiB = Mebibyte - Gibibyte = Mebibyte * 1024 - GiB = Gibibyte - Tebibyte = Gibibyte * 1024 - TiB = Tebibyte - Pebibyte = Tebibyte * 1024 - PiB = Pebibyte - Exbibyte = Pebibyte * 1024 - EiB = Exbibyte -) - -var ( - bytesUnitMap = MakeUnitMap("iB", "B", 1024) - oldBytesUnitMap = MakeUnitMap("B", "B", 1024) -) - -// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB -// and KiB are both 1024. -func ParseBase2Bytes(s string) (Base2Bytes, error) { - n, err := ParseUnit(s, bytesUnitMap) - if err != nil { - n, err = ParseUnit(s, oldBytesUnitMap) - } - return Base2Bytes(n), err -} - -func (b Base2Bytes) String() string { - return ToString(int64(b), 1024, "iB", "B") -} - -var ( - metricBytesUnitMap = MakeUnitMap("B", "B", 1000) -) - -// MetricBytes are SI byte units (1000 bytes in a kilobyte). -type MetricBytes SI - -// SI base-10 byte units. -const ( - Kilobyte MetricBytes = 1000 - KB = Kilobyte - Megabyte = Kilobyte * 1000 - MB = Megabyte - Gigabyte = Megabyte * 1000 - GB = Gigabyte - Terabyte = Gigabyte * 1000 - TB = Terabyte - Petabyte = Terabyte * 1000 - PB = Petabyte - Exabyte = Petabyte * 1000 - EB = Exabyte -) - -// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes. 
-func ParseMetricBytes(s string) (MetricBytes, error) { - n, err := ParseUnit(s, metricBytesUnitMap) - return MetricBytes(n), err -} - -func (m MetricBytes) String() string { - return ToString(int64(m), 1000, "B", "B") -} - -// ParseStrictBytes supports both iB and B suffixes for base 2 and metric, -// respectively. That is, KiB represents 1024 and KB represents 1000. -func ParseStrictBytes(s string) (int64, error) { - n, err := ParseUnit(s, bytesUnitMap) - if err != nil { - n, err = ParseUnit(s, metricBytesUnitMap) - } - return int64(n), err -} diff --git a/vendor/github.com/alecthomas/units/doc.go b/vendor/github.com/alecthomas/units/doc.go deleted file mode 100644 index 156ae38672..0000000000 --- a/vendor/github.com/alecthomas/units/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Package units provides helpful unit multipliers and functions for Go. -// -// The goal of this package is to have functionality similar to the time [1] package. -// -// -// [1] http://golang.org/pkg/time/ -// -// It allows for code like this: -// -// n, err := ParseBase2Bytes("1KB") -// // n == 1024 -// n = units.Mebibyte * 512 -package units diff --git a/vendor/github.com/alecthomas/units/si.go b/vendor/github.com/alecthomas/units/si.go deleted file mode 100644 index 8234a9d52c..0000000000 --- a/vendor/github.com/alecthomas/units/si.go +++ /dev/null @@ -1,26 +0,0 @@ -package units - -// SI units. -type SI int64 - -// SI unit multiples. 
-const ( - Kilo SI = 1000 - Mega = Kilo * 1000 - Giga = Mega * 1000 - Tera = Giga * 1000 - Peta = Tera * 1000 - Exa = Peta * 1000 -) - -func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 { - return map[string]float64{ - shortSuffix: 1, - "K" + suffix: float64(scale), - "M" + suffix: float64(scale * scale), - "G" + suffix: float64(scale * scale * scale), - "T" + suffix: float64(scale * scale * scale * scale), - "P" + suffix: float64(scale * scale * scale * scale * scale), - "E" + suffix: float64(scale * scale * scale * scale * scale * scale), - } -} diff --git a/vendor/github.com/alecthomas/units/util.go b/vendor/github.com/alecthomas/units/util.go deleted file mode 100644 index 6527e92d16..0000000000 --- a/vendor/github.com/alecthomas/units/util.go +++ /dev/null @@ -1,138 +0,0 @@ -package units - -import ( - "errors" - "fmt" - "strings" -) - -var ( - siUnits = []string{"", "K", "M", "G", "T", "P", "E"} -) - -func ToString(n int64, scale int64, suffix, baseSuffix string) string { - mn := len(siUnits) - out := make([]string, mn) - for i, m := range siUnits { - if n%scale != 0 || i == 0 && n == 0 { - s := suffix - if i == 0 { - s = baseSuffix - } - out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s) - } - n /= scale - if n == 0 { - break - } - } - return strings.Join(out, "") -} - -// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123 -var errLeadingInt = errors.New("units: bad [0-9]*") // never printed - -// leadingInt consumes the leading [0-9]* from s. -func leadingInt(s string) (x int64, rem string, err error) { - i := 0 - for ; i < len(s); i++ { - c := s[i] - if c < '0' || c > '9' { - break - } - if x >= (1<<63-10)/10 { - // overflow - return 0, "", errLeadingInt - } - x = x*10 + int64(c) - '0' - } - return x, s[i:], nil -} - -func ParseUnit(s string, unitMap map[string]float64) (int64, error) { - // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+ - orig := s - f := float64(0) - neg := false - - // Consume [-+]? 
- if s != "" { - c := s[0] - if c == '-' || c == '+' { - neg = c == '-' - s = s[1:] - } - } - // Special case: if all that is left is "0", this is zero. - if s == "0" { - return 0, nil - } - if s == "" { - return 0, errors.New("units: invalid " + orig) - } - for s != "" { - g := float64(0) // this element of the sequence - - var x int64 - var err error - - // The next character must be [0-9.] - if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) { - return 0, errors.New("units: invalid " + orig) - } - // Consume [0-9]* - pl := len(s) - x, s, err = leadingInt(s) - if err != nil { - return 0, errors.New("units: invalid " + orig) - } - g = float64(x) - pre := pl != len(s) // whether we consumed anything before a period - - // Consume (\.[0-9]*)? - post := false - if s != "" && s[0] == '.' { - s = s[1:] - pl := len(s) - x, s, err = leadingInt(s) - if err != nil { - return 0, errors.New("units: invalid " + orig) - } - scale := 1.0 - for n := pl - len(s); n > 0; n-- { - scale *= 10 - } - g += float64(x) / scale - post = pl != len(s) - } - if !pre && !post { - // no digits (e.g. ".s" or "-.s") - return 0, errors.New("units: invalid " + orig) - } - - // Consume unit. - i := 0 - for ; i < len(s); i++ { - c := s[i] - if c == '.' 
|| ('0' <= c && c <= '9') { - break - } - } - u := s[:i] - s = s[i:] - unit, ok := unitMap[u] - if !ok { - return 0, errors.New("units: unknown unit " + u + " in " + orig) - } - - f += g * unit - } - - if neg { - f = -f - } - if f < float64(-1<<63) || f > float64(1<<63-1) { - return 0, errors.New("units: overflow parsing unit") - } - return int64(f), nil -} diff --git a/vendor/github.com/beevik/ntp/CONTRIBUTORS b/vendor/github.com/beevik/ntp/CONTRIBUTORS deleted file mode 100644 index 626c12eb59..0000000000 --- a/vendor/github.com/beevik/ntp/CONTRIBUTORS +++ /dev/null @@ -1,7 +0,0 @@ -Brett Vickers (beevik) -Mikhail Salosin (AlphaB) -Anton Tolchanov (knyar) -Christopher Batey (chbatey) -Meng Zhuo (mengzhuo) -Leonid Evdokimov (darkk) -Ask Bjørn Hansen (abh) \ No newline at end of file diff --git a/vendor/github.com/beevik/ntp/LICENSE b/vendor/github.com/beevik/ntp/LICENSE deleted file mode 100644 index e14ad682a0..0000000000 --- a/vendor/github.com/beevik/ntp/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright 2015 Brett Vickers. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL COPYRIGHT HOLDER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/beevik/ntp/README.md b/vendor/github.com/beevik/ntp/README.md deleted file mode 100644 index 7d0d23fb8e..0000000000 --- a/vendor/github.com/beevik/ntp/README.md +++ /dev/null @@ -1,25 +0,0 @@ -[![Build Status](https://travis-ci.org/beevik/ntp.svg?branch=master)](https://travis-ci.org/beevik/ntp) -[![GoDoc](https://godoc.org/github.com/beevik/ntp?status.svg)](https://godoc.org/github.com/beevik/ntp) - -ntp -=== - -The ntp package is an implementation of a Simple NTP (SNTP) client based on -[RFC5905](https://tools.ietf.org/html/rfc5905). It allows you to connect to -a remote NTP server and request the current time. - -If all you care about is the current time according to a known remote NTP -server, simply use the `Time` function: -```go -time, err := ntp.Time("0.beevik-ntp.pool.ntp.org") -``` - -If you want the time as well as additional metadata about the time, use the -`Query` function instead: -```go -response, err := ntp.Query("0.beevik-ntp.pool.ntp.org") -``` - -To use the NTP pool in your application, please request your own -[vendor zone](http://www.pool.ntp.org/en/vendors.html). Avoid using -the `[number].pool.ntp.org` zone names in your applications. 
diff --git a/vendor/github.com/beevik/ntp/RELEASE_NOTES.md b/vendor/github.com/beevik/ntp/RELEASE_NOTES.md deleted file mode 100644 index 4266ee776e..0000000000 --- a/vendor/github.com/beevik/ntp/RELEASE_NOTES.md +++ /dev/null @@ -1,44 +0,0 @@ -Release v0.1.1 -============== - -**Breaking changes** - -* Removed the `MaxStratum` constant. - -**Deprecations** - -* Officially deprecated the `TimeV` function. - -**Internal changes** - -* Removed `minDispersion` from the `RootDistance` calculation, since the value - was arbitrary. -* Moved some validation into main code path so that invalid `TransmitTime` and - `mode` responses trigger an error even when `Response.Validate` is not - called. - - -Release v0.1.0 -============== - -This is the initial release of the `ntp` package. Currently it supports the following features: -* `Time()` to query the current time according to a remote NTP server. -* `Query()` to query multiple pieces of time-related information from a remote NTP server. -* `QueryWithOptions()`, which is like `Query()` but with the ability to override default query options. - -Time-related information returned by the `Query` functions includes: -* `Time`: the time the server transmitted its response, according to the server's clock. -* `ClockOffset`: the estimated offset of the client's clock relative to the server's clock. You may apply this offset to any local system clock reading once the query is complete. -* `RTT`: an estimate of the round-trip-time delay between the client and the server. -* `Precision`: the precision of the server's clock reading. -* `Stratum`: the "stratum" level of the server, where 1 indicates a server directly connected to a reference clock, and values greater than 1 indicating the number of hops from the reference clock. -* `ReferenceID`: A unique identifier for the NTP server that was contacted. -* `ReferenceTime`: The time at which the server last updated its local clock setting. 
-* `RootDelay`: The server's round-trip delay to the reference clock. -* `RootDispersion`: The server's total dispersion to the referenced clock. -* `RootDistance`: An estimate of the root synchronization distance. -* `Leap`: The leap second indicator. -* `MinError`: A lower bound on the clock error between the client and the server. -* `Poll`: the maximum polling interval between successive messages on the server. - -The `Response` structure returned by the `Query` functions also contains a `Response.Validate()` function that returns an error if any of the fields returned by the server are invalid. diff --git a/vendor/github.com/beevik/ntp/ntp.go b/vendor/github.com/beevik/ntp/ntp.go deleted file mode 100644 index 558b2e2d6d..0000000000 --- a/vendor/github.com/beevik/ntp/ntp.go +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright 2015-2017 Brett Vickers. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ntp provides an implementation of a Simple NTP (SNTP) client -// capable of querying the current time from a remote NTP server. See -// RFC5905 (https://tools.ietf.org/html/rfc5905) for more details. -// -// This approach grew out of a go-nuts post by Michael Hofmann: -// https://groups.google.com/forum/?fromgroups#!topic/golang-nuts/FlcdMU5fkLQ -package ntp - -import ( - "crypto/rand" - "encoding/binary" - "errors" - "net" - "time" - - "golang.org/x/net/ipv4" -) - -// The LeapIndicator is used to warn if a leap second should be inserted -// or deleted in the last minute of the current month. -type LeapIndicator uint8 - -const ( - // LeapNoWarning indicates no impending leap second. - LeapNoWarning LeapIndicator = 0 - - // LeapAddSecond indicates the last minute of the day has 61 seconds. - LeapAddSecond = 1 - - // LeapDelSecond indicates the last minute of the day has 59 seconds. - LeapDelSecond = 2 - - // LeapNotInSync indicates an unsynchronized leap second. 
- LeapNotInSync = 3 -) - -// Internal constants -const ( - defaultNtpVersion = 4 - nanoPerSec = 1000000000 - maxStratum = 16 - defaultTimeout = 5 * time.Second - maxPollInterval = (1 << 17) * time.Second - maxDispersion = 16 * time.Second -) - -// Internal variables -var ( - ntpEpoch = time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC) -) - -type mode uint8 - -// NTP modes. This package uses only client mode. -const ( - reserved mode = 0 + iota - symmetricActive - symmetricPassive - client - server - broadcast - controlMessage - reservedPrivate -) - -// An ntpTime is a 64-bit fixed-point (Q32.32) representation of the number of -// seconds elapsed. -type ntpTime uint64 - -// Duration interprets the fixed-point ntpTime as a number of elapsed seconds -// and returns the corresponding time.Duration value. -func (t ntpTime) Duration() time.Duration { - sec := (t >> 32) * nanoPerSec - frac := (t & 0xffffffff) * nanoPerSec >> 32 - return time.Duration(sec + frac) -} - -// Time interprets the fixed-point ntpTime as an absolute time and returns -// the corresponding time.Time value. -func (t ntpTime) Time() time.Time { - return ntpEpoch.Add(t.Duration()) -} - -// toNtpTime converts the time.Time value t into its 64-bit fixed-point -// ntpTime representation. -func toNtpTime(t time.Time) ntpTime { - nsec := uint64(t.Sub(ntpEpoch)) - sec := nsec / nanoPerSec - // Round up the fractional component so that repeated conversions - // between time.Time and ntpTime do not yield continually decreasing - // results. - frac := (((nsec - sec*nanoPerSec) << 32) + nanoPerSec - 1) / nanoPerSec - return ntpTime(sec<<32 | frac) -} - -// An ntpTimeShort is a 32-bit fixed-point (Q16.16) representation of the -// number of seconds elapsed. -type ntpTimeShort uint32 - -// Duration interprets the fixed-point ntpTimeShort as a number of elapsed -// seconds and returns the corresponding time.Duration value. 
-func (t ntpTimeShort) Duration() time.Duration { - t64 := uint64(t) - sec := (t64 >> 16) * nanoPerSec - frac := (t64 & 0xffff) * nanoPerSec >> 16 - return time.Duration(sec + frac) -} - -// msg is an internal representation of an NTP packet. -type msg struct { - LiVnMode uint8 // Leap Indicator (2) + Version (3) + Mode (3) - Stratum uint8 - Poll int8 - Precision int8 - RootDelay ntpTimeShort - RootDispersion ntpTimeShort - ReferenceID uint32 - ReferenceTime ntpTime - OriginTime ntpTime - ReceiveTime ntpTime - TransmitTime ntpTime -} - -// setVersion sets the NTP protocol version on the message. -func (m *msg) setVersion(v int) { - m.LiVnMode = (m.LiVnMode & 0xc7) | uint8(v)<<3 -} - -// setMode sets the NTP protocol mode on the message. -func (m *msg) setMode(md mode) { - m.LiVnMode = (m.LiVnMode & 0xf8) | uint8(md) -} - -// setLeap modifies the leap indicator on the message. -func (m *msg) setLeap(li LeapIndicator) { - m.LiVnMode = (m.LiVnMode & 0x3f) | uint8(li)<<6 -} - -// getVersion returns the version value in the message. -func (m *msg) getVersion() int { - return int((m.LiVnMode >> 3) & 0x07) -} - -// getMode returns the mode value in the message. -func (m *msg) getMode() mode { - return mode(m.LiVnMode & 0x07) -} - -// getLeap returns the leap indicator on the message. -func (m *msg) getLeap() LeapIndicator { - return LeapIndicator((m.LiVnMode >> 6) & 0x03) -} - -// QueryOptions contains the list of configurable options that may be used -// with the QueryWithOptions function. -type QueryOptions struct { - Timeout time.Duration // defaults to 5 seconds - Version int // NTP protocol version, defaults to 4 - LocalAddress string // IP address to use for the client address - Port int // Server port, defaults to 123 - TTL int // IP TTL to use, defaults to system default -} - -// A Response contains time data, some of which is returned by the NTP server -// and some of which is calculated by the client. 
-type Response struct { - // Time is the transmit time reported by the server just before it - // responded to the client's NTP query. - Time time.Time - - // ClockOffset is the estimated offset of the client clock relative to - // the server. Add this to the client's system clock time to obtain a - // more accurate time. - ClockOffset time.Duration - - // RTT is the measured round-trip-time delay estimate between the client - // and the server. - RTT time.Duration - - // Precision is the reported precision of the server's clock. - Precision time.Duration - - // Stratum is the "stratum level" of the server. The smaller the number, - // the closer the server is to the reference clock. Stratum 1 servers are - // attached directly to the reference clock. A stratum value of 0 - // indicates the "kiss of death," which typically occurs when the client - // issues too many requests to the server in a short period of time. - Stratum uint8 - - // ReferenceID is a 32-bit identifier identifying the server or - // reference clock. - ReferenceID uint32 - - // ReferenceTime is the time when the server's system clock was last - // set or corrected. - ReferenceTime time.Time - - // RootDelay is the server's estimated aggregate round-trip-time delay to - // the stratum 1 server. - RootDelay time.Duration - - // RootDispersion is the server's estimated maximum measurement error - // relative to the stratum 1 server. - RootDispersion time.Duration - - // RootDistance is an estimate of the total synchronization distance - // between the client and the stratum 1 server. - RootDistance time.Duration - - // Leap indicates whether a leap second should be added or removed from - // the current month's last minute. - Leap LeapIndicator - - // MinError is a lower bound on the error between the client and server - // clocks. When the client and server are not synchronized to the same - // clock, the reported timestamps may appear to violate the principle of - // causality. 
In other words, the NTP server's response may indicate - // that a message was received before it was sent. In such cases, the - // minimum error may be useful. - MinError time.Duration - - // Poll is the maximum interval between successive NTP polling messages. - // It is not relevant for simple NTP clients like this one. - Poll time.Duration -} - -// Validate checks if the response is valid for the purposes of time -// synchronization. -func (r *Response) Validate() error { - // Handle invalid stratum values. - if r.Stratum == 0 { - return errors.New("kiss of death received") - } - if r.Stratum >= maxStratum { - return errors.New("invalid stratum in response") - } - - // Handle invalid leap second indicator. - if r.Leap == LeapNotInSync { - return errors.New("invalid leap second") - } - - // Estimate the "freshness" of the time. If it exceeds the maximum - // polling interval (~36 hours), then it cannot be considered "fresh". - freshness := r.Time.Sub(r.ReferenceTime) - if freshness > maxPollInterval { - return errors.New("server clock not fresh") - } - - // Calculate the peer synchronization distance, lambda: - // lambda := RootDelay/2 + RootDispersion - // If this value exceeds MAXDISP (16s), then the time is not suitable - // for synchronization purposes. - // https://tools.ietf.org/html/rfc5905#appendix-A.5.1.1. - lambda := r.RootDelay/2 + r.RootDispersion - if lambda > maxDispersion { - return errors.New("invalid dispersion") - } - - // If the server's transmit time is before its reference time, the - // response is invalid. - if r.Time.Before(r.ReferenceTime) { - return errors.New("invalid time reported") - } - - // nil means the response is valid. - return nil -} - -// Query returns a response from the remote NTP server host. It contains -// the time at which the server transmitted the response as well as other -// useful information about the time and the remote server. 
-func Query(host string) (*Response, error) { - return QueryWithOptions(host, QueryOptions{}) -} - -// QueryWithOptions performs the same function as Query but allows for the -// customization of several query options. -func QueryWithOptions(host string, opt QueryOptions) (*Response, error) { - m, now, err := getTime(host, opt) - if err != nil { - return nil, err - } - return parseTime(m, now), nil -} - -// TimeV returns the current time using information from a remote NTP server. -// On error, it returns the local system time. The version may be 2, 3, or 4. -// -// Deprecated: TimeV is deprecated. Use QueryWithOptions instead. -func TimeV(host string, version int) (time.Time, error) { - m, recvTime, err := getTime(host, QueryOptions{Version: version}) - if err != nil { - return time.Now(), err - } - - r := parseTime(m, recvTime) - err = r.Validate() - if err != nil { - return time.Now(), err - } - - // Use the clock offset to calculate the time. - return time.Now().Add(r.ClockOffset), nil -} - -// Time returns the current time using information from a remote NTP server. -// It uses version 4 of the NTP protocol. On error, it returns the local -// system time. -func Time(host string) (time.Time, error) { - return TimeV(host, defaultNtpVersion) -} - -// getTime performs the NTP server query and returns the response message -// along with the local system time it was received. -func getTime(host string, opt QueryOptions) (*msg, ntpTime, error) { - if opt.Version == 0 { - opt.Version = defaultNtpVersion - } - if opt.Version < 2 || opt.Version > 4 { - return nil, 0, errors.New("invalid protocol version requested") - } - - // Resolve the remote NTP server address. - raddr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, "123")) - if err != nil { - return nil, 0, err - } - - // Resolve the local address if specified as an option. 
- var laddr *net.UDPAddr - if opt.LocalAddress != "" { - laddr, err = net.ResolveUDPAddr("udp", net.JoinHostPort(opt.LocalAddress, "0")) - if err != nil { - return nil, 0, err - } - } - - // Override the port if requested. - if opt.Port != 0 { - raddr.Port = opt.Port - } - - // Prepare a "connection" to the remote server. - con, err := net.DialUDP("udp", laddr, raddr) - if err != nil { - return nil, 0, err - } - defer con.Close() - - // Set a TTL for the packet if requested. - if opt.TTL != 0 { - ipcon := ipv4.NewConn(con) - err = ipcon.SetTTL(opt.TTL) - if err != nil { - return nil, 0, err - } - } - - // Set a timeout on the connection. - if opt.Timeout == 0 { - opt.Timeout = defaultTimeout - } - con.SetDeadline(time.Now().Add(opt.Timeout)) - - // Allocate a message to hold the response. - recvMsg := new(msg) - - // Allocate a message to hold the query. - xmitMsg := new(msg) - xmitMsg.setMode(client) - xmitMsg.setVersion(opt.Version) - xmitMsg.setLeap(LeapNotInSync) - - // To ensure privacy and prevent spoofing, try to use a random 64-bit - // value for the TransmitTime. If crypto/rand couldn't generate a - // random value, fall back to using the system clock. Keep track of - // when the messsage was actually sent. - r := make([]byte, 8) - _, err = rand.Read(r) - var sendTime time.Time - if err == nil { - xmitMsg.TransmitTime = ntpTime(binary.BigEndian.Uint64(r)) - sendTime = time.Now() - } else { - sendTime = time.Now() - xmitMsg.TransmitTime = toNtpTime(sendTime) - } - - // Transmit the query. - err = binary.Write(con, binary.BigEndian, xmitMsg) - if err != nil { - return nil, 0, err - } - - // Receive the response. - err = binary.Read(con, binary.BigEndian, recvMsg) - if err != nil { - return nil, 0, err - } - - // Keep track of the time the response was received. - delta := time.Since(sendTime) - if delta < 0 { - // The system clock may have been set backwards since the packet was - // transmitted. 
In go 1.9 and later, time.Since ensures that a - // monotonic clock is used, and delta can never be less than zero. - // In versions before 1.9, we have to check. - return nil, 0, errors.New("client clock ticked backwards") - } - recvTime := toNtpTime(sendTime.Add(delta)) - - // Check for invalid fields. - if recvMsg.getMode() != server { - return nil, 0, errors.New("invalid mode in response") - } - if recvMsg.TransmitTime == ntpTime(0) { - return nil, 0, errors.New("invalid transmit time in response") - } - if recvMsg.OriginTime != xmitMsg.TransmitTime { - return nil, 0, errors.New("server response mismatch") - } - if recvMsg.ReceiveTime > recvMsg.TransmitTime { - return nil, 0, errors.New("server clock ticked backwards") - } - - // Correct the received message's origin time using the actual send - // time. - recvMsg.OriginTime = toNtpTime(sendTime) - - return recvMsg, recvTime, nil -} - -// parseTime parses the NTP packet along with the packet receive time to -// generate a Response record. -func parseTime(m *msg, recvTime ntpTime) *Response { - r := &Response{ - Time: m.TransmitTime.Time(), - ClockOffset: offset(m.OriginTime, m.ReceiveTime, m.TransmitTime, recvTime), - RTT: rtt(m.OriginTime, m.ReceiveTime, m.TransmitTime, recvTime), - Precision: toInterval(m.Precision), - Stratum: m.Stratum, - ReferenceID: m.ReferenceID, - ReferenceTime: m.ReferenceTime.Time(), - RootDelay: m.RootDelay.Duration(), - RootDispersion: m.RootDispersion.Duration(), - Leap: m.getLeap(), - MinError: minError(m.OriginTime, m.ReceiveTime, m.TransmitTime, recvTime), - Poll: toInterval(m.Poll), - } - - // Calculate values depending on other calculated values - r.RootDistance = rootDistance(r.RTT, r.RootDelay, r.RootDispersion) - - return r -} - -// The following helper functions calculate additional metadata about the -// timestamps received from an NTP server. 
The timestamps returned by -// the server are given the following variable names: -// -// org = Origin Timestamp (client send time) -// rec = Receive Timestamp (server receive time) -// xmt = Transmit Timestamp (server reply time) -// dst = Destination Timestamp (client receive time) - -func rtt(org, rec, xmt, dst ntpTime) time.Duration { - // round trip delay time - // rtt = (dst-org) - (xmt-rec) - a := dst.Time().Sub(org.Time()) - b := xmt.Time().Sub(rec.Time()) - rtt := a - b - if rtt < 0 { - rtt = 0 - } - return rtt -} - -func offset(org, rec, xmt, dst ntpTime) time.Duration { - // local clock offset - // offset = ((rec-org) + (xmt-dst)) / 2 - a := rec.Time().Sub(org.Time()) - b := xmt.Time().Sub(dst.Time()) - return (a + b) / time.Duration(2) -} - -func minError(org, rec, xmt, dst ntpTime) time.Duration { - // Each NTP response contains two pairs of send/receive timestamps. - // When either pair indicates a "causality violation", we calculate the - // error as the difference in time between them. The minimum error is - // the greater of the two causality violations. - var error0, error1 ntpTime - if org >= rec { - error0 = org - rec - } - if xmt >= dst { - error1 = xmt - dst - } - if error0 > error1 { - return error0.Duration() - } - return error1.Duration() -} - -func rootDistance(rtt, rootDelay, rootDisp time.Duration) time.Duration { - // The root distance is: - // the maximum error due to all causes of the local clock - // relative to the primary server. It is defined as half the - // total delay plus total dispersion plus peer jitter. - // (https://tools.ietf.org/html/rfc5905#appendix-A.5.5.2) - // - // In the reference implementation, it is calculated as follows: - // rootDist = max(MINDISP, rootDelay + rtt)/2 + rootDisp - // + peerDisp + PHI * (uptime - peerUptime) - // + peerJitter - // For an SNTP client which sends only a single packet, most of these - // terms are irrelevant and become 0. 
- totalDelay := rtt + rootDelay - return totalDelay/2 + rootDisp -} - -func toInterval(t int8) time.Duration { - switch { - case t > 0: - return time.Duration(uint64(time.Second) << uint(t)) - case t < 0: - return time.Duration(uint64(time.Second) >> uint(-t)) - default: - return time.Second - } -} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE deleted file mode 100644 index 339177be66..0000000000 --- a/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287d7c..0000000000 --- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 
-3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 -10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 
-10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 
-4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 
-3 -2 -4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index f4cabd6695..0000000000 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,292 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. 
-func NewLowBiased(epsilon float64) *Stream { - Æ’ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(Æ’) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewHighBiased(epsilon float64) *Stream { - Æ’ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(Æ’) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targets map[float64]float64) *Stream { - Æ’ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for quantile, epsilon := range targets { - if quantile*s.n <= r { - f = (2 * epsilon * r) / quantile - } else { - f = (2 * epsilon * (s.n - r)) / (1 - quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(Æ’) -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. 
-type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(Æ’ invariant) *Stream { - x := &stream{Æ’: Æ’} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(math.Ceil(float64(l) * q)) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. 
-func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - Æ’ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.Æ’(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? - } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.Æ’(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.Æ’(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. 
- copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE deleted file mode 100644 index 37ec93a14f..0000000000 --- a/vendor/github.com/coreos/go-systemd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. 
- -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. 
- -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go deleted file mode 100644 index c1694fb522..0000000000 --- a/vendor/github.com/coreos/go-systemd/dbus/dbus.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Integration with the systemd D-Bus API. 
See http://www.freedesktop.org/wiki/Software/systemd/dbus/ -package dbus - -import ( - "fmt" - "os" - "strconv" - "strings" - "sync" - - "github.com/godbus/dbus" -) - -const ( - alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` - num = `0123456789` - alphanum = alpha + num - signalBuffer = 100 -) - -// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped -func needsEscape(i int, b byte) bool { - // Escape everything that is not a-z-A-Z-0-9 - // Also escape 0-9 if it's the first character - return strings.IndexByte(alphanum, b) == -1 || - (i == 0 && strings.IndexByte(num, b) != -1) -} - -// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the -// rules that systemd uses for serializing special characters. -func PathBusEscape(path string) string { - // Special case the empty string - if len(path) == 0 { - return "_" - } - n := []byte{} - for i := 0; i < len(path); i++ { - c := path[i] - if needsEscape(i, c) { - e := fmt.Sprintf("_%x", c) - n = append(n, []byte(e)...) - } else { - n = append(n, c) - } - } - return string(n) -} - -// Conn is a connection to systemd's dbus endpoint. -type Conn struct { - // sysconn/sysobj are only used to call dbus methods - sysconn *dbus.Conn - sysobj dbus.BusObject - - // sigconn/sigobj are only used to receive dbus signals - sigconn *dbus.Conn - sigobj dbus.BusObject - - jobListener struct { - jobs map[dbus.ObjectPath]chan<- string - sync.Mutex - } - subscriber struct { - updateCh chan<- *SubStateUpdate - errCh chan<- error - sync.Mutex - ignore map[dbus.ObjectPath]int64 - cleanIgnore int64 - } -} - -// New establishes a connection to any available bus and authenticates. -// Callers should call Close() when done with the connection. 
-func New() (*Conn, error) { - conn, err := NewSystemConnection() - if err != nil && os.Geteuid() == 0 { - return NewSystemdConnection() - } - return conn, err -} - -// NewSystemConnection establishes a connection to the system bus and authenticates. -// Callers should call Close() when done with the connection -func NewSystemConnection() (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - return dbusAuthHelloConnection(dbus.SystemBusPrivate) - }) -} - -// NewUserConnection establishes a connection to the session bus and -// authenticates. This can be used to connect to systemd user instances. -// Callers should call Close() when done with the connection. -func NewUserConnection() (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - return dbusAuthHelloConnection(dbus.SessionBusPrivate) - }) -} - -// NewSystemdConnection establishes a private, direct connection to systemd. -// This can be used for communicating with systemd without a dbus daemon. -// Callers should call Close() when done with the connection. -func NewSystemdConnection() (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - // We skip Hello when talking directly to systemd. - return dbusAuthConnection(func() (*dbus.Conn, error) { - return dbus.Dial("unix:path=/run/systemd/private") - }) - }) -} - -// Close closes an established connection -func (c *Conn) Close() { - c.sysconn.Close() - c.sigconn.Close() -} - -// NewConnection establishes a connection to a bus using a caller-supplied function. -// This allows connecting to remote buses through a user-supplied mechanism. -// The supplied function may be called multiple times, and should return independent connections. -// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, -// and any authentication should be handled by the function. 
-func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { - sysconn, err := dialBus() - if err != nil { - return nil, err - } - - sigconn, err := dialBus() - if err != nil { - sysconn.Close() - return nil, err - } - - c := &Conn{ - sysconn: sysconn, - sysobj: systemdObject(sysconn), - sigconn: sigconn, - sigobj: systemdObject(sigconn), - } - - c.subscriber.ignore = make(map[dbus.ObjectPath]int64) - c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) - - // Setup the listeners on jobs so that we can get completions - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") - - c.dispatch() - return c, nil -} - -// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager -// interface. The value is returned in its string representation, as defined at -// https://developer.gnome.org/glib/unstable/gvariant-text.html -func (c *Conn) GetManagerProperty(prop string) (string, error) { - variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." 
+ prop) - if err != nil { - return "", err - } - return variant.String(), nil -} - -func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := createBus() - if err != nil { - return nil, err - } - - // Only use EXTERNAL method, and hardcode the uid (not username) - // to avoid a username lookup (which requires a dynamically linked - // libc) - methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} - - err = conn.Auth(methods) - if err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} - -func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := dbusAuthConnection(createBus) - if err != nil { - return nil, err - } - - if err = conn.Hello(); err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} - -func systemdObject(conn *dbus.Conn) dbus.BusObject { - return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go deleted file mode 100644 index ab17f7cc75..0000000000 --- a/vendor/github.com/coreos/go-systemd/dbus/methods.go +++ /dev/null @@ -1,565 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dbus - -import ( - "errors" - "path" - "strconv" - - "github.com/godbus/dbus" -) - -func (c *Conn) jobComplete(signal *dbus.Signal) { - var id uint32 - var job dbus.ObjectPath - var unit string - var result string - dbus.Store(signal.Body, &id, &job, &unit, &result) - c.jobListener.Lock() - out, ok := c.jobListener.jobs[job] - if ok { - out <- result - delete(c.jobListener.jobs, job) - } - c.jobListener.Unlock() -} - -func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) { - if ch != nil { - c.jobListener.Lock() - defer c.jobListener.Unlock() - } - - var p dbus.ObjectPath - err := c.sysobj.Call(job, 0, args...).Store(&p) - if err != nil { - return 0, err - } - - if ch != nil { - c.jobListener.jobs[p] = ch - } - - // ignore error since 0 is fine if conversion fails - jobID, _ := strconv.Atoi(path.Base(string(p))) - - return jobID, nil -} - -// StartUnit enqueues a start job and depending jobs, if any (unless otherwise -// specified by the mode string). -// -// Takes the unit to activate, plus a mode string. The mode needs to be one of -// replace, fail, isolate, ignore-dependencies, ignore-requirements. If -// "replace" the call will start the unit and its dependencies, possibly -// replacing already queued jobs that conflict with this. If "fail" the call -// will start the unit and its dependencies, but will fail if this would change -// an already queued job. If "isolate" the call will start the unit in question -// and terminate all units that aren't dependencies of it. If -// "ignore-dependencies" it will start a unit but ignore all its dependencies. -// If "ignore-requirements" it will start a unit but only ignore the -// requirement dependencies. It is not recommended to make use of the latter -// two options. -// -// If the provided channel is non-nil, a result string will be sent to it upon -// job completion: one of done, canceled, timeout, failed, dependency, skipped. 
-// done indicates successful execution of a job. canceled indicates that a job -// has been canceled before it finished execution. timeout indicates that the -// job timeout was reached. failed indicates that the job failed. dependency -// indicates that a job this job has been depending on failed and the job hence -// has been removed too. skipped indicates that a job was skipped because it -// didn't apply to the units current state. -// -// If no error occurs, the ID of the underlying systemd job will be returned. There -// does exist the possibility for no error to be returned, but for the returned job -// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint -// should not be considered authoritative. -// -// If an error does occur, it will be returned to the user alongside a job ID of 0. -func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) -} - -// StopUnit is similar to StartUnit but stops the specified unit rather -// than starting it. -func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) -} - -// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise. -func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) -} - -// RestartUnit restarts a service. If a service is restarted that isn't -// running it will be started. -func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) -} - -// TryRestartUnit is like RestartUnit, except that a service that isn't running -// is not affected by the restart. 
-func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) -} - -// ReloadOrRestart attempts a reload if the unit supports it and use a restart -// otherwise. -func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) -} - -// ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try" -// flavored restart otherwise. -func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) -} - -// StartTransientUnit() may be used to create and start a transient unit, which -// will be released as soon as it is not running or referenced anymore or the -// system is rebooted. name is the unit name including suffix, and must be -// unique. mode is the same as in StartUnit(), properties contains properties -// of the unit. -func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) -} - -// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's -// processes are killed. -func (c *Conn) KillUnit(name string, signal int32) { - c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store() -} - -// ResetFailedUnit resets the "failed" state of a specific unit. 
-func (c *Conn) ResetFailedUnit(name string) error { - return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() -} - -// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface -func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) { - var err error - var props map[string]dbus.Variant - - path := unitPath(unit) - if !path.IsValid() { - return nil, errors.New("invalid unit name: " + unit) - } - - obj := c.sysconn.Object("org.freedesktop.systemd1", path) - err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) - if err != nil { - return nil, err - } - - out := make(map[string]interface{}, len(props)) - for k, v := range props { - out[k] = v.Value() - } - - return out, nil -} - -// GetUnitProperties takes the unit name and returns all of its dbus object properties. -func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { - return c.getProperties(unit, "org.freedesktop.systemd1.Unit") -} - -func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) { - var err error - var prop dbus.Variant - - path := unitPath(unit) - if !path.IsValid() { - return nil, errors.New("invalid unit name: " + unit) - } - - obj := c.sysconn.Object("org.freedesktop.systemd1", path) - err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) - if err != nil { - return nil, err - } - - return &Property{Name: propertyName, Value: prop}, nil -} - -func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { - return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName) -} - -// GetServiceProperty returns property for given service name and property name -func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) { - return c.getProperty(service, 
"org.freedesktop.systemd1.Service", propertyName) -} - -// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. -// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope -// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit -func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { - return c.getProperties(unit, "org.freedesktop.systemd1."+unitType) -} - -// SetUnitProperties() may be used to modify certain unit properties at runtime. -// Not all properties may be changed at runtime, but many resource management -// settings (primarily those in systemd.cgroup(5)) may. The changes are applied -// instantly, and stored on disk for future boots, unless runtime is true, in which -// case the settings only apply until the next reboot. name is the name of the unit -// to modify. properties are the settings to set, encoded as an array of property -// name and value pairs. -func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { - return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() -} - -func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { - return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName) -} - -type UnitStatus struct { - Name string // The primary unit name as string - Description string // The human readable description string - LoadState string // The load state (i.e. whether the unit file has been loaded successfully) - ActiveState string // The active state (i.e. 
whether the unit is currently started or not) - SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) - Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. - Path dbus.ObjectPath // The unit object path - JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise - JobType string // The job type as string - JobPath dbus.ObjectPath // The job object path -} - -type storeFunc func(retvalues ...interface{}) error - -func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { - result := make([][]interface{}, 0) - err := f(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - status := make([]UnitStatus, len(result)) - statusInterface := make([]interface{}, len(status)) - for i := range status { - statusInterface[i] = &status[i] - } - - err = dbus.Store(resultInterface, statusInterface...) - if err != nil { - return nil, err - } - - return status, nil -} - -// ListUnits returns an array with all currently loaded units. Note that -// units may be known by multiple names at the same time, and hence there might -// be more unit names loaded than actual units behind them. -func (c *Conn) ListUnits() ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store) -} - -// ListUnitsFiltered returns an array with units filtered by state. -// It takes a list of units' statuses to filter. -func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) -} - -// ListUnitsByPatterns returns an array with units. -// It takes a list of units' statuses and names to filter. 
-// Note that units may be known by multiple names at the same time, -// and hence there might be more unit names loaded than actual units behind them. -func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) -} - -// ListUnitsByNames returns an array with units. It takes a list of units' -// names and returns an UnitStatus array. Comparing to ListUnitsByPatterns -// method, this method returns statuses even for inactive or non-existing -// units. Input array should contain exact unit names, but not patterns. -func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) -} - -type UnitFile struct { - Path string - Type string -} - -func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { - result := make([][]interface{}, 0) - err := f(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - files := make([]UnitFile, len(result)) - fileInterface := make([]interface{}, len(files)) - for i := range files { - fileInterface[i] = &files[i] - } - - err = dbus.Store(resultInterface, fileInterface...) - if err != nil { - return nil, err - } - - return files, nil -} - -// ListUnitFiles returns an array of all available units on disk. -func (c *Conn) ListUnitFiles() ([]UnitFile, error) { - return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) -} - -// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns. 
-func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) { - return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) -} - -type LinkUnitFileChange EnableUnitFileChange - -// LinkUnitFiles() links unit files (that are located outside of the -// usual unit search paths) into the unit search path. -// -// It takes a list of absolute paths to unit files to link and two -// booleans. The first boolean controls whether the unit shall be -// enabled for runtime only (true, /run), or persistently (false, -// /etc). -// The second controls whether symlinks pointing to other units shall -// be replaced if necessary. -// -// This call returns a list of the changes made. The list consists of -// structures with three strings: the type of the change (one of symlink -// or unlink), the file name of the symlink and the destination of the -// symlink. -func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]LinkUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -// EnableUnitFiles() may be used to enable one or more units in the system (by -// creating symlinks to them in /etc or /run). 
-// -// It takes a list of unit files to enable (either just file names or full -// absolute paths if the unit files are residing outside the usual unit -// search paths), and two booleans: the first controls whether the unit shall -// be enabled for runtime only (true, /run), or persistently (false, /etc). -// The second one controls whether symlinks pointing to other units shall -// be replaced if necessary. -// -// This call returns one boolean and an array with the changes made. The -// boolean signals whether the unit files contained any enablement -// information (i.e. an [Install]) section. The changes list consists of -// structures with three strings: the type of the change (one of symlink -// or unlink), the file name of the symlink and the destination of the -// symlink. -func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { - var carries_install_info bool - - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) - if err != nil { - return false, nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]EnableUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return false, nil, err - } - - return carries_install_info, changes, nil -} - -type EnableUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// DisableUnitFiles() may be used to disable one or more units in the system (by -// removing symlinks to them from /etc or /run). 
-// -// It takes a list of unit files to disable (either just file names or full -// absolute paths if the unit files are residing outside the usual unit -// search paths), and one boolean: whether the unit was enabled for runtime -// only (true, /run), or persistently (false, /etc). -// -// This call returns an array with the changes made. The changes list -// consists of structures with three strings: the type of the change (one of -// symlink or unlink), the file name of the symlink and the destination of the -// symlink. -func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]DisableUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type DisableUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// MaskUnitFiles masks one or more units in the system -// -// It takes three arguments: -// * list of units to mask (either just file names or full -// absolute paths if the unit files are residing outside -// the usual unit search paths) -// * runtime to specify whether the unit was enabled for runtime -// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) 
-// * force flag -func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]MaskUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type MaskUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// UnmaskUnitFiles unmasks one or more units in the system -// -// It takes two arguments: -// * list of unit files to mask (either just file names or full -// absolute paths if the unit files are residing outside -// the usual unit search paths) -// * runtime to specify whether the unit was enabled for runtime -// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) -func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]UnmaskUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) 
- if err != nil { - return nil, err - } - - return changes, nil -} - -type UnmaskUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// Reload instructs systemd to scan for and reload unit files. This is -// equivalent to a 'systemctl daemon-reload'. -func (c *Conn) Reload() error { - return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() -} - -func unitPath(name string) dbus.ObjectPath { - return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go deleted file mode 100644 index 6c81895876..0000000000 --- a/vendor/github.com/coreos/go-systemd/dbus/properties.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "github.com/godbus/dbus" -) - -// From the systemd docs: -// -// The properties array of StartTransientUnit() may take many of the settings -// that may also be configured in unit files. Not all parameters are currently -// accepted though, but we plan to cover more properties with future release. 
-// Currently you may set the Description, Slice and all dependency types of -// units, as well as RemainAfterExit, ExecStart for service units, -// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, -// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, -// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, -// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map -// directly to their counterparts in unit files and as normal D-Bus object -// properties. The exception here is the PIDs field of scope units which is -// used for construction of the scope only and specifies the initial PIDs to -// add to the scope object. - -type Property struct { - Name string - Value dbus.Variant -} - -type PropertyCollection struct { - Name string - Properties []Property -} - -type execStart struct { - Path string // the binary path to execute - Args []string // an array with all arguments to pass to the executed command, starting with argument 0 - UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly -} - -// PropExecStart sets the ExecStart service property. The first argument is a -// slice with the binary path to execute followed by the arguments to pass to -// the executed command. See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= -func PropExecStart(command []string, uncleanIsFailure bool) Property { - execStarts := []execStart{ - execStart{ - Path: command[0], - Args: command, - UncleanIsFailure: uncleanIsFailure, - }, - } - - return Property{ - Name: "ExecStart", - Value: dbus.MakeVariant(execStarts), - } -} - -// PropRemainAfterExit sets the RemainAfterExit service property. 
See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= -func PropRemainAfterExit(b bool) Property { - return Property{ - Name: "RemainAfterExit", - Value: dbus.MakeVariant(b), - } -} - -// PropType sets the Type service property. See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type= -func PropType(t string) Property { - return Property{ - Name: "Type", - Value: dbus.MakeVariant(t), - } -} - -// PropDescription sets the Description unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= -func PropDescription(desc string) Property { - return Property{ - Name: "Description", - Value: dbus.MakeVariant(desc), - } -} - -func propDependency(name string, units []string) Property { - return Property{ - Name: name, - Value: dbus.MakeVariant(units), - } -} - -// PropRequires sets the Requires unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= -func PropRequires(units ...string) Property { - return propDependency("Requires", units) -} - -// PropRequiresOverridable sets the RequiresOverridable unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= -func PropRequiresOverridable(units ...string) Property { - return propDependency("RequiresOverridable", units) -} - -// PropRequisite sets the Requisite unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= -func PropRequisite(units ...string) Property { - return propDependency("Requisite", units) -} - -// PropRequisiteOverridable sets the RequisiteOverridable unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= -func PropRequisiteOverridable(units ...string) Property { - return propDependency("RequisiteOverridable", units) -} - -// PropWants sets the Wants unit property. 
See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= -func PropWants(units ...string) Property { - return propDependency("Wants", units) -} - -// PropBindsTo sets the BindsTo unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= -func PropBindsTo(units ...string) Property { - return propDependency("BindsTo", units) -} - -// PropRequiredBy sets the RequiredBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= -func PropRequiredBy(units ...string) Property { - return propDependency("RequiredBy", units) -} - -// PropRequiredByOverridable sets the RequiredByOverridable unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= -func PropRequiredByOverridable(units ...string) Property { - return propDependency("RequiredByOverridable", units) -} - -// PropWantedBy sets the WantedBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= -func PropWantedBy(units ...string) Property { - return propDependency("WantedBy", units) -} - -// PropBoundBy sets the BoundBy unit property. See -// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy= -func PropBoundBy(units ...string) Property { - return propDependency("BoundBy", units) -} - -// PropConflicts sets the Conflicts unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= -func PropConflicts(units ...string) Property { - return propDependency("Conflicts", units) -} - -// PropConflictedBy sets the ConflictedBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= -func PropConflictedBy(units ...string) Property { - return propDependency("ConflictedBy", units) -} - -// PropBefore sets the Before unit property. 
See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= -func PropBefore(units ...string) Property { - return propDependency("Before", units) -} - -// PropAfter sets the After unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= -func PropAfter(units ...string) Property { - return propDependency("After", units) -} - -// PropOnFailure sets the OnFailure unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= -func PropOnFailure(units ...string) Property { - return propDependency("OnFailure", units) -} - -// PropTriggers sets the Triggers unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= -func PropTriggers(units ...string) Property { - return propDependency("Triggers", units) -} - -// PropTriggeredBy sets the TriggeredBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= -func PropTriggeredBy(units ...string) Property { - return propDependency("TriggeredBy", units) -} - -// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= -func PropPropagatesReloadTo(units ...string) Property { - return propDependency("PropagatesReloadTo", units) -} - -// PropRequiresMountsFor sets the RequiresMountsFor unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= -func PropRequiresMountsFor(units ...string) Property { - return propDependency("RequiresMountsFor", units) -} - -// PropSlice sets the Slice unit property. 
See -// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= -func PropSlice(slice string) Property { - return Property{ - Name: "Slice", - Value: dbus.MakeVariant(slice), - } -} - -// PropPids sets the PIDs field of scope units used in the initial construction -// of the scope only and specifies the initial PIDs to add to the scope object. -// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties -func PropPids(pids ...uint32) Property { - return Property{ - Name: "PIDs", - Value: dbus.MakeVariant(pids), - } -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/set.go b/vendor/github.com/coreos/go-systemd/dbus/set.go deleted file mode 100644 index f92e6fbed1..0000000000 --- a/vendor/github.com/coreos/go-systemd/dbus/set.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dbus - -type set struct { - data map[string]bool -} - -func (s *set) Add(value string) { - s.data[value] = true -} - -func (s *set) Remove(value string) { - delete(s.data, value) -} - -func (s *set) Contains(value string) (exists bool) { - _, exists = s.data[value] - return -} - -func (s *set) Length() int { - return len(s.data) -} - -func (s *set) Values() (values []string) { - for val, _ := range s.data { - values = append(values, val) - } - return -} - -func newSet() *set { - return &set{make(map[string]bool)} -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go deleted file mode 100644 index 996451445c..0000000000 --- a/vendor/github.com/coreos/go-systemd/dbus/subscription.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "errors" - "time" - - "github.com/godbus/dbus" -) - -const ( - cleanIgnoreInterval = int64(10 * time.Second) - ignoreInterval = int64(30 * time.Millisecond) -) - -// Subscribe sets up this connection to subscribe to all systemd dbus events. -// This is required before calling SubscribeUnits. When the connection closes -// systemd will automatically stop sending signals so there is no need to -// explicitly call Unsubscribe(). 
-func (c *Conn) Subscribe() error { - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") - - err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() - if err != nil { - return err - } - - return nil -} - -// Unsubscribe this connection from systemd dbus events. -func (c *Conn) Unsubscribe() error { - err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() - if err != nil { - return err - } - - return nil -} - -func (c *Conn) dispatch() { - ch := make(chan *dbus.Signal, signalBuffer) - - c.sigconn.Signal(ch) - - go func() { - for { - signal, ok := <-ch - if !ok { - return - } - - if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { - c.jobComplete(signal) - } - - if c.subscriber.updateCh == nil { - continue - } - - var unitPath dbus.ObjectPath - switch signal.Name { - case "org.freedesktop.systemd1.Manager.JobRemoved": - unitName := signal.Body[2].(string) - c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) - case "org.freedesktop.systemd1.Manager.UnitNew": - unitPath = signal.Body[1].(dbus.ObjectPath) - case "org.freedesktop.DBus.Properties.PropertiesChanged": - if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { - unitPath = signal.Path - } - } - - if unitPath == dbus.ObjectPath("") { - continue - } - - c.sendSubStateUpdate(unitPath) - } - }() -} - -// Returns two unbuffered channels which will receive all changed units every -// interval. Deleted units are sent as nil. 
-func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { - return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) -} - -// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer -// size of the channels, the comparison function for detecting changes and a filter -// function for cutting down on the noise that your channel receives. -func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { - old := make(map[string]*UnitStatus) - statusChan := make(chan map[string]*UnitStatus, buffer) - errChan := make(chan error, buffer) - - go func() { - for { - timerChan := time.After(interval) - - units, err := c.ListUnits() - if err == nil { - cur := make(map[string]*UnitStatus) - for i := range units { - if filterUnit != nil && filterUnit(units[i].Name) { - continue - } - cur[units[i].Name] = &units[i] - } - - // add all new or changed units - changed := make(map[string]*UnitStatus) - for n, u := range cur { - if oldU, ok := old[n]; !ok || isChanged(oldU, u) { - changed[n] = u - } - delete(old, n) - } - - // add all deleted units - for oldN := range old { - changed[oldN] = nil - } - - old = cur - - if len(changed) != 0 { - statusChan <- changed - } - } else { - errChan <- err - } - - <-timerChan - } - }() - - return statusChan, errChan -} - -type SubStateUpdate struct { - UnitName string - SubState string -} - -// SetSubStateSubscriber writes to updateCh when any unit's substate changes. -// Although this writes to updateCh on every state change, the reported state -// may be more recent than the change that generated it (due to an unavoidable -// race in the systemd dbus interface). 
That is, this method provides a good -// way to keep a current view of all units' states, but is not guaranteed to -// show every state transition they go through. Furthermore, state changes -// will only be written to the channel with non-blocking writes. If updateCh -// is full, it attempts to write an error to errCh; if errCh is full, the error -// passes silently. -func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { - c.subscriber.Lock() - defer c.subscriber.Unlock() - c.subscriber.updateCh = updateCh - c.subscriber.errCh = errCh -} - -func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) { - c.subscriber.Lock() - defer c.subscriber.Unlock() - - if c.shouldIgnore(path) { - return - } - - info, err := c.GetUnitProperties(string(path)) - if err != nil { - select { - case c.subscriber.errCh <- err: - default: - } - } - - name := info["Id"].(string) - substate := info["SubState"].(string) - - update := &SubStateUpdate{name, substate} - select { - case c.subscriber.updateCh <- update: - default: - select { - case c.subscriber.errCh <- errors.New("update channel full!"): - default: - } - } - - c.updateIgnore(path, info) -} - -// The ignore functions work around a wart in the systemd dbus interface. -// Requesting the properties of an unloaded unit will cause systemd to send a -// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's -// properties on UnitNew (as that's the only indication of a new unit coming up -// for the first time), we would enter an infinite loop if we did not attempt -// to detect and ignore these spurious signals. The signal themselves are -// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an -// unloaded unit's signals for a short time after requesting its properties. -// This means that we will miss e.g. 
a transient unit being restarted -// *immediately* upon failure and also a transient unit being started -// immediately after requesting its status (with systemctl status, for example, -// because this causes a UnitNew signal to be sent which then causes us to fetch -// the properties). - -func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { - t, ok := c.subscriber.ignore[path] - return ok && t >= time.Now().UnixNano() -} - -func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { - c.cleanIgnore() - - // unit is unloaded - it will trigger bad systemd dbus behavior - if info["LoadState"].(string) == "not-found" { - c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval - } -} - -// without this, ignore would grow unboundedly over time -func (c *Conn) cleanIgnore() { - now := time.Now().UnixNano() - if c.subscriber.cleanIgnore < now { - c.subscriber.cleanIgnore = now + cleanIgnoreInterval - - for p, t := range c.subscriber.ignore { - if t < now { - delete(c.subscriber.ignore, p) - } - } - } -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go deleted file mode 100644 index 5b408d5847..0000000000 --- a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dbus - -import ( - "time" -) - -// SubscriptionSet returns a subscription set which is like conn.Subscribe but -// can filter to only return events for a set of units. -type SubscriptionSet struct { - *set - conn *Conn -} - -func (s *SubscriptionSet) filter(unit string) bool { - return !s.Contains(unit) -} - -// Subscribe starts listening for dbus events for all of the units in the set. -// Returns channels identical to conn.SubscribeUnits. -func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { - // TODO: Make fully evented by using systemd 209 with properties changed values - return s.conn.SubscribeUnitsCustom(time.Second, 0, - mismatchUnitStatus, - func(unit string) bool { return s.filter(unit) }, - ) -} - -// NewSubscriptionSet returns a new subscription set. -func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { - return &SubscriptionSet{newSet(), conn} -} - -// mismatchUnitStatus returns true if the provided UnitStatus objects -// are not equivalent. false is returned if the objects are equivalent. -// Only the Name, Description and state-related fields are used in -// the comparison. 
-func mismatchUnitStatus(u1, u2 *UnitStatus) bool { - return u1.Name != u2.Name || - u1.Description != u2.Description || - u1.LoadState != u2.LoadState || - u1.ActiveState != u2.ActiveState || - u1.SubState != u2.SubState -} diff --git a/vendor/github.com/ema/qdisc/LICENSE.md b/vendor/github.com/ema/qdisc/LICENSE.md deleted file mode 100644 index 0a38dae3e1..0000000000 --- a/vendor/github.com/ema/qdisc/LICENSE.md +++ /dev/null @@ -1,10 +0,0 @@ -MIT License -=========== - -Copyright (C) 2017 Emanuele Rocca - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/ema/qdisc/Makefile b/vendor/github.com/ema/qdisc/Makefile deleted file mode 100644 index b313b082f9..0000000000 --- a/vendor/github.com/ema/qdisc/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -build: - go fmt - go build - go vet - staticcheck - #golint -set_exit_status - go test -v -race -tags=integration - -cover: - go test -coverprofile=coverage.out - go tool cover -html=coverage.out diff --git a/vendor/github.com/ema/qdisc/README.md b/vendor/github.com/ema/qdisc/README.md deleted file mode 100644 index 9fe5dde54c..0000000000 --- a/vendor/github.com/ema/qdisc/README.md +++ /dev/null @@ -1,26 +0,0 @@ -qdisc [![Build Status](https://travis-ci.org/ema/qdisc.svg?branch=master)](https://travis-ci.org/ema/qdisc) -===== - -Package `qdisc` allows to get queuing discipline information via netlink, -similarly to what `tc -s qdisc show` does. - -Example usage -------------- - - package main - - import ( - "fmt" - - "github.com/ema/qdisc" - ) - - func main() { - info, err := qdisc.Get() - - if err == nil { - for _, msg := range info { - fmt.Printf("%+v\n", msg) - } - } - } diff --git a/vendor/github.com/ema/qdisc/get.go b/vendor/github.com/ema/qdisc/get.go deleted file mode 100644 index adb822c9a2..0000000000 --- a/vendor/github.com/ema/qdisc/get.go +++ /dev/null @@ -1,281 +0,0 @@ -package qdisc - -import ( - "fmt" - "math" - "net" - - "github.com/mdlayher/netlink" - "github.com/mdlayher/netlink/nlenc" -) - -const ( - TCA_UNSPEC = iota - TCA_KIND - TCA_OPTIONS - TCA_STATS - TCA_XSTATS - TCA_RATE - TCA_FCNT - TCA_STATS2 - TCA_STAB - __TCA_MAX -) - -const ( - TCA_STATS_UNSPEC = iota - TCA_STATS_BASIC - TCA_STATS_RATE_EST - TCA_STATS_QUEUE - TCA_STATS_APP - TCA_STATS_RATE_EST64 - __TCA_STATS_MAX -) - -// See struct tc_stats in /usr/include/linux/pkt_sched.h -type TC_Stats struct { - Bytes uint64 - Packets uint32 - Drops uint32 - Overlimits uint32 - Bps uint32 - Pps uint32 - Qlen uint32 - Backlog uint32 -} - -// See /usr/include/linux/gen_stats.h -type 
TC_Stats2 struct { - // struct gnet_stats_basic - Bytes uint64 - Packets uint32 - // struct gnet_stats_queue - Qlen uint32 - Backlog uint32 - Drops uint32 - Requeues uint32 - Overlimits uint32 -} - -// See struct tc_fq_qd_stats /usr/include/linux/pkt_sched.h -type TC_Fq_Qd_Stats struct { - GcFlows uint64 - HighprioPackets uint64 - TcpRetrans uint64 - Throttled uint64 - FlowsPlimit uint64 - PktsTooLong uint64 - AllocationErrors uint64 - TimeNextDelayedFlow int64 - Flows uint32 - InactiveFlows uint32 - ThrottledFlows uint32 - UnthrottleLatencyNs uint32 -} - -type QdiscInfo struct { - IfaceName string - Parent uint32 - Handle uint32 - Kind string - Bytes uint64 - Packets uint32 - Drops uint32 - Requeues uint32 - Overlimits uint32 - GcFlows uint64 - Throttled uint64 - FlowsPlimit uint64 -} - -func parseTCAStats(attr netlink.Attribute) TC_Stats { - var stats TC_Stats - stats.Bytes = nlenc.Uint64(attr.Data[0:8]) - stats.Packets = nlenc.Uint32(attr.Data[8:12]) - stats.Drops = nlenc.Uint32(attr.Data[12:16]) - stats.Overlimits = nlenc.Uint32(attr.Data[16:20]) - stats.Bps = nlenc.Uint32(attr.Data[20:24]) - stats.Pps = nlenc.Uint32(attr.Data[24:28]) - stats.Qlen = nlenc.Uint32(attr.Data[28:32]) - stats.Backlog = nlenc.Uint32(attr.Data[32:36]) - return stats -} - -func parseTCAStats2(attr netlink.Attribute) TC_Stats2 { - var stats TC_Stats2 - - nested, _ := netlink.UnmarshalAttributes(attr.Data) - - for _, a := range nested { - switch a.Type { - case TCA_STATS_BASIC: - stats.Bytes = nlenc.Uint64(a.Data[0:8]) - stats.Packets = nlenc.Uint32(a.Data[8:12]) - case TCA_STATS_QUEUE: - stats.Qlen = nlenc.Uint32(a.Data[0:4]) - stats.Backlog = nlenc.Uint32(a.Data[4:8]) - stats.Drops = nlenc.Uint32(a.Data[8:12]) - stats.Requeues = nlenc.Uint32(a.Data[12:16]) - stats.Overlimits = nlenc.Uint32(a.Data[16:20]) - default: - } - } - - return stats -} - -func parseTC_Fq_Qd_Stats(attr netlink.Attribute) TC_Fq_Qd_Stats { - var stats TC_Fq_Qd_Stats - - nested, _ := 
netlink.UnmarshalAttributes(attr.Data) - - for _, a := range nested { - switch a.Type { - case TCA_STATS_APP: - stats.GcFlows = nlenc.Uint64(a.Data[0:8]) - stats.HighprioPackets = nlenc.Uint64(a.Data[8:16]) - stats.TcpRetrans = nlenc.Uint64(a.Data[16:24]) - stats.Throttled = nlenc.Uint64(a.Data[24:32]) - stats.FlowsPlimit = nlenc.Uint64(a.Data[32:40]) - stats.PktsTooLong = nlenc.Uint64(a.Data[40:48]) - stats.AllocationErrors = nlenc.Uint64(a.Data[48:56]) - default: - } - } - - return stats -} - -func getQdiscMsgs(c *netlink.Conn) ([]netlink.Message, error) { - req := netlink.Message{ - Header: netlink.Header{ - Flags: netlink.HeaderFlagsRequest | netlink.HeaderFlagsDump, - Type: 38, // RTM_GETQDISC - }, - Data: []byte{0}, - } - - // Perform a request, receive replies, and validate the replies - msgs, err := c.Execute(req) - if err != nil { - return nil, fmt.Errorf("failed to execute request: %v", err) - } - - return msgs, nil -} - -// See https://tools.ietf.org/html/rfc3549#section-3.1.3 -func parseMessage(msg netlink.Message) (QdiscInfo, error) { - var m QdiscInfo - var s TC_Stats - var s2 TC_Stats2 - var s_fq TC_Fq_Qd_Stats - - /* - struct tcmsg { - unsigned char tcm_family; - unsigned char tcm__pad1; - unsigned short tcm__pad2; - int tcm_ifindex; - __u32 tcm_handle; - __u32 tcm_parent; - __u32 tcm_info; - }; - */ - - if len(msg.Data) < 20 { - return m, fmt.Errorf("Short message, len=%d", len(msg.Data)) - } - - ifaceIdx := nlenc.Uint32(msg.Data[4:8]) - - m.Handle = nlenc.Uint32(msg.Data[8:12]) - m.Parent = nlenc.Uint32(msg.Data[12:16]) - - if m.Parent == math.MaxUint32 { - m.Parent = 0 - } - - // The first 20 bytes are taken by tcmsg - attrs, err := netlink.UnmarshalAttributes(msg.Data[20:]) - - if err != nil { - return m, fmt.Errorf("failed to unmarshal attributes: %v", err) - } - - for _, attr := range attrs { - switch attr.Type { - case TCA_KIND: - m.Kind = nlenc.String(attr.Data) - case TCA_STATS2: - s2 = parseTCAStats2(attr) - s_fq = 
parseTC_Fq_Qd_Stats(attr) - if s_fq.GcFlows > 0 { - m.GcFlows = s_fq.GcFlows - } - if s_fq.Throttled > 0 { - m.Throttled = s_fq.Throttled - } - if s_fq.FlowsPlimit > 0 { - m.FlowsPlimit = s_fq.FlowsPlimit - } - m.Bytes = s2.Bytes - m.Packets = s2.Packets - m.Drops = s2.Drops - // requeues only available in TCA_STATS2, not in TCA_STATS - m.Requeues = s2.Requeues - m.Overlimits = s2.Overlimits - case TCA_STATS: - // Legacy - s = parseTCAStats(attr) - m.Bytes = s.Bytes - m.Packets = s.Packets - m.Drops = s.Drops - m.Overlimits = s.Overlimits - default: - // TODO: TCA_OPTIONS and TCA_XSTATS - } - } - - iface, err := net.InterfaceByIndex(int(ifaceIdx)) - - if err == nil { - m.IfaceName = iface.Name - } - - return m, err -} - -func getAndParse(c *netlink.Conn) ([]QdiscInfo, error) { - var res []QdiscInfo - - msgs, err := getQdiscMsgs(c) - - if err != nil { - return nil, err - } - - for _, msg := range msgs { - m, err := parseMessage(msg) - - if err != nil { - return nil, err - } - - res = append(res, m) - } - - return res, nil -} - -func Get() ([]QdiscInfo, error) { - const familyRoute = 0 - - c, err := netlink.Dial(familyRoute, nil) - if err != nil { - return nil, fmt.Errorf("failed to dial netlink: %v", err) - } - defer c.Close() - - return getAndParse(c) -} diff --git a/vendor/github.com/godbus/dbus/CONTRIBUTING.md b/vendor/github.com/godbus/dbus/CONTRIBUTING.md deleted file mode 100644 index c88f9b2bdd..0000000000 --- a/vendor/github.com/godbus/dbus/CONTRIBUTING.md +++ /dev/null @@ -1,50 +0,0 @@ -# How to Contribute - -## Getting Started - -- Fork the repository on GitHub -- Read the [README](README.markdown) for build and test instructions -- Play with the project, submit bugs, submit patches! - -## Contribution Flow - -This is a rough outline of what a contributor's workflow looks like: - -- Create a topic branch from where you want to base your work (usually master). -- Make commits of logical units. 
-- Make sure your commit messages are in the proper format (see below). -- Push your changes to a topic branch in your fork of the repository. -- Make sure the tests pass, and add any new tests as appropriate. -- Submit a pull request to the original repository. - -Thanks for your contributions! - -### Format of the Commit Message - -We follow a rough convention for commit messages that is designed to answer two -questions: what changed and why. The subject line should feature the what and -the body of the commit should describe the why. - -``` -scripts: add the test-cluster command - -this uses tmux to setup a test cluster that you can easily kill and -start for debugging. - -Fixes #38 -``` - -The format can be described more formally as follows: - -``` -: - - - -