diff --git a/.codacy.yml b/.codacy.yml index a358bf92739100..cebc95f8193627 100644 --- a/.codacy.yml +++ b/.codacy.yml @@ -10,9 +10,16 @@ exclude_paths: - web/gui/lib/** - web/gui/old/** - web/gui/src/** + - web/gui/v1/** - web/gui/v2/** - web/gui/main.js - tests/** - aclk/tests/** - libnetdata/libjudy/** - + - database/sqlite/sqlite3.c + - ml/dlib/** + - web/server/h2o/libh2o/** + - build/** + - build_external/** + - libnetdata/dyn_conf/tests/** + - packaging/** diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index bc4fd0b32988cd..7f368ceb7e4bed 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -19,11 +19,11 @@ collectors/cups.plugin/ @thiagoftsm exporting/ @thiagoftsm daemon/ @thiagoftsm @vkalintiris database/ @thiagoftsm @vkalintiris -docs/ @tkatsoulas @andrewm4894 @Ancairon +docs/ @tkatsoulas @Ancairon health/ @thiagoftsm @vkalintiris @MrZammler health/health.d/ @thiagoftsm @MrZammler health/notifications/ @Ferroin @thiagoftsm @MrZammler -ml/ @andrewm4894 @vkalintiris +ml/ @vkalintiris libnetdata/ @thiagoftsm @vkalintiris packaging/ @Ferroin @tkatsoulas registry/ @novykh @@ -32,11 +32,12 @@ system/ @Ferroin @tkatsoulas tests/ @Ferroin @vkalintiris @tkatsoulas web/ @thiagoftsm @vkalintiris web/gui/ @novykh +logsmanagement/ @Dim-P @thiagoftsm # Ownership by filetype (overwrites ownership by directory) *.am @Ferroin @tkatsoulas -*.md @tkatsoulas @andrewm4894 @Ancairon -*.mdx @tkatsoulas @andrewm4894 @Ancairon +*.md @tkatsoulas @Ancairon +*.mdx @tkatsoulas @Ancairon Dockerfile* @Ferroin @tkatsoulas # Ownership of specific files diff --git a/.github/data/distros.yml b/.github/data/distros.yml index cdd0faf06f80c5..9175a5c7308731 100644 --- a/.github/data/distros.yml +++ b/.github/data/distros.yml @@ -44,11 +44,6 @@ include: support_type: Intermediate notes: '' eol_check: true - - <<: *alpine - version: "3.15" - support_type: Intermediate - notes: '' - eol_check: true - distro: archlinux version: latest @@ -60,34 +55,6 @@ include: test: ebpf-core: true - - &alma - distro: almalinux - version: "9" - support_type: Core - notes: '' - jsonc_removal: | - dnf remove -y json-c-devel - eol_check: true - packages: &alma_packages - type: rpm - repo_distro: el/9 - alt_links: - - el/9Server - - el/9Client - arches: - - x86_64 - - aarch64 - test: - ebpf-core: true - - <<: *alma - version: "8" - packages: - <<: *alma_packages - repo_distro: el/8 - alt_links: - - el/8Server - - el/8Client - - &amzn distro: amazonlinux version: "2" @@ -108,7 +75,6 @@ include: <<: *amzn_packages repo_distro: amazonlinux/2023 - - distro: centos version: "7" support_type: Core @@ -125,6 +91,30 @@ include: test: ebpf-core: false + - ¢os_stream + distro: centos-stream + base_image: 'quay.io/centos/centos:stream9' + version: '9' + support_type: 'Community' + notes: '' + jsonc_removal: | + dnf remove -y json-c-devel + eol_check: true + packages: &cs_packages + type: rpm + repo_distro: el/c9s + arches: + - x86_64 + - aarch64 + test: + ebpf-core: true + - <<: *centos_stream + version: '8' + base_image: 'quay.io/centos/centos:stream8' + packages: + <<: *cs_packages + repo_distro: el/c8s + - &debian distro: debian version: "12" @@ -165,7 +155,7 @@ include: - &fedora distro: fedora - version: "38" + version: "39" support_type: Core notes: '' eol_check: true @@ -173,12 +163,19 @@ include: dnf remove -y json-c-devel packages: &fedora_packages type: rpm - repo_distro: fedora/38 + repo_distro: fedora/39 arches: - x86_64 - aarch64 test: ebpf-core: true + - <<: *fedora + version: "38" + packages: + <<: *fedora_packages + 
repo_distro: fedora/38 + test: + ebpf-core: true - <<: *fedora version: "37" packages: @@ -198,7 +195,7 @@ include: zypper rm -y libjson-c-devel packages: &opensuse_packages type: rpm - repo_distro: opensuse/leap:15.5 + repo_distro: opensuse/15.5 arches: - x86_64 - aarch64 @@ -235,6 +232,36 @@ include: <<: *oracle_packages repo_distro: ol/9 + - &rocky + distro: rockylinux + version: "9" + support_type: Core + notes: '' + jsonc_removal: | + dnf remove -y json-c-devel + eol_check: true + packages: &rocky_packages + type: rpm + repo_distro: el/9 + alt_links: + - el/9Server + - el/9Client + - el/9RedHatVirtualizationHost + arches: + - x86_64 + - aarch64 + test: + ebpf-core: true + - <<: *rocky + version: "8" + packages: + <<: *rocky_packages + repo_distro: el/8 + alt_links: + - el/8Server + - el/8Client + - el/8RedHatVirtualizationHost + - &ubuntu distro: ubuntu version: "22.04" @@ -254,6 +281,11 @@ include: - arm64 test: ebpf-core: true + - <<: *ubuntu + version: "23.10" + packages: + <<: *ubuntu_packages + repo_distro: ubuntu/mantic - <<: *ubuntu version: "23.04" packages: diff --git a/.github/labeler.yml b/.github/labeler.yml index 1dd4d472a18b5a..0ea825ef4bc074 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -153,3 +153,6 @@ area/tests: area/web: - web/** + +area/logs-management: + - logsmanagement/** diff --git a/.github/scripts/check_latest_versions.py b/.github/scripts/check_latest_versions.py new file mode 100755 index 00000000000000..67b11f8d54e614 --- /dev/null +++ b/.github/scripts/check_latest_versions.py @@ -0,0 +1,33 @@ +import sys +import os +import modules.version_manipulation as ndvm +import modules.github_actions as cigh + + +def main(command_line_args): + """ + Inputs: Single version or multiple versions + Outputs: + Create files with the versions that needed update under temp_dir/staging-new-releases + Setting the GitHub outputs, 'versions_needs_update' to 'true' + """ + versions = [str(arg) for arg in command_line_args] + # Create a temp output folder for the release that need update + staging = os.path.join(os.environ.get('TMPDIR', '/tmp'), 'staging-new-releases') + os.makedirs(staging, exist_ok=True) + for version in versions: + temp_value = ndvm.compare_version_with_remote(version) + if temp_value: + path, filename = ndvm.get_release_path_and_filename(version) + release_path = os.path.join(staging, path) + os.makedirs(release_path, exist_ok=True) + file_release_path = os.path.join(release_path, filename) + with open(file_release_path, "w") as file: + print("Creating local copy of the release version update at: ", file_release_path) + file.write(version) + if cigh.run_as_github_action(): + cigh.update_github_output("versions_needs_update", "true") + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/.github/scripts/check_latest_versions_per_channel.py b/.github/scripts/check_latest_versions_per_channel.py new file mode 100644 index 00000000000000..885e5a98cc4a5c --- /dev/null +++ b/.github/scripts/check_latest_versions_per_channel.py @@ -0,0 +1,9 @@ +import check_latest_versions +import modules.version_manipulation as ndvm +import sys + +if __name__ == "__main__": + channel = sys.argv[1] + sorted_agents_by_major = ndvm.sort_and_grouby_major_agents_of_channel(channel) + latest_per_major = [values[0] for values in sorted_agents_by_major.values()] + check_latest_versions.main(latest_per_major) diff --git a/.github/scripts/ci-support-pkgs.sh b/.github/scripts/ci-support-pkgs.sh index 9ba11b68ee87f7..5cedbf3b9e02ca 100755 --- 
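For readers skimming the two new scripts above, here is a minimal, self-contained sketch (version strings and file names are illustrative, not real releases) of how check_latest_versions_per_channel.py and check_latest_versions.py fit together: pick the newest release per major version, stage anything that needs publishing under the staging directory, and record the decision through a GITHUB_OUTPUT-style key=value file.

```python
import os
import tempfile

# Illustrative shape of sort_and_grouby_major_agents_of_channel("stable"):
# each major version maps to a descending-sorted list of releases.
sorted_agents_by_major = {
    "v1": ["v1.43.2", "v1.43.1", "v1.42.4"],  # hypothetical versions
    "v2": ["v2.0.1", "v2.0.0"],
}

# check_latest_versions_per_channel.py keeps only the newest entry per major.
latest_per_major = [values[0] for values in sorted_agents_by_major.values()]
print(latest_per_major)  # ['v1.43.2', 'v2.0.1']

# check_latest_versions.py performs a remote check first; for any version that
# needs publishing (assumed to be all of them in this sketch) it writes the
# version string to <staging>/<channel>/v<major>, the layout that
# upload-new-version-tags.sh later copies to the package server.
staging = os.path.join(tempfile.gettempdir(), "staging-new-releases")
for version in latest_per_major:
    major = version.split(".")[0]                 # e.g. 'v1'
    release_path = os.path.join(staging, "stable")
    os.makedirs(release_path, exist_ok=True)
    with open(os.path.join(release_path, major), "w") as fh:
        fh.write(version)

# Stand-in for update_github_output(): GITHUB_OUTPUT takes newline-delimited
# key=value entries that later steps read as steps.<id>.outputs.<key>.
with open(os.path.join(staging, "github_output.txt"), "a") as fh:
    fh.write("versions_needs_update=true\n")
```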
a/.github/scripts/ci-support-pkgs.sh +++ b/.github/scripts/ci-support-pkgs.sh @@ -9,7 +9,8 @@ set -e case "${ID}" in amzn|almalinux|centos|fedora) - dnf install -y procps-ng cronie cronie-anacron || yum install -y procps-ng cronie cronie-anacron + dnf install -y procps-ng cronie cronie-anacron || \ + yum install -y procps-ng cronie cronie-anacron ;; arch) pacman -S --noconfirm cronie diff --git a/.github/scripts/get-static-cache-key.sh b/.github/scripts/get-static-cache-key.sh index 3b07088f474f09..5093b332796332 100755 --- a/.github/scripts/get-static-cache-key.sh +++ b/.github/scripts/get-static-cache-key.sh @@ -2,13 +2,14 @@ arch="${1}" platform="$(packaging/makeself/uname2platform.sh "${arch}")" +builder_rev="v1" -docker pull --platform "${platform}" netdata/static-builder +docker pull --platform "${platform}" netdata/static-builder:${builder_rev} # shellcheck disable=SC2046 cat $(find packaging/makeself/jobs -type f ! -regex '.*\(netdata\|-makeself\).*') > /tmp/static-cache-key-data -docker run -it --rm --platform "${platform}" netdata/static-builder sh -c 'apk list -I 2>/dev/null' >> /tmp/static-cache-key-data +docker run -it --rm --platform "${platform}" netdata/static-builder:${builder_rev} sh -c 'apk list -I 2>/dev/null' >> /tmp/static-cache-key-data h="$(sha256sum /tmp/static-cache-key-data | cut -f 1 -d ' ')" diff --git a/.github/scripts/modules/github_actions.py b/.github/scripts/modules/github_actions.py new file mode 100644 index 00000000000000..1d653a77b01005 --- /dev/null +++ b/.github/scripts/modules/github_actions.py @@ -0,0 +1,27 @@ +import os + + +def update_github_env(key, value): + try: + env_file = os.getenv('GITHUB_ENV') + print(env_file) + with open(env_file, "a") as file: + file.write(f"{key}={value}") + print(f"Updated GITHUB_ENV with {key}={value}") + except Exception as e: + print(f"Error updating GITHUB_ENV. Error: {e}") + + +def update_github_output(key, value): + try: + env_file = os.getenv('GITHUB_OUTPUT') + print(env_file) + with open(env_file, "a") as file: + file.write(f"{key}={value}") + print(f"Updated GITHUB_OUTPUT with {key}={value}") + except Exception as e: + print(f"Error updating GITHUB_OUTPUT. 
Error: {e}") + + +def run_as_github_action(): + return os.environ.get('GITHUB_ACTIONS') == 'true' diff --git a/.github/scripts/modules/requirements.txt b/.github/scripts/modules/requirements.txt new file mode 100644 index 00000000000000..fbec796fa5d698 --- /dev/null +++ b/.github/scripts/modules/requirements.txt @@ -0,0 +1 @@ +PyGithub==2.1.1 diff --git a/.github/scripts/modules/version_manipulation.py b/.github/scripts/modules/version_manipulation.py new file mode 100644 index 00000000000000..cc346fb54d28da --- /dev/null +++ b/.github/scripts/modules/version_manipulation.py @@ -0,0 +1,141 @@ +import os +import re +import requests +from itertools import groupby +from github import Github +from github.GithubException import GithubException + +repos_URL = { + "stable": "netdata/netdata", + "nightly": "netdata/netdata-nightlies" +} + +GH_TOKEN = os.getenv("GH_TOKEN") +if GH_TOKEN is None or GH_TOKEN != "": + print("Token is not defined or empty, continuing with limitation on requests per sec towards Github API") + + +def identify_channel(_version): + nightly_pattern = r'v(\d+)\.(\d+)\.(\d+)-(\d+)-nightly' + stable_pattern = r'v(\d+)\.(\d+)\.(\d+)' + if re.match(nightly_pattern, _version): + _channel = "nightly" + _pattern = nightly_pattern + elif re.match(stable_pattern, _version): + _channel = "stable" + _pattern = stable_pattern + else: + print("Invalid version format.") + return None + return _channel, _pattern + + +def padded_version(item): + key_value = '10000' + for value in item[1:]: + key_value += f'{value:05}' + return int(key_value) + + +def extract_version(title): + if identify_channel(title): + _, _pattern = identify_channel(title) + try: + match = re.match(_pattern, title) + if match: + return tuple(map(int, match.groups())) + except Exception as e: + print(f"Unexpected error: {e}") + return None + + +def get_release_path_and_filename(_version): + nightly_pattern = r'v(\d+)\.(\d+)\.(\d+)-(\d+)-nightly' + stable_pattern = r'v(\d+)\.(\d+)\.(\d+)' + if match := re.match(nightly_pattern, _version): + msb = match.group(1) + _path = "nightly" + _filename = f"v{msb}" + elif match := re.match(stable_pattern, _version): + msb = match.group(1) + _path = "stable" + _filename = f"v{msb}" + else: + print("Invalid version format.") + exit(1) + return (_path, _filename) + + +def compare_version_with_remote(version): + """ + If the version = fun (version) you need to update the version in the + remote. If the version remote doesn't exist, returns the version + :param channel: any version of the agent + :return: the greater from version and version remote. 
+ """ + + prefix = "https://packages.netdata.cloud/releases" + path, filename = get_release_path_and_filename(version) + + remote_url = f"{prefix}/{path}/{filename}" + response = requests.get(remote_url) + + if response.status_code == 200: + version_remote = response.text.rstrip() + + version_components = extract_version(version) + remote_version_components = extract_version(version_remote) + + absolute_version = padded_version(version_components) + absolute_remote_version = padded_version(remote_version_components) + + if absolute_version > absolute_remote_version: + print(f"Version in the remote: {version_remote}, is older than the current: {version}, I need to update") + return (version) + else: + print(f"Version in the remote: {version_remote}, is newer than the current: {version}, no action needed") + return (None) + else: + # Remote version not found + print(f"Version in the remote not found, updating the predefined latest path with the version: {version}") + return (version) + + +def sort_and_grouby_major_agents_of_channel(channel): + """ + Fetches the GH API and read either netdata/netdata or netdata/netdata-nightlies repo. It fetches all of their + releases implements a grouping by their major release number. + Every k,v in this dictionary is in the form; "vX": [descending ordered list of Agents in this major release]. + :param channel: "nightly" or "stable" + :return: None or dict() with the Agents grouped by major version # (vX) + """ + try: + G = Github(GH_TOKEN) + repo = G.get_repo(repos_URL[channel]) + releases = repo.get_releases() + except GithubException as e: + print(f"GitHub API request failed: {e}") + return None + + except Exception as e: + print(f"An unexpected error occurred: {e}") + return None + + extracted_titles = [extract_version(item.title) for item in releases if + extract_version(item.title) is not None] + # Necessary sorting for implement the group by + extracted_titles.sort(key=lambda x: x[0]) + # Group titles by major version + grouped_by_major = {major: list(group) for major, group in groupby(extracted_titles, key=lambda x: x[0])} + sorted_grouped_by_major = {} + for key, values in grouped_by_major.items(): + sorted_values = sorted(values, key=padded_version, reverse=True) + sorted_grouped_by_major[key] = sorted_values + # Transform them in the correct form + if channel == "stable": + result_dict = {f"v{key}": [f"v{a}.{b}.{c}" for a, b, c in values] for key, values in + sorted_grouped_by_major.items()} + else: + result_dict = {f"v{key}": [f"v{a}.{b}.{c}-{d}-nightly" for a, b, c, d in values] for key, values in + sorted_grouped_by_major.items()} + return result_dict diff --git a/.github/scripts/pkg-test.sh b/.github/scripts/pkg-test.sh index 85e8b2e8d2cdaa..35767bf2ef2e4c 100755 --- a/.github/scripts/pkg-test.sh +++ b/.github/scripts/pkg-test.sh @@ -14,7 +14,9 @@ install_debian_like() { # Install Netdata # Strange quoting is required here so that glob matching works. - apt-get install -y $(find /netdata/artifacts -type f -name 'netdata*.deb' ! -name '*dbgsym*' ! -name '*cups*' ! -name '*freeipmi*') || exit 3 + # shellcheck disable=SC2046 + apt-get install -y $(find /netdata/artifacts -type f -name 'netdata*.deb' \ +! -name '*dbgsym*' ! -name '*cups*' ! -name '*freeipmi*') || exit 3 # Install testing tools apt-get install -y --no-install-recommends curl "${netcat}" jq || exit 1 @@ -32,10 +34,10 @@ install_fedora_like() { # Install Netdata # Strange quoting is required here so that glob matching works. 
- "$PKGMGR" install -y /netdata/artifacts/netdata*.rpm || exit 1 + "${PKGMGR}" install -y /netdata/artifacts/netdata*.rpm || exit 1 # Install testing tools - "$PKGMGR" install -y curl nc jq || exit 1 + "${PKGMGR}" install -y curl nc jq || exit 1 } install_centos() { @@ -49,15 +51,15 @@ install_centos() { fi # Install EPEL (needed for `jq` - "$PKGMGR" install -y epel-release || exit 1 + "${PKGMGR}" install -y epel-release || exit 1 # Install Netdata # Strange quoting is required here so that glob matching works. - "$PKGMGR" install -y /netdata/artifacts/netdata*.rpm || exit 1 + "${PKGMGR}" install -y /netdata/artifacts/netdata*.rpm || exit 1 # Install testing tools # shellcheck disable=SC2086 - "$PKGMGR" install -y ${opts} curl nc jq || exit 1 + "${PKGMGR}" install -y ${opts} curl nc jq || exit 1 } install_amazon_linux() { @@ -69,11 +71,11 @@ install_amazon_linux() { # Install Netdata # Strange quoting is required here so that glob matching works. - "$PKGMGR" install -y /netdata/artifacts/netdata*.rpm || exit 1 + "${PKGMGR}" install -y /netdata/artifacts/netdata*.rpm || exit 1 # Install testing tools # shellcheck disable=SC2086 - "$PKGMGR" install -y ${opts} curl nc jq || exit 1 + "${PKGMGR}" install -y ${opts} curl nc jq || exit 1 } install_suse_like() { @@ -130,7 +132,7 @@ case "${DISTRO}" in fedora | oraclelinux) install_fedora_like ;; - centos | rockylinux | almalinux) + centos| centos-stream | rockylinux | almalinux) install_centos ;; amazonlinux) diff --git a/.github/scripts/upload-new-version-tags.sh b/.github/scripts/upload-new-version-tags.sh new file mode 100755 index 00000000000000..a9b0cd303099dc --- /dev/null +++ b/.github/scripts/upload-new-version-tags.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -e + +host="packages.netdata.cloud" +user="netdatabot" + +prefix="/var/www/html/releases" +staging="${TMPDIR:-/tmp}/staging-new-releases" + +mkdir -p "${staging}" + +for source_dir in "${staging}"/*; do + if [ -d "${source_dir}" ]; then + base_name=$(basename "${source_dir}") + scp -r "${source_dir}"/* "${user}@${host}:${prefix}/${base_name}" + fi +done diff --git a/.github/workflows/build-dummy.yml b/.github/workflows/build-dummy.yml deleted file mode 100644 index 6bf327e2d09c00..00000000000000 --- a/.github/workflows/build-dummy.yml +++ /dev/null @@ -1,127 +0,0 @@ ---- -# Ci code for building release artifacts. -# -# This workflow exists so we can require these checks to pass, but skip -# them on PRs that have nothing to do with the source code. -name: Build -on: - pull_request: # PR checks only validate the build and generate artifacts for testing. - paths-ignore: # This MUST be kept in-sync with the paths-ignore key for the build-dummy.yml workflow. - - '**.c' - - '**.cc' - - '**.h' - - '**.hh' - - '**.in' - - '!netdata.spec.in' - - 'configure.ac' - - 'netdata-installer.sh' - - '**/Makefile*' - - 'Makefile*' - - '.github/workflows/build.yml' - - '.github/scripts/build-static.sh' - - '.github/scripts/get-static-cache-key.sh' - - '.github/scripts/gen-matrix-build.py' - - '.github/scripts/run-updater-check.sh' - - 'build/**' - - 'packaging/makeself/**' - - 'packaging/installer/**' - - 'aclk/aclk-schemas/' - - 'ml/dlib/' - - 'mqtt_websockets' - - 'web/server/h2o/libh2o' - - '!**.md' -concurrency: # This keeps multiple instances of the job from running concurrently for the same ref and event type. - group: build-${{ github.ref }}-${{ github.event_name }} - cancel-in-progress: true -jobs: - build-dist: # Build the distribution tarball and store it as an artifact. 
- name: Build Distribution Tarball - runs-on: ubuntu-latest - steps: - - run: echo 'NOT REQUIRED' - - build-static: # Build the static binary archives, and store them as artifacts. - name: Build Static - runs-on: ubuntu-latest - strategy: - matrix: - arch: - - x86_64 - - armv7l - - aarch64 - - ppc64le - steps: - - run: echo 'NOT REQUIRED' - - matrix: # Generate the shared build matrix for our build tests. - name: Prepare Build Matrix - runs-on: ubuntu-latest - outputs: - matrix: ${{ steps.set-matrix.outputs.matrix }} - steps: - - name: Checkout - id: checkout - uses: actions/checkout@v3 - - name: Prepare tools - id: prepare - run: | - sudo apt-get update && sudo apt-get install -y python3-ruamel.yaml - - name: Read build matrix - id: set-matrix - run: | - matrix="$(.github/scripts/gen-matrix-build.py)" - echo "Generated matrix: ${matrix}" - echo "matrix=${matrix}" >> "${GITHUB_OUTPUT}" - - prepare-test-images: # Prepare the test environments for our build checks. This also checks dependency handling code for each tested environment. - name: Prepare Test Environments - runs-on: ubuntu-latest - needs: - - matrix - env: - RETRY_DELAY: 300 - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.matrix.outputs.matrix) }} - steps: - - run: echo 'NOT REQUIRED' - - source-build: # Test various source build arrangements. - name: Test Source Build - runs-on: ubuntu-latest - needs: - - matrix - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.matrix.outputs.matrix) }} - steps: - - run: echo 'NOT REQUIRED' - - updater-check: # Test the generated dist archive using the updater code. - name: Test Generated Distfile and Updater Code - runs-on: ubuntu-latest - needs: - - matrix - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.matrix.outputs.matrix) }} - steps: - - run: echo 'NOT REQUIRED' - - prepare-upload: # Consolidate the artifacts for uploading or releasing. - name: Prepare Artifacts - runs-on: ubuntu-latest - steps: - - run: echo 'NOT REQUIRED' - - artifact-verification-dist: # Verify the regular installer works with the consolidated artifacts. - name: Test Consolidated Artifacts (Source) - runs-on: ubuntu-latest - steps: - - run: echo 'NOT REQUIRED' - - artifact-verification-static: # Verify the static installer works with the consolidated artifacts. - name: Test Consolidated Artifacts (Static) - runs-on: ubuntu-latest - steps: - - run: echo 'NOT REQUIRED' diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 180574a3c299fa..4a6debc4696857 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -5,31 +5,7 @@ on: push: # Master branch checks only validate the build and generate artifacts for testing. branches: - master - pull_request: # PR checks only validate the build and generate artifacts for testing. - paths: # This MUST be kept in-sync with the paths-ignore key for the build-dummy.yml workflow. - - '**.c' - - '**.cc' - - '**.h' - - '**.hh' - - '**.in' - - '!netdata.spec.in' - - 'configure.ac' - - 'netdata-installer.sh' - - '**/Makefile*' - - 'Makefile*' - - '.github/workflows/build.yml' - - '.github/scripts/build-static.sh' - - '.github/scripts/get-static-cache-key.sh' - - '.github/scripts/gen-matrix-build.py' - - '.github/scripts/run-updater-check.sh' - - 'build/**' - - 'packaging/makeself/**' - - 'packaging/installer/**' - - 'aclk/aclk-schemas/' - - 'ml/dlib/' - - 'mqtt_websockets' - - 'web/server/h2o/libh2o' - - '!**.md' + pull_request: null # PR checks only validate the build and generate artifacts for testing. 
workflow_dispatch: # Dispatch runs build and validate, then push to the appropriate storage location. inputs: type: @@ -44,30 +20,90 @@ concurrency: # This keeps multiple instances of the job from running concurrentl group: build-${{ github.ref }}-${{ github.event_name }} cancel-in-progress: true jobs: + file-check: # Check what files changed if we’re being run in a PR or on a push. + name: Check Modified Files + runs-on: ubuntu-latest + outputs: + run: ${{ steps.check-run.outputs.run }} + steps: + - name: Checkout + id: checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: recursive + - name: Check files + id: check-files + uses: tj-actions/changed-files@v40 + with: + since_last_remote_commit: ${{ github.event_name != 'pull_request' }} + files: | + **.c + **.cc + **.h + **.hh + **.in + configure.ac + netdata-installer.sh + **/Makefile* + Makefile* + .github/data/distros.yml + .github/workflows/build.yml + .github/scripts/build-static.sh + .github/scripts/get-static-cache-key.sh + .github/scripts/gen-matrix-build.py + .github/scripts/run-updater-check.sh + build/** + packaging/makeself/** + packaging/installer/** + aclk/aclk-schemas/ + ml/dlib/ + mqtt_websockets + web/server/h2o/libh2o + files_ignore: | + netdata.spec.in + **.md + - name: Check Run + id: check-run + run: | + if [ "${{ steps.check-files.outputs.any_modified }}" == "true" ] || [ "${{ github.event_name }}" == "workflow_dispatch" ]; then + echo 'run=true' >> "${GITHUB_OUTPUT}" + else + echo 'run=false' >> "${GITHUB_OUTPUT}" + fi + build-dist: # Build the distribution tarball and store it as an artifact. name: Build Distribution Tarball runs-on: ubuntu-latest + needs: + - file-check outputs: distfile: ${{ steps.build.outputs.distfile }} steps: + - name: Skip Check + id: skip + if: needs.file-check.outputs.run != 'true' + run: echo "SKIPPED" - name: Checkout id: checkout - uses: actions/checkout@v3 + if: needs.file-check.outputs.run == 'true' + uses: actions/checkout@v4 with: fetch-depth: 0 submodules: recursive - name: Fix tags id: fix-tags - if: github.event_name != 'push' + if: github.event_name != 'push' && needs.file-check.outputs.run == 'true' run: | git fetch --tags --force - name: Mark Stable id: channel - if: github.event_name == 'workflow_dispatch' && github.event.inputs.type != 'nightly' + if: github.event_name == 'workflow_dispatch' && github.event.inputs.type != 'nightly' && needs.file-check.outputs.run == 'true' run: | sed -i 's/^RELEASE_CHANNEL="nightly"/RELEASE_CHANNEL="stable"/' netdata-installer.sh - name: Build id: build + if: needs.file-check.outputs.run == 'true' run: | git describe mkdir -p artifacts @@ -85,6 +121,7 @@ jobs: cp netdata-*.tar.gz artifacts/ - name: Store id: store + if: needs.file-check.outputs.run == 'true' uses: actions/upload-artifact@v3 with: name: dist-tarball @@ -112,11 +149,14 @@ jobs: && startsWith(github.ref, 'refs/heads/master') && github.event_name != 'pull_request' && github.repository == 'netdata/netdata' + && needs.file-check.outputs.run == 'true' }} build-static: # Build the static binary archives, and store them as artifacts. 
name: Build Static runs-on: ubuntu-latest + needs: + - file-check strategy: matrix: arch: @@ -125,38 +165,43 @@ jobs: - aarch64 - ppc64le steps: + - name: Skip Check + id: skip + if: needs.file-check.outputs.run != 'true' + run: echo "SKIPPED" - name: Checkout id: checkout - uses: actions/checkout@v3 + if: needs.file-check.outputs.run == 'true' + uses: actions/checkout@v4 with: fetch-depth: 0 submodules: recursive - name: Fix tags id: fix-tags - if: github.event_name != 'push' + if: github.event_name != 'push' && needs.file-check.outputs.run == 'true' run: | git fetch --tags --force - name: Mark Stable id: channel - if: github.event_name == 'workflow_dispatch' && github.event.inputs.type != 'nightly' + if: github.event_name == 'workflow_dispatch' && github.event.inputs.type != 'nightly' && needs.file-check.outputs.run == 'true' run: | sed -i 's/^RELEASE_CHANNEL="nightly"/RELEASE_CHANNEL="stable"/' netdata-installer.sh packaging/makeself/install-or-update.sh - name: Get Cache Key - if: github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'run-ci/no-cache') + if: (github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'run-ci/no-cache')) && needs.file-check.outputs.run == 'true' id: cache-key run: .github/scripts/get-static-cache-key.sh ${{ matrix.arch }} "${{ contains(github.event.pull_request.labels.*.name, 'run-ci/no-cache') }}" - name: Cache - if: github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'run-ci/no-cache') + if: (github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'run-ci/no-cache')) && needs.file-check.outputs.run == 'true' id: cache uses: actions/cache@v3 with: path: artifacts/cache key: ${{ steps.cache-key.outputs.key }} - name: Build - if: github.event_name != 'workflow_dispatch' # Don’t use retries on PRs. + if: github.event_name != 'workflow_dispatch' && needs.file-check.outputs.run == 'true' # Don’t use retries on PRs. run: .github/scripts/build-static.sh ${{ matrix.arch }} - name: Build - if: github.event_name == 'workflow_dispatch' + if: github.event_name == 'workflow_dispatch' && needs.file-check.outputs.run == 'true' id: build uses: nick-fields/retry@v2 with: @@ -165,6 +210,7 @@ jobs: command: .github/scripts/build-static.sh ${{ matrix.arch }} - name: Store id: store + if: needs.file-check.outputs.run == 'true' uses: actions/upload-artifact@v3 with: name: static-archive @@ -192,6 +238,7 @@ jobs: && startsWith(github.ref, 'refs/heads/master') && github.event_name != 'pull_request' && github.repository == 'netdata/netdata' + && needs.file-check.outputs.run == 'true' }} matrix: # Generate the shared build matrix for our build tests. @@ -203,7 +250,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare tools id: prepare run: | @@ -252,13 +299,13 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Build test environment id: build1 - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 continue-on-error: true # We retry 3 times at 5 minute intervals if there is a failure here. 
with: push: false @@ -276,7 +323,7 @@ jobs: - name: Build test environment (attempt 2) if: ${{ steps.build1.outcome == 'failure' }} id: build2 - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 continue-on-error: true # We retry 3 times at 5 minute intervals if there is a failure here. with: push: false @@ -294,7 +341,7 @@ jobs: - name: Build test environment (attempt 3) if: ${{ steps.build1.outcome == 'failure' && steps.build2.outcome == 'failure' }} id: build3 - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: push: false load: false @@ -344,42 +391,53 @@ jobs: needs: - matrix - prepare-test-images + - file-check strategy: fail-fast: false max-parallel: 8 matrix: ${{ fromJson(needs.matrix.outputs.matrix) }} steps: + - name: Skip Check + id: skip + if: needs.file-check.outputs.run != 'true' + run: echo "SKIPPED" - name: Checkout id: checkout - uses: actions/checkout@v3 + if: needs.file-check.outputs.run == 'true' + uses: actions/checkout@v4 with: submodules: recursive - name: Fetch test environment id: fetch + if: needs.file-check.outputs.run == 'true' uses: actions/download-artifact@v3 with: name: ${{ matrix.artifact_key }}-test-env - name: Load test environment id: load + if: needs.file-check.outputs.run == 'true' run: docker load --input image.tar - name: Regular build on ${{ matrix.distro }} id: build-basic + if: needs.file-check.outputs.run == 'true' run: | docker run --security-opt seccomp=unconfined -w /netdata test:${{ matrix.artifact_key }} \ /bin/sh -c 'autoreconf -ivf && ./configure --disable-dependency-tracking && make -j2' - name: netdata-installer on ${{ matrix.distro }}, disable cloud id: build-no-cloud + if: needs.file-check.outputs.run == 'true' run: | docker run --security-opt seccomp=unconfined -w /netdata test:${{ matrix.artifact_key }} \ /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --disable-cloud --one-time-build' - name: netdata-installer on ${{ matrix.distro }}, require cloud id: build-cloud + if: needs.file-check.outputs.run == 'true' run: | docker run --security-opt seccomp=unconfined -w /netdata test:${{ matrix.artifact_key }} \ /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --require-cloud --one-time-build' - name: netdata-installer on ${{ matrix.distro }}, require cloud, no JSON-C id: build-no-jsonc - if: matrix.jsonc_removal != '' + if: matrix.jsonc_removal != '' && needs.file-check.outputs.run == 'true' run: | docker run --security-opt seccomp=unconfined -w /netdata test:${{ matrix.artifact_key }} \ /bin/sh -c '/rmjsonc.sh && ./netdata-installer.sh --dont-wait --dont-start-it --require-cloud --one-time-build' @@ -407,6 +465,7 @@ jobs: && startsWith(github.ref, 'refs/heads/master') && github.event_name != 'pull_request' && github.repository == 'netdata/netdata' + && needs.file-check.outputs.run == 'true' }} updater-check: # Test the generated dist archive using the updater code. 
@@ -417,6 +476,7 @@ jobs: - build-dist - matrix - prepare-test-images + - file-check strategy: fail-fast: false max-parallel: 8 @@ -429,17 +489,24 @@ jobs: volumes: - ${{ github.workspace }}:/usr/local/apache2/htdocs/ steps: + - name: Skip Check + id: skip + if: needs.file-check.outputs.run != 'true' + run: echo "SKIPPED" - name: Checkout id: checkout - uses: actions/checkout@v3 + if: needs.file-check.outputs.run == 'true' + uses: actions/checkout@v4 - name: Fetch dist tarball artifacts id: fetch-tarball + if: needs.file-check.outputs.run == 'true' uses: actions/download-artifact@v3 with: name: dist-tarball path: dist-tarball - name: Prepare artifact directory id: prepare + if: needs.file-check.outputs.run == 'true' run: | mkdir -p artifacts/download/latest || exit 1 echo "9999.0.0-0" > artifacts/download/latest/latest-version.txt || exit 1 @@ -450,14 +517,17 @@ jobs: cat sha256sums.txt - name: Fetch test environment id: fetch-test-environment + if: needs.file-check.outputs.run == 'true' uses: actions/download-artifact@v3 with: name: ${{ matrix.artifact_key }}-test-env - name: Load test environment id: load + if: needs.file-check.outputs.run == 'true' run: docker load --input image.tar - name: Install netdata and run the updater on ${{ matrix.distro }} id: updater-check + if: needs.file-check.outputs.run == 'true' run: | docker run --security-opt seccomp=unconfined -e DISABLE_TELEMETRY=1 --network host -w /netdata test:${{ matrix.artifact_key }} \ /netdata/.github/scripts/run-updater-check.sh @@ -484,6 +554,7 @@ jobs: && startsWith(github.ref, 'refs/heads/master') && github.event_name != 'pull_request' && github.repository == 'netdata/netdata' + && needs.file-check.outputs.run == 'true' }} prepare-upload: # Consolidate the artifacts for uploading or releasing. @@ -492,27 +563,37 @@ jobs: needs: - build-dist - build-static + - file-check steps: + - name: Skip Check + id: skip + if: needs.file-check.outputs.run != 'true' + run: echo "SKIPPED" - name: Checkout id: checkout - uses: actions/checkout@v3 + if: needs.file-check.outputs.run == 'true' + uses: actions/checkout@v4 - name: Prepare Environment id: prepare + if: needs.file-check.outputs.run == 'true' run: mkdir -p artifacts - name: Retrieve Dist Tarball id: fetch-dist + if: needs.file-check.outputs.run == 'true' uses: actions/download-artifact@v3 with: name: dist-tarball path: dist-tarball - name: Retrieve Static Build Artifacts id: fetch-static + if: needs.file-check.outputs.run == 'true' uses: actions/download-artifact@v3 with: name: static-archive path: static-archive - name: Prepare Artifacts id: consolidate + if: needs.file-check.outputs.run == 'true' working-directory: ./artifacts/ run: | mv ../dist-tarball/* . || exit 1 @@ -524,6 +605,7 @@ jobs: cat sha256sums.txt - name: Store Artifacts id: store + if: needs.file-check.outputs.run == 'true' uses: actions/upload-artifact@v3 with: name: final-artifacts @@ -552,6 +634,7 @@ jobs: && startsWith(github.ref, 'refs/heads/master') && github.event_name != 'pull_request' && github.repository == 'netdata/netdata' + && needs.file-check.outputs.run == 'true' }} artifact-verification-dist: # Verify the regular installer works with the consolidated artifacts. @@ -559,6 +642,7 @@ jobs: runs-on: ubuntu-latest needs: - prepare-upload + - file-check services: apache: # This gets used to serve the dist tarball for the updater script. 
image: httpd:2.4 @@ -567,22 +651,30 @@ jobs: volumes: - ${{ github.workspace }}:/usr/local/apache2/htdocs/ steps: + - name: Skip Check + id: skip + if: needs.file-check.outputs.run != 'true' + run: echo "SKIPPED" - name: Checkout id: checkout - uses: actions/checkout@v3 + if: needs.file-check.outputs.run == 'true' + uses: actions/checkout@v4 - name: Fetch artifacts id: fetch + if: needs.file-check.outputs.run == 'true' uses: actions/download-artifact@v3 with: name: final-artifacts path: artifacts - name: Prepare artifacts directory id: prepare + if: needs.file-check.outputs.run == 'true' run: | mkdir -p download/latest mv artifacts/* download/latest - name: Verify that artifacts work with installer id: verify + if: needs.file-check.outputs.run == 'true' env: NETDATA_TARBALL_BASEURL: http://localhost:8080/ run: packaging/installer/kickstart.sh --build-only --dont-start-it --disable-telemetry --dont-wait @@ -606,6 +698,7 @@ jobs: && startsWith(github.ref, 'refs/heads/master') && github.event_name != 'pull_request' && github.repository == 'netdata/netdata' + && needs.file-check.outputs.run == 'true' }} artifact-verification-static: # Verify the static installer works with the consolidated artifacts. @@ -613,6 +706,7 @@ jobs: runs-on: ubuntu-latest needs: - prepare-upload + - file-check services: apache: # This gets used to serve the static archives. image: httpd:2.4 @@ -621,22 +715,30 @@ jobs: volumes: - ${{ github.workspace }}:/usr/local/apache2/htdocs/ steps: + - name: Skip Check + id: skip + if: needs.file-check.outputs.run != 'true' + run: echo "SKIPPED" - name: Checkout id: checkout - uses: actions/checkout@v3 + if: needs.file-check.outputs.run == 'true' + uses: actions/checkout@v4 - name: Fetch artifacts id: fetch-artifacts + if: needs.file-check.outputs.run == 'true' uses: actions/download-artifact@v3 with: name: final-artifacts path: artifacts - name: Prepare artifacts directory id: prepare + if: needs.file-check.outputs.run == 'true' run: | mkdir -p download/latest mv artifacts/* download/latest - name: Verify that artifacts work with installer id: verify + if: needs.file-check.outputs.run == 'true' env: NETDATA_TARBALL_BASEURL: http://localhost:8080/ run: packaging/installer/kickstart.sh --static-only --dont-start-it --disable-telemetry @@ -660,6 +762,7 @@ jobs: && startsWith(github.ref, 'refs/heads/master') && github.event_name != 'pull_request' && github.repository == 'netdata/netdata' + && needs.file-check.outputs.run == 'true' }} upload-nightly: # Upload the nightly build artifacts to GCS. 
@@ -725,12 +828,12 @@ jobs: steps: - name: Checkout Main Repo id: checkout-main - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: path: main - name: Checkout Nightly Repo id: checkout-nightly - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: repository: netdata/netdata-nightlies path: nightlies @@ -762,6 +865,37 @@ jobs: makeLatest: true tag: ${{ steps.version.outputs.version }} token: ${{ secrets.NETDATABOT_GITHUB_TOKEN }} + - name: Checkout netdata main Repo # Checkout back to netdata/netdata repo to the update latest packaged versions + id: checkout-netdata + uses: actions/checkout@v4 + with: + token: ${{ secrets.NETDATABOT_GITHUB_TOKEN }} + - name: Init python environment for publish release metadata + uses: actions/setup-python@v4 + id: init-python + with: + python-version: "3.12" + - name: Setup python environment + id: setup-python + run: | + pip install -r .github/scripts/modules/requirements.txt + - name: Check if the version is latest and published + id: check-latest-version + run: | + python .github/scripts/check_latest_versions.py ${{ steps.version.outputs.version }} + - name: SSH setup + id: ssh-setup + if: github.event_name == 'workflow_dispatch' && github.repository == 'netdata/netdata' && steps.check-latest-version.outputs.versions_needs_update == 'true' + uses: shimataro/ssh-key-action@v2 + with: + key: ${{ secrets.NETDATABOT_PACKAGES_SSH_KEY }} + name: id_ecdsa + known_hosts: ${{ secrets.PACKAGES_KNOWN_HOSTS }} + - name: Sync newer releases + id: sync-releases + if: github.event_name == 'workflow_dispatch' && github.repository == 'netdata/netdata' && steps.check-latest-version.outputs.versions_needs_update == 'true' + run: | + .github/scripts/upload-new-version-tags.sh - name: Failure Notification uses: rtCamp/action-slack-notify@v2 env: @@ -777,6 +911,12 @@ jobs: Fetch artifacts: ${{ steps.fetch.outcome }} Prepare version info: ${{ steps.version.outcome }} Create release: ${{ steps.create-release.outcome }} + Checkout back netdata/netdata: ${{ steps.checkout-netdata.outcome }} + Init python environment: ${{ steps.init-python.outcome }} + Setup python environment: ${{ steps.setup-python.outcome }} + Check the nearly published release against the advertised: ${{ steps.check-latest-version.outcome }} + Setup ssh: ${{ steps.ssh-setup.outcome }} + Sync with the releases: ${{ steps.sync-releases.outcome }} SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }} if: >- ${{ @@ -811,7 +951,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Retrieve Artifacts id: fetch uses: actions/download-artifact@v3 diff --git a/.github/workflows/checks-dummy.yml b/.github/workflows/checks-dummy.yml deleted file mode 100644 index 369d70ff91d15b..00000000000000 --- a/.github/workflows/checks-dummy.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -name: Checks -on: - pull_request: - paths-ignore: # This MUST be kept in sync with the paths key for the checks.yml workflow. 
- - '**.c' - - '**.cc' - - '**.h' - - '**.hh' - - '**.in' - - '!netdata.spec.in' - - 'configure.ac' - - '**/Makefile*' - - 'Makefile*' - - '.gitignore' - - '.github/workflows/checks.yml' - - 'build/**' - - 'aclk/aclk-schemas/' - - 'ml/dlib/' - - 'mqtt_websockets' - - 'web/server/h2o/libh2o' -env: - DISABLE_TELEMETRY: 1 -concurrency: - group: checks-${{ github.ref }} - cancel-in-progress: true -jobs: - libressl-checks: - name: LibreSSL - runs-on: ubuntu-latest - steps: - - run: "echo 'NOT REQUIRED'" - clang-checks: - name: Clang - runs-on: ubuntu-latest - steps: - - run: "echo 'NOT REQUIRED'" - gitignore-check: - name: .gitignore - runs-on: ubuntu-latest - steps: - - run: "echo 'NOT REQUIRED'" diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 4c892ffce80898..1308f45fa59721 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -2,58 +2,77 @@ name: Checks on: push: - paths: - - '**.c' - - '**.cc' - - '**.h' - - '**.hh' - - '**.in' - - '!netdata.spec.in' - - 'configure.ac' - - '**/Makefile*' - - 'Makefile*' - - '.gitignore' - - '.github/workflows/checks.yml' - - 'build/**' - - 'aclk/aclk-schemas/' - - 'ml/dlib/' - - 'mqtt_websockets' - - 'web/server/h2o/libh2o' branches: - master - pull_request: - paths: # This MUST be kept in-sync with the paths-ignore key for the checks-dummy.yml workflow. - - '**.c' - - '**.cc' - - '**.h' - - '**.hh' - - '**.in' - - '!netdata.spec.in' - - 'configure.ac' - - '**/Makefile*' - - 'Makefile*' - - '.gitignore' - - '.github/workflows/checks.yml' - - 'build/**' - - 'aclk/aclk-schemas/' - - 'ml/dlib/' - - 'mqtt_websockets' - - 'web/server/h2o/libh2o' + pull_request: null env: DISABLE_TELEMETRY: 1 concurrency: group: checks-${{ github.ref }} cancel-in-progress: true jobs: + file-check: # Check what files changed if we’re being run in a PR or on a push. 
+ name: Check Modified Files + runs-on: ubuntu-latest + outputs: + run: ${{ steps.check-run.outputs.run }} + steps: + - name: Checkout + id: checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: recursive + - name: Check files + id: check-files + uses: tj-actions/changed-files@v40 + with: + since_last_remote_commit: ${{ github.event_name != 'pull_request' }} + files: | + **.c + **.cc + **.h + **.hh + **.in + configure.ac + **/Makefile* + Makefile* + .gitignore + .github/workflows/checks.yml + build/** + aclk/aclk-schemas/ + ml/dlib/ + mqtt_websockets + web/server/h2o/libh2o + files_ignore: | + netdata.spec.in + **.md + - name: Check Run + id: check-run + run: | + if [ "${{ steps.check-files.outputs.any_modified }}" == "true" ] || [ "${{ github.event_name }}" == "workflow_dispatch" ]; then + echo 'run=true' >> "${GITHUB_OUTPUT}" + else + echo 'run=false' >> "${GITHUB_OUTPUT}" + fi + libressl-checks: name: LibreSSL + needs: + - file-check runs-on: ubuntu-latest steps: + - name: Skip Check + id: skip + if: needs.file-check.outputs.run != 'true' + run: echo "SKIPPED" - name: Checkout - uses: actions/checkout@v3 + if: needs.file-check.outputs.run == 'true' + uses: actions/checkout@v4 with: submodules: recursive - name: Build + if: needs.file-check.outputs.run == 'true' run: > docker run -v "$PWD":/netdata -w /netdata alpine:latest /bin/sh -c 'apk add bash; @@ -63,30 +82,49 @@ jobs: autoreconf -ivf; ./configure --disable-dependency-tracking; make;' + clang-checks: name: Clang + needs: + - file-check runs-on: ubuntu-latest steps: + - name: Skip Check + id: skip + if: needs.file-check.outputs.run != 'true' + run: echo "SKIPPED" - name: Checkout - uses: actions/checkout@v3 + if: needs.file-check.outputs.run == 'true' + uses: actions/checkout@v4 with: submodules: recursive - name: Build - run: | - docker build -f .github/dockerfiles/Dockerfile.clang . + if: needs.file-check.outputs.run == 'true' + run: docker build -f .github/dockerfiles/Dockerfile.clang . 
+ gitignore-check: name: .gitignore + needs: + - file-check runs-on: ubuntu-latest steps: + - name: Skip Check + id: skip + if: needs.file-check.outputs.run != 'true' + run: echo "SKIPPED" - name: Checkout - uses: actions/checkout@v3 + if: needs.file-check.outputs.run == 'true' + uses: actions/checkout@v4 with: submodules: recursive - name: Prepare environment + if: needs.file-check.outputs.run == 'true' run: ./packaging/installer/install-required-packages.sh --dont-wait --non-interactive netdata - name: Build netdata + if: needs.file-check.outputs.run == 'true' run: ./netdata-installer.sh --dont-start-it --disable-telemetry --dont-wait --install-prefix /tmp/install --one-time-build - name: Check that repo is clean + if: needs.file-check.outputs.run == 'true' run: | git status --porcelain=v1 > /tmp/porcelain if [ -s /tmp/porcelain ]; then diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 174f650eacf01f..ae5818afc05cf2 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -23,7 +23,7 @@ jobs: python: ${{ steps.python.outputs.run }} steps: - name: Clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive fetch-depth: 0 @@ -76,7 +76,7 @@ jobs: security-events: write steps: - name: Git clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive fetch-depth: 0 @@ -103,7 +103,7 @@ jobs: security-events: write steps: - name: Git clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive fetch-depth: 0 diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml index 8a1ee2486e997d..eb68c302b2ffec 100644 --- a/.github/workflows/coverity.yml +++ b/.github/workflows/coverity.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 id: checkout with: submodules: recursive diff --git a/.github/workflows/dashboard-pr.yml b/.github/workflows/dashboard-pr.yml index ac414da10920fa..f02cfb69dd036a 100644 --- a/.github/workflows/dashboard-pr.yml +++ b/.github/workflows/dashboard-pr.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Update Files id: update run: | diff --git a/.github/workflows/docker-dummy.yml b/.github/workflows/docker-dummy.yml deleted file mode 100644 index 64131dac569fdc..00000000000000 --- a/.github/workflows/docker-dummy.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -name: Docker -on: - pull_request: - paths-ignore: # This MUST be kept in-sync with the paths key for the dummy.yml workflow. 
- - '**.c' - - '**.cc' - - '**.h' - - '**.hh' - - '**.in' - - '!netdata.spec.in' - - '.dockerignore' - - 'configure.ac' - - 'netdata-installer.sh' - - '**/Makefile*' - - 'Makefile*' - - '.github/workflows/docker.yml' - - '.github/scripts/docker-test.sh' - - 'build/**' - - 'packaging/docker/**' - - 'packaging/installer/**' - - 'aclk/aclk-schemas/' - - 'ml/dlib/' - - 'mqtt_websockets' - - 'web/server/h2o/libh2o' - - '!**.md' -env: - DISABLE_TELEMETRY: 1 -concurrency: - group: docker-${{ github.ref }}-${{ github.event_name }} - cancel-in-progress: true -jobs: - docker-test: - name: Docker Runtime Test - runs-on: ubuntu-latest - steps: - - run: echo 'NOT REQUIRED' - - docker-ci: - name: Docker Alt Arch Builds - needs: docker-test - runs-on: ubuntu-latest - strategy: - matrix: - platforms: - - linux/i386 - - linux/arm/v7 - - linux/arm64 - - linux/ppc64le - steps: - - run: echo 'NOT REQUIRED' diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index aad83ced578767..b7fe0a8666fabb 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -4,29 +4,7 @@ on: push: branches: - master - pull_request: - paths: # This MUST be kept in-sync with the paths-ignore key for the docker-dummy.yml workflow. - - '**.c' - - '**.cc' - - '**.h' - - '**.hh' - - '**.in' - - '!netdata.spec.in' - - '.dockerignore' - - 'configure.ac' - - 'netdata-installer.sh' - - '**/Makefile*' - - 'Makefile*' - - '.github/workflows/docker.yml' - - '.github/scripts/docker-test.sh' - - 'build/**' - - 'packaging/docker/**' - - 'packaging/installer/**' - - 'aclk/aclk-schemas/' - - 'ml/dlib/' - - 'mqtt_websockets' - - 'web/server/h2o/libh2o' - - '!**.md' + pull_request: null workflow_dispatch: inputs: version: @@ -39,27 +17,86 @@ concurrency: group: docker-${{ github.ref }}-${{ github.event_name }} cancel-in-progress: true jobs: + file-check: # Check what files changed if we’re being run in a PR or on a push. 
+ name: Check Modified Files + runs-on: ubuntu-latest + outputs: + run: ${{ steps.check-run.outputs.run }} + steps: + - name: Checkout + id: checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: recursive + - name: Check files + id: file-check + uses: tj-actions/changed-files@v40 + with: + since_last_remote_commit: ${{ github.event_name != 'pull_request' }} + files: | + **.c + **.cc + **.h + **.hh + **.in + .dockerignore + configure.ac + netdata-installer.sh + **/Makefile* + Makefile* + .github/workflows/docker.yml + .github/scripts/docker-test.sh + build/** + packaging/docker/** + packaging/installer/** + aclk/aclk-schemas/ + ml/dlib/ + mqtt_websockets + web/server/h2o/libh2o + files_ignore: | + netdata.spec.in + **.md + - name: Check Run + id: check-run + run: | + if [ "${{ steps.file-check.outputs.any_modified }}" == "true" ] || [ "${{ github.event_name }}" == "workflow_dispatch" ]; then + echo 'run=true' >> "${GITHUB_OUTPUT}" + else + echo 'run=false' >> "${GITHUB_OUTPUT}" + fi + docker-test: name: Docker Runtime Test + needs: + - file-check runs-on: ubuntu-latest steps: + - name: Skip Check + id: skip + if: needs.file-check.outputs.run != 'true' + run: echo "SKIPPED" - name: Checkout id: checkout - uses: actions/checkout@v3 + if: needs.file-check.outputs.run == 'true' + uses: actions/checkout@v4 with: submodules: recursive - name: Setup Buildx id: prepare - uses: docker/setup-buildx-action@v2 + if: needs.file-check.outputs.run == 'true' + uses: docker/setup-buildx-action@v3 - name: Test Build id: build - uses: docker/build-push-action@v4 + if: needs.file-check.outputs.run == 'true' + uses: docker/build-push-action@v5 with: load: true push: false tags: netdata/netdata:test - name: Test Image id: test + if: needs.file-check.outputs.run == 'true' run: .github/scripts/docker-test.sh - name: Failure Notification uses: rtCamp/action-slack-notify@v2 @@ -82,12 +119,15 @@ jobs: && github.event_name != 'pull_request' && startsWith(github.ref, 'refs/heads/master') && github.repository == 'netdata/netdata' + && needs.file-check.outputs.run == 'true' }} docker-ci: if: github.event_name != 'workflow_dispatch' name: Docker Alt Arch Builds - needs: docker-test + needs: + - docker-test + - file-check runs-on: ubuntu-latest strategy: matrix: @@ -97,21 +137,28 @@ jobs: - linux/arm64 - linux/ppc64le steps: + - name: Skip Check + id: skip + if: needs.file-check.outputs.run != 'true' + run: echo "SKIPPED" - name: Checkout id: checkout - uses: actions/checkout@v3 + if: needs.file-check.outputs.run == 'true' + uses: actions/checkout@v4 with: submodules: recursive - name: Setup QEMU id: qemu - if: matrix.platforms != 'linux/i386' - uses: docker/setup-qemu-action@v2 + if: matrix.platforms != 'linux/i386' && needs.file-check.outputs.run == 'true' + uses: docker/setup-qemu-action@v3 - name: Setup Buildx id: buildx - uses: docker/setup-buildx-action@v2 + if: needs.file-check.outputs.run == 'true' + uses: docker/setup-buildx-action@v3 - name: Build id: build - uses: docker/build-push-action@v4 + if: needs.file-check.outputs.run == 'true' + uses: docker/build-push-action@v5 with: platforms: ${{ matrix.platforms }} load: false @@ -138,6 +185,7 @@ jobs: && github.event_name != 'pull_request' && startsWith(github.ref, 'refs/heads/master') && github.repository == 'netdata/netdata' + && needs.file-check.outputs.run == 'true' }} normalize-tag: # Fix the release tag if needed @@ -166,7 +214,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: 
submodules: recursive - name: Determine which tags to use @@ -186,21 +234,21 @@ jobs: run: echo "OFFICIAL_IMAGE=true" >> "${GITHUB_ENV}" - name: Setup QEMU id: qemu - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Setup Buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Docker Hub Login id: docker-hub-login if: github.repository == 'netdata/netdata' - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_HUB_USERNAME }} password: ${{ secrets.DOCKER_HUB_PASSWORD }} - name: GitHub Container Registry Login id: ghcr-login if: github.repository == 'netdata/netdata' - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} @@ -208,14 +256,14 @@ jobs: - name: Quay.io Login id: quay-login if: github.repository == 'netdata/netdata' - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: quay.io username: ${{ secrets.NETDATABOT_QUAY_USERNAME }} password: ${{ secrets.NETDATABOT_QUAY_TOKEN }} - name: Docker Build id: build - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: platforms: linux/amd64,linux/i386,linux/arm/v7,linux/arm64,linux/ppc64le push: ${{ github.repository == 'netdata/netdata' }} @@ -278,7 +326,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive - name: Determine which tags to use @@ -298,21 +346,21 @@ jobs: run: echo "OFFICIAL_IMAGE=true" >> "${GITHUB_ENV}" - name: Setup QEMU id: qemu - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Setup Buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Docker Hub Login id: docker-hub-login if: github.repository == 'netdata/netdata' - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_HUB_USERNAME }} password: ${{ secrets.DOCKER_HUB_PASSWORD }} - name: GitHub Container Registry Login id: ghcr-login if: github.repository == 'netdata/netdata' - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} @@ -320,14 +368,14 @@ jobs: - name: Quay.io Login id: quay-login if: github.repository == 'netdata/netdata' - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: quay.io username: ${{ secrets.NETDATABOT_QUAY_USERNAME }} password: ${{ secrets.NETDATABOT_QUAY_TOKEN }} - name: Docker Build id: build - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: platforms: linux/amd64,linux/i386,linux/arm/v7,linux/arm64,linux/ppc64le push: ${{ github.repository == 'netdata/netdata' }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 69fda40c3ee706..a0554b16779383 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive - name: Run link check diff --git a/.github/workflows/generate-integrations.yml b/.github/workflows/generate-integrations.yml index 599cefbc0b88c1..4128e992561bd2 100644 --- a/.github/workflows/generate-integrations.yml +++ b/.github/workflows/generate-integrations.yml @@ -1,6 +1,5 @@ --- -# CI workflow used to regenerate `integrations/integrations.js` when -# relevant source files are changed. 
+# CI workflow used to regenerate `integrations/integrations.js` and accompanying documentation when relevant source files are changed. name: Generate Integrations on: push: @@ -28,7 +27,7 @@ jobs: steps: - name: Checkout Agent id: checkout-agent - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 1 submodules: recursive @@ -37,7 +36,7 @@ jobs: run: echo "go_ref=$(cat packaging/go.d.version)" >> "${GITHUB_ENV}" - name: Checkout Go id: checkout-go - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 1 path: go.d.plugin @@ -55,6 +54,14 @@ jobs: run: | source ./virtualenv/bin/activate python3 integrations/gen_integrations.py + - name: Generate Integrations Documentation + id: generate-integrations-documentation + run: | + python3 integrations/gen_docs_integrations.py + - name: Generate collectors/COLLECTORS.md + id: generate-collectors-md + run: | + python3 integrations/gen_doc_collector_page.py - name: Clean Up Temporary Data id: clean run: rm -rf go.d.plugin virtualenv @@ -67,7 +74,7 @@ jobs: branch: integrations-regen title: Regenerate integrations.js body: | - Regenerate `integrations/integrations.js` based on the + Regenerate `integrations/integrations.js`, and documentation based on the latest code. This PR was auto-generated by @@ -87,6 +94,8 @@ jobs: Checkout Go: ${{ steps.checkout-go.outcome }} Prepare Dependencies: ${{ steps.prep-deps.outcome }} Generate Integrations: ${{ steps.generate.outcome }} + Generate Integrations Documentation: ${{ steps.generate-integrations-documentation.outcome }} + Generate collectors/COLLECTORS.md: ${{ steps.generate-collectors-md.outcome }} Clean Up Temporary Data: ${{ steps.clean.outcome }} Create PR: ${{ steps.create-pr.outcome }} SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/monitor-releases.yml b/.github/workflows/monitor-releases.yml new file mode 100644 index 00000000000000..649cf68aab7bac --- /dev/null +++ b/.github/workflows/monitor-releases.yml @@ -0,0 +1,72 @@ +--- +name: Monitor-releases + +on: + release: + types: [released, deleted] + workflow_dispatch: + inputs: + channel: + description: 'Specify the release channel' + required: true + default: 'stable' + + +concurrency: # This keeps multiple instances of the job from running concurrently for the same ref and event type. 
+ group: monitor-${{ github.event.inputs.channel }}-releases-${{ github.ref }}-${{ github.event_name }} + cancel-in-progress: true + +jobs: + update-stable-agents-metadata: + name: update-stable-agents-metadata + runs-on: ubuntu-latest + steps: + - name: Checkout + id: checkout + uses: actions/checkout@v4 + with: + token: ${{ secrets.NETDATABOT_GITHUB_TOKEN }} + - name: Init python environment + uses: actions/setup-python@v4 + id: init-python + with: + python-version: "3.12" + - name: Setup python environment + id: setup-python + run: | + pip install -r .github/scripts/modules/requirements.txt + - name: Check for newer versions + id: check-newer-releases + run: | + python .github/scripts/check_latest_versions_per_channel.py "${{ github.event.inputs.channel }}" + - name: SSH setup + id: ssh-setup + if: github.event_name == 'workflow_dispatch' && github.repository == 'netdata/netdata' && steps.check-newer-releases.outputs.versions_needs_update == 'true' + uses: shimataro/ssh-key-action@v2 + with: + key: ${{ secrets.NETDATABOT_PACKAGES_SSH_KEY }} + name: id_ecdsa + known_hosts: ${{ secrets.PACKAGES_KNOWN_HOSTS }} + - name: Sync newer releases + id: sync-releases + if: github.event_name == 'workflow_dispatch' && github.repository == 'netdata/netdata' && steps.check-newer-releases.outputs.versions_needs_update == 'true' + run: | + .github/scripts/upload-new-version-tags.sh + - name: Failure Notification + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_COLOR: 'danger' + SLACK_FOOTER: '' + SLACK_ICON_EMOJI: ':github-actions:' + SLACK_TITLE: 'Failed to update stable Agent metadata:' + SLACK_USERNAME: 'GitHub Actions' + SLACK_MESSAGE: |- + ${{ github.repository }}: Failed to update stable Agent's metadata. + Checkout: ${{ steps.checkout.outcome }} + Init python: ${{ steps.init-python.outcome }} + Setup python: ${{ steps.setup-python.outcome }} + Check for newer stable releases: ${{ steps.check-newer-releases.outcome }} + Setup ssh: ${{ steps.ssh-setup.outcome }} + Syncing newer release to packages.netdata.cloud: ${{ steps.sync-releases.outcome }} + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }} + if: failure() diff --git a/.github/workflows/packagecloud.yml b/.github/workflows/packagecloud.yml index ba70c177bc0451..3c427756a4ba2c 100644 --- a/.github/workflows/packagecloud.yml +++ b/.github/workflows/packagecloud.yml @@ -20,7 +20,7 @@ jobs: - devel steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 id: checkout with: submodules: recursive diff --git a/.github/workflows/packaging-dummy.yml b/.github/workflows/packaging-dummy.yml deleted file mode 100644 index 653227e1c8a30f..00000000000000 --- a/.github/workflows/packaging-dummy.yml +++ /dev/null @@ -1,80 +0,0 @@ ---- -# Handles building of binary packages for the agent. -# -# This workflow exists so that we can make these required checks but -# still skip running them on PRs where they are not relevant. -name: Packages -on: - pull_request: - types: - - opened - - reopened - - labeled - - synchronize - paths-ignore: # This MUST be kept in-sync with the paths key for the packaging.yml workflow.
- - '**.c' - - '**.cc' - - '**.h' - - '**.hh' - - '**.in' - - 'netdata.spec.in' - - 'configure.ac' - - '**/Makefile*' - - 'Makefile*' - - '.github/workflows/packaging.yml' - - '.github/scripts/gen-matrix-packaging.py' - - '.github/scripts/pkg-test.sh' - - 'build/**' - - 'packaging/*.sh' - - 'packaging/*.checksums' - - 'packaging/*.version' - - 'contrib/debian/**' - - 'aclk/aclk-schemas/' - - 'ml/dlib/' - - 'mqtt_websockets' - - 'web/server/h2o/libh2o' - - '!**.md' -env: - DISABLE_TELEMETRY: 1 - REPO_PREFIX: netdata/netdata -concurrency: - group: packages-${{ github.ref }}-${{ github.event_name }} - cancel-in-progress: true -jobs: - matrix: - name: Prepare Build Matrix - runs-on: ubuntu-latest - outputs: - matrix: ${{ steps.set-matrix.outputs.matrix }} - steps: - - name: Checkout - id: checkout - uses: actions/checkout@v3 - - name: Prepare tools - id: prepare - run: | - sudo apt-get update && sudo apt-get install -y python3-ruamel.yaml - - name: Read build matrix - id: set-matrix - run: | - if [ "${{ github.event_name }}" = "pull_request" ] && \ - [ "${{ !contains(github.event.pull_request.labels.*.name, 'run-ci/packaging') }}" = "true" ]; then - matrix="$(.github/scripts/gen-matrix-packaging.py 1)" - else - matrix="$(.github/scripts/gen-matrix-packaging.py 0)" - fi - echo "Generated matrix: ${matrix}" - echo "matrix=${matrix}" >> "${GITHUB_OUTPUT}" - - build: - name: Build - runs-on: ubuntu-latest - env: - DOCKER_CLI_EXPERIMENTAL: enabled - needs: - - matrix - strategy: - matrix: ${{ fromJson(needs.matrix.outputs.matrix) }} - fail-fast: false - steps: - - run: echo 'NOT REQUIRED' diff --git a/.github/workflows/packaging.yml b/.github/workflows/packaging.yml index 7e8c7e527a5925..eb936c4d902bbd 100644 --- a/.github/workflows/packaging.yml +++ b/.github/workflows/packaging.yml @@ -8,31 +8,6 @@ on: - reopened - labeled - synchronize - paths: # This MUST be kept in-sync with the paths-ignore key for the packaging-dummy.yml workflow. - - '**.c' - - '**.cc' - - '**.h' - - '**.hh' - - '**.in' - - 'netdata.spec.in' - - 'configure.ac' - - '**/Makefile*' - - 'Makefile*' - - '.github/workflows/packaging.yml' - - '.github/scripts/gen-matrix-packaging.py' - - '.github/scripts/pkg-test.sh' - - 'build/**' - - 'packaging/*.sh' - - 'packaging/*.checksums' - - 'packaging/*.version' - - 'contrib/debian/**' - - 'aclk/aclk-schemas/' - - 'ml/dlib/' - - 'mqtt_websockets' - - 'web/server/h2o/libh2o' - - '!**.md' - branches: - - master push: branches: - master @@ -52,6 +27,57 @@ concurrency: group: packages-${{ github.ref }}-${{ github.event_name }} cancel-in-progress: true jobs: + file-check: # Check what files changed if we’re being run in a PR or on a push. 
+ name: Check Modified Files + runs-on: ubuntu-latest + outputs: + run: ${{ steps.check-run.outputs.run }} + steps: + - name: Checkout + id: checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: recursive + - name: Check files + id: file-check + uses: tj-actions/changed-files@v40 + with: + since_last_remote_commit: ${{ github.event_name != 'pull_request' }} + files: | + **.c + **.cc + **.h + **.hh + **.in + netdata.spec.in + configure.ac + **/Makefile* + Makefile* + .github/data/distros.yml + .github/workflows/packaging.yml + .github/scripts/gen-matrix-packaging.py + .github/scripts/pkg-test.sh + build/** + packaging/*.sh + packaging/*.checksums + packaging/*.version + contrib/debian/** + aclk/aclk-schemas/ + ml/dlib/ + mqtt_websockets + web/server/h2o/libh2o + files_ignore: | + **.md + - name: Check Run + id: check-run + run: | + if [ "${{ steps.file-check.outputs.any_modified }}" == "true" ] || [ "${{ github.event_name }}" == "workflow_dispatch" ]; then + echo 'run=true' >> "${GITHUB_OUTPUT}" + else + echo 'run=false' >> "${GITHUB_OUTPUT}" + fi + matrix: name: Prepare Build Matrix runs-on: ubuntu-latest @@ -60,7 +86,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare tools id: prepare run: | @@ -107,7 +133,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check Version id: check-version run: | @@ -161,6 +187,7 @@ jobs: needs: - matrix - version-check + - file-check strategy: matrix: ${{ fromJson(needs.matrix.outputs.matrix) }} # We intentiaonally disable the fail-fast behavior so that a @@ -169,24 +196,31 @@ jobs: fail-fast: false max-parallel: 8 steps: + - name: Skip Check + id: skip + if: needs.file-check.outputs.run != 'true' + run: echo "SKIPPED" - name: Checkout id: checkout - uses: actions/checkout@v3 + if: needs.file-check.outputs.run == 'true' + uses: actions/checkout@v4 with: fetch-depth: 0 # We need full history for versioning submodules: recursive - name: Setup QEMU id: qemu - if: matrix.platform != 'linux/amd64' && matrix.platform != 'linux/i386' - uses: docker/setup-qemu-action@v2 + if: matrix.platform != 'linux/amd64' && matrix.platform != 'linux/i386' && needs.file-check.outputs.run == 'true' + uses: docker/setup-qemu-action@v3 - name: Prepare Docker Environment id: docker-config + if: needs.file-check.outputs.run == 'true' shell: bash run: | echo '{"cgroup-parent": "actions-job.slice", "experimental": true}' | sudo tee /etc/docker/daemon.json 2>/dev/null sudo service docker restart - name: Fetch images id: fetch-images + if: needs.file-check.outputs.run == 'true' uses: nick-invision/retry@v2 with: max_attempts: 3 @@ -194,15 +228,17 @@ jobs: timeout_seconds: 900 command: | docker pull --platform ${{ matrix.platform }} ${{ matrix.base_image }} - docker pull --platform ${{ matrix.platform }} netdata/package-builders:${{ matrix.distro }}${{ matrix.version }} + docker pull --platform ${{ matrix.platform }} netdata/package-builders:${{ matrix.distro }}${{ matrix.version }}-v1 - name: Build Packages id: build + if: needs.file-check.outputs.run == 'true' shell: bash run: | docker run --security-opt seccomp=unconfined -e DISABLE_TELEMETRY=1 -e VERSION=${{ needs.version-check.outputs.version }} \ - --platform=${{ matrix.platform }} -v "$PWD":/netdata netdata/package-builders:${{ matrix.distro }}${{ matrix.version }} + --platform=${{ matrix.platform }} -v "$PWD":/netdata netdata/package-builders:${{ matrix.distro }}${{ matrix.version 
}}-v1 - name: Save Packages id: artifacts + if: needs.file-check.outputs.run == 'true' continue-on-error: true uses: actions/upload-artifact@v3 with: @@ -210,6 +246,7 @@ jobs: path: ${{ github.workspace }}/artifacts/* - name: Test Packages id: test + if: needs.file-check.outputs.run == 'true' shell: bash run: | docker run --security-opt seccomp=unconfined -e DISABLE_TELEMETRY=1 -e DISTRO=${{ matrix.distro }} \ @@ -218,7 +255,7 @@ jobs: /netdata/.github/scripts/pkg-test.sh - name: Upload to PackageCloud id: upload - if: github.event_name == 'workflow_dispatch' && github.repository == 'netdata/netdata' + if: github.event_name == 'workflow_dispatch' && github.repository == 'netdata/netdata' && needs.file-check.outputs.run == 'true' continue-on-error: true shell: bash env: @@ -232,7 +269,7 @@ jobs: done - name: SSH setup id: ssh-setup - if: github.event_name == 'workflow_dispatch' && github.repository == 'netdata/netdata' + if: github.event_name == 'workflow_dispatch' && github.repository == 'netdata/netdata' && needs.file-check.outputs.run == 'true' uses: shimataro/ssh-key-action@v2 with: key: ${{ secrets.NETDATABOT_PACKAGES_SSH_KEY }} @@ -240,7 +277,7 @@ jobs: known_hosts: ${{ secrets.PACKAGES_KNOWN_HOSTS }} - name: Upload to packages.netdata.cloud id: package-upload - if: github.event_name == 'workflow_dispatch' && github.repository == 'netdata/netdata' + if: github.event_name == 'workflow_dispatch' && github.repository == 'netdata/netdata' && needs.file-check.outputs.run == 'true' run: | .github/scripts/package-upload.sh \ ${{ matrix.repo_distro }} \ @@ -272,4 +309,5 @@ jobs: && github.event_name != 'pull_request' && startsWith(github.ref, 'refs/heads/master') && github.repository == 'netdata/netdata' + && needs.file-check.outputs.run == 'true' }} diff --git a/.github/workflows/platform-eol-check.yml b/.github/workflows/platform-eol-check.yml index d1f4416cde6820..ae290a973ef974 100644 --- a/.github/workflows/platform-eol-check.yml +++ b/.github/workflows/platform-eol-check.yml @@ -22,7 +22,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare tools id: prepare run: | @@ -66,7 +66,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Actually check the EOL date for the platform. 
- name: Check EOL Date id: check diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e675d789fb3bc2..2fa51cc52f5327 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -29,7 +29,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 submodules: recursive @@ -116,7 +116,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ needs.update-changelogs.outputs.ref }} - name: Trigger build @@ -151,7 +151,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ needs.update-changelogs.outputs.ref }} - name: Trigger build @@ -186,7 +186,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ needs.update-changelogs.outputs.ref }} - name: Trigger build diff --git a/.github/workflows/repoconfig-packages.yml b/.github/workflows/repoconfig-packages.yml index e2b41570fa39c7..df8fac2044661c 100644 --- a/.github/workflows/repoconfig-packages.yml +++ b/.github/workflows/repoconfig-packages.yml @@ -27,7 +27,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Prepare tools id: prepare run: | @@ -77,7 +77,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Unlike normally, we do not need a deep clone or submodules for this. - name: Fetch base image id: fetch-images diff --git a/.github/workflows/review.yml b/.github/workflows/review.yml index 7e76717edf581b..8cb61f2cf8fcc9 100644 --- a/.github/workflows/review.yml +++ b/.github/workflows/review.yml @@ -23,7 +23,7 @@ jobs: yamllint: ${{ steps.yamllint.outputs.run }} steps: - name: Clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive fetch-depth: 0 @@ -112,7 +112,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Git clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive fetch-depth: 0 @@ -129,7 +129,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Git clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: false fetch-depth: 0 @@ -162,7 +162,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Git clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive fetch-depth: 0 @@ -182,7 +182,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Git clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive fetch-depth: 0 @@ -203,7 +203,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Git clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Run hadolint @@ -219,7 +219,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Git clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive fetch-depth: 0 @@ -234,6 +234,7 @@ jobs: ./.git/* packaging/makeself/makeself.sh packaging/makeself/makeself-header.sh + ./fluent-bit/* yamllint: name: yamllint @@ -242,7 +243,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Git clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive fetch-depth: 0 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 5f83a440598336..46384ffc5e1fec 100644 --- 
a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -25,7 +25,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive - name: Prepare environment diff --git a/.gitignore b/.gitignore index daa55c0300af35..a53025c3be353e 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,7 @@ Makefile.in .*.swp *.old *.log +!collectors/log2journal/tests.d/*.log *.pyc Makefile @@ -41,6 +42,9 @@ sha256sums.txt # netdata binaries netdata netdatacli +systemd-cat-native +log2journal +!log2journal/ !netdata/ upload/ artifacts/ @@ -83,6 +87,9 @@ debugfs.plugin systemd-journal.plugin !systemd-journal.plugin/ +logs-management.plugin +!logs-management.plugin/ + # protoc generated files *.pb.cc *.pb.h @@ -142,6 +149,8 @@ daemon/get-kubernetes-labels.sh health/notifications/alarm-notify.sh claim/netdata-claim.sh +collectors/cgroups.plugin/cgroup-name.sh +collectors/cgroups.plugin/cgroup-network-helper.sh collectors/tc.plugin/tc-qos-helper.sh collectors/charts.d.plugin/charts.d.plugin collectors/python.d.plugin/python.d.plugin @@ -149,6 +158,8 @@ collectors/ioping.plugin/ioping.plugin collectors/go.d.plugin web/netdata-switch-dashboard.sh +logsmanagement/stress_test/stress_test + # installer generated files /netdata-uninstaller.sh /netdata-updater.sh diff --git a/.gitmodules b/.gitmodules index 2dae4a1dd237bc..ac8e131f5d482c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -13,3 +13,8 @@ path = web/server/h2o/libh2o url = https://github.com/h2o/h2o.git ignore = untracked +[submodule "fluent-bit"] + path = fluent-bit + url = https://github.com/fluent/fluent-bit.git + shallow = true + ignore = dirty \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 7055f4af179d9e..2fde81e9f7518d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,129 +1,447 @@ # Changelog -## [v1.42.4](https://github.com/netdata/netdata/tree/v1.42.4) (2023-09-18) +## [v1.44.1](https://github.com/netdata/netdata/tree/v1.44.1) (2023-12-12) -[Full Changelog](https://github.com/netdata/netdata/compare/v1.42.3...v1.42.4) +[Full Changelog](https://github.com/netdata/netdata/compare/v1.44.0...v1.44.1) + +**Merged pull requests:** + +- uninstaller remove log2journal and systemd-cat-native [\#16585](https://github.com/netdata/netdata/pull/16585) ([ilyam8](https://github.com/ilyam8)) +- Handle coverity issues related to Y2K38\_SAFETY [\#16583](https://github.com/netdata/netdata/pull/16583) ([stelfrag](https://github.com/stelfrag)) +- Add Alpine Linux 3.19 to CI. 
[\#16579](https://github.com/netdata/netdata/pull/16579) ([Ferroin](https://github.com/Ferroin)) +- make debugfs exit on sigpipe [\#16569](https://github.com/netdata/netdata/pull/16569) ([ilyam8](https://github.com/ilyam8)) +- Fix memory leak during host chart label cleanup [\#16568](https://github.com/netdata/netdata/pull/16568) ([stelfrag](https://github.com/stelfrag)) +- fix cpu arch/ram/disk values in buildinfo [\#16567](https://github.com/netdata/netdata/pull/16567) ([ilyam8](https://github.com/ilyam8)) +- Resolve issue on startup in servers with 1 core [\#16565](https://github.com/netdata/netdata/pull/16565) ([stelfrag](https://github.com/stelfrag)) +- Fix release metadata workflow [\#16563](https://github.com/netdata/netdata/pull/16563) ([tkatsoulas](https://github.com/tkatsoulas)) +- Make the systemd-journal mandatory package on Centos 7 and Amazon linux 2 [\#16562](https://github.com/netdata/netdata/pull/16562) ([tkatsoulas](https://github.com/tkatsoulas)) +- Fix for AMD GPU drm different format proc file [\#16561](https://github.com/netdata/netdata/pull/16561) ([MrZammler](https://github.com/MrZammler)) +- Revert "remove discourse badge from readme" [\#16560](https://github.com/netdata/netdata/pull/16560) ([ilyam8](https://github.com/ilyam8)) +- Change the workflow on how we set the right permissions for perf-plugin [\#16558](https://github.com/netdata/netdata/pull/16558) ([tkatsoulas](https://github.com/tkatsoulas)) +- Add README for gorilla [\#16553](https://github.com/netdata/netdata/pull/16553) ([vkalintiris](https://github.com/vkalintiris)) +- Bump new version on the release changelog GHA [\#16551](https://github.com/netdata/netdata/pull/16551) ([tkatsoulas](https://github.com/tkatsoulas)) +- set "HOME" after switching to netdata user [\#16548](https://github.com/netdata/netdata/pull/16548) ([ilyam8](https://github.com/ilyam8)) +- wip documentation about functions table [\#16535](https://github.com/netdata/netdata/pull/16535) ([ktsaou](https://github.com/ktsaou)) +- Remove openSUSE 15.4 from CI [\#16449](https://github.com/netdata/netdata/pull/16449) ([tkatsoulas](https://github.com/tkatsoulas)) +- Remove fedora 37 from CI [\#16422](https://github.com/netdata/netdata/pull/16422) ([tkatsoulas](https://github.com/tkatsoulas)) + +## [v1.44.0](https://github.com/netdata/netdata/tree/v1.44.0) (2023-12-06) + +[Full Changelog](https://github.com/netdata/netdata/compare/v1.43.2...v1.44.0) **Merged pull requests:** -- Regenerate integrations.js [\#15985](https://github.com/netdata/netdata/pull/15985) ([netdatabot](https://github.com/netdatabot)) -- Re-store rrdvars on late dimensions [\#15984](https://github.com/netdata/netdata/pull/15984) ([MrZammler](https://github.com/MrZammler)) -- Functions: allow collectors to be restarted [\#15983](https://github.com/netdata/netdata/pull/15983) ([ktsaou](https://github.com/ktsaou)) -- Metadata fixes for some collectors [\#15982](https://github.com/netdata/netdata/pull/15982) ([Ancairon](https://github.com/Ancairon)) -- update go.d.plugin to v0.55.0 [\#15981](https://github.com/netdata/netdata/pull/15981) ([ilyam8](https://github.com/ilyam8)) -- bump UI to v6.37.1 [\#15980](https://github.com/netdata/netdata/pull/15980) ([ilyam8](https://github.com/ilyam8)) -- apps.plugin function is not thread safe [\#15978](https://github.com/netdata/netdata/pull/15978) ([ktsaou](https://github.com/ktsaou)) -- Facets: fixes 5 [\#15976](https://github.com/netdata/netdata/pull/15976) ([ktsaou](https://github.com/ktsaou)) -- facets histogram: do not send db 
retention for facets [\#15974](https://github.com/netdata/netdata/pull/15974) ([ktsaou](https://github.com/ktsaou)) -- extend ml default training from ~24 to ~48 hours [\#15971](https://github.com/netdata/netdata/pull/15971) ([andrewm4894](https://github.com/andrewm4894)) -- facets histogram when empty [\#15970](https://github.com/netdata/netdata/pull/15970) ([ktsaou](https://github.com/ktsaou)) -- facets: do not shadow local variable [\#15968](https://github.com/netdata/netdata/pull/15968) ([ktsaou](https://github.com/ktsaou)) -- Skip trying to preserve file owners when bundling external code. [\#15966](https://github.com/netdata/netdata/pull/15966) ([Ferroin](https://github.com/Ferroin)) -- fix using undefined var when loading job statuses in python.d [\#15965](https://github.com/netdata/netdata/pull/15965) ([ilyam8](https://github.com/ilyam8)) -- facets: data-only queries [\#15961](https://github.com/netdata/netdata/pull/15961) ([ktsaou](https://github.com/ktsaou)) -- Clarifying the possible installation types [\#15958](https://github.com/netdata/netdata/pull/15958) ([tkatsoulas](https://github.com/tkatsoulas)) -- fix journal direction parsing [\#15957](https://github.com/netdata/netdata/pull/15957) ([ktsaou](https://github.com/ktsaou)) -- facets and journal improvements [\#15956](https://github.com/netdata/netdata/pull/15956) ([ktsaou](https://github.com/ktsaou)) -- Fix CID 400366 [\#15953](https://github.com/netdata/netdata/pull/15953) ([stelfrag](https://github.com/stelfrag)) -- Update descriptions. [\#15952](https://github.com/netdata/netdata/pull/15952) ([thiagoftsm](https://github.com/thiagoftsm)) -- Update slabinfo metadata [\#15951](https://github.com/netdata/netdata/pull/15951) ([thiagoftsm](https://github.com/thiagoftsm)) -- Disk Labels [\#15949](https://github.com/netdata/netdata/pull/15949) ([ktsaou](https://github.com/ktsaou)) -- streaming logs [\#15948](https://github.com/netdata/netdata/pull/15948) ([ktsaou](https://github.com/ktsaou)) -- Regenerate integrations.js [\#15946](https://github.com/netdata/netdata/pull/15946) ([netdatabot](https://github.com/netdatabot)) -- Integrations: Add a note to enable the collectors [\#15945](https://github.com/netdata/netdata/pull/15945) ([MrZammler](https://github.com/MrZammler)) -- Integrations: Add a note to install charts.d plugin [\#15943](https://github.com/netdata/netdata/pull/15943) ([MrZammler](https://github.com/MrZammler)) -- Add description about packages [\#15941](https://github.com/netdata/netdata/pull/15941) ([thiagoftsm](https://github.com/thiagoftsm)) -- facets optimizations [\#15940](https://github.com/netdata/netdata/pull/15940) ([ktsaou](https://github.com/ktsaou)) -- improved facets info [\#15936](https://github.com/netdata/netdata/pull/15936) ([ktsaou](https://github.com/ktsaou)) -- feat: Adds access control configuration for ntfy [\#15932](https://github.com/netdata/netdata/pull/15932) ([miversen33](https://github.com/miversen33)) -- fix memory leak on prometheus exporter and code cleanup [\#15929](https://github.com/netdata/netdata/pull/15929) ([ktsaou](https://github.com/ktsaou)) -- systemd-journal and facets: info and sources [\#15928](https://github.com/netdata/netdata/pull/15928) ([ktsaou](https://github.com/ktsaou)) -- systemd-journal and facets improvements [\#15926](https://github.com/netdata/netdata/pull/15926) ([ktsaou](https://github.com/ktsaou)) -- add specific info on how to access the dashboards [\#15925](https://github.com/netdata/netdata/pull/15925) 
([hugovalente-pm](https://github.com/hugovalente-pm)) -- Reduce workload during cleanup [\#15919](https://github.com/netdata/netdata/pull/15919) ([stelfrag](https://github.com/stelfrag)) -- Replace \_ with spaces for name variable for ntfy [\#15909](https://github.com/netdata/netdata/pull/15909) ([MAH69IK](https://github.com/MAH69IK)) -- python.d/sensors: Increase voltage limits 127 -\> 400 [\#15905](https://github.com/netdata/netdata/pull/15905) ([kylemanna](https://github.com/kylemanna)) -- Assorted Dockerfile cleanup. [\#15902](https://github.com/netdata/netdata/pull/15902) ([Ferroin](https://github.com/Ferroin)) -- Improve shutdown of the metadata thread [\#15901](https://github.com/netdata/netdata/pull/15901) ([stelfrag](https://github.com/stelfrag)) -- bump ui to v6.32.0 [\#15897](https://github.com/netdata/netdata/pull/15897) ([andrewm4894](https://github.com/andrewm4894)) -- Update change-metrics-storage.md [\#15896](https://github.com/netdata/netdata/pull/15896) ([Ancairon](https://github.com/Ancairon)) -- make `anomaly_detection.type_anomaly_rate` stacked [\#15895](https://github.com/netdata/netdata/pull/15895) ([andrewm4894](https://github.com/andrewm4894)) -- Update pfsense.md [\#15894](https://github.com/netdata/netdata/pull/15894) ([Ancairon](https://github.com/Ancairon)) -- Initial tooling for Integrations Documentation [\#15893](https://github.com/netdata/netdata/pull/15893) ([Ancairon](https://github.com/Ancairon)) -- Reset the obsolete flag on service thread [\#15892](https://github.com/netdata/netdata/pull/15892) ([MrZammler](https://github.com/MrZammler)) -- Add better recovery for corrupted metadata [\#15891](https://github.com/netdata/netdata/pull/15891) ([stelfrag](https://github.com/stelfrag)) -- Add index to ACLK table to improve update statements [\#15890](https://github.com/netdata/netdata/pull/15890) ([stelfrag](https://github.com/stelfrag)) -- Limit atomic operations for statistics [\#15887](https://github.com/netdata/netdata/pull/15887) ([ktsaou](https://github.com/ktsaou)) -- Properly document issues with installing on hosts without IPv4. [\#15882](https://github.com/netdata/netdata/pull/15882) ([Ferroin](https://github.com/Ferroin)) -- allow any field to be a facet [\#15880](https://github.com/netdata/netdata/pull/15880) ([ktsaou](https://github.com/ktsaou)) -- Regenerate integrations.js [\#15879](https://github.com/netdata/netdata/pull/15879) ([netdatabot](https://github.com/netdatabot)) -- use the newer XXH3 128bits algorithm, instead of the classic XXH128 [\#15878](https://github.com/netdata/netdata/pull/15878) ([ktsaou](https://github.com/ktsaou)) -- Skip copying environment/install-type files when checking existing installs. [\#15876](https://github.com/netdata/netdata/pull/15876) ([Ferroin](https://github.com/Ferroin)) -- ML add new `delete old models param` to readme [\#15873](https://github.com/netdata/netdata/pull/15873) ([andrewm4894](https://github.com/andrewm4894)) -- Update SQLITE version to 3.42.0 [\#15870](https://github.com/netdata/netdata/pull/15870) ([stelfrag](https://github.com/stelfrag)) -- Regenerate integrations.js [\#15867](https://github.com/netdata/netdata/pull/15867) ([netdatabot](https://github.com/netdatabot)) -- Add a fail reason to pinpoint exactly what went wrong [\#15866](https://github.com/netdata/netdata/pull/15866) ([stelfrag](https://github.com/stelfrag)) -- Add plugin and module information to collector integrations. 
[\#15864](https://github.com/netdata/netdata/pull/15864) ([Ferroin](https://github.com/Ferroin)) -- Regenerate integrations.js [\#15862](https://github.com/netdata/netdata/pull/15862) ([netdatabot](https://github.com/netdatabot)) -- Explicitly depend on version-matched plugins in native packages. [\#15861](https://github.com/netdata/netdata/pull/15861) ([Ferroin](https://github.com/Ferroin)) -- Apply a label prefix for netdata labels [\#15860](https://github.com/netdata/netdata/pull/15860) ([kevin-fwu](https://github.com/kevin-fwu)) -- fix proc meminfo cached calculation [\#15859](https://github.com/netdata/netdata/pull/15859) ([ilyam8](https://github.com/ilyam8)) -- Fix compilation warnings [\#15858](https://github.com/netdata/netdata/pull/15858) ([stelfrag](https://github.com/stelfrag)) -- packaging cleanup after \#15842 [\#15857](https://github.com/netdata/netdata/pull/15857) ([ilyam8](https://github.com/ilyam8)) -- Add a chart that groups anomaly rate by chart type. [\#15856](https://github.com/netdata/netdata/pull/15856) ([vkalintiris](https://github.com/vkalintiris)) -- fix packaging static build openssl 32bit [\#15855](https://github.com/netdata/netdata/pull/15855) ([ilyam8](https://github.com/ilyam8)) -- fix packaging mark stable static build [\#15854](https://github.com/netdata/netdata/pull/15854) ([ilyam8](https://github.com/ilyam8)) -- eBPF socket function [\#15850](https://github.com/netdata/netdata/pull/15850) ([thiagoftsm](https://github.com/thiagoftsm)) -- Facets histograms [\#15846](https://github.com/netdata/netdata/pull/15846) ([ktsaou](https://github.com/ktsaou)) -- reworked pluginsd caching of RDAs to avoid crashes [\#15845](https://github.com/netdata/netdata/pull/15845) ([ktsaou](https://github.com/ktsaou)) -- Fix static build SSL [\#15842](https://github.com/netdata/netdata/pull/15842) ([ktsaou](https://github.com/ktsaou)) -- bump bundled ui to v6.29.0 [\#15841](https://github.com/netdata/netdata/pull/15841) ([ilyam8](https://github.com/ilyam8)) -- Fix configure: WARNING: unrecognized options: --with-zlib [\#15840](https://github.com/netdata/netdata/pull/15840) ([stelfrag](https://github.com/stelfrag)) -- Fix compilation warning [\#15839](https://github.com/netdata/netdata/pull/15839) ([stelfrag](https://github.com/stelfrag)) -- Fix warning when compiling with -flto [\#15838](https://github.com/netdata/netdata/pull/15838) ([stelfrag](https://github.com/stelfrag)) -- workaround for systems that do not have SD\_JOURNAL\_OS\_ROOT [\#15837](https://github.com/netdata/netdata/pull/15837) ([ktsaou](https://github.com/ktsaou)) -- added ilove.html [\#15836](https://github.com/netdata/netdata/pull/15836) ([ktsaou](https://github.com/ktsaou)) -- Fix CID 382964: Code maintainability issues \(SIZEOF\_MISMATCH\) [\#15833](https://github.com/netdata/netdata/pull/15833) ([stelfrag](https://github.com/stelfrag)) -- Fix coverity 393052: API usage errors \(LOCK\) [\#15832](https://github.com/netdata/netdata/pull/15832) ([stelfrag](https://github.com/stelfrag)) -- systemd-journal in containers [\#15830](https://github.com/netdata/netdata/pull/15830) ([ktsaou](https://github.com/ktsaou)) -- RPM: fixed attrs for conf.d dirs [\#15828](https://github.com/netdata/netdata/pull/15828) ([k0ste](https://github.com/k0ste)) -- Avoid resource leak [\#15827](https://github.com/netdata/netdata/pull/15827) ([stelfrag](https://github.com/stelfrag)) -- Release fd if setsockopt or bind fails [\#15826](https://github.com/netdata/netdata/pull/15826) ([stelfrag](https://github.com/stelfrag)) -- Fix use 
after free [\#15825](https://github.com/netdata/netdata/pull/15825) ([stelfrag](https://github.com/stelfrag)) -- Improve dyncfg exit [\#15824](https://github.com/netdata/netdata/pull/15824) ([underhood](https://github.com/underhood)) -- Release job message status to avoid memory leak [\#15822](https://github.com/netdata/netdata/pull/15822) ([stelfrag](https://github.com/stelfrag)) -- ML improve init [\#15819](https://github.com/netdata/netdata/pull/15819) ([stelfrag](https://github.com/stelfrag)) -- Update cmakelist [\#15817](https://github.com/netdata/netdata/pull/15817) ([stelfrag](https://github.com/stelfrag)) -- added /api/v2/ilove.svg endpoint [\#15815](https://github.com/netdata/netdata/pull/15815) ([ktsaou](https://github.com/ktsaou)) -- systemd-journal fixes [\#15814](https://github.com/netdata/netdata/pull/15814) ([ktsaou](https://github.com/ktsaou)) -- fix packaging: link health.log to stdout [\#15813](https://github.com/netdata/netdata/pull/15813) ([ilyam8](https://github.com/ilyam8)) -- docs rename alarm to alert [\#15812](https://github.com/netdata/netdata/pull/15812) ([ilyam8](https://github.com/ilyam8)) -- bump ui to v6.28.0 [\#15810](https://github.com/netdata/netdata/pull/15810) ([ilyam8](https://github.com/ilyam8)) -- return 412 instead of 403 when a bearer token is required [\#15808](https://github.com/netdata/netdata/pull/15808) ([ktsaou](https://github.com/ktsaou)) -- installer setuid fallback for perf and slabinfo plugins [\#15807](https://github.com/netdata/netdata/pull/15807) ([ilyam8](https://github.com/ilyam8)) -- fix api v1 mgmt/health [\#15806](https://github.com/netdata/netdata/pull/15806) ([underhood](https://github.com/underhood)) -- Fix systemd journal build deps in DEB packages. [\#15805](https://github.com/netdata/netdata/pull/15805) ([Ferroin](https://github.com/Ferroin)) -- Clean up python deps for RPM packages. [\#15804](https://github.com/netdata/netdata/pull/15804) ([Ferroin](https://github.com/Ferroin)) -- Add proper SUID fallback for DEB plugin packages. 
[\#15803](https://github.com/netdata/netdata/pull/15803) ([Ferroin](https://github.com/Ferroin)) -- nfacct.plugin increase restart time from 4 hours to 1 day [\#15801](https://github.com/netdata/netdata/pull/15801) ([ilyam8](https://github.com/ilyam8)) -- Function systemd-journal: always have a nd\_journal\_process [\#15798](https://github.com/netdata/netdata/pull/15798) ([ktsaou](https://github.com/ktsaou)) -- prevent reporting negative retention when the db is empty [\#15796](https://github.com/netdata/netdata/pull/15796) ([ktsaou](https://github.com/ktsaou)) -- Fix typo in Readme [\#15794](https://github.com/netdata/netdata/pull/15794) ([shyamvalsan](https://github.com/shyamvalsan)) -- fix hpssa handle unassigned drives [\#15793](https://github.com/netdata/netdata/pull/15793) ([ilyam8](https://github.com/ilyam8)) -- count functions as collections, to restart plugins [\#15787](https://github.com/netdata/netdata/pull/15787) ([ktsaou](https://github.com/ktsaou)) -- Set correct path for ansible-playbook in deployment tutorial [\#15786](https://github.com/netdata/netdata/pull/15786) ([novotnyJiri](https://github.com/novotnyJiri)) -- minor Dyncfg mvp0 fixes [\#15785](https://github.com/netdata/netdata/pull/15785) ([underhood](https://github.com/underhood)) -- fix docker-compose example [\#15784](https://github.com/netdata/netdata/pull/15784) ([zhqu1148980644](https://github.com/zhqu1148980644)) -- mark integrations milestones as completed in README.md [\#15783](https://github.com/netdata/netdata/pull/15783) ([tkatsoulas](https://github.com/tkatsoulas)) -- Update an oversight on the openSUSE 15.5 packages [\#15781](https://github.com/netdata/netdata/pull/15781) ([tkatsoulas](https://github.com/tkatsoulas)) -- Bump openssl version of static builds to 1.1.1v [\#15779](https://github.com/netdata/netdata/pull/15779) ([tkatsoulas](https://github.com/tkatsoulas)) -- fix: the cleanup was not performed during the kickstart.sh dry run [\#15775](https://github.com/netdata/netdata/pull/15775) ([ilyam8](https://github.com/ilyam8)) -- don't return `-1` if the socket was closed [\#15771](https://github.com/netdata/netdata/pull/15771) ([moonbreon](https://github.com/moonbreon)) -- Increase alert snapshot chunk size [\#15748](https://github.com/netdata/netdata/pull/15748) ([MrZammler](https://github.com/MrZammler)) -- Added CentOS-Stream to distros [\#15742](https://github.com/netdata/netdata/pull/15742) ([k0ste](https://github.com/k0ste)) -- Unconditionally delete very old models. 
[\#15720](https://github.com/netdata/netdata/pull/15720) ([vkalintiris](https://github.com/vkalintiris)) -- Misc code cleanup [\#15665](https://github.com/netdata/netdata/pull/15665) ([stelfrag](https://github.com/stelfrag)) -- Metadata cleanup improvements [\#15462](https://github.com/netdata/netdata/pull/15462) ([stelfrag](https://github.com/stelfrag)) +- update bundled UI to v6.66.1 [\#16554](https://github.com/netdata/netdata/pull/16554) ([ilyam8](https://github.com/ilyam8)) +- Improve page validity check during database extent load [\#16552](https://github.com/netdata/netdata/pull/16552) ([stelfrag](https://github.com/stelfrag)) +- Proper Learn-friendly links [\#16547](https://github.com/netdata/netdata/pull/16547) ([Ancairon](https://github.com/Ancairon)) +- docs required for release [\#16546](https://github.com/netdata/netdata/pull/16546) ([ktsaou](https://github.com/ktsaou)) +- Add option to change page type for tier 0 to gorilla [\#16545](https://github.com/netdata/netdata/pull/16545) ([vkalintiris](https://github.com/vkalintiris)) +- fix alpine deps [\#16543](https://github.com/netdata/netdata/pull/16543) ([tkatsoulas](https://github.com/tkatsoulas)) +- change level to debug "took too long to be updated" [\#16540](https://github.com/netdata/netdata/pull/16540) ([ilyam8](https://github.com/ilyam8)) +- apps: fix uptime for groups with 0 processes [\#16538](https://github.com/netdata/netdata/pull/16538) ([ilyam8](https://github.com/ilyam8)) +- Regenerate integrations.js [\#16536](https://github.com/netdata/netdata/pull/16536) ([netdatabot](https://github.com/netdatabot)) +- Reorg kickstart guide's steps [\#16534](https://github.com/netdata/netdata/pull/16534) ([tkatsoulas](https://github.com/tkatsoulas)) +- update go.d plugin to v0.57.2 [\#16533](https://github.com/netdata/netdata/pull/16533) ([ilyam8](https://github.com/ilyam8)) +- Update getting-started-light-poc.md [\#16532](https://github.com/netdata/netdata/pull/16532) ([M4itee](https://github.com/M4itee)) +- Acquire receiver\_lock to to avoid race condition [\#16531](https://github.com/netdata/netdata/pull/16531) ([stelfrag](https://github.com/stelfrag)) +- link aclk.log to stdout in docker [\#16529](https://github.com/netdata/netdata/pull/16529) ([ilyam8](https://github.com/ilyam8)) +- Update getting-started.md [\#16528](https://github.com/netdata/netdata/pull/16528) ([Ancairon](https://github.com/Ancairon)) +- Make image available to Learn + add a category overview page for new … [\#16527](https://github.com/netdata/netdata/pull/16527) ([Ancairon](https://github.com/Ancairon)) +- logs-management: Disable logs management monitoring section [\#16525](https://github.com/netdata/netdata/pull/16525) ([Dim-P](https://github.com/Dim-P)) +- log method = none is not respected [\#16523](https://github.com/netdata/netdata/pull/16523) ([ktsaou](https://github.com/ktsaou)) +- include more cases for megacli degraded state [\#16522](https://github.com/netdata/netdata/pull/16522) ([ClaraCrazy](https://github.com/ClaraCrazy)) +- update bundled UI to v6.65.0 [\#16520](https://github.com/netdata/netdata/pull/16520) ([ilyam8](https://github.com/ilyam8)) +- log2journal improvements 5 [\#16519](https://github.com/netdata/netdata/pull/16519) ([ktsaou](https://github.com/ktsaou)) +- change log level to debug for dbengine routine operations on start [\#16518](https://github.com/netdata/netdata/pull/16518) ([ilyam8](https://github.com/ilyam8)) +- remove system info logging [\#16517](https://github.com/netdata/netdata/pull/16517) 
([ilyam8](https://github.com/ilyam8)) +- python.d: logger: remove timestamp when logging to journald. [\#16516](https://github.com/netdata/netdata/pull/16516) ([ilyam8](https://github.com/ilyam8)) +- python.d: mute stock jobs logging during check\(\) [\#16515](https://github.com/netdata/netdata/pull/16515) ([ilyam8](https://github.com/ilyam8)) +- logs-management: Add prefix to chart names [\#16514](https://github.com/netdata/netdata/pull/16514) ([Dim-P](https://github.com/Dim-P)) +- docs: add with-systemd-units-monitoring example to docker [\#16513](https://github.com/netdata/netdata/pull/16513) ([ilyam8](https://github.com/ilyam8)) +- apps: fix "has aggregated" debug output [\#16512](https://github.com/netdata/netdata/pull/16512) ([ilyam8](https://github.com/ilyam8)) +- log2journal improvements 4 [\#16510](https://github.com/netdata/netdata/pull/16510) ([ktsaou](https://github.com/ktsaou)) +- journal improvements part 3 [\#16509](https://github.com/netdata/netdata/pull/16509) ([ktsaou](https://github.com/ktsaou)) +- convert some error messages to info [\#16508](https://github.com/netdata/netdata/pull/16508) ([ilyam8](https://github.com/ilyam8)) +- Resolve coverity issue 410232 [\#16507](https://github.com/netdata/netdata/pull/16507) ([stelfrag](https://github.com/stelfrag)) +- convert some error messages to info [\#16505](https://github.com/netdata/netdata/pull/16505) ([ilyam8](https://github.com/ilyam8)) +- diskspace/diskstats: don't create runtime disk config by default [\#16503](https://github.com/netdata/netdata/pull/16503) ([ilyam8](https://github.com/ilyam8)) +- Fix CID 410152 Dereference after null check [\#16502](https://github.com/netdata/netdata/pull/16502) ([stelfrag](https://github.com/stelfrag)) +- proc\_net\_dev: don't create runtime device config by default [\#16501](https://github.com/netdata/netdata/pull/16501) ([ilyam8](https://github.com/ilyam8)) +- Regenerate integrations.js [\#16500](https://github.com/netdata/netdata/pull/16500) ([netdatabot](https://github.com/netdatabot)) +- remove discourse badge from readme [\#16499](https://github.com/netdata/netdata/pull/16499) ([ilyam8](https://github.com/ilyam8)) +- add curl example to create\_netdata\_conf\(\) [\#16498](https://github.com/netdata/netdata/pull/16498) ([ilyam8](https://github.com/ilyam8)) +- add /var/log mount to docker [\#16496](https://github.com/netdata/netdata/pull/16496) ([ilyam8](https://github.com/ilyam8)) +- Fix occasional shutdown deadlock [\#16495](https://github.com/netdata/netdata/pull/16495) ([stelfrag](https://github.com/stelfrag)) +- Log2journal improvements part2 [\#16494](https://github.com/netdata/netdata/pull/16494) ([ktsaou](https://github.com/ktsaou)) +- proc\_net\_dev: remove device config section [\#16492](https://github.com/netdata/netdata/pull/16492) ([ilyam8](https://github.com/ilyam8)) +- Spelling fixes to documentation [\#16490](https://github.com/netdata/netdata/pull/16490) ([M4itee](https://github.com/M4itee)) +- Fix builds on macOS due to missing endianness functions [\#16489](https://github.com/netdata/netdata/pull/16489) ([vkalintiris](https://github.com/vkalintiris)) +- log2journal: added missing yaml elements [\#16488](https://github.com/netdata/netdata/pull/16488) ([ktsaou](https://github.com/ktsaou)) +- When unregistering an ephemeral host, delete its chart labels [\#16486](https://github.com/netdata/netdata/pull/16486) ([stelfrag](https://github.com/stelfrag)) +- logs-management: Add option to submit logs to system journal 
[\#16485](https://github.com/netdata/netdata/pull/16485) ([Dim-P](https://github.com/Dim-P)) +- logs-management: Add function cancellability [\#16484](https://github.com/netdata/netdata/pull/16484) ([Dim-P](https://github.com/Dim-P)) +- Fix incorrect DEB package build dep. [\#16483](https://github.com/netdata/netdata/pull/16483) ([Ferroin](https://github.com/Ferroin)) +- Bump new version to cov-analysis tool [\#16482](https://github.com/netdata/netdata/pull/16482) ([tkatsoulas](https://github.com/tkatsoulas)) +- log2journal moved to collectors [\#16481](https://github.com/netdata/netdata/pull/16481) ([ktsaou](https://github.com/ktsaou)) +- Disable netdata monitoring section by default [\#16480](https://github.com/netdata/netdata/pull/16480) ([MrZammler](https://github.com/MrZammler)) +- Log2journal yaml configuration support [\#16479](https://github.com/netdata/netdata/pull/16479) ([ktsaou](https://github.com/ktsaou)) +- log alarm notifications to health.log [\#16476](https://github.com/netdata/netdata/pull/16476) ([ktsaou](https://github.com/ktsaou)) +- journals management improvements [\#16475](https://github.com/netdata/netdata/pull/16475) ([ktsaou](https://github.com/ktsaou)) +- SEO changes for Collector names [\#16473](https://github.com/netdata/netdata/pull/16473) ([sashwathn](https://github.com/sashwathn)) +- Check context post processing queue before sending status to cloud [\#16472](https://github.com/netdata/netdata/pull/16472) ([stelfrag](https://github.com/stelfrag)) +- fix charts.d plugin loading configuration [\#16471](https://github.com/netdata/netdata/pull/16471) ([ilyam8](https://github.com/ilyam8)) +- Fix error limit to respect the log every [\#16469](https://github.com/netdata/netdata/pull/16469) ([stelfrag](https://github.com/stelfrag)) +- Journal better estimations and watcher [\#16467](https://github.com/netdata/netdata/pull/16467) ([ktsaou](https://github.com/ktsaou)) +- update go.d plugin version to v0.57.1 [\#16465](https://github.com/netdata/netdata/pull/16465) ([ilyam8](https://github.com/ilyam8)) +- Add option to disable ML. [\#16463](https://github.com/netdata/netdata/pull/16463) ([vkalintiris](https://github.com/vkalintiris)) +- fix analytics logs [\#16462](https://github.com/netdata/netdata/pull/16462) ([ktsaou](https://github.com/ktsaou)) +- fix logs bashism [\#16461](https://github.com/netdata/netdata/pull/16461) ([ktsaou](https://github.com/ktsaou)) +- fix log2journal incorrect log [\#16460](https://github.com/netdata/netdata/pull/16460) ([ktsaou](https://github.com/ktsaou)) +- fixes for logging [\#16459](https://github.com/netdata/netdata/pull/16459) ([ktsaou](https://github.com/ktsaou)) +- when the namespace socket does not work, continue trying [\#16458](https://github.com/netdata/netdata/pull/16458) ([ktsaou](https://github.com/ktsaou)) +- set journal path for logging [\#16457](https://github.com/netdata/netdata/pull/16457) ([ktsaou](https://github.com/ktsaou)) +- add sbindir\_POST to PATH of bash scripts that use `systemd-cat-native` [\#16456](https://github.com/netdata/netdata/pull/16456) ([ilyam8](https://github.com/ilyam8)) +- add LogNamespace to systemd units [\#16454](https://github.com/netdata/netdata/pull/16454) ([ilyam8](https://github.com/ilyam8)) +- Update non-zero uuid key + child conf. [\#16452](https://github.com/netdata/netdata/pull/16452) ([vkalintiris](https://github.com/vkalintiris)) +- Add missing argument. 
[\#16451](https://github.com/netdata/netdata/pull/16451) ([vkalintiris](https://github.com/vkalintiris)) +- log flood protection to 1000 log lines / 1 minute [\#16450](https://github.com/netdata/netdata/pull/16450) ([ilyam8](https://github.com/ilyam8)) +- Code cleanup [\#16448](https://github.com/netdata/netdata/pull/16448) ([stelfrag](https://github.com/stelfrag)) +- fix: link daemon.log to stderr in docker [\#16447](https://github.com/netdata/netdata/pull/16447) ([ilyam8](https://github.com/ilyam8)) +- Doc change: Curl no longer supports spaces in the URL. [\#16446](https://github.com/netdata/netdata/pull/16446) ([luisj1983](https://github.com/luisj1983)) +- journal estimations [\#16445](https://github.com/netdata/netdata/pull/16445) ([ktsaou](https://github.com/ktsaou)) +- journal startup [\#16443](https://github.com/netdata/netdata/pull/16443) ([ktsaou](https://github.com/ktsaou)) +- Regenerate integrations.js [\#16442](https://github.com/netdata/netdata/pull/16442) ([netdatabot](https://github.com/netdatabot)) +- Fix icon filename [\#16441](https://github.com/netdata/netdata/pull/16441) ([shyamvalsan](https://github.com/shyamvalsan)) +- On-Prem documentation full and light [\#16440](https://github.com/netdata/netdata/pull/16440) ([M4itee](https://github.com/M4itee)) +- Minor: Small health docs typo fix [\#16439](https://github.com/netdata/netdata/pull/16439) ([MrZammler](https://github.com/MrZammler)) +- Removes Observabilitycon banner README.md [\#16434](https://github.com/netdata/netdata/pull/16434) ([Aliki92](https://github.com/Aliki92)) +- Journal sampling [\#16433](https://github.com/netdata/netdata/pull/16433) ([ktsaou](https://github.com/ktsaou)) +- Regenerate integrations.js [\#16431](https://github.com/netdata/netdata/pull/16431) ([netdatabot](https://github.com/netdatabot)) +- Regenerate integrations.js [\#16430](https://github.com/netdata/netdata/pull/16430) ([netdatabot](https://github.com/netdatabot)) +- proc\_net\_dev: keep nic\_speed\_max in kilobits [\#16429](https://github.com/netdata/netdata/pull/16429) ([ilyam8](https://github.com/ilyam8)) +- update go.d plugin to v0.57.0 [\#16427](https://github.com/netdata/netdata/pull/16427) ([ilyam8](https://github.com/ilyam8)) +- Adds config info for Telegram cloud notification [\#16424](https://github.com/netdata/netdata/pull/16424) ([juacker](https://github.com/juacker)) +- Minor: Remove backtick from doc [\#16423](https://github.com/netdata/netdata/pull/16423) ([MrZammler](https://github.com/MrZammler)) +- Update netdata-functions.md [\#16421](https://github.com/netdata/netdata/pull/16421) ([shyamvalsan](https://github.com/shyamvalsan)) +- disable socket port reuse [\#16420](https://github.com/netdata/netdata/pull/16420) ([ilyam8](https://github.com/ilyam8)) +- fix proc net dev: keep iface speed chart var in Mbits [\#16418](https://github.com/netdata/netdata/pull/16418) ([ilyam8](https://github.com/ilyam8)) +- Don't print errors from reading filtered alerts [\#16417](https://github.com/netdata/netdata/pull/16417) ([MrZammler](https://github.com/MrZammler)) +- /api/v1/charts: bring back chart id to `title` [\#16416](https://github.com/netdata/netdata/pull/16416) ([ilyam8](https://github.com/ilyam8)) +- fix: don't count reused connections as new [\#16414](https://github.com/netdata/netdata/pull/16414) ([ilyam8](https://github.com/ilyam8)) +- Add support for installing a specific major version of the agent on install. 
[\#16413](https://github.com/netdata/netdata/pull/16413) ([Ferroin](https://github.com/Ferroin)) +- Remove queue limit from ACLK sync event loop [\#16411](https://github.com/netdata/netdata/pull/16411) ([stelfrag](https://github.com/stelfrag)) +- Regenerate integrations.js [\#16409](https://github.com/netdata/netdata/pull/16409) ([netdatabot](https://github.com/netdatabot)) +- Improve handling around EPEL requirement for RPM packages. [\#16406](https://github.com/netdata/netdata/pull/16406) ([Ferroin](https://github.com/Ferroin)) +- Fix typo in metadata \(eBPF\) [\#16405](https://github.com/netdata/netdata/pull/16405) ([thiagoftsm](https://github.com/thiagoftsm)) +- docker: use /host/etc/hostname if mounted [\#16401](https://github.com/netdata/netdata/pull/16401) ([ilyam8](https://github.com/ilyam8)) +- adaptec\_raid: fix parsing PD without NCQ status [\#16400](https://github.com/netdata/netdata/pull/16400) ([ilyam8](https://github.com/ilyam8)) +- eBPF apps order [\#16395](https://github.com/netdata/netdata/pull/16395) ([thiagoftsm](https://github.com/thiagoftsm)) +- fix systemd-units func expiration time [\#16393](https://github.com/netdata/netdata/pull/16393) ([ilyam8](https://github.com/ilyam8)) +- docker: mount /etc/localtime [\#16392](https://github.com/netdata/netdata/pull/16392) ([ilyam8](https://github.com/ilyam8)) +- fix "differ in signedness" warn in cgroup [\#16391](https://github.com/netdata/netdata/pull/16391) ([ilyam8](https://github.com/ilyam8)) +- fix v0 dashboard [\#16389](https://github.com/netdata/netdata/pull/16389) ([ilyam8](https://github.com/ilyam8)) +- Regenerate integrations.js [\#16386](https://github.com/netdata/netdata/pull/16386) ([netdatabot](https://github.com/netdatabot)) +- skip spaces when reading cpuset [\#16385](https://github.com/netdata/netdata/pull/16385) ([ilyam8](https://github.com/ilyam8)) +- Regenerate integrations.js [\#16384](https://github.com/netdata/netdata/pull/16384) ([netdatabot](https://github.com/netdatabot)) +- use pre-configured message\_ids to identify common logs [\#16383](https://github.com/netdata/netdata/pull/16383) ([ktsaou](https://github.com/ktsaou)) +- Handle ephemeral hosts [\#16381](https://github.com/netdata/netdata/pull/16381) ([stelfrag](https://github.com/stelfrag)) +- docs: remove 'families' from health reference [\#16380](https://github.com/netdata/netdata/pull/16380) ([ilyam8](https://github.com/ilyam8)) +- fix cloud aws sns notification meta [\#16379](https://github.com/netdata/netdata/pull/16379) ([ilyam8](https://github.com/ilyam8)) +- Regenerate integrations.js [\#16378](https://github.com/netdata/netdata/pull/16378) ([netdatabot](https://github.com/netdatabot)) +- update bundled UI to v6.59.0 [\#16377](https://github.com/netdata/netdata/pull/16377) ([ilyam8](https://github.com/ilyam8)) +- health guides: remove guides for alerts that don't exist in the repo [\#16375](https://github.com/netdata/netdata/pull/16375) ([ilyam8](https://github.com/ilyam8)) +- add pids current to cgroups meta [\#16374](https://github.com/netdata/netdata/pull/16374) ([ilyam8](https://github.com/ilyam8)) +- Regenerate integrations.js [\#16373](https://github.com/netdata/netdata/pull/16373) ([netdatabot](https://github.com/netdatabot)) +- docs: remove unused cloud notification methods mds [\#16372](https://github.com/netdata/netdata/pull/16372) ([ilyam8](https://github.com/ilyam8)) +- Add configuration documentation for Cloud AWS SNS [\#16371](https://github.com/netdata/netdata/pull/16371) ([car12o](https://github.com/car12o)) +- pacakging: 
add zstd dev to install-required-packages [\#16370](https://github.com/netdata/netdata/pull/16370) ([ilyam8](https://github.com/ilyam8)) +- cgroups: collect pids/pids.current [\#16369](https://github.com/netdata/netdata/pull/16369) ([ilyam8](https://github.com/ilyam8)) +- docs: Correct time unit for tier 2 explanation [\#16368](https://github.com/netdata/netdata/pull/16368) ([sepek](https://github.com/sepek)) +- cgroups: fix throttle\_duration chart context [\#16367](https://github.com/netdata/netdata/pull/16367) ([ilyam8](https://github.com/ilyam8)) +- Introduce agent release metadata pipelines [\#16366](https://github.com/netdata/netdata/pull/16366) ([tkatsoulas](https://github.com/tkatsoulas)) +- fix system.net when inside lxc [\#16364](https://github.com/netdata/netdata/pull/16364) ([ilyam8](https://github.com/ilyam8)) +- collectors/freeipmi: add ipmi-sensors function [\#16363](https://github.com/netdata/netdata/pull/16363) ([ilyam8](https://github.com/ilyam8)) +- Add assorted improvements to the version policy draft. [\#16362](https://github.com/netdata/netdata/pull/16362) ([Ferroin](https://github.com/Ferroin)) +- Add a apcupsd status code metric [\#16361](https://github.com/netdata/netdata/pull/16361) ([thomasbeaudry](https://github.com/thomasbeaudry)) +- Switch alarm\_log to use the buffer json functions [\#16360](https://github.com/netdata/netdata/pull/16360) ([stelfrag](https://github.com/stelfrag)) +- Switch charts / chart to use buffer json functions [\#16359](https://github.com/netdata/netdata/pull/16359) ([stelfrag](https://github.com/stelfrag)) +- health: put guides into subdirs [\#16358](https://github.com/netdata/netdata/pull/16358) ([ilyam8](https://github.com/ilyam8)) +- New logging layer [\#16357](https://github.com/netdata/netdata/pull/16357) ([ktsaou](https://github.com/ktsaou)) +- Import alert guides from Netdata Assistant [\#16355](https://github.com/netdata/netdata/pull/16355) ([ralphm](https://github.com/ralphm)) +- update bundle UI to v6.58.5 [\#16354](https://github.com/netdata/netdata/pull/16354) ([ilyam8](https://github.com/ilyam8)) +- Update CODEOWNERS [\#16353](https://github.com/netdata/netdata/pull/16353) ([Ancairon](https://github.com/Ancairon)) +- Copy outdated alert guides to health/guides [\#16352](https://github.com/netdata/netdata/pull/16352) ([Ancairon](https://github.com/Ancairon)) +- Replace rrdset\_is\_obsolete & rrdset\_isnot\_obsolete [\#16351](https://github.com/netdata/netdata/pull/16351) ([MrZammler](https://github.com/MrZammler)) +- fix zstd in static build [\#16349](https://github.com/netdata/netdata/pull/16349) ([ilyam8](https://github.com/ilyam8)) +- add rrddim\_get\_last\_stored\_value to simplify function code in internal collectors [\#16348](https://github.com/netdata/netdata/pull/16348) ([ilyam8](https://github.com/ilyam8)) +- change defaults for functions [\#16347](https://github.com/netdata/netdata/pull/16347) ([ktsaou](https://github.com/ktsaou)) +- give the streaming function to nightly users [\#16346](https://github.com/netdata/netdata/pull/16346) ([ktsaou](https://github.com/ktsaou)) +- diskspace: add mount-points function [\#16345](https://github.com/netdata/netdata/pull/16345) ([ilyam8](https://github.com/ilyam8)) +- Update packaging instructions [\#16344](https://github.com/netdata/netdata/pull/16344) ([tkatsoulas](https://github.com/tkatsoulas)) +- Better database corruption detention during runtime [\#16343](https://github.com/netdata/netdata/pull/16343) ([stelfrag](https://github.com/stelfrag)) +- Improve agent to cloud 
status update process [\#16342](https://github.com/netdata/netdata/pull/16342) ([stelfrag](https://github.com/stelfrag)) +- h2o add api/v2 support [\#16340](https://github.com/netdata/netdata/pull/16340) ([underhood](https://github.com/underhood)) +- proc/diskstats: add block-devices function [\#16338](https://github.com/netdata/netdata/pull/16338) ([ilyam8](https://github.com/ilyam8)) +- network-interfaces function: add UsedBy field to [\#16337](https://github.com/netdata/netdata/pull/16337) ([ilyam8](https://github.com/ilyam8)) +- Network-interfaces function small improvements [\#16336](https://github.com/netdata/netdata/pull/16336) ([ilyam8](https://github.com/ilyam8)) +- proc netstat: add network interface statistics function [\#16334](https://github.com/netdata/netdata/pull/16334) ([ilyam8](https://github.com/ilyam8)) +- systemd-units improvements [\#16333](https://github.com/netdata/netdata/pull/16333) ([ktsaou](https://github.com/ktsaou)) +- cleanup systemd unit files After [\#16332](https://github.com/netdata/netdata/pull/16332) ([ilyam8](https://github.com/ilyam8)) +- fix: check for null rrdim in cgroup functions [\#16331](https://github.com/netdata/netdata/pull/16331) ([ilyam8](https://github.com/ilyam8)) +- Regenerate integrations.js [\#16330](https://github.com/netdata/netdata/pull/16330) ([netdatabot](https://github.com/netdatabot)) +- Improve unittests [\#16329](https://github.com/netdata/netdata/pull/16329) ([stelfrag](https://github.com/stelfrag)) +- fix coverity warnings in cgroups [\#16328](https://github.com/netdata/netdata/pull/16328) ([ilyam8](https://github.com/ilyam8)) +- Fix readme images [\#16327](https://github.com/netdata/netdata/pull/16327) ([Ancairon](https://github.com/Ancairon)) +- integrations: fix nightly tag in helm deploy [\#16326](https://github.com/netdata/netdata/pull/16326) ([ilyam8](https://github.com/ilyam8)) +- rename newly added functions [\#16325](https://github.com/netdata/netdata/pull/16325) ([ktsaou](https://github.com/ktsaou)) +- Added section Blog posts README.md [\#16323](https://github.com/netdata/netdata/pull/16323) ([Aliki92](https://github.com/Aliki92)) +- Keep precompiled statements for alarm log queries to improve performance [\#16321](https://github.com/netdata/netdata/pull/16321) ([stelfrag](https://github.com/stelfrag)) +- Fix README images [\#16320](https://github.com/netdata/netdata/pull/16320) ([Ancairon](https://github.com/Ancairon)) +- Fix journal file index when collision is detected [\#16319](https://github.com/netdata/netdata/pull/16319) ([stelfrag](https://github.com/stelfrag)) +- Systemd units function [\#16318](https://github.com/netdata/netdata/pull/16318) ([ktsaou](https://github.com/ktsaou)) +- Optimize database before agent shutdown [\#16317](https://github.com/netdata/netdata/pull/16317) ([stelfrag](https://github.com/stelfrag)) +- `tcp_v6_connect` monitoring [\#16316](https://github.com/netdata/netdata/pull/16316) ([thiagoftsm](https://github.com/thiagoftsm)) +- Improve shutdown when collectors are active [\#16315](https://github.com/netdata/netdata/pull/16315) ([stelfrag](https://github.com/stelfrag)) +- cgroup-top function [\#16314](https://github.com/netdata/netdata/pull/16314) ([ktsaou](https://github.com/ktsaou)) +- Add a note for the docker deployment alongside with cetus [\#16312](https://github.com/netdata/netdata/pull/16312) ([tkatsoulas](https://github.com/tkatsoulas)) +- Update ObservabilityCon README.md [\#16311](https://github.com/netdata/netdata/pull/16311) ([Aliki92](https://github.com/Aliki92)) +- 
update docker swarm deploy info [\#16308](https://github.com/netdata/netdata/pull/16308) ([ilyam8](https://github.com/ilyam8)) +- Regenerate integrations.js [\#16306](https://github.com/netdata/netdata/pull/16306) ([netdatabot](https://github.com/netdatabot)) +- Use proper icons for deploy integrations [\#16305](https://github.com/netdata/netdata/pull/16305) ([Ancairon](https://github.com/Ancairon)) +- bump openssl for static in 3.1.4 [\#16303](https://github.com/netdata/netdata/pull/16303) ([tkatsoulas](https://github.com/tkatsoulas)) +- claim.sh: use echo instead of /bin/echo [\#16300](https://github.com/netdata/netdata/pull/16300) ([ilyam8](https://github.com/ilyam8)) +- update journal sources once per minute [\#16298](https://github.com/netdata/netdata/pull/16298) ([ktsaou](https://github.com/ktsaou)) +- Fix label copy [\#16297](https://github.com/netdata/netdata/pull/16297) ([stelfrag](https://github.com/stelfrag)) +- fix missing labels from parents [\#16296](https://github.com/netdata/netdata/pull/16296) ([ktsaou](https://github.com/ktsaou)) +- do not propagate upstream internal label sources [\#16295](https://github.com/netdata/netdata/pull/16295) ([ktsaou](https://github.com/ktsaou)) +- fix various issues identified by coverity [\#16294](https://github.com/netdata/netdata/pull/16294) ([ktsaou](https://github.com/ktsaou)) +- fix missing labels from parents [\#16293](https://github.com/netdata/netdata/pull/16293) ([ktsaou](https://github.com/ktsaou)) +- fix renames in freebsd [\#16292](https://github.com/netdata/netdata/pull/16292) ([ktsaou](https://github.com/ktsaou)) +- Regenerate integrations.js [\#16291](https://github.com/netdata/netdata/pull/16291) ([netdatabot](https://github.com/netdatabot)) +- fix retention loading [\#16290](https://github.com/netdata/netdata/pull/16290) ([ktsaou](https://github.com/ktsaou)) +- integrations: yes/no instead of True/False in tables [\#16289](https://github.com/netdata/netdata/pull/16289) ([ilyam8](https://github.com/ilyam8)) +- typo fixed in gen\_docs\_integrations.py [\#16288](https://github.com/netdata/netdata/pull/16288) ([khalid586](https://github.com/khalid586)) +- Brotli streaming compression [\#16287](https://github.com/netdata/netdata/pull/16287) ([ktsaou](https://github.com/ktsaou)) +- Apcupsd selftest metric [\#16286](https://github.com/netdata/netdata/pull/16286) ([thomasbeaudry](https://github.com/thomasbeaudry)) +- Fix 404s in markdown files [\#16285](https://github.com/netdata/netdata/pull/16285) ([Ancairon](https://github.com/Ancairon)) +- Regenerate integrations.js [\#16284](https://github.com/netdata/netdata/pull/16284) ([netdatabot](https://github.com/netdatabot)) +- Small optimization of alert queries [\#16282](https://github.com/netdata/netdata/pull/16282) ([MrZammler](https://github.com/MrZammler)) +- update go.d version to 0.56.4 [\#16281](https://github.com/netdata/netdata/pull/16281) ([ilyam8](https://github.com/ilyam8)) +- update bundled UI to v6.57.0 [\#16277](https://github.com/netdata/netdata/pull/16277) ([ilyam8](https://github.com/ilyam8)) +- Remove semicolons from strings [\#16276](https://github.com/netdata/netdata/pull/16276) ([Ancairon](https://github.com/Ancairon)) +- Prevent wrong optimization armv7l static build [\#16274](https://github.com/netdata/netdata/pull/16274) ([stelfrag](https://github.com/stelfrag)) +- local\_listeners: add cmd args for reading specific files [\#16273](https://github.com/netdata/netdata/pull/16273) ([ilyam8](https://github.com/ilyam8)) +- DYNCFG fix REPORT\_JOB\_STATUS streaming 
[\#16272](https://github.com/netdata/netdata/pull/16272) ([underhood](https://github.com/underhood)) +- fix sources match [\#16271](https://github.com/netdata/netdata/pull/16271) ([ktsaou](https://github.com/ktsaou)) +- Add an obsoletion time for statsd private charts [\#16269](https://github.com/netdata/netdata/pull/16269) ([MrZammler](https://github.com/MrZammler)) +- ZSTD and GZIP/DEFLATE streaming support [\#16268](https://github.com/netdata/netdata/pull/16268) ([ktsaou](https://github.com/ktsaou)) +- journal minor updates [\#16267](https://github.com/netdata/netdata/pull/16267) ([ktsaou](https://github.com/ktsaou)) +- Regenerate integrations.js [\#16266](https://github.com/netdata/netdata/pull/16266) ([netdatabot](https://github.com/netdatabot)) +- Fix coverity issue 403725 [\#16265](https://github.com/netdata/netdata/pull/16265) ([stelfrag](https://github.com/stelfrag)) +- SUBSTRING simple patterns fix [\#16264](https://github.com/netdata/netdata/pull/16264) ([ktsaou](https://github.com/ktsaou)) +- QUERIES: use tiers only when they have useful data [\#16263](https://github.com/netdata/netdata/pull/16263) ([ktsaou](https://github.com/ktsaou)) +- Improve dimension ML model load [\#16262](https://github.com/netdata/netdata/pull/16262) ([stelfrag](https://github.com/stelfrag)) +- cgroup: add net container\_device label [\#16261](https://github.com/netdata/netdata/pull/16261) ([ilyam8](https://github.com/ilyam8)) +- Replace distutils with packaging for version [\#16259](https://github.com/netdata/netdata/pull/16259) ([MrZammler](https://github.com/MrZammler)) +- Regenerate integrations.js [\#16258](https://github.com/netdata/netdata/pull/16258) ([netdatabot](https://github.com/netdatabot)) +- Fix Discord webhook payload [\#16257](https://github.com/netdata/netdata/pull/16257) ([luchaos](https://github.com/luchaos)) +- Fix HAProxy server status parsing and add MAINT status chart [\#16253](https://github.com/netdata/netdata/pull/16253) ([seniorquico](https://github.com/seniorquico)) +- Journal multiple sources [\#16252](https://github.com/netdata/netdata/pull/16252) ([ktsaou](https://github.com/ktsaou)) +- `most_popular` on markdown metadata for integrations [\#16251](https://github.com/netdata/netdata/pull/16251) ([Ancairon](https://github.com/Ancairon)) +- Dyncfg improvements [\#16250](https://github.com/netdata/netdata/pull/16250) ([ktsaou](https://github.com/ktsaou)) +- Fix label copy to correctly handle duplicate keys [\#16249](https://github.com/netdata/netdata/pull/16249) ([stelfrag](https://github.com/stelfrag)) +- added systemd-journal forward\_secure\_sealing [\#16247](https://github.com/netdata/netdata/pull/16247) ([ktsaou](https://github.com/ktsaou)) +- Terminate cgroups discovery thread faster during shutdown [\#16246](https://github.com/netdata/netdata/pull/16246) ([stelfrag](https://github.com/stelfrag)) +- python.d\(smartd\_log\): collect Total LBAs written/read [\#16245](https://github.com/netdata/netdata/pull/16245) ([watsonbox](https://github.com/watsonbox)) +- fix apps plugin metric names in meta [\#16243](https://github.com/netdata/netdata/pull/16243) ([ilyam8](https://github.com/ilyam8)) +- Drop an unused index from aclk\_alert table [\#16242](https://github.com/netdata/netdata/pull/16242) ([stelfrag](https://github.com/stelfrag)) +- add DYNCFG\_RESET [\#16241](https://github.com/netdata/netdata/pull/16241) ([underhood](https://github.com/underhood)) +- Reuse ML load prepared statement [\#16240](https://github.com/netdata/netdata/pull/16240) 
([stelfrag](https://github.com/stelfrag)) +- update bundled UI to v6.53.0 [\#16239](https://github.com/netdata/netdata/pull/16239) ([ilyam8](https://github.com/ilyam8)) +- Regenerate integrations.js [\#16237](https://github.com/netdata/netdata/pull/16237) ([netdatabot](https://github.com/netdatabot)) +- Active journal centralization guide no encryption [\#16236](https://github.com/netdata/netdata/pull/16236) ([tkatsoulas](https://github.com/tkatsoulas)) +- journal: script to generate self-signed-certificates [\#16235](https://github.com/netdata/netdata/pull/16235) ([ktsaou](https://github.com/ktsaou)) +- Fix dimension HETEROGENEOUS check [\#16234](https://github.com/netdata/netdata/pull/16234) ([stelfrag](https://github.com/stelfrag)) +- uninstaller: remove /etc/cron.d/netdata-updater-daily [\#16233](https://github.com/netdata/netdata/pull/16233) ([ilyam8](https://github.com/ilyam8)) +- Add Erlang to Apps configuration [\#16231](https://github.com/netdata/netdata/pull/16231) ([andyundso](https://github.com/andyundso)) +- remove charts.d/nut [\#16230](https://github.com/netdata/netdata/pull/16230) ([ilyam8](https://github.com/ilyam8)) +- kickstart: rename auto-update-method to auto-update-type [\#16229](https://github.com/netdata/netdata/pull/16229) ([ilyam8](https://github.com/ilyam8)) +- update go.d plugin version to v0.56.3 [\#16228](https://github.com/netdata/netdata/pull/16228) ([ilyam8](https://github.com/ilyam8)) +- Add document outlining our versioning policy and public API. [\#16227](https://github.com/netdata/netdata/pull/16227) ([Ferroin](https://github.com/Ferroin)) +- Changes to `systemd-journal` docs [\#16225](https://github.com/netdata/netdata/pull/16225) ([Ancairon](https://github.com/Ancairon)) +- Fix statistics calculation in 32bit systems [\#16222](https://github.com/netdata/netdata/pull/16222) ([stelfrag](https://github.com/stelfrag)) +- Fix meta unittest [\#16221](https://github.com/netdata/netdata/pull/16221) ([stelfrag](https://github.com/stelfrag)) +- facets: minimize hashtable collisions [\#16215](https://github.com/netdata/netdata/pull/16215) ([ktsaou](https://github.com/ktsaou)) +- Removing support for Alpine 3.15 [\#16205](https://github.com/netdata/netdata/pull/16205) ([tkatsoulas](https://github.com/tkatsoulas)) +- Improve context load on startup [\#16203](https://github.com/netdata/netdata/pull/16203) ([stelfrag](https://github.com/stelfrag)) +- cgroup-network: don't log an error opening pid file if doesn't exist [\#16196](https://github.com/netdata/netdata/pull/16196) ([ilyam8](https://github.com/ilyam8)) +- docker install: support for Proxmox vms/containers name resolution [\#16193](https://github.com/netdata/netdata/pull/16193) ([ilyam8](https://github.com/ilyam8)) +- Introduce workflow to always update bundled packages \(static builds\) into their latest release \(part1\) [\#16191](https://github.com/netdata/netdata/pull/16191) ([tkatsoulas](https://github.com/tkatsoulas)) +- Improvements for labels handling [\#16172](https://github.com/netdata/netdata/pull/16172) ([stelfrag](https://github.com/stelfrag)) +- Split chars \(eBPF \<-\> Apps integration\) [\#16139](https://github.com/netdata/netdata/pull/16139) ([thiagoftsm](https://github.com/thiagoftsm)) +- Faster parents [\#16127](https://github.com/netdata/netdata/pull/16127) ([ktsaou](https://github.com/ktsaou)) +- Update info about custom dashboards [\#16121](https://github.com/netdata/netdata/pull/16121) ([elizabyte8](https://github.com/elizabyte8)) +- Add info to native packages docs about mirroring 
our repos. [\#16069](https://github.com/netdata/netdata/pull/16069) ([Ferroin](https://github.com/Ferroin)) +- shutdown while waiting for collectors to finish [\#16023](https://github.com/netdata/netdata/pull/16023) ([ktsaou](https://github.com/ktsaou)) + +## [v1.43.2](https://github.com/netdata/netdata/tree/v1.43.2) (2023-10-30) + +[Full Changelog](https://github.com/netdata/netdata/compare/v1.43.1...v1.43.2) + +## [v1.43.1](https://github.com/netdata/netdata/tree/v1.43.1) (2023-10-26) + +[Full Changelog](https://github.com/netdata/netdata/compare/v1.43.0...v1.43.1) + +## [v1.43.0](https://github.com/netdata/netdata/tree/v1.43.0) (2023-10-16) + +[Full Changelog](https://github.com/netdata/netdata/compare/v1.42.4...v1.43.0) + +**Merged pull requests:** + +- update bundled UI to v6.52.2 [\#16219](https://github.com/netdata/netdata/pull/16219) ([ilyam8](https://github.com/ilyam8)) +- dynamic meta queue size [\#16218](https://github.com/netdata/netdata/pull/16218) ([ktsaou](https://github.com/ktsaou)) +- update bundled UI to v6.52.1 [\#16217](https://github.com/netdata/netdata/pull/16217) ([ilyam8](https://github.com/ilyam8)) +- update bundled UI to v6.52.0 [\#16216](https://github.com/netdata/netdata/pull/16216) ([ilyam8](https://github.com/ilyam8)) +- disable logging to syslog by default [\#16214](https://github.com/netdata/netdata/pull/16214) ([ilyam8](https://github.com/ilyam8)) +- add summary to /alerts [\#16213](https://github.com/netdata/netdata/pull/16213) ([MrZammler](https://github.com/MrZammler)) +- registry action hello should always work [\#16212](https://github.com/netdata/netdata/pull/16212) ([ktsaou](https://github.com/ktsaou)) +- apps: fix divide by zero when calc avg uptime [\#16211](https://github.com/netdata/netdata/pull/16211) ([ilyam8](https://github.com/ilyam8)) +- allow patterns in journal queries [\#16210](https://github.com/netdata/netdata/pull/16210) ([ktsaou](https://github.com/ktsaou)) +- ui-6.51.0 [\#16208](https://github.com/netdata/netdata/pull/16208) ([ktsaou](https://github.com/ktsaou)) +- add order in available histograms [\#16204](https://github.com/netdata/netdata/pull/16204) ([ktsaou](https://github.com/ktsaou)) +- update ui to 6.50.2 again [\#16202](https://github.com/netdata/netdata/pull/16202) ([ktsaou](https://github.com/ktsaou)) +- update ui to 6.50.2 [\#16201](https://github.com/netdata/netdata/pull/16201) ([ktsaou](https://github.com/ktsaou)) +- Regenerate integrations.js [\#16200](https://github.com/netdata/netdata/pull/16200) ([netdatabot](https://github.com/netdatabot)) +- health: attach drops ratio alarms to net.drops [\#16199](https://github.com/netdata/netdata/pull/16199) ([ilyam8](https://github.com/ilyam8)) +- apps: always expose "other" group [\#16198](https://github.com/netdata/netdata/pull/16198) ([ilyam8](https://github.com/ilyam8)) +- journal timeout [\#16195](https://github.com/netdata/netdata/pull/16195) ([ktsaou](https://github.com/ktsaou)) +- systemd-journal timeout to 55 secs [\#16194](https://github.com/netdata/netdata/pull/16194) ([ktsaou](https://github.com/ktsaou)) +- update bundled UI to v6.49.0 [\#16192](https://github.com/netdata/netdata/pull/16192) ([ilyam8](https://github.com/ilyam8)) +- Faster facets [\#16190](https://github.com/netdata/netdata/pull/16190) ([ktsaou](https://github.com/ktsaou)) +- Journal updates [\#16189](https://github.com/netdata/netdata/pull/16189) ([ktsaou](https://github.com/ktsaou)) +- Add agent version on startup [\#16188](https://github.com/netdata/netdata/pull/16188) 
([stelfrag](https://github.com/stelfrag)) +- Suppress "families" log [\#16186](https://github.com/netdata/netdata/pull/16186) ([stelfrag](https://github.com/stelfrag)) +- Fix access of memory after free [\#16185](https://github.com/netdata/netdata/pull/16185) ([stelfrag](https://github.com/stelfrag)) +- functions columns [\#16184](https://github.com/netdata/netdata/pull/16184) ([ktsaou](https://github.com/ktsaou)) +- disable \_go\_build in centos 8 & 9 [\#16183](https://github.com/netdata/netdata/pull/16183) ([tkatsoulas](https://github.com/tkatsoulas)) +- Regenerate integrations.js [\#16182](https://github.com/netdata/netdata/pull/16182) ([netdatabot](https://github.com/netdatabot)) +- update go.d to v0.56.2 [\#16181](https://github.com/netdata/netdata/pull/16181) ([ilyam8](https://github.com/ilyam8)) +- Add support for Fedora 39 native packages into our CI [\#16180](https://github.com/netdata/netdata/pull/16180) ([tkatsoulas](https://github.com/tkatsoulas)) +- Add support for Ubuntu 23.10 native packages into our CI [\#16179](https://github.com/netdata/netdata/pull/16179) ([tkatsoulas](https://github.com/tkatsoulas)) +- Update bundled static packages [\#16177](https://github.com/netdata/netdata/pull/16177) ([tkatsoulas](https://github.com/tkatsoulas)) +- Regenerate integrations.js [\#16176](https://github.com/netdata/netdata/pull/16176) ([netdatabot](https://github.com/netdatabot)) +- facets: do not corrupt the index when doubling the hashtable [\#16171](https://github.com/netdata/netdata/pull/16171) ([ktsaou](https://github.com/ktsaou)) +- Add icons to integrations markdown files [\#16169](https://github.com/netdata/netdata/pull/16169) ([Ancairon](https://github.com/Ancairon)) +- Fix netdata-uninstaller; blindly deletes NETDATA\_PREFIX env var [\#16167](https://github.com/netdata/netdata/pull/16167) ([tkatsoulas](https://github.com/tkatsoulas)) +- apps: remove mem\_private on FreeBSD [\#16166](https://github.com/netdata/netdata/pull/16166) ([ilyam8](https://github.com/ilyam8)) +- fix repo path for openSUSE 15.5 packages [\#16161](https://github.com/netdata/netdata/pull/16161) ([tkatsoulas](https://github.com/tkatsoulas)) +- Modify eBPF exit [\#16159](https://github.com/netdata/netdata/pull/16159) ([thiagoftsm](https://github.com/thiagoftsm)) +- Fix compilation warnings [\#16158](https://github.com/netdata/netdata/pull/16158) ([stelfrag](https://github.com/stelfrag)) +- Don't queue removed when there is a newer alert [\#16157](https://github.com/netdata/netdata/pull/16157) ([MrZammler](https://github.com/MrZammler)) +- docker: make chmod o+rX / non fatal [\#16156](https://github.com/netdata/netdata/pull/16156) ([ilyam8](https://github.com/ilyam8)) +- Batch ML model load commands [\#16155](https://github.com/netdata/netdata/pull/16155) ([stelfrag](https://github.com/stelfrag)) +- \[BUGFIX\] MQTT ARM fix [\#16154](https://github.com/netdata/netdata/pull/16154) ([underhood](https://github.com/underhood)) +- Rework guide, add SSL with self-signed certs [\#16153](https://github.com/netdata/netdata/pull/16153) ([tkatsoulas](https://github.com/tkatsoulas)) +- make io charts "write" negative in apps and cgroups \(systemd\) [\#16152](https://github.com/netdata/netdata/pull/16152) ([ilyam8](https://github.com/ilyam8)) +- journal: updates [\#16150](https://github.com/netdata/netdata/pull/16150) ([ktsaou](https://github.com/ktsaou)) +- uninstaller: remove ND systemd preset and tmp dir [\#16148](https://github.com/netdata/netdata/pull/16148) ([ilyam8](https://github.com/ilyam8)) +- fix `test -x` 
check for uninstaller script [\#16146](https://github.com/netdata/netdata/pull/16146) ([ilyam8](https://github.com/ilyam8)) +- health: don't log an unknown key error for "families" [\#16145](https://github.com/netdata/netdata/pull/16145) ([ilyam8](https://github.com/ilyam8)) +- Regenerate integrations.js [\#16144](https://github.com/netdata/netdata/pull/16144) ([netdatabot](https://github.com/netdatabot)) +- Update python.d./varnish/metadata.yaml [\#16143](https://github.com/netdata/netdata/pull/16143) ([Ancairon](https://github.com/Ancairon)) +- Bugfix in integrations/setup/template [\#16142](https://github.com/netdata/netdata/pull/16142) ([Ancairon](https://github.com/Ancairon)) +- Fixes in integration generation script [\#16141](https://github.com/netdata/netdata/pull/16141) ([Ancairon](https://github.com/Ancairon)) +- Introduce stringify function for integrations [\#16140](https://github.com/netdata/netdata/pull/16140) ([Ancairon](https://github.com/Ancairon)) +- Regenerate integrations.js [\#16138](https://github.com/netdata/netdata/pull/16138) ([netdatabot](https://github.com/netdatabot)) +- fix random crashes on pthread\_detach\(\) [\#16137](https://github.com/netdata/netdata/pull/16137) ([ktsaou](https://github.com/ktsaou)) +- fix journal help and mark debug keys in the output [\#16133](https://github.com/netdata/netdata/pull/16133) ([ktsaou](https://github.com/ktsaou)) +- Regenerate integrations.js [\#16132](https://github.com/netdata/netdata/pull/16132) ([netdatabot](https://github.com/netdatabot)) +- apps: change user\_group to usergroup [\#16131](https://github.com/netdata/netdata/pull/16131) ([ilyam8](https://github.com/ilyam8)) +- Retain a list structure instead of a set for data collection integrations categories [\#16130](https://github.com/netdata/netdata/pull/16130) ([Ancairon](https://github.com/Ancairon)) +- Add summary to alerts configurations [\#16129](https://github.com/netdata/netdata/pull/16129) ([MrZammler](https://github.com/MrZammler)) +- Remove multiple categories due to bug [\#16126](https://github.com/netdata/netdata/pull/16126) ([Ancairon](https://github.com/Ancairon)) +- Regenerate integrations.js [\#16125](https://github.com/netdata/netdata/pull/16125) ([netdatabot](https://github.com/netdatabot)) +- update UI to v6.45.0 [\#16124](https://github.com/netdata/netdata/pull/16124) ([ilyam8](https://github.com/ilyam8)) +- journal: fix the 1 second latency in play mode [\#16123](https://github.com/netdata/netdata/pull/16123) ([ktsaou](https://github.com/ktsaou)) +- fix proc netstat metrics [\#16122](https://github.com/netdata/netdata/pull/16122) ([ilyam8](https://github.com/ilyam8)) +- dont strip newlines when forwarding FUNCTION\_PAYLOAD [\#16120](https://github.com/netdata/netdata/pull/16120) ([underhood](https://github.com/underhood)) +- Do not force OOMKill [\#16115](https://github.com/netdata/netdata/pull/16115) ([thiagoftsm](https://github.com/thiagoftsm)) +- fix crash on parsing clabel command with no source [\#16114](https://github.com/netdata/netdata/pull/16114) ([ilyam8](https://github.com/ilyam8)) +- update UI to v6.43.0 [\#16112](https://github.com/netdata/netdata/pull/16112) ([ilyam8](https://github.com/ilyam8)) +- Regenerate integrations.js [\#16111](https://github.com/netdata/netdata/pull/16111) ([netdatabot](https://github.com/netdatabot)) +- journal: respect anchor on non-data-only queries [\#16109](https://github.com/netdata/netdata/pull/16109) ([ktsaou](https://github.com/ktsaou)) +- Fix in generate integrations docs script 
[\#16108](https://github.com/netdata/netdata/pull/16108) ([Ancairon](https://github.com/Ancairon)) +- journal: go up to stop anchor on data only queries [\#16107](https://github.com/netdata/netdata/pull/16107) ([ktsaou](https://github.com/ktsaou)) +- Update collectors/python.d.plugin/pandas/metadata.yaml [\#16106](https://github.com/netdata/netdata/pull/16106) ([Ancairon](https://github.com/Ancairon)) +- Code improvements [\#16104](https://github.com/netdata/netdata/pull/16104) ([stelfrag](https://github.com/stelfrag)) +- Regenerate integrations.js [\#16103](https://github.com/netdata/netdata/pull/16103) ([netdatabot](https://github.com/netdatabot)) +- Add integrations/cloud-notifications to cleanup [\#16102](https://github.com/netdata/netdata/pull/16102) ([Ancairon](https://github.com/Ancairon)) +- better journal logging [\#16101](https://github.com/netdata/netdata/pull/16101) ([ktsaou](https://github.com/ktsaou)) +- update UI to v6.42.2 [\#16100](https://github.com/netdata/netdata/pull/16100) ([ilyam8](https://github.com/ilyam8)) +- a simple journal optimization [\#16099](https://github.com/netdata/netdata/pull/16099) ([ktsaou](https://github.com/ktsaou)) +- journal: fix incremental queries [\#16098](https://github.com/netdata/netdata/pull/16098) ([ktsaou](https://github.com/ktsaou)) +- Update categories.yaml [\#16097](https://github.com/netdata/netdata/pull/16097) ([Ancairon](https://github.com/Ancairon)) +- Fix systemd-journal.plugin README and prepare it for Learn [\#16096](https://github.com/netdata/netdata/pull/16096) ([Ancairon](https://github.com/Ancairon)) +- Split apps charts [\#16095](https://github.com/netdata/netdata/pull/16095) ([thiagoftsm](https://github.com/thiagoftsm)) +- fix querying out of retention [\#16094](https://github.com/netdata/netdata/pull/16094) ([ktsaou](https://github.com/ktsaou)) +- Regenerate integrations.js [\#16093](https://github.com/netdata/netdata/pull/16093) ([netdatabot](https://github.com/netdatabot)) +- update go.d.plugin to v0.56.1 [\#16092](https://github.com/netdata/netdata/pull/16092) ([ilyam8](https://github.com/ilyam8)) +- update UI to v6.42.1 [\#16091](https://github.com/netdata/netdata/pull/16091) ([ilyam8](https://github.com/ilyam8)) +- dont use sd\_journal\_open\_files\_fd\(\) that is buggy on older libsystemd [\#16090](https://github.com/netdata/netdata/pull/16090) ([ktsaou](https://github.com/ktsaou)) +- external plugins: respect env NETDATA\_LOG\_SEVERITY\_LEVEL [\#16089](https://github.com/netdata/netdata/pull/16089) ([ilyam8](https://github.com/ilyam8)) +- update UI to v6.42.0 [\#16088](https://github.com/netdata/netdata/pull/16088) ([ilyam8](https://github.com/ilyam8)) +- functions: prevent a busy wait loop [\#16086](https://github.com/netdata/netdata/pull/16086) ([ktsaou](https://github.com/ktsaou)) +- charts.d: respect env NETDATA\_LOG\_SEVERITY\_LEVEL [\#16085](https://github.com/netdata/netdata/pull/16085) ([ilyam8](https://github.com/ilyam8)) +- python.d: respect env NETDATA\_LOG\_SEVERITY\_LEVEL [\#16084](https://github.com/netdata/netdata/pull/16084) ([ilyam8](https://github.com/ilyam8)) +- Address reported socket issue [\#16083](https://github.com/netdata/netdata/pull/16083) ([thiagoftsm](https://github.com/thiagoftsm)) +- Change @linuxnetdata to @netdatahq [\#16082](https://github.com/netdata/netdata/pull/16082) ([ralphm](https://github.com/ralphm)) +- \[Integrations Docs\] Add a badge for either netdata or community maintained [\#16073](https://github.com/netdata/netdata/pull/16073) 
([Ancairon](https://github.com/Ancairon)) +- Skip database migration steps in new installation [\#16071](https://github.com/netdata/netdata/pull/16071) ([stelfrag](https://github.com/stelfrag)) +- Improve description about tc.plugin [\#16068](https://github.com/netdata/netdata/pull/16068) ([thiagoftsm](https://github.com/thiagoftsm)) +- Regenerate integrations.js [\#16062](https://github.com/netdata/netdata/pull/16062) ([netdatabot](https://github.com/netdatabot)) +- update go.d version to v0.56.0 [\#16061](https://github.com/netdata/netdata/pull/16061) ([ilyam8](https://github.com/ilyam8)) +- Bugfix on integrations/gen\_docs\_integrations.py [\#16059](https://github.com/netdata/netdata/pull/16059) ([Ancairon](https://github.com/Ancairon)) +- Fix coverity 402975 [\#16058](https://github.com/netdata/netdata/pull/16058) ([stelfrag](https://github.com/stelfrag)) +- Send alerts summary field to cloud [\#16056](https://github.com/netdata/netdata/pull/16056) ([MrZammler](https://github.com/MrZammler)) +- update bundled ui version to v6.41.1 [\#16054](https://github.com/netdata/netdata/pull/16054) ([ilyam8](https://github.com/ilyam8)) +- Update to use versioned base images for CI. [\#16053](https://github.com/netdata/netdata/pull/16053) ([Ferroin](https://github.com/Ferroin)) +- Fix missing find command when installing/updating on Rocky Linux systems. [\#16052](https://github.com/netdata/netdata/pull/16052) ([Ferroin](https://github.com/Ferroin)) +- Fix summary field in table [\#16050](https://github.com/netdata/netdata/pull/16050) ([MrZammler](https://github.com/MrZammler)) +- Switch to uint64\_t to avoid overflow in 32bit systems [\#16048](https://github.com/netdata/netdata/pull/16048) ([stelfrag](https://github.com/stelfrag)) +- Convert the ML database [\#16046](https://github.com/netdata/netdata/pull/16046) ([stelfrag](https://github.com/stelfrag)) +- Regenerate integrations.js [\#16044](https://github.com/netdata/netdata/pull/16044) ([netdatabot](https://github.com/netdatabot)) +- Doc about running a local dashboard through Cloudflare \(community\) [\#16043](https://github.com/netdata/netdata/pull/16043) ([Ancairon](https://github.com/Ancairon)) +- Have one documentation page about Netdata Charts [\#16042](https://github.com/netdata/netdata/pull/16042) ([Ancairon](https://github.com/Ancairon)) +- Remove discontinued Hangouts and StackPulse notification methods [\#16041](https://github.com/netdata/netdata/pull/16041) ([Ancairon](https://github.com/Ancairon)) +- systemd-Journal by file [\#16038](https://github.com/netdata/netdata/pull/16038) ([ktsaou](https://github.com/ktsaou)) +- health: add upsd alerts [\#16036](https://github.com/netdata/netdata/pull/16036) ([ilyam8](https://github.com/ilyam8)) +- Disable mongodb exporter builds where broken. [\#16033](https://github.com/netdata/netdata/pull/16033) ([Ferroin](https://github.com/Ferroin)) +- Run health queries from tier 0 [\#16032](https://github.com/netdata/netdata/pull/16032) ([MrZammler](https://github.com/MrZammler)) +- use `status` as units for `anomaly_detection.detector_events` [\#16028](https://github.com/netdata/netdata/pull/16028) ([andrewm4894](https://github.com/andrewm4894)) +- add description for Homebrew on Apple Silicon Mac\(netdata/learn/\#1789\) [\#16027](https://github.com/netdata/netdata/pull/16027) ([theggs](https://github.com/theggs)) +- Fix package builds on Rocky Linux. 
[\#16026](https://github.com/netdata/netdata/pull/16026) ([Ferroin](https://github.com/Ferroin)) +- Remove family from alerts [\#16025](https://github.com/netdata/netdata/pull/16025) ([MrZammler](https://github.com/MrZammler)) +- add systemd-journal.plugin to apps\_groups.conf [\#16024](https://github.com/netdata/netdata/pull/16024) ([ilyam8](https://github.com/ilyam8)) +- Fix handling of CI skipping. [\#16022](https://github.com/netdata/netdata/pull/16022) ([Ferroin](https://github.com/Ferroin)) +- update bundled UI to v6.39.0 [\#16020](https://github.com/netdata/netdata/pull/16020) ([ilyam8](https://github.com/ilyam8)) +- Update collector metadata for python collectors [\#16019](https://github.com/netdata/netdata/pull/16019) ([tkatsoulas](https://github.com/tkatsoulas)) +- fix crash on setting thread name [\#16016](https://github.com/netdata/netdata/pull/16016) ([ilyam8](https://github.com/ilyam8)) +- Systemd-Journal: fix crash when the uid or gid do not have names [\#16015](https://github.com/netdata/netdata/pull/16015) ([ktsaou](https://github.com/ktsaou)) +- Avoid duplicate keys in labels [\#16014](https://github.com/netdata/netdata/pull/16014) ([stelfrag](https://github.com/stelfrag)) +- remove the line length limit from pluginsd [\#16013](https://github.com/netdata/netdata/pull/16013) ([ktsaou](https://github.com/ktsaou)) +- Regenerate integrations.js [\#16011](https://github.com/netdata/netdata/pull/16011) ([netdatabot](https://github.com/netdatabot)) +- Simplify the script for generating documentation from integrations [\#16009](https://github.com/netdata/netdata/pull/16009) ([Ancairon](https://github.com/Ancairon)) +- some collector metadata improvements [\#16008](https://github.com/netdata/netdata/pull/16008) ([andrewm4894](https://github.com/andrewm4894)) +- Fix compilation warnings [\#16006](https://github.com/netdata/netdata/pull/16006) ([stelfrag](https://github.com/stelfrag)) +- Update CMakeLists.txt [\#16005](https://github.com/netdata/netdata/pull/16005) ([stelfrag](https://github.com/stelfrag)) +- eBPF socket: function with event loop [\#16004](https://github.com/netdata/netdata/pull/16004) ([thiagoftsm](https://github.com/thiagoftsm)) +- fix compilation warnings [\#16001](https://github.com/netdata/netdata/pull/16001) ([ktsaou](https://github.com/ktsaou)) +- Update integrations/gen\_docs\_integrations.py [\#15997](https://github.com/netdata/netdata/pull/15997) ([Ancairon](https://github.com/Ancairon)) +- Make collectors/COLLECTORS.md have its list autogenerated from integrations.js [\#15995](https://github.com/netdata/netdata/pull/15995) ([Ancairon](https://github.com/Ancairon)) + +## [v1.42.4](https://github.com/netdata/netdata/tree/v1.42.4) (2023-09-18) + +[Full Changelog](https://github.com/netdata/netdata/compare/v1.42.3...v1.42.4) ## [v1.42.3](https://github.com/netdata/netdata/tree/v1.42.3) (2023-09-11) @@ -141,291 +459,10 @@ [Full Changelog](https://github.com/netdata/netdata/compare/v1.41.0...v1.42.0) -**Merged pull requests:** - -- ci: codacy exclude web/gui/v2/ [\#15780](https://github.com/netdata/netdata/pull/15780) ([ilyam8](https://github.com/ilyam8)) -- update UI to v6.27.0 [\#15778](https://github.com/netdata/netdata/pull/15778) ([ilyam8](https://github.com/ilyam8)) -- ci: fix labeler area/docs [\#15776](https://github.com/netdata/netdata/pull/15776) ([ilyam8](https://github.com/ilyam8)) -- fix claiming via UI for static build [\#15774](https://github.com/netdata/netdata/pull/15774) ([ilyam8](https://github.com/ilyam8)) -- extend the trimming window to 
avoid empty points at the end of queries [\#15773](https://github.com/netdata/netdata/pull/15773) ([ktsaou](https://github.com/ktsaou)) -- Regenerate integrations.js [\#15772](https://github.com/netdata/netdata/pull/15772) ([netdatabot](https://github.com/netdatabot)) -- Change FreeBSD / macOS system.swap\(io\) to mem.swap\(io\) [\#15769](https://github.com/netdata/netdata/pull/15769) ([Dim-P](https://github.com/Dim-P)) -- update ui to v6.26.3 [\#15767](https://github.com/netdata/netdata/pull/15767) ([ilyam8](https://github.com/ilyam8)) -- Fix CID 398318 [\#15766](https://github.com/netdata/netdata/pull/15766) ([underhood](https://github.com/underhood)) -- Fix coverity issues introduced via drm proc module [\#15765](https://github.com/netdata/netdata/pull/15765) ([Dim-P](https://github.com/Dim-P)) -- Regenerate integrations.js [\#15764](https://github.com/netdata/netdata/pull/15764) ([netdatabot](https://github.com/netdatabot)) -- meta update proc drm icon [\#15763](https://github.com/netdata/netdata/pull/15763) ([ilyam8](https://github.com/ilyam8)) -- Update metadata.yaml [\#15762](https://github.com/netdata/netdata/pull/15762) ([ktsaou](https://github.com/ktsaou)) -- Update metadata.yaml [\#15761](https://github.com/netdata/netdata/pull/15761) ([ktsaou](https://github.com/ktsaou)) -- Regenerate integrations.js [\#15760](https://github.com/netdata/netdata/pull/15760) ([netdatabot](https://github.com/netdatabot)) -- fix nvidia\_smi power\_readings for new drivers [\#15759](https://github.com/netdata/netdata/pull/15759) ([ilyam8](https://github.com/ilyam8)) -- update bundled UI to v2.26.2 [\#15758](https://github.com/netdata/netdata/pull/15758) ([ilyam8](https://github.com/ilyam8)) -- Regenerate integrations.js [\#15751](https://github.com/netdata/netdata/pull/15751) ([netdatabot](https://github.com/netdatabot)) -- ci labeler: remove integrations from area/docs [\#15750](https://github.com/netdata/netdata/pull/15750) ([ilyam8](https://github.com/ilyam8)) -- meta: align left metrics, alerts, and config options [\#15749](https://github.com/netdata/netdata/pull/15749) ([ilyam8](https://github.com/ilyam8)) -- Add dependencies for systemd journal plugin. [\#15747](https://github.com/netdata/netdata/pull/15747) ([Ferroin](https://github.com/Ferroin)) -- prefer cap over setuid for sysetmd-journal in installer [\#15741](https://github.com/netdata/netdata/pull/15741) ([ilyam8](https://github.com/ilyam8)) -- \[cloud-blocker\] https\_client add TLS ext. SNI + support chunked transfer encoding [\#15739](https://github.com/netdata/netdata/pull/15739) ([underhood](https://github.com/underhood)) -- Don't overwrite my vscode settings! [\#15738](https://github.com/netdata/netdata/pull/15738) ([underhood](https://github.com/underhood)) -- faster facets and journal fixes [\#15737](https://github.com/netdata/netdata/pull/15737) ([ktsaou](https://github.com/ktsaou)) -- Adjust namespace used for sd\_journal\_open [\#15736](https://github.com/netdata/netdata/pull/15736) ([stelfrag](https://github.com/stelfrag)) -- Update to latest copy of v2 dashboard. [\#15735](https://github.com/netdata/netdata/pull/15735) ([Ferroin](https://github.com/Ferroin)) -- Add netdata-plugin-systemd-journal package. 
[\#15733](https://github.com/netdata/netdata/pull/15733) ([Ferroin](https://github.com/Ferroin)) -- proc.plugin: dont log if pressure/irq does not exist [\#15732](https://github.com/netdata/netdata/pull/15732) ([ilyam8](https://github.com/ilyam8)) -- ci: run "Generate Integrations" only in netdata/netdata [\#15731](https://github.com/netdata/netdata/pull/15731) ([ilyam8](https://github.com/ilyam8)) -- Regenerate integrations.js [\#15728](https://github.com/netdata/netdata/pull/15728) ([netdatabot](https://github.com/netdatabot)) -- fix systemd-journal makefile [\#15727](https://github.com/netdata/netdata/pull/15727) ([ktsaou](https://github.com/ktsaou)) -- disable systemdunits alarms [\#15726](https://github.com/netdata/netdata/pull/15726) ([ilyam8](https://github.com/ilyam8)) -- Fix memory corruption [\#15724](https://github.com/netdata/netdata/pull/15724) ([stelfrag](https://github.com/stelfrag)) -- Revert "Refactor RRD code. \(\#15423\)" [\#15723](https://github.com/netdata/netdata/pull/15723) ([vkalintiris](https://github.com/vkalintiris)) -- Changes to the templates for integrations [\#15721](https://github.com/netdata/netdata/pull/15721) ([Ancairon](https://github.com/Ancairon)) -- fix the freez pointer of dyncfg [\#15719](https://github.com/netdata/netdata/pull/15719) ([ktsaou](https://github.com/ktsaou)) -- Update the bundled v2 dashboard to the latest release. [\#15718](https://github.com/netdata/netdata/pull/15718) ([Ferroin](https://github.com/Ferroin)) -- Regenerate integrations.js [\#15717](https://github.com/netdata/netdata/pull/15717) ([netdatabot](https://github.com/netdatabot)) -- fix meta deploy docker swarm NC env var [\#15716](https://github.com/netdata/netdata/pull/15716) ([ilyam8](https://github.com/ilyam8)) -- Regenerate integrations.js [\#15713](https://github.com/netdata/netdata/pull/15713) ([netdatabot](https://github.com/netdatabot)) -- Update metadata.yaml [\#15710](https://github.com/netdata/netdata/pull/15710) ([sashwathn](https://github.com/sashwathn)) -- Regenerate integrations.js [\#15709](https://github.com/netdata/netdata/pull/15709) ([netdatabot](https://github.com/netdatabot)) -- integrations: fix docker compose indent [\#15708](https://github.com/netdata/netdata/pull/15708) ([ilyam8](https://github.com/ilyam8)) -- Better cleanup of aclk alert table entries [\#15706](https://github.com/netdata/netdata/pull/15706) ([MrZammler](https://github.com/MrZammler)) -- Regenerate integrations.js [\#15705](https://github.com/netdata/netdata/pull/15705) ([netdatabot](https://github.com/netdatabot)) -- Fix typo in categories for beanstalk collector metadata. [\#15703](https://github.com/netdata/netdata/pull/15703) ([Ferroin](https://github.com/Ferroin)) -- Assorted fixes for integrations templates. [\#15702](https://github.com/netdata/netdata/pull/15702) ([Ferroin](https://github.com/Ferroin)) -- integrations: fix metrics availability [\#15701](https://github.com/netdata/netdata/pull/15701) ([ilyam8](https://github.com/ilyam8)) -- Fix handling of troubleshooting section in integrations. 
[\#15700](https://github.com/netdata/netdata/pull/15700) ([Ferroin](https://github.com/Ferroin)) -- update vscode yaml schemas association [\#15697](https://github.com/netdata/netdata/pull/15697) ([ilyam8](https://github.com/ilyam8)) -- Update categories.yaml [\#15696](https://github.com/netdata/netdata/pull/15696) ([sashwathn](https://github.com/sashwathn)) -- Regenerate integrations.js [\#15695](https://github.com/netdata/netdata/pull/15695) ([netdatabot](https://github.com/netdatabot)) -- Extend eBPF default shutdown [\#15694](https://github.com/netdata/netdata/pull/15694) ([thiagoftsm](https://github.com/thiagoftsm)) -- Fix integrations regen workflow [\#15693](https://github.com/netdata/netdata/pull/15693) ([Ferroin](https://github.com/Ferroin)) -- bump go.d.plugin v0.54.1 [\#15692](https://github.com/netdata/netdata/pull/15692) ([ilyam8](https://github.com/ilyam8)) -- Update names [\#15691](https://github.com/netdata/netdata/pull/15691) ([thiagoftsm](https://github.com/thiagoftsm)) -- Update metadata.yaml [\#15690](https://github.com/netdata/netdata/pull/15690) ([sashwathn](https://github.com/sashwathn)) -- Update categories.yaml [\#15689](https://github.com/netdata/netdata/pull/15689) ([sashwathn](https://github.com/sashwathn)) -- Update metadata.yaml [\#15688](https://github.com/netdata/netdata/pull/15688) ([sashwathn](https://github.com/sashwathn)) -- Update deploy.yaml [\#15687](https://github.com/netdata/netdata/pull/15687) ([sashwathn](https://github.com/sashwathn)) -- Update categories.yaml [\#15686](https://github.com/netdata/netdata/pull/15686) ([sashwathn](https://github.com/sashwathn)) -- Update categories.yaml [\#15685](https://github.com/netdata/netdata/pull/15685) ([sashwathn](https://github.com/sashwathn)) -- Update metadata.yaml [\#15684](https://github.com/netdata/netdata/pull/15684) ([sashwathn](https://github.com/sashwathn)) -- Update categories.yaml [\#15683](https://github.com/netdata/netdata/pull/15683) ([sashwathn](https://github.com/sashwathn)) -- Update categories.yaml [\#15682](https://github.com/netdata/netdata/pull/15682) ([sashwathn](https://github.com/sashwathn)) -- Update categories.yaml [\#15681](https://github.com/netdata/netdata/pull/15681) ([sashwathn](https://github.com/sashwathn)) -- Update metadata.yaml [\#15680](https://github.com/netdata/netdata/pull/15680) ([sashwathn](https://github.com/sashwathn)) -- Update metadata.yaml [\#15679](https://github.com/netdata/netdata/pull/15679) ([shyamvalsan](https://github.com/shyamvalsan)) -- Update metadata.yaml [\#15678](https://github.com/netdata/netdata/pull/15678) ([sashwathn](https://github.com/sashwathn)) -- Update Webhook icon [\#15677](https://github.com/netdata/netdata/pull/15677) ([sashwathn](https://github.com/sashwathn)) -- Update deploy.yaml to fix Docker and Kubernetes commands [\#15676](https://github.com/netdata/netdata/pull/15676) ([sashwathn](https://github.com/sashwathn)) -- meta MacOS =\> macOS [\#15675](https://github.com/netdata/netdata/pull/15675) ([ilyam8](https://github.com/ilyam8)) -- Adapt Cloud notifications to the new schema [\#15674](https://github.com/netdata/netdata/pull/15674) ([sashwathn](https://github.com/sashwathn)) -- Fix formatting [\#15673](https://github.com/netdata/netdata/pull/15673) ([shyamvalsan](https://github.com/shyamvalsan)) -- Fixing tables \(aws sns\) [\#15671](https://github.com/netdata/netdata/pull/15671) ([shyamvalsan](https://github.com/shyamvalsan)) -- Update metadata.yaml for Cloud Notifications 
[\#15670](https://github.com/netdata/netdata/pull/15670) ([sashwathn](https://github.com/sashwathn)) -- remove " Metrics" from linux categories [\#15669](https://github.com/netdata/netdata/pull/15669) ([ilyam8](https://github.com/ilyam8)) -- Fix table formatting \(custom exporter\) [\#15668](https://github.com/netdata/netdata/pull/15668) ([shyamvalsan](https://github.com/shyamvalsan)) -- Fix icon prometheus exporter icon [\#15666](https://github.com/netdata/netdata/pull/15666) ([hugovalente-pm](https://github.com/hugovalente-pm)) -- freeipmi change restart message to info [\#15664](https://github.com/netdata/netdata/pull/15664) ([ilyam8](https://github.com/ilyam8)) -- fix proc.plugin meta filename [\#15659](https://github.com/netdata/netdata/pull/15659) ([ilyam8](https://github.com/ilyam8)) -- small improvements to README.md [\#15658](https://github.com/netdata/netdata/pull/15658) ([ilyam8](https://github.com/ilyam8)) -- Fix icon for solarwinds [\#15657](https://github.com/netdata/netdata/pull/15657) ([hugovalente-pm](https://github.com/hugovalente-pm)) -- Fix Apps plugin icons [\#15655](https://github.com/netdata/netdata/pull/15655) ([hugovalente-pm](https://github.com/hugovalente-pm)) -- fix pandas category [\#15654](https://github.com/netdata/netdata/pull/15654) ([andrewm4894](https://github.com/andrewm4894)) -- Fix exporter icons [\#15652](https://github.com/netdata/netdata/pull/15652) ([shyamvalsan](https://github.com/shyamvalsan)) -- disable freeipmi in docker by default [\#15651](https://github.com/netdata/netdata/pull/15651) ([ilyam8](https://github.com/ilyam8)) -- Fixing FreeBSD icons [\#15650](https://github.com/netdata/netdata/pull/15650) ([shyamvalsan](https://github.com/shyamvalsan)) -- Fix exporter schema to support multiple entries per file. 
[\#15649](https://github.com/netdata/netdata/pull/15649) ([Ferroin](https://github.com/Ferroin)) -- Fixing icons in netdata/netdata repo [\#15647](https://github.com/netdata/netdata/pull/15647) ([shyamvalsan](https://github.com/shyamvalsan)) -- Fix name in the yaml of example python collector [\#15646](https://github.com/netdata/netdata/pull/15646) ([Ancairon](https://github.com/Ancairon)) -- Fix icons [\#15645](https://github.com/netdata/netdata/pull/15645) ([hugovalente-pm](https://github.com/hugovalente-pm)) -- Fix icons for notifications [\#15644](https://github.com/netdata/netdata/pull/15644) ([shyamvalsan](https://github.com/shyamvalsan)) -- convert collectors meta files from single to multi [\#15642](https://github.com/netdata/netdata/pull/15642) ([ilyam8](https://github.com/ilyam8)) -- fix edit-config for containerized Netdata when running from host [\#15641](https://github.com/netdata/netdata/pull/15641) ([ilyam8](https://github.com/ilyam8)) -- fix: 🐛 docker bind-mount stock files creation [\#15639](https://github.com/netdata/netdata/pull/15639) ([Leny1996](https://github.com/Leny1996)) -- The icon\_filename value was not in quotes - Fixed [\#15635](https://github.com/netdata/netdata/pull/15635) ([sashwathn](https://github.com/sashwathn)) -- Update graphite metadata.yaml [\#15634](https://github.com/netdata/netdata/pull/15634) ([shyamvalsan](https://github.com/shyamvalsan)) -- Debugfs yaml update [\#15633](https://github.com/netdata/netdata/pull/15633) ([thiagoftsm](https://github.com/thiagoftsm)) -- Update metadata.yaml [\#15632](https://github.com/netdata/netdata/pull/15632) ([shyamvalsan](https://github.com/shyamvalsan)) -- review images for integrations from security to windows systems [\#15630](https://github.com/netdata/netdata/pull/15630) ([hugovalente-pm](https://github.com/hugovalente-pm)) -- bump ui to v6.23.0 [\#15629](https://github.com/netdata/netdata/pull/15629) ([ilyam8](https://github.com/ilyam8)) -- Updated Cloud Notification Integrations with the new schema [\#15628](https://github.com/netdata/netdata/pull/15628) ([sashwathn](https://github.com/sashwathn)) -- Add additional variable section to instance data in schema. [\#15627](https://github.com/netdata/netdata/pull/15627) ([Ferroin](https://github.com/Ferroin)) -- fix icons for message brokers and hardware [\#15626](https://github.com/netdata/netdata/pull/15626) ([hugovalente-pm](https://github.com/hugovalente-pm)) -- Add key for notifications to control what global config options get displayed [\#15625](https://github.com/netdata/netdata/pull/15625) ([Ferroin](https://github.com/Ferroin)) -- fix icons for webservers integrations [\#15624](https://github.com/netdata/netdata/pull/15624) ([hugovalente-pm](https://github.com/hugovalente-pm)) -- Add notification metadata for agent notifications [\#15622](https://github.com/netdata/netdata/pull/15622) ([shyamvalsan](https://github.com/shyamvalsan)) -- fix icons for db integrations [\#15621](https://github.com/netdata/netdata/pull/15621) ([hugovalente-pm](https://github.com/hugovalente-pm)) -- Rename multi\_metadata.yaml to metadata.yaml [\#15619](https://github.com/netdata/netdata/pull/15619) ([shyamvalsan](https://github.com/shyamvalsan)) -- Rename multi\_metadata.yaml to metadata.yaml [\#15618](https://github.com/netdata/netdata/pull/15618) ([shyamvalsan](https://github.com/shyamvalsan)) -- Fix up notification schema to better support cloud notifications. 
[\#15616](https://github.com/netdata/netdata/pull/15616) ([Ferroin](https://github.com/Ferroin)) -- Updated all cloud notifications except generic webhook [\#15615](https://github.com/netdata/netdata/pull/15615) ([sashwathn](https://github.com/sashwathn)) -- prefer titles, families, units and priorities from collected charts [\#15614](https://github.com/netdata/netdata/pull/15614) ([ktsaou](https://github.com/ktsaou)) -- Update categories.yaml to add notifications [\#15613](https://github.com/netdata/netdata/pull/15613) ([sashwathn](https://github.com/sashwathn)) -- ci disable yamllint line-length check [\#15612](https://github.com/netdata/netdata/pull/15612) ([ilyam8](https://github.com/ilyam8)) -- Fix descriptions in config objects, make them single line [\#15610](https://github.com/netdata/netdata/pull/15610) ([Ancairon](https://github.com/Ancairon)) -- Update icons [\#15609](https://github.com/netdata/netdata/pull/15609) ([shyamvalsan](https://github.com/shyamvalsan)) -- Update icon [\#15608](https://github.com/netdata/netdata/pull/15608) ([shyamvalsan](https://github.com/shyamvalsan)) -- Update icon [\#15607](https://github.com/netdata/netdata/pull/15607) ([shyamvalsan](https://github.com/shyamvalsan)) -- Update documentation [\#15606](https://github.com/netdata/netdata/pull/15606) ([kiela](https://github.com/kiela)) -- fix potential crash bug. [\#15605](https://github.com/netdata/netdata/pull/15605) ([icy17](https://github.com/icy17)) -- FreeBSD yaml update [\#15603](https://github.com/netdata/netdata/pull/15603) ([thiagoftsm](https://github.com/thiagoftsm)) -- Macos yaml update [\#15602](https://github.com/netdata/netdata/pull/15602) ([thiagoftsm](https://github.com/thiagoftsm)) -- minor changes in README.md [\#15601](https://github.com/netdata/netdata/pull/15601) ([tkatsoulas](https://github.com/tkatsoulas)) -- reviewed icos for a bunch of integrations [\#15599](https://github.com/netdata/netdata/pull/15599) ([hugovalente-pm](https://github.com/hugovalente-pm)) -- Sample Cloud Notifications metadata for Discord [\#15597](https://github.com/netdata/netdata/pull/15597) ([sashwathn](https://github.com/sashwathn)) -- Updated icons in deploy section [\#15596](https://github.com/netdata/netdata/pull/15596) ([shyamvalsan](https://github.com/shyamvalsan)) -- 10 points per query min [\#15595](https://github.com/netdata/netdata/pull/15595) ([ktsaou](https://github.com/ktsaou)) -- CUPS yaml update [\#15594](https://github.com/netdata/netdata/pull/15594) ([thiagoftsm](https://github.com/thiagoftsm)) -- remove metrics.csv files [\#15593](https://github.com/netdata/netdata/pull/15593) ([ilyam8](https://github.com/ilyam8)) -- fix tomcat meta [\#15592](https://github.com/netdata/netdata/pull/15592) ([ilyam8](https://github.com/ilyam8)) -- Added a sample metadata.yaml for Alerta [\#15591](https://github.com/netdata/netdata/pull/15591) ([sashwathn](https://github.com/sashwathn)) -- remove the noise by silencing alerts that dont need to wake up people [\#15590](https://github.com/netdata/netdata/pull/15590) ([ktsaou](https://github.com/ktsaou)) -- Fix health query [\#15589](https://github.com/netdata/netdata/pull/15589) ([stelfrag](https://github.com/stelfrag)) -- Fix typo in notification schema. 
[\#15588](https://github.com/netdata/netdata/pull/15588) ([Ferroin](https://github.com/Ferroin)) -- Update icons for relevant integrations in proc.plugin [\#15587](https://github.com/netdata/netdata/pull/15587) ([sashwathn](https://github.com/sashwathn)) -- Update icon for power supply [\#15586](https://github.com/netdata/netdata/pull/15586) ([sashwathn](https://github.com/sashwathn)) -- Update Slabinfo Logo [\#15585](https://github.com/netdata/netdata/pull/15585) ([sashwathn](https://github.com/sashwathn)) -- fix cpu MHz from /proc/cpuinfo [\#15584](https://github.com/netdata/netdata/pull/15584) ([ilyam8](https://github.com/ilyam8)) -- small readme icon fix [\#15583](https://github.com/netdata/netdata/pull/15583) ([andrewm4894](https://github.com/andrewm4894)) -- update pandas collector metadata [\#15582](https://github.com/netdata/netdata/pull/15582) ([andrewm4894](https://github.com/andrewm4894)) -- Update zscores metadata yaml [\#15581](https://github.com/netdata/netdata/pull/15581) ([andrewm4894](https://github.com/andrewm4894)) -- Create metadata.yaml for MongoDB exporter [\#15580](https://github.com/netdata/netdata/pull/15580) ([shyamvalsan](https://github.com/shyamvalsan)) -- Create metadata.yaml for JSON exporter [\#15579](https://github.com/netdata/netdata/pull/15579) ([shyamvalsan](https://github.com/shyamvalsan)) -- Create metadata.yaml for Google PubSub exporter [\#15578](https://github.com/netdata/netdata/pull/15578) ([shyamvalsan](https://github.com/shyamvalsan)) -- Create metadata.yaml for AWS kinesis exporter [\#15577](https://github.com/netdata/netdata/pull/15577) ([shyamvalsan](https://github.com/shyamvalsan)) -- Create multi\_metadata.yaml for graphite exporters [\#15576](https://github.com/netdata/netdata/pull/15576) ([shyamvalsan](https://github.com/shyamvalsan)) -- Create multi\_metadata.yaml [\#15575](https://github.com/netdata/netdata/pull/15575) ([shyamvalsan](https://github.com/shyamvalsan)) -- Add missing file in CMakeLists.txt [\#15574](https://github.com/netdata/netdata/pull/15574) ([stelfrag](https://github.com/stelfrag)) -- comment out anomalies metadata and add note [\#15573](https://github.com/netdata/netdata/pull/15573) ([andrewm4894](https://github.com/andrewm4894)) -- Fixed deployment commands for Docker, Kubernetes and Linux [\#15572](https://github.com/netdata/netdata/pull/15572) ([sashwathn](https://github.com/sashwathn)) -- filter out systemd-udevd.service/udevd [\#15571](https://github.com/netdata/netdata/pull/15571) ([ilyam8](https://github.com/ilyam8)) -- Added FreeBSD integration and fixed Windows installation Steps [\#15570](https://github.com/netdata/netdata/pull/15570) ([sashwathn](https://github.com/sashwathn)) -- fix schema validation for some meta files [\#15569](https://github.com/netdata/netdata/pull/15569) ([ilyam8](https://github.com/ilyam8)) -- Drop duplicate / unused index [\#15568](https://github.com/netdata/netdata/pull/15568) ([stelfrag](https://github.com/stelfrag)) -- Xen yaml update [\#15567](https://github.com/netdata/netdata/pull/15567) ([thiagoftsm](https://github.com/thiagoftsm)) -- Timex yaml update [\#15565](https://github.com/netdata/netdata/pull/15565) ([thiagoftsm](https://github.com/thiagoftsm)) -- Create metadata.yaml for OpenTSDB Exporter [\#15563](https://github.com/netdata/netdata/pull/15563) ([shyamvalsan](https://github.com/shyamvalsan)) -- TC yaml update [\#15562](https://github.com/netdata/netdata/pull/15562) ([thiagoftsm](https://github.com/thiagoftsm)) -- Added Exporter and Notifications categories and 
removed them from Data Collection [\#15561](https://github.com/netdata/netdata/pull/15561) ([sashwathn](https://github.com/sashwathn)) -- Update slabinfo yaml [\#15560](https://github.com/netdata/netdata/pull/15560) ([thiagoftsm](https://github.com/thiagoftsm)) -- Update metadata.yaml for charts.d collectors [\#15559](https://github.com/netdata/netdata/pull/15559) ([MrZammler](https://github.com/MrZammler)) -- Perf yaml [\#15558](https://github.com/netdata/netdata/pull/15558) ([thiagoftsm](https://github.com/thiagoftsm)) -- detect the path the netdata-claim.sh script is in [\#15556](https://github.com/netdata/netdata/pull/15556) ([ktsaou](https://github.com/ktsaou)) -- Fixed typos in code blocks and added missing icons [\#15555](https://github.com/netdata/netdata/pull/15555) ([sashwathn](https://github.com/sashwathn)) -- Remove temporarily from the CI Tumbleweed support [\#15554](https://github.com/netdata/netdata/pull/15554) ([tkatsoulas](https://github.com/tkatsoulas)) -- fix ebpf.plugin system swapcalls [\#15553](https://github.com/netdata/netdata/pull/15553) ([ilyam8](https://github.com/ilyam8)) -- Fixes for `deploy.yaml`. [\#15551](https://github.com/netdata/netdata/pull/15551) ([Ferroin](https://github.com/Ferroin)) -- bump ui to v6.22.1 [\#15550](https://github.com/netdata/netdata/pull/15550) ([ilyam8](https://github.com/ilyam8)) -- Add schema and examples for notification method metadata. [\#15549](https://github.com/netdata/netdata/pull/15549) ([Ferroin](https://github.com/Ferroin)) -- Update python sensors metadata yaml [\#15548](https://github.com/netdata/netdata/pull/15548) ([andrewm4894](https://github.com/andrewm4894)) -- fix yamls [\#15547](https://github.com/netdata/netdata/pull/15547) ([Ancairon](https://github.com/Ancairon)) -- fix expiration dates for API responses [\#15546](https://github.com/netdata/netdata/pull/15546) ([ktsaou](https://github.com/ktsaou)) -- Add exporter integration schema. [\#15545](https://github.com/netdata/netdata/pull/15545) ([Ferroin](https://github.com/Ferroin)) -- postfix metadata.yaml - add links and some descriptions [\#15544](https://github.com/netdata/netdata/pull/15544) ([andrewm4894](https://github.com/andrewm4894)) -- Update metadata for multiple python collectors. 
[\#15543](https://github.com/netdata/netdata/pull/15543) ([tkatsoulas](https://github.com/tkatsoulas)) -- bump ui to v6.22.0 [\#15542](https://github.com/netdata/netdata/pull/15542) ([ilyam8](https://github.com/ilyam8)) -- Fill in yaml files for some python collectors [\#15541](https://github.com/netdata/netdata/pull/15541) ([Ancairon](https://github.com/Ancairon)) -- Fix deployment and categories [\#15540](https://github.com/netdata/netdata/pull/15540) ([sashwathn](https://github.com/sashwathn)) -- docs: fix apps fd badges and typos [\#15539](https://github.com/netdata/netdata/pull/15539) ([ilyam8](https://github.com/ilyam8)) -- change api.netdata.cloud to app.netdata.cloud [\#15538](https://github.com/netdata/netdata/pull/15538) ([ilyam8](https://github.com/ilyam8)) -- Update metadata.yaml for some python collectors - 2 [\#15537](https://github.com/netdata/netdata/pull/15537) ([MrZammler](https://github.com/MrZammler)) -- Change nvidia\_smi link to go version in COLLECTORS.md [\#15536](https://github.com/netdata/netdata/pull/15536) ([Ancairon](https://github.com/Ancairon)) -- Update nfacct yaml [\#15535](https://github.com/netdata/netdata/pull/15535) ([thiagoftsm](https://github.com/thiagoftsm)) -- Update ioping yaml [\#15534](https://github.com/netdata/netdata/pull/15534) ([thiagoftsm](https://github.com/thiagoftsm)) -- Freeimpi yaml [\#15533](https://github.com/netdata/netdata/pull/15533) ([thiagoftsm](https://github.com/thiagoftsm)) -- Updated all Linux distros, macOS and Docker [\#15532](https://github.com/netdata/netdata/pull/15532) ([sashwathn](https://github.com/sashwathn)) -- Update platform support info and add a schema. [\#15531](https://github.com/netdata/netdata/pull/15531) ([Ferroin](https://github.com/Ferroin)) -- added cloud status in registry?action=hello [\#15530](https://github.com/netdata/netdata/pull/15530) ([ktsaou](https://github.com/ktsaou)) -- update memcached metadata.yaml [\#15529](https://github.com/netdata/netdata/pull/15529) ([andrewm4894](https://github.com/andrewm4894)) -- Update python d varnish metadata [\#15528](https://github.com/netdata/netdata/pull/15528) ([andrewm4894](https://github.com/andrewm4894)) -- Update yaml description \(diskspace\) [\#15527](https://github.com/netdata/netdata/pull/15527) ([thiagoftsm](https://github.com/thiagoftsm)) -- wait for node\_id while claiming [\#15526](https://github.com/netdata/netdata/pull/15526) ([ktsaou](https://github.com/ktsaou)) -- add `diskquota` collector to third party collectors list [\#15524](https://github.com/netdata/netdata/pull/15524) ([andrewm4894](https://github.com/andrewm4894)) -- Add quick\_start key to deploy schema. [\#15522](https://github.com/netdata/netdata/pull/15522) ([Ferroin](https://github.com/Ferroin)) -- Add a schema for the categories.yaml file. 
[\#15521](https://github.com/netdata/netdata/pull/15521) ([Ferroin](https://github.com/Ferroin)) -- fix collector multi schema [\#15520](https://github.com/netdata/netdata/pull/15520) ([ilyam8](https://github.com/ilyam8)) -- Allow to create alert hashes with --disable-cloud [\#15519](https://github.com/netdata/netdata/pull/15519) ([MrZammler](https://github.com/MrZammler)) -- Python collector yaml updates [\#15517](https://github.com/netdata/netdata/pull/15517) ([Ancairon](https://github.com/Ancairon)) -- eBPF Yaml complement [\#15516](https://github.com/netdata/netdata/pull/15516) ([thiagoftsm](https://github.com/thiagoftsm)) -- Add AMD GPU collector [\#15515](https://github.com/netdata/netdata/pull/15515) ([Dim-P](https://github.com/Dim-P)) -- Update metadata.yaml for some python collectors [\#15513](https://github.com/netdata/netdata/pull/15513) ([MrZammler](https://github.com/MrZammler)) -- Update metadata.yaml for some python collectors [\#15510](https://github.com/netdata/netdata/pull/15510) ([andrewm4894](https://github.com/andrewm4894)) -- Add schema for deployment integrations and centralize integrations schemas. [\#15509](https://github.com/netdata/netdata/pull/15509) ([Ferroin](https://github.com/Ferroin)) -- update gitignore to include vscode settings for schema validation [\#15508](https://github.com/netdata/netdata/pull/15508) ([andrewm4894](https://github.com/andrewm4894)) -- Add Samba collector yaml [\#15507](https://github.com/netdata/netdata/pull/15507) ([Ancairon](https://github.com/Ancairon)) -- Fill in metadata for idlejitter plugin. [\#15506](https://github.com/netdata/netdata/pull/15506) ([Ferroin](https://github.com/Ferroin)) -- apps.plugin limits tracing [\#15504](https://github.com/netdata/netdata/pull/15504) ([ktsaou](https://github.com/ktsaou)) -- Allow manage/health api call to be used without bearer [\#15503](https://github.com/netdata/netdata/pull/15503) ([MrZammler](https://github.com/MrZammler)) -- Avoid an extra uuid\_copy when creating new MRG entries [\#15502](https://github.com/netdata/netdata/pull/15502) ([stelfrag](https://github.com/stelfrag)) -- freeipmi flush keepalive msgs [\#15499](https://github.com/netdata/netdata/pull/15499) ([ilyam8](https://github.com/ilyam8)) -- add required properties to multi-module schema [\#15496](https://github.com/netdata/netdata/pull/15496) ([ilyam8](https://github.com/ilyam8)) -- proc integrations [\#15494](https://github.com/netdata/netdata/pull/15494) ([ktsaou](https://github.com/ktsaou)) -- docs: clarify health percentage option [\#15492](https://github.com/netdata/netdata/pull/15492) ([ilyam8](https://github.com/ilyam8)) -- Fix resource leak - CID 396310 [\#15491](https://github.com/netdata/netdata/pull/15491) ([stelfrag](https://github.com/stelfrag)) -- Improve the update of the alert chart name in the database [\#15490](https://github.com/netdata/netdata/pull/15490) ([stelfrag](https://github.com/stelfrag)) -- PCI Advanced Error Reporting \(AER\) [\#15488](https://github.com/netdata/netdata/pull/15488) ([ktsaou](https://github.com/ktsaou)) -- Dynamic Config MVP0 [\#15486](https://github.com/netdata/netdata/pull/15486) ([underhood](https://github.com/underhood)) -- Add a machine distinct id to analytics [\#15485](https://github.com/netdata/netdata/pull/15485) ([MrZammler](https://github.com/MrZammler)) -- Add basic slabinfo metadata. 
[\#15484](https://github.com/netdata/netdata/pull/15484) ([Ferroin](https://github.com/Ferroin)) -- Update charts.d.plugin yaml [\#15483](https://github.com/netdata/netdata/pull/15483) ([Ancairon](https://github.com/Ancairon)) -- Make title reflect legacy agent dashboard [\#15479](https://github.com/netdata/netdata/pull/15479) ([Ancairon](https://github.com/Ancairon)) -- docs: note that health foreach works only with template [\#15478](https://github.com/netdata/netdata/pull/15478) ([ilyam8](https://github.com/ilyam8)) -- Yaml file updates [\#15477](https://github.com/netdata/netdata/pull/15477) ([Ancairon](https://github.com/Ancairon)) -- Rename most-popular to most\_popular in categories.yaml [\#15476](https://github.com/netdata/netdata/pull/15476) ([Ancairon](https://github.com/Ancairon)) -- Fix coverity issue [\#15475](https://github.com/netdata/netdata/pull/15475) ([stelfrag](https://github.com/stelfrag)) -- eBPF Yaml [\#15474](https://github.com/netdata/netdata/pull/15474) ([thiagoftsm](https://github.com/thiagoftsm)) -- Memory Controller \(MC\) and DIMM Error Detection And Correction \(EDAC\) [\#15473](https://github.com/netdata/netdata/pull/15473) ([ktsaou](https://github.com/ktsaou)) -- meta schema change multi-instance to multi\_instance [\#15470](https://github.com/netdata/netdata/pull/15470) ([ilyam8](https://github.com/ilyam8)) -- fix anchors [\#15469](https://github.com/netdata/netdata/pull/15469) ([Ancairon](https://github.com/Ancairon)) -- fix the calculation of incremental-sum [\#15468](https://github.com/netdata/netdata/pull/15468) ([ktsaou](https://github.com/ktsaou)) -- apps.plugin fds limits improvements [\#15467](https://github.com/netdata/netdata/pull/15467) ([ktsaou](https://github.com/ktsaou)) -- Add community key in schema [\#15465](https://github.com/netdata/netdata/pull/15465) ([Ancairon](https://github.com/Ancairon)) -- Overhaul deployment strategies documentation [\#15464](https://github.com/netdata/netdata/pull/15464) ([ralphm](https://github.com/ralphm)) -- Update debugfs plugin metadata. [\#15463](https://github.com/netdata/netdata/pull/15463) ([Ferroin](https://github.com/Ferroin)) -- Update proc plugin yaml [\#15460](https://github.com/netdata/netdata/pull/15460) ([Ancairon](https://github.com/Ancairon)) -- Macos yaml updates [\#15459](https://github.com/netdata/netdata/pull/15459) ([Ancairon](https://github.com/Ancairon)) -- Freeipmi yaml updates [\#15458](https://github.com/netdata/netdata/pull/15458) ([Ancairon](https://github.com/Ancairon)) -- Add short descriptions to cgroups yaml [\#15457](https://github.com/netdata/netdata/pull/15457) ([Ancairon](https://github.com/Ancairon)) -- readme: reorder cols in whats new and add links [\#15455](https://github.com/netdata/netdata/pull/15455) ([andrewm4894](https://github.com/andrewm4894)) -- Store and transmit chart\_name to cloud in alert events [\#15441](https://github.com/netdata/netdata/pull/15441) ([MrZammler](https://github.com/MrZammler)) -- Refactor RRD code. 
[\#15423](https://github.com/netdata/netdata/pull/15423) ([vkalintiris](https://github.com/vkalintiris)) - ## [v1.41.0](https://github.com/netdata/netdata/tree/v1.41.0) (2023-07-19) [Full Changelog](https://github.com/netdata/netdata/compare/v1.40.1...v1.41.0) -**Merged pull requests:** - -- Include license for web v2 [\#15453](https://github.com/netdata/netdata/pull/15453) ([tkatsoulas](https://github.com/tkatsoulas)) -- Updates to metadata.yaml [\#15452](https://github.com/netdata/netdata/pull/15452) ([shyamvalsan](https://github.com/shyamvalsan)) -- Add apps yaml [\#15451](https://github.com/netdata/netdata/pull/15451) ([Ancairon](https://github.com/Ancairon)) -- Add cgroups yaml [\#15450](https://github.com/netdata/netdata/pull/15450) ([Ancairon](https://github.com/Ancairon)) -- Fix multiline [\#15449](https://github.com/netdata/netdata/pull/15449) ([Ancairon](https://github.com/Ancairon)) -- bump v2 dashboard to v6.21.3 [\#15448](https://github.com/netdata/netdata/pull/15448) ([ilyam8](https://github.com/ilyam8)) -- fix alerts transitions search when something specific is asked for [\#15447](https://github.com/netdata/netdata/pull/15447) ([ktsaou](https://github.com/ktsaou)) -- collector meta: remove meta.alternative\_monitored\_instances [\#15445](https://github.com/netdata/netdata/pull/15445) ([ilyam8](https://github.com/ilyam8)) -- added missing fields to alerts instances [\#15442](https://github.com/netdata/netdata/pull/15442) ([ktsaou](https://github.com/ktsaou)) -- removed dup categories [\#15440](https://github.com/netdata/netdata/pull/15440) ([hugovalente-pm](https://github.com/hugovalente-pm)) -- Create netdata-assistant docs [\#15438](https://github.com/netdata/netdata/pull/15438) ([shyamvalsan](https://github.com/shyamvalsan)) -- apps.plugin fds limits improvements [\#15437](https://github.com/netdata/netdata/pull/15437) ([ktsaou](https://github.com/ktsaou)) -- disable apps\_group\_file\_descriptors\_utilization alarm [\#15435](https://github.com/netdata/netdata/pull/15435) ([ilyam8](https://github.com/ilyam8)) -- Add catch-all category entry in categories.yaml [\#15434](https://github.com/netdata/netdata/pull/15434) ([Ancairon](https://github.com/Ancairon)) -- Update CODEOWNERS [\#15433](https://github.com/netdata/netdata/pull/15433) ([andrewm4894](https://github.com/andrewm4894)) -- Remove duplicate category from categories.yaml [\#15432](https://github.com/netdata/netdata/pull/15432) ([Ancairon](https://github.com/Ancairon)) -- readme: add link for netdata cloud and sign-in cta [\#15431](https://github.com/netdata/netdata/pull/15431) ([andrewm4894](https://github.com/andrewm4894)) -- add chart id and name to alert instances and transitions [\#15430](https://github.com/netdata/netdata/pull/15430) ([ktsaou](https://github.com/ktsaou)) -- update v2 dashboard [\#15427](https://github.com/netdata/netdata/pull/15427) ([ilyam8](https://github.com/ilyam8)) -- fix unlocked registry access and add hostname to search response [\#15426](https://github.com/netdata/netdata/pull/15426) ([ktsaou](https://github.com/ktsaou)) -- Update README.md [\#15424](https://github.com/netdata/netdata/pull/15424) ([christophidesp](https://github.com/christophidesp)) -- Decode url before checking for question mark [\#15422](https://github.com/netdata/netdata/pull/15422) ([MrZammler](https://github.com/MrZammler)) -- use real-time clock for http response headers [\#15421](https://github.com/netdata/netdata/pull/15421) ([ktsaou](https://github.com/ktsaou)) -- Bugfix on alerts generation for yamls 
[\#15420](https://github.com/netdata/netdata/pull/15420) ([Ancairon](https://github.com/Ancairon)) -- Minor typo fix on consul.conf [\#15419](https://github.com/netdata/netdata/pull/15419) ([Ancairon](https://github.com/Ancairon)) -- monitor applications file descriptor limits [\#15417](https://github.com/netdata/netdata/pull/15417) ([ktsaou](https://github.com/ktsaou)) -- Update README.md [\#15416](https://github.com/netdata/netdata/pull/15416) ([ktsaou](https://github.com/ktsaou)) -- Update README.md [\#15414](https://github.com/netdata/netdata/pull/15414) ([ktsaou](https://github.com/ktsaou)) -- collector meta: restrict chart\_type to known values [\#15413](https://github.com/netdata/netdata/pull/15413) ([ilyam8](https://github.com/ilyam8)) -- Update README.md [\#15412](https://github.com/netdata/netdata/pull/15412) ([tkatsoulas](https://github.com/tkatsoulas)) -- add reference to cncf [\#15408](https://github.com/netdata/netdata/pull/15408) ([hugovalente-pm](https://github.com/hugovalente-pm)) - ## [v1.40.1](https://github.com/netdata/netdata/tree/v1.40.1) (2023-06-27) [Full Changelog](https://github.com/netdata/netdata/compare/v1.40.0...v1.40.1) diff --git a/CMakeLists.txt b/CMakeLists.txt index eb96b461c82acc..5d70d470bd7a1b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -465,6 +465,7 @@ set(LIBNETDATA_FILES libnetdata/log/log.h libnetdata/os.c libnetdata/os.h + libnetdata/endian.h libnetdata/onewayalloc/onewayalloc.c libnetdata/onewayalloc/onewayalloc.h libnetdata/popen/popen.c @@ -488,9 +489,15 @@ set(LIBNETDATA_FILES libnetdata/threads/threads.h libnetdata/url/url.c libnetdata/url/url.h + libnetdata/dyn_conf/dyn_conf.c + libnetdata/dyn_conf/dyn_conf.h libnetdata/string/utf8.h libnetdata/worker_utilization/worker_utilization.c libnetdata/worker_utilization/worker_utilization.h + libnetdata/facets/facets.c + libnetdata/facets/facets.h + libnetdata/functions_evloop/functions_evloop.h + libnetdata/functions_evloop/functions_evloop.c libnetdata/http/http_defs.h ) @@ -711,6 +718,10 @@ set(STATSD_PLUGIN_FILES collectors/statsd.plugin/statsd.c ) +set(SYSTEMD_JOURNAL_PLUGIN_FILES + collectors/systemd-journal.plugin/systemd-journal.c + ) + set(RRD_PLUGIN_FILES database/contexts/api_v1.c database/contexts/api_v2.c @@ -735,6 +746,7 @@ set(RRD_PLUGIN_FILES database/rrdfunctions.h database/rrdhost.c database/rrdlabels.c + database/rrdlabels.h database/rrd.c database/rrd.h database/rrdset.c @@ -764,6 +776,9 @@ set(RRD_PLUGIN_FILES database/sqlite/sqlite_aclk_alert.h database/sqlite/sqlite3.c database/sqlite/sqlite3.h + database/sqlite/sqlite3recover.c + database/sqlite/sqlite3recover.h + database/sqlite/dbdata.c database/engine/rrdengine.c database/engine/rrdengine.h database/engine/rrddiskprotocol.h @@ -783,6 +798,10 @@ set(RRD_PLUGIN_FILES database/engine/metric.h database/engine/pdc.c database/engine/pdc.h + database/engine/page.c + database/engine/page.h + database/engine/page_test.cc + database/engine/page_test.h database/KolmogorovSmirnovDist.c database/KolmogorovSmirnovDist.h ) @@ -870,6 +889,7 @@ set(STREAMING_PLUGIN_FILES streaming/sender.c streaming/replication.c streaming/replication.h + streaming/common.h ) set(CLAIM_PLUGIN_FILES @@ -1055,6 +1075,44 @@ ELSE() message(STATUS "ML: disabled") ENDIF() +set(LOGSMANAGEMENT_FILES + logsmanagement/rrd_api/rrd_api_docker_ev.c + logsmanagement/rrd_api/rrd_api_docker_ev.h + logsmanagement/rrd_api/rrd_api_generic.c + logsmanagement/rrd_api/rrd_api_generic.h + logsmanagement/rrd_api/rrd_api_kernel.c + logsmanagement/rrd_api/rrd_api_kernel.h 
+ logsmanagement/rrd_api/rrd_api_mqtt.c + logsmanagement/rrd_api/rrd_api_mqtt.h + logsmanagement/rrd_api/rrd_api_stats.c + logsmanagement/rrd_api/rrd_api_stats.h + logsmanagement/rrd_api/rrd_api_systemd.c + logsmanagement/rrd_api/rrd_api_systemd.h + logsmanagement/rrd_api/rrd_api_web_log.c + logsmanagement/rrd_api/rrd_api_web_log.h + logsmanagement/rrd_api/rrd_api.h + logsmanagement/unit_test/unit_test.c + logsmanagement/unit_test/unit_test.h + logsmanagement/circular_buffer.c + logsmanagement/circular_buffer.h + logsmanagement/db_api.c + logsmanagement/db_api.h + logsmanagement/file_info.h + logsmanagement/flb_plugin.c + logsmanagement/flb_plugin.h + logsmanagement/functions.c + logsmanagement/functions.h + logsmanagement/helper.h + logsmanagement/defaults.h + logsmanagement/logsmanag_config.c + logsmanagement/logsmanag_config.h + logsmanagement/logsmanagement.c + logsmanagement/parser.c + logsmanagement/parser.h + logsmanagement/query.c + logsmanagement/query.h + ) + set(NETDATA_FILES collectors/all.h ${DAEMON_FILES} @@ -1067,6 +1125,7 @@ set(NETDATA_FILES ${RRD_PLUGIN_FILES} ${REGISTRY_PLUGIN_FILES} ${STATSD_PLUGIN_FILES} + ${SYSTEMD_JOURNAL_PLUGIN_FILES} ${STREAMING_PLUGIN_FILES} ${WEB_PLUGIN_FILES} ${CLAIM_PLUGIN_FILES} @@ -1092,6 +1151,13 @@ add_definitions( -DVARLIB_DIR="/var/lib/netdata" ) +# ----------------------------------------------------------------------------- +# logs management + +IF(ENABLE_LOGSMANAGEMENT) + list(APPEND NETDATA_FILES ${LOGSMANAGEMENT_FILES}) +ENDIF() + # ----------------------------------------------------------------------------- # kinesis exporting connector @@ -1728,7 +1794,6 @@ endif() endif() endif() - # generate config.h so that CMake becomes independent of automake ## netdata version diff --git a/Makefile.am b/Makefile.am index e0e85d2881e285..398c6fb20cec23 100644 --- a/Makefile.am +++ b/Makefile.am @@ -114,6 +114,7 @@ SUBDIRS += \ web \ claim \ spawn \ + logsmanagement \ $(NULL) AM_CFLAGS = \ @@ -128,6 +129,8 @@ AM_CFLAGS = \ $(OPTIONAL_CUPS_CFLAGS) \ $(OPTIONAL_XENSTAT_CFLAGS) \ $(OPTIONAL_BPF_CFLAGS) \ + $(OPTIONAL_SYSTEMD_CFLAGS) \ + $(OPTIONAL_GTEST_CFLAGS) \ $(NULL) sbin_PROGRAMS = @@ -144,18 +147,28 @@ LIBNETDATA_FILES = \ libnetdata/avl/avl.h \ libnetdata/buffer/buffer.c \ libnetdata/buffer/buffer.h \ + libnetdata/buffered_reader/buffered_reader.c \ + libnetdata/buffered_reader/buffered_reader.h \ libnetdata/circular_buffer/circular_buffer.c \ libnetdata/circular_buffer/circular_buffer.h \ libnetdata/clocks/clocks.c \ libnetdata/clocks/clocks.h \ libnetdata/completion/completion.c \ libnetdata/completion/completion.h \ + libnetdata/datetime/iso8601.c \ + libnetdata/datetime/iso8601.h \ + libnetdata/datetime/rfc3339.c \ + libnetdata/datetime/rfc3339.h \ + libnetdata/datetime/rfc7231.c \ + libnetdata/datetime/rfc7231.h \ libnetdata/dictionary/dictionary.c \ libnetdata/dictionary/dictionary.h \ libnetdata/eval/eval.c \ libnetdata/eval/eval.h \ libnetdata/facets/facets.c \ libnetdata/facets/facets.h \ + libnetdata/functions_evloop/functions_evloop.c \ + libnetdata/functions_evloop/functions_evloop.h \ libnetdata/gorilla/gorilla.h \ libnetdata/gorilla/gorilla.cc \ libnetdata/inlined.h \ @@ -164,8 +177,12 @@ LIBNETDATA_FILES = \ libnetdata/libnetdata.c \ libnetdata/libnetdata.h \ libnetdata/required_dummies.h \ + libnetdata/line_splitter/line_splitter.c \ + libnetdata/line_splitter/line_splitter.h \ libnetdata/locks/locks.c \ libnetdata/locks/locks.h \ + libnetdata/log/journal.c \ + libnetdata/log/journal.h \ libnetdata/log/log.c \ 
libnetdata/log/log.h \ libnetdata/onewayalloc/onewayalloc.c \ @@ -176,6 +193,7 @@ LIBNETDATA_FILES = \ libnetdata/procfile/procfile.h \ libnetdata/os.c \ libnetdata/os.h \ + libnetdata/endian.h \ libnetdata/simple_pattern/simple_pattern.c \ libnetdata/simple_pattern/simple_pattern.h \ libnetdata/socket/socket.c \ @@ -192,6 +210,8 @@ LIBNETDATA_FILES = \ libnetdata/threads/threads.h \ libnetdata/url/url.c \ libnetdata/url/url.h \ + libnetdata/uuid/uuid.c \ + libnetdata/uuid/uuid.h \ libnetdata/json/json.c \ libnetdata/json/json.h \ libnetdata/json/jsmn.c \ @@ -201,9 +221,11 @@ LIBNETDATA_FILES = \ libnetdata/string/utf8.h \ libnetdata/worker_utilization/worker_utilization.c \ libnetdata/worker_utilization/worker_utilization.h \ + libnetdata/xxhash.h \ libnetdata/http/http_defs.h \ libnetdata/dyn_conf/dyn_conf.c \ libnetdata/dyn_conf/dyn_conf.h \ + libnetdata/simple_hashtable.h \ $(NULL) if ENABLE_PLUGIN_EBPF @@ -277,6 +299,10 @@ IDLEJITTER_PLUGIN_FILES = \ $(NULL) CGROUPS_PLUGIN_FILES = \ + collectors/cgroups.plugin/cgroup-internals.h \ + collectors/cgroups.plugin/cgroup-discovery.c \ + collectors/cgroups.plugin/cgroup-charts.c \ + collectors/cgroups.plugin/cgroup-top.c \ collectors/cgroups.plugin/sys_fs_cgroup.c \ collectors/cgroups.plugin/sys_fs_cgroup.h \ $(NULL) @@ -305,10 +331,84 @@ FREEIPMI_PLUGIN_FILES = \ $(NULL) SYSTEMD_JOURNAL_PLUGIN_FILES = \ + collectors/systemd-journal.plugin/systemd-internals.h \ + collectors/systemd-journal.plugin/systemd-main.c \ + collectors/systemd-journal.plugin/systemd-units.c \ collectors/systemd-journal.plugin/systemd-journal.c \ + collectors/systemd-journal.plugin/systemd-journal-watcher.c \ + collectors/systemd-journal.plugin/systemd-journal-annotations.c \ + collectors/systemd-journal.plugin/systemd-journal-files.c \ + collectors/systemd-journal.plugin/systemd-journal-fstat.c \ $(LIBNETDATA_FILES) \ $(NULL) +SYSTEMD_CAT_NATIVE_FILES = \ + libnetdata/log/systemd-cat-native.c \ + libnetdata/log/systemd-cat-native.h \ + $(LIBNETDATA_FILES) \ + $(NULL) + +LOG2JOURNAL_FILES = \ + collectors/log2journal/log2journal.h \ + collectors/log2journal/log2journal.c \ + collectors/log2journal/log2journal-help.c \ + collectors/log2journal/log2journal-yaml.c \ + collectors/log2journal/log2journal-json.c \ + collectors/log2journal/log2journal-logfmt.c \ + collectors/log2journal/log2journal-pcre2.c \ + collectors/log2journal/log2journal-params.c \ + collectors/log2journal/log2journal-inject.c \ + collectors/log2journal/log2journal-pattern.c \ + collectors/log2journal/log2journal-replace.c \ + collectors/log2journal/log2journal-rename.c \ + collectors/log2journal/log2journal-rewrite.c \ + $(NULL) + + +LOGSMANAGEMENT_FILES = \ + logsmanagement/circular_buffer.c \ + logsmanagement/circular_buffer.h \ + logsmanagement/db_api.c \ + logsmanagement/db_api.h \ + logsmanagement/defaults.h \ + logsmanagement/file_info.h \ + logsmanagement/flb_plugin.c \ + logsmanagement/flb_plugin.h \ + logsmanagement/functions.c \ + logsmanagement/functions.h \ + logsmanagement/helper.h \ + logsmanagement/logsmanag_config.c \ + logsmanagement/logsmanag_config.h \ + logsmanagement/logsmanagement.c \ + logsmanagement/parser.c \ + logsmanagement/parser.h \ + logsmanagement/query.c \ + logsmanagement/query.h \ + logsmanagement/rrd_api/rrd_api_docker_ev.c \ + logsmanagement/rrd_api/rrd_api_docker_ev.h \ + logsmanagement/rrd_api/rrd_api_generic.c \ + logsmanagement/rrd_api/rrd_api_generic.h \ + logsmanagement/rrd_api/rrd_api_kernel.c \ + logsmanagement/rrd_api/rrd_api_kernel.h \ + 
logsmanagement/rrd_api/rrd_api_mqtt.c \ + logsmanagement/rrd_api/rrd_api_mqtt.h \ + logsmanagement/rrd_api/rrd_api_stats.c \ + logsmanagement/rrd_api/rrd_api_stats.h \ + logsmanagement/rrd_api/rrd_api_systemd.c \ + logsmanagement/rrd_api/rrd_api_systemd.h \ + logsmanagement/rrd_api/rrd_api_web_log.c \ + logsmanagement/rrd_api/rrd_api_web_log.h \ + logsmanagement/rrd_api/rrd_api.h \ + database/sqlite/sqlite3.c \ + database/sqlite/sqlite3.h \ + $(LIBNETDATA_FILES) \ + $(NULL) + +LOGSMANAGEMENT_TESTS_FILES = \ + logsmanagement/unit_test/unit_test.c \ + logsmanagement/unit_test/unit_test.h \ + $(NULL) + CUPS_PLUGIN_FILES = \ collectors/cups.plugin/cups_plugin.c \ $(LIBNETDATA_FILES) \ @@ -472,6 +572,7 @@ RRD_PLUGIN_FILES = \ database/rrdfamily.c \ database/rrdhost.c \ database/rrdlabels.c \ + database/rrdlabels.h \ database/rrd.c \ database/rrd.h \ database/rrdset.c \ @@ -503,6 +604,9 @@ RRD_PLUGIN_FILES = \ database/sqlite/sqlite_aclk_alert.h \ database/sqlite/sqlite3.c \ database/sqlite/sqlite3.h \ + database/sqlite/sqlite3recover.c \ + database/sqlite/sqlite3recover.h \ + database/sqlite/dbdata.c \ database/KolmogorovSmirnovDist.c \ database/KolmogorovSmirnovDist.h \ $(NULL) @@ -572,10 +676,14 @@ if ENABLE_DBENGINE database/engine/cache.h \ database/engine/metric.c \ database/engine/metric.h \ + database/engine/page.c \ + database/engine/page.h \ + database/engine/page_test.cc \ + database/engine/page_test.h \ database/engine/pdc.c \ database/engine/pdc.h \ $(NULL) - + RRD_PLUGIN_KSY_BUILTFILES = \ database/engine/journalfile_v2.ksy \ database/engine/journalfile_v2_virtmemb.ksy \ @@ -598,6 +706,8 @@ API_PLUGIN_FILES = \ web/api/exporters/allmetrics.h \ web/api/exporters/shell/allmetrics_shell.c \ web/api/exporters/shell/allmetrics_shell.h \ + web/api/ilove/ilove.c \ + web/api/ilove/ilove.h \ web/api/queries/average/average.c \ web/api/queries/average/average.h \ web/api/queries/countif/countif.c \ @@ -657,11 +767,21 @@ API_PLUGIN_FILES = \ STREAMING_PLUGIN_FILES = \ streaming/rrdpush.c \ streaming/compression.c \ + streaming/compression.h \ + streaming/compression_brotli.c \ + streaming/compression_brotli.h \ + streaming/compression_gzip.c \ + streaming/compression_gzip.h \ + streaming/compression_lz4.c \ + streaming/compression_lz4.h \ + streaming/compression_zstd.c \ + streaming/compression_zstd.h \ streaming/sender.c \ streaming/receiver.c \ streaming/replication.h \ streaming/replication.c \ streaming/rrdpush.h \ + streaming/common.h \ $(NULL) REGISTRY_PLUGIN_FILES = \ @@ -702,8 +822,6 @@ CLAIM_FILES = \ if ENABLE_ACLK ACLK_FILES = \ - aclk/aclk_util.c \ - aclk/aclk_util.h \ aclk/aclk_stats.c \ aclk/aclk_stats.h \ aclk/aclk_query.c \ @@ -716,8 +834,6 @@ ACLK_FILES = \ aclk/aclk_tx_msgs.h \ aclk/aclk_rx_msgs.c \ aclk/aclk_rx_msgs.h \ - aclk/https_client.c \ - aclk/https_client.h \ aclk/aclk_alarm_api.c \ aclk/aclk_alarm_api.h \ aclk/aclk_contexts_api.c \ @@ -765,15 +881,9 @@ libmqttwebsockets_a_SOURCES = \ mqtt_websockets/src/common_public.c \ mqtt_websockets/src/include/common_public.h \ mqtt_websockets/src/include/common_internal.h \ - mqtt_websockets/c-rbuf/src/ringbuffer.c \ - mqtt_websockets/c-rbuf/include/ringbuffer.h \ - mqtt_websockets/c-rbuf/src/ringbuffer_internal.h \ - mqtt_websockets/c_rhash/src/c_rhash.c \ - mqtt_websockets/c_rhash/include/c_rhash.h \ - mqtt_websockets/c_rhash/src/c_rhash_internal.h \ $(NULL) -libmqttwebsockets_a_CFLAGS = $(CFLAGS) -DMQTT_WSS_CUSTOM_ALLOC -DRBUF_CUSTOM_MALLOC -DMQTT_WSS_CPUSTATS -I$(srcdir)/aclk/helpers 
-I$(srcdir)/mqtt_websockets/c_rhash/include +libmqttwebsockets_a_CFLAGS = $(CFLAGS) -DMQTT_WSS_CUSTOM_ALLOC -DMQTT_WSS_CPUSTATS -I$(srcdir)/aclk/helpers -I$(srcdir)/mqtt_websockets/c_rhash/include if MQTT_WSS_DEBUG libmqttwebsockets_a_CFLAGS += -DMQTT_WSS_DEBUG @@ -879,8 +989,25 @@ ACLK_ALWAYS_BUILD_FILES = \ aclk/aclk.h \ aclk/aclk_capas.c \ aclk/aclk_capas.h \ + aclk/aclk_util.c \ + aclk/aclk_util.h \ + aclk/https_client.c \ + aclk/https_client.h \ + $(NULL) + +noinst_LIBRARIES += libcrutils.a + +libcrutils_a_SOURCES = \ + mqtt_websockets/c-rbuf/src/ringbuffer.c \ + mqtt_websockets/c-rbuf/include/ringbuffer.h \ + mqtt_websockets/c-rbuf/src/ringbuffer_internal.h \ + mqtt_websockets/c_rhash/src/c_rhash.c \ + mqtt_websockets/c_rhash/include/c_rhash.h \ + mqtt_websockets/c_rhash/src/c_rhash_internal.h \ $(NULL) +libcrutils_a_CFLAGS = $(CFLAGS) -DRBUF_CUSTOM_MALLOC -I$(srcdir)/aclk/helpers -I$(abs_top_srcdir)/mqtt_websockets/c-rbuf/include -I$(srcdir)/mqtt_websockets/c_rhash/include + SPAWN_PLUGIN_FILES = \ spawn/spawn.c \ spawn/spawn_server.c \ @@ -968,6 +1095,10 @@ H2O_FILES = \ web/server/h2o/http_server.h \ web/server/h2o/h2o_utils.c \ web/server/h2o/h2o_utils.h \ + web/server/h2o/streaming.c \ + web/server/h2o/streaming.h \ + web/server/h2o/connlist.c \ + web/server/h2o/connlist.h \ $(NULL) libh2o_a_SOURCES = \ @@ -1134,13 +1265,20 @@ NETDATA_COMMON_LIBS = \ $(OPTIONAL_MQTT_LIBS) \ $(OPTIONAL_UV_LIBS) \ $(OPTIONAL_LZ4_LIBS) \ + $(OPTIONAL_CURL_LIBS) \ + $(OPTIONAL_ZSTD_LIBS) \ + $(OPTIONAL_BROTLIENC_LIBS) \ + $(OPTIONAL_BROTLIDEC_LIBS) \ $(OPTIONAL_DATACHANNEL_LIBS) \ libjudy.a \ + libcrutils.a \ $(OPTIONAL_SSL_LIBS) \ $(OPTIONAL_JSONC_LIBS) \ $(OPTIONAL_YAML_LIBS) \ $(OPTIONAL_ATOMIC_LIBS) \ $(OPTIONAL_DL_LIBS) \ + $(OPTIONAL_SYSTEMD_LIBS) \ + $(OPTIONAL_GTEST_LIBS) \ $(NULL) if ENABLE_ACLK @@ -1240,6 +1378,15 @@ if ENABLE_PLUGIN_FREEIPMI $(NULL) endif +if ENABLE_LOG2JOURNAL + sbin_PROGRAMS += log2journal + log2journal_SOURCES = $(LOG2JOURNAL_FILES) + log2journal_LDADD = \ + $(OPTIONAL_PCRE2_LIBS) \ + $(OPTIONAL_YAML_LIBS) \ + $(NULL) +endif + if ENABLE_PLUGIN_SYSTEMD_JOURNAL plugins_PROGRAMS += systemd-journal.plugin systemd_journal_plugin_SOURCES = $(SYSTEMD_JOURNAL_PLUGIN_FILES) @@ -1249,6 +1396,24 @@ if ENABLE_PLUGIN_SYSTEMD_JOURNAL $(NULL) endif +sbin_PROGRAMS += systemd-cat-native +systemd_cat_native_SOURCES = $(SYSTEMD_CAT_NATIVE_FILES) +systemd_cat_native_LDADD = \ + $(NETDATA_COMMON_LIBS) \ + $(NULL) + +if ENABLE_LOGSMANAGEMENT + plugins_PROGRAMS += logs-management.plugin + logs_management_plugin_SOURCES = $(LOGSMANAGEMENT_FILES) +if ENABLE_LOGSMANAGEMENT_TESTS + logs_management_plugin_SOURCES += $(LOGSMANAGEMENT_TESTS_FILES) +endif + logs_management_plugin_LDADD = \ + $(NETDATA_COMMON_LIBS) \ + $(OPTIONAL_SYSTEMD_LIBS) \ + $(NULL) +endif + if ENABLE_PLUGIN_EBPF plugins_PROGRAMS += ebpf.plugin ebpf_plugin_SOURCES = $(EBPF_PLUGIN_FILES) diff --git a/README.md b/README.md index aa408f3ac4140d..1fb026992c3753 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,9 @@

- Netdata + Netdata - Netdata + Netdata

Monitor your servers, containers, and applications,
in high-resolution and in real-time.

@@ -22,7 +22,6 @@ License: GPL v3+
Discord - Discourse topics GitHub Discussions

@@ -53,12 +52,40 @@ It scales nicely from just a single server to thousands of servers, even in comp - :bell: **Out of box Alerts**
Comes with hundreds of alerts out of the box to detect common issues and pitfalls, revealing issues that can easily go unnoticed. It supports several notification methods to let you know when your attention is needed. +- 📖 **systemd Journal Logs Explorer**
+ Provides a `systemd` journal logs explorer to view, filter and analyze system and application logs by directly accessing `systemd` journal files on individual hosts and infrastructure-wide log centralization servers. + - :sunglasses: **Low Maintenance**
Fully automated in every aspect: automated dashboards, out-of-the-box alerts, auto-detection and auto-discovery of metrics, zero-touch machine-learning, easy scalability and high availability, and CI/CD friendly. - :star: **Open and Extensible**
Netdata is a modular platform that can be extended in all possible ways and it also integrates nicely with other monitoring solutions. +--- + +**LATEST BLOG POST**:
+ +

+ + Netdata + + + Netdata + +

+ +On the same workload, Netdata uses **35% less CPU**, **49% less RAM**, **12% less bandwidth**, **98% less disk I/O**, and is **75% more disk space efficient** on high-resolution metrics storage, while providing more than a year of overall retention on the same disk footprint on which Prometheus offers 7 days of retention. [Read the full analysis in our blog](https://blog.netdata.cloud/netdata-vs-prometheus-performance-analysis/). + +--- + +NEW: **Netdata and LOGS!** 🥳
+ +Check the [systemd-journal plugin of Netdata](https://github.com/netdata/netdata/tree/master/collectors/systemd-journal.plugin), that allows you to view, explore, analyze and query `systemd` journal logs! + +![image](https://github.com/netdata/netdata/assets/2662304/691b7470-ec56-430c-8b81-0c9e49012679) + +--- +  
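As a purely illustrative aside (not part of this changeset), the sketch below shows one way an application could emit a structured record into the `systemd` journal, so that a journal explorer such as the plugin linked above has custom fields to filter on. It assumes the third-party `python-systemd` bindings are available; the identifier and field names are made up for the example.

```python
# Illustrative sketch only: write a structured entry to the systemd journal.
# Assumes the third-party python-systemd bindings are installed (an assumption,
# not something introduced by this diff). Field names here are arbitrary examples.
from systemd import journal

journal.send(
    "payment processed",          # MESSAGE shown by journal viewers
    SYSLOG_IDENTIFIER="billing",  # how the entry is attributed in the journal
    PRIORITY=6,                   # syslog "informational" level
    CUSTOMER_ID="12345",          # custom field, handy for filtering later
    DURATION_MS="42",             # journal field values are passed as strings
)
```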

CNCF @@ -66,7 +93,7 @@ It scales nicely from just a single server to thousands of servers, even in comp
Netdata actively supports and is a member of the Cloud Native Computing Foundation (CNCF)
 
- ...and due to your love :heart:, it is the 3rd most :star:'d project in the CNCF landscape! + ...and due to your love :heart: and :star:, Netdata is leading the Observability category in the CNCF landscape!

 
@@ -98,9 +125,6 @@ It scales nicely from just a single server to thousands of servers, even in comp ## What's New and Coming? -> **Spoiler!**
-> Netdata Integrations Marketplace is coming... -
Click to see our immediate development plans and a summary view of the last 12 months' releases...  
@@ -108,13 +132,13 @@ It scales nicely from just a single server to thousands of servers, even in comp |:-----------------------------:|:---------------------------------------------------------------------------------------------------:|:------------:|:-------------------------------------------------------------------------------------------------------:| | WebRTC | Browser to Agent communication via WebRTC. | later | POC | | Advanced Troubleshooting | Expanded view of dashboard charts integrating Metrics Correlations, Anomaly Advisor, and many more. | later | interrupted | -| Easy Custom
Dashboards | Drag and drop charts to create custom dashboards on the fly, while troubleshooting! | next | planned | -| More Customizability | Set default settings for all charts and views! | next | planned | -| SystemD Journal | View the SystemD Journal of your systems on the dashboard. | soon | in progress | +| Easy Custom
Dashboards | Drag and drop charts to create custom dashboards on the fly, while troubleshooting! | soon | planned | +| More Customizability | Set default settings for all charts and views! | soon | planned | | UCUM Units | Migrate all metrics to the Unified Code for Units of Measure. | soon | in progress | -| **Netdata Cloud
On-Prem** | **Netdata Cloud available for On-Prem installation!** | **soon** | **in progress** | | Click to Activate | Configure Alerts and Data Collectors from the UI! | soon | in progress | -| Integrations | Netdata Integrations Marketplace! | soon | finishing | +| **Netdata Cloud
On-Prem** | **Netdata Cloud available for On-Prem installation!** | **available** | [fill this form](https://www.netdata.cloud/contact-us/?subject=on-prem) | +| `systemd` journal | View the `systemd` journal logs of your systems on the dashboard. | Oct
2023 | [v1.43](https://github.com/netdata/netdata/releases/tag/v1.43.0) | +| Integrations | Netdata Integrations Marketplace! | Aug
2023 | [v1.42](https://github.com/netdata/netdata/releases#v1420-integrations-marketplace) | | New Agent UI | Now Netdata Cloud and Netdata Agent share the same dashboard! | Jul
2023 | [v1.41](https://github.com/netdata/netdata/releases/tag/v1.41.0#v1410-one-dashboard) | | Summary Dashboards | High level tiles everywhere! | Jun
2023 | [v1.40](https://github.com/netdata/netdata/releases/tag/v1.40.0#v1400-visualization-summary-dashboards) | | Machine Learning | Multiple ML models per metric. | Jun
2023 | [v1.40](https://github.com/netdata/netdata/releases/tag/v1.40.0#v1400-ml-extended-training) | @@ -168,6 +192,9 @@ It scales nicely from just a single server to thousands of servers, even in comp Check also the [Netdata Deployment Strategies](https://learn.netdata.cloud/docs/architecture/deployment-strategies) to decide how to deploy it in your infrastructure. + By default, you will have immediately available a local dashboard. Netdata starts a web server for its dashboard at port `19999`. Open up your web browser of choice and +navigate to `http://NODE:19999`, replacing `NODE` with the IP address or hostname of your Agent. If installed on localhost, you can access it through `http://localhost:19999`. + ### 2. **Configure Collectors** :boom: Netdata auto-detects and auto-discovers most operating system data sources and applications. However, many data sources require some manual configuration, usually to allow Netdata to get access to the metrics. @@ -180,7 +207,7 @@ It scales nicely from just a single server to thousands of servers, even in comp Netdata comes with hundreds of pre-configured alerts, that automatically check your metrics, immediately after they start getting collected. - Netdata can dispatch alert notifications to multiple third party systems, including: `email`, `Alerta`, `AWS SNS`, `Discord`, `Dynatrace`, `flock`, `gotify`, `IRC`, `Matrix`, `MessageBird`, `Microsoft Teams`, `ntfy`, `OPSgenie`, `PagerDuty`, `Prowl`, `PushBullet`, `PushOver`, `RocketChat`, `Slack`, `SMS tools`, `StackPulse`, `Syslog`, `Telegram`, `Twilio`. + Netdata can dispatch alert notifications to multiple third party systems, including: `email`, `Alerta`, `AWS SNS`, `Discord`, `Dynatrace`, `flock`, `gotify`, `IRC`, `Matrix`, `MessageBird`, `Microsoft Teams`, `ntfy`, `OPSgenie`, `PagerDuty`, `Prowl`, `PushBullet`, `PushOver`, `RocketChat`, `Slack`, `SMS tools`, `Syslog`, `Telegram`, `Twilio`. By default, Netdata will send e-mail notifications, if there is a configured MTA on the system. @@ -217,11 +244,12 @@ It scales nicely from just a single server to thousands of servers, even in comp When your Netdata nodes are connected to Netdata Cloud, you can (on top of the above): + - Access your Netdata agents from anywhere + - Access sensitive Netdata agent features (like "Netdata Functions": processes, systemd-journal) - Organize your infra in spaces and rooms - Create, manage, and share **custom dashboards** - Invite your team and assign roles to them (Role Based Access Control - RBAC) - - Access Netdata Functions (processes top from the UI and more) - - Get infinite horizontal scalability (multiple independent parents are viewed as one infra) + - Get infinite horizontal scalability (multiple independent Netdata Agents are viewed as one infra) - Configure alerts from the UI (coming soon) - Configure data collection from the UI (coming soon) - Netdata Mobile App notifications (coming soon) @@ -248,7 +276,7 @@ Each Netdata Agent can perform the following functions: 1. **`COLLECT` metrics from their sources**
Uses [internal](https://github.com/netdata/netdata/tree/master/collectors) and [external](https://github.com/netdata/go.d.plugin/tree/master/modules) plugins to collect data from their sources. - Netdata auto-detects and collects almost everything from the operating system: including CPU, Interrupts, Memory, Disks, Mount Points, Filesystems, Network Stack, Network Interfaces, Containers, VMs, Processes, SystemD Units, Linux Performance Metrics, Linux eBPF, Hardware Sensors, IPMI, and more. + Netdata auto-detects and collects almost everything from the operating system: including CPU, Interrupts, Memory, Disks, Mount Points, Filesystems, Network Stack, Network Interfaces, Containers, VMs, Processes, `systemd` units, Linux Performance Metrics, Linux eBPF, Hardware Sensors, IPMI, and more. It collects application metrics from applications: PostgreSQL, MySQL/MariaDB, Redis, MongoDB, Nginx, Apache, and hundreds more. @@ -407,6 +435,8 @@ Yes, you can! Netdata has been designed to spread disk writes across time. Each metric is flushed to disk every 17 minutes, but metrics are flushed evenly across time, at an almost constant rate. Also, metrics are packed into bigger blocks we call `extents` and are compressed with LZ4 before saving them, to minimize the number of I/O operations made. +Netdata also employs direct I/O for all its database operations, ensuring optimized performance. By managing its own caches, Netdata avoids overburdening system caches, facilitating a harmonious coexistence with other applications. + Single node Agents (not Parents), should have a constant rate of about 50 KiB/s or less, with some spikes above that every minute (flushing of tier 1) and higher spikes every hour (flushing of tier 2). Health Alerts and Machine-Learning run queries to evaluate their expressions and learn from the metrics' patterns. These are also spread over time, so there should be an almost constant read rate too. @@ -427,6 +457,8 @@ Using the above, the Netdata Agent on your production system will not use a disk Netdata is a "ready to use" monitoring solution. Prometheus and Grafana are tools to build your own monitoring solution. +Netdata is also a lot faster, requires significantly fewer resources and puts almost no stress on the server it runs on. For a performance comparison, check [this blog](https://blog.netdata.cloud/netdata-vs-prometheus-performance-analysis/). +
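The dashboard-access note added earlier in this README hunk says the agent serves its local dashboard on port `19999`, reachable at `http://localhost:19999`. As a quick, hypothetical smoke test (not part of this changeset), the sketch below simply requests that address and reports whether an agent answered; the optional `/api/v1/info` probe is an assumption about the agent's REST API rather than something this diff introduces.

```python
# Hypothetical smoke test (not part of this diff): is a local Netdata Agent
# answering on the default dashboard port 19999 mentioned in the README text?
import json
import urllib.request

BASE = "http://localhost:19999"

def dashboard_is_up(base: str = BASE, timeout: float = 5.0) -> bool:
    # The dashboard root should return HTTP 200 when the agent is running.
    try:
        with urllib.request.urlopen(f"{base}/", timeout=timeout) as resp:
            return resp.status == 200
    except OSError:
        return False

def print_agent_info(base: str = BASE, timeout: float = 5.0) -> None:
    # Optional probe: /api/v1/info is assumed here, not defined by this changeset.
    with urllib.request.urlopen(f"{base}/api/v1/info", timeout=timeout) as resp:
        info = json.load(resp)
        print("agent version:", info.get("version", "unknown"))

if __name__ == "__main__":
    if dashboard_is_up():
        print("Netdata dashboard is reachable at", BASE)
        try:
            print_agent_info()
        except OSError:
            print("info endpoint not available (assumed endpoint)")
    else:
        print("No Netdata Agent answering at", BASE)
```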
Click to see detailed answer ...  
 
@@ -442,6 +474,8 @@ So, the biggest difference of Netdata to Prometheus, and Grafana, is that we dec Maintaining such an index is a challenge: first, because the raw metrics collected do not provide this information, so we have to add it, and second because we need to maintain this index for the lifetime of each metric, which with our current database retention, it is usually more than a year. + At the same time, Netdata provides better retention than Prometheus due to database tiering, scales more easily than Prometheus due to streaming, supports anomaly detection and it has a metrics scoring engine to find the needle in the haystack when needed. + - When compared to Grafana, Netdata is fully automated. Grafana has more customization capabilities than Netdata, but Netdata presents fully functional dashboards by itself and most importantly it gives you the means to understand, analyze, filter, slice and dice the data without the need for you to edit queries or be aware of any peculiarities the underlying metrics may have. Furthermore, to help you when you need to find the needle in the haystack, Netdata has advanced troubleshooting tools provided by the Netdata metrics scoring engine, that allows it to score metrics based on their anomaly rate, their differences or similarities for any given time frame. @@ -545,12 +579,14 @@ Subscribing to Netdata Cloud is optional but many users find it enhances their e The Netdata Agent dashboard and the Netdata Cloud dashboard are the same. Still, Netdata Cloud provides additional features, that the Netdata Agent is not capable of. These include: - 1. Customizability (custom dashboards and other settings are persisted when you are signed in to Netdata Cloud) - 2. Configuration of Alerts and Data Collection from the UI (coming soon) - 3. Security (role-based access control - RBAC). - 4. Horizontal Scalability ("blend" multiple independent parents in one uniform infrastructure) - 5. Central Dispatch of Alert Notifications (even when multiple independent parents are involved) - 6. Mobile App for Alert Notifications (coming soon) + 1. Access your infrastructure from anywhere. + 2. Have SSO to protect sensitive features. + 3. Customizability (custom dashboards and other settings are persisted when you are signed in to Netdata Cloud) + 4. Configuration of Alerts and Data Collection from the UI (coming soon) + 5. Security (role-based access control - RBAC). + 6. Horizontal Scalability ("blend" multiple independent parents in one uniform infrastructure) + 7. Central Dispatch of Alert Notifications (even when multiple independent parents are involved) + 8. Mobile App for Alert Notifications (coming soon) So, although it is not required, you can get the most out of your Netdata setup by using Netdata Cloud. @@ -595,9 +631,9 @@ Netdata is a widely adopted project...
Click to see detailed answer ...  
 
-Browse the [Netdata stargazers on GitHub](https://github.com/netdata/netdata/stargazers) to discover users from renowned companies and enterprises, such as AMD, Amazon, Baidu, Cisco, Delta, Facebook, IBM, Intel, Netflix, Qualcomm, Riot Games, SAP, Samsung, Unity, Valve, and many others. +Browse the [Netdata stargazers on GitHub](https://github.com/netdata/netdata/stargazers) to discover users from renowned companies and enterprises, such as ABN AMRO Bank, AMD, Amazon, Baidu, Booking.com, Cisco, Delta, Facebook, Google, IBM, Intel, Logitech, Netflix, Nokia, Qualcomm, Realtek Semiconductor Corp, Redhat, Riot Games, SAP, Samsung, Unity, Valve, and many others. -Netdata also enjoys significant usage in academia, with notable institutions including New York University, Columbia University, New Jersey University, among several others. +Netdata also enjoys significant usage in academia, with notable institutions including New York University, Columbia University, New Jersey University, Seoul National University, University College London, among several others. And, Netdata is also used by numerous governmental organizations worldwide. @@ -646,6 +682,39 @@ The Netdata Cloud UI is not open-source. But we thought that it is to the benefi  
 
+### :moneybag: What is your monetization strategy? + +Netdata generates revenue through subscriptions to advanced features of Netdata Cloud and sales of on-premises and private versions of Netdata Cloud. + +
Click to see detailed answer ...
 
+ +Netdata generates revenue from these activities: + +1. **Netdata Cloud Subscriptions**
+ Direct funding for our project's vision comes from users subscribing to Netdata Cloud's advanced features. + +2. **Netdata Cloud On-Prem or Private**
+ Purchasing the on-premises or private versions of Netdata Cloud supports our financial growth. + +Our Open-Source Community and the free access to Netdata Cloud contribute to Netdata in the following ways: + +- **Netdata Cloud Community Use**
+ The free usage of Netdata Cloud demonstrates its market relevance. While this doesn't generate revenue, it reinforces trust among new users and aids in securing appropriate project funding. + +- **User Feedback**
+ Feedback, especially issues and bug reports, is invaluable. It steers us towards a more resilient and efficient product. This, too, isn't a revenue source but is pivotal for our project's evolution. + +- **Anonymous Telemetry Insights**
+ Users who keep anonymous telemetry enabled help us make data-informed decisions in refining and enhancing Netdata. This isn't a revenue stream, but knowing which features are used and how contributes to building a better product for everyone. + +We don't monetize, directly or indirectly, users' or "device heuristics" data. Any data collected from community members are exclusively used for the purposes stated above. + +Netdata grows financially when technology-intensive organizations and operators need, due to regulatory or business requirements, the entire Netdata suite (including Netdata Cloud) on-prem or private, bundled with top-tier support. It is a win-win case for all parties involved: these companies get a battle-tested, robust and reliable solution, while the broader community that helps us build this product enjoys it at no cost. + +
 
+
+ ## :book: Documentation Netdata's documentation is available at [**Netdata Learn**](https://learn.netdata.cloud). @@ -675,7 +744,7 @@ Join the Netdata community: > [Click here for the schedule](https://www.meetup.com/netdata/events/). You can also find Netdata on:
-[Twitter](https://twitter.com/linuxnetdata) | [YouTube](https://www.youtube.com/c/Netdata) | [Reddit](https://www.reddit.com/r/netdata/) | [LinkedIn](https://www.linkedin.com/company/netdata-cloud/) | [StackShare](https://stackshare.io/netdata) | [Product Hunt](https://www.producthunt.com/posts/netdata-monitoring-agent/) | [Repology](https://repology.org/metapackage/netdata/versions) | [Facebook](https://www.facebook.com/linuxnetdata/) +[Twitter](https://twitter.com/netdatahq) | [YouTube](https://www.youtube.com/c/Netdata) | [Reddit](https://www.reddit.com/r/netdata/) | [LinkedIn](https://www.linkedin.com/company/netdata-cloud/) | [StackShare](https://stackshare.io/netdata) | [Product Hunt](https://www.producthunt.com/posts/netdata-monitoring-agent/) | [Repology](https://repology.org/metapackage/netdata/versions) | [Facebook](https://www.facebook.com/linuxnetdata/) ## :pray: Contribute @@ -702,7 +771,6 @@ General information about contributions: - Check our [Security Policy](https://github.com/netdata/netdata/security/policy). - Found a bug? Open a [GitHub issue](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cneeds+triage&template=BUG_REPORT.yml&title=%5BBug%5D%3A+). - Read our [Contributing Guide](https://github.com/netdata/.github/blob/main/CONTRIBUTING.md), which contains all the information you need to contribute to Netdata, such as improving our documentation, engaging in the community, and developing new features. We've made it as frictionless as possible, but if you need help, just ping us on our community forums! -- We have a whole category dedicated to contributing and extending Netdata on our [community forums](https://community.netdata.cloud/c/agent-development/9) Package maintainers should read the guide on [building Netdata from source](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/source.md) for instructions on building each Netdata component from the source and preparing a package. diff --git a/REDISTRIBUTED.md b/REDISTRIBUTED.md index 76ee9fb766af0f..0ccc843f7507ab 100644 --- a/REDISTRIBUTED.md +++ b/REDISTRIBUTED.md @@ -108,7 +108,7 @@ connectivity is not available. Copyright 2016, Hyunje Alex Jun and other contributors [MIT License](https://github.com/noraesae/perfect-scrollbar/blob/master/LICENSE) -- [FontAwesome](https://fortawesome.github.io/Font-Awesome/) +- [FontAwesome](https://github.com/FortAwesome/Font-Awesome) Created by Dave Gandy Font license: [SIL OFL 1.1](http://scripts.sil.org/OFL) @@ -141,7 +141,7 @@ connectivity is not available. Copyright 2014, Pavel Rojtberg [LGPL 2.1 License](http://opensource.org/licenses/LGPL-2.1) -- [PyYAML](https://bitbucket.org/blackjack/pysensors) +- [PyYAML](https://pypi.org/project/PyYAML/) Copyright 2006, Kirill Simonov [MIT License](https://github.com/yaml/pyyaml/blob/master/LICENSE) @@ -190,4 +190,9 @@ connectivity is not available. 
Copyright March 2010 by Université de Montréal, Richard Simard and Pierre L'Ecuyer [GPL 3.0](https://www.gnu.org/licenses/gpl-3.0.en.html) +- [xxHash](https://github.com/Cyan4973/xxHash) + + Copyright (c) 2012-2021 Yann Collet + [BSD](https://github.com/Cyan4973/xxHash/blob/dev/LICENSE) + diff --git a/aclk/aclk.c b/aclk/aclk.c index 312db076ff5a56..e95d7d6ab7c6b8 100644 --- a/aclk/aclk.c +++ b/aclk/aclk.c @@ -154,7 +154,9 @@ static int load_private_key() static int wait_till_cloud_enabled() { - netdata_log_info("Waiting for Cloud to be enabled"); + nd_log(NDLS_DAEMON, NDLP_INFO, + "Waiting for Cloud to be enabled"); + while (!netdata_cloud_enabled) { sleep_usec(USEC_PER_SEC * 1); if (!service_running(SERVICE_ACLK)) @@ -233,17 +235,22 @@ void aclk_mqtt_wss_log_cb(mqtt_wss_log_type_t log_type, const char* str) switch(log_type) { case MQTT_WSS_LOG_ERROR: case MQTT_WSS_LOG_FATAL: + nd_log(NDLS_DAEMON, NDLP_ERR, "%s", str); + return; + case MQTT_WSS_LOG_WARN: - error_report("%s", str); + nd_log(NDLS_DAEMON, NDLP_WARNING, "%s", str); return; + case MQTT_WSS_LOG_INFO: - netdata_log_info("%s", str); + nd_log(NDLS_DAEMON, NDLP_INFO, "%s", str); return; + case MQTT_WSS_LOG_DEBUG: - netdata_log_debug(D_ACLK, "%s", str); return; + default: - netdata_log_error("Unknown log type from mqtt_wss"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Unknown log type from mqtt_wss"); } } @@ -297,7 +304,9 @@ static void puback_callback(uint16_t packet_id) #endif if (aclk_shared_state.mqtt_shutdown_msg_id == (int)packet_id) { - netdata_log_info("Shutdown message has been acknowledged by the cloud. Exiting gracefully"); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "Shutdown message has been acknowledged by the cloud. Exiting gracefully"); + aclk_shared_state.mqtt_shutdown_msg_rcvd = 1; } } @@ -335,9 +344,11 @@ static int handle_connection(mqtt_wss_client client) } if (disconnect_req || aclk_kill_link) { - netdata_log_info("Going to restart connection due to disconnect_req=%s (cloud req), aclk_kill_link=%s (reclaim)", - disconnect_req ? "true" : "false", - aclk_kill_link ? "true" : "false"); + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "Going to restart connection due to disconnect_req=%s (cloud req), aclk_kill_link=%s (reclaim)", + disconnect_req ? "true" : "false", + aclk_kill_link ? 
"true" : "false"); + disconnect_req = 0; aclk_kill_link = 0; aclk_graceful_disconnect(client); @@ -390,7 +401,9 @@ static inline void mqtt_connected_actions(mqtt_wss_client client) void aclk_graceful_disconnect(mqtt_wss_client client) { - netdata_log_info("Preparing to gracefully shutdown ACLK connection"); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "Preparing to gracefully shutdown ACLK connection"); + aclk_queue_lock(); aclk_queue_flush(); @@ -403,17 +416,22 @@ void aclk_graceful_disconnect(mqtt_wss_client client) break; } if (aclk_shared_state.mqtt_shutdown_msg_rcvd) { - netdata_log_info("MQTT App Layer `disconnect` message sent successfully"); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "MQTT App Layer `disconnect` message sent successfully"); break; } } - netdata_log_info("ACLK link is down"); - netdata_log_access("ACLK DISCONNECTED"); + + nd_log(NDLS_DAEMON, NDLP_WARNING, "ACLK link is down"); + nd_log(NDLS_ACCESS, NDLP_WARNING, "ACLK DISCONNECTED"); + aclk_stats_upd_online(0); last_disconnect_time = now_realtime_sec(); aclk_connected = 0; - netdata_log_info("Attempting to gracefully shutdown the MQTT/WSS connection"); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "Attempting to gracefully shutdown the MQTT/WSS connection"); + mqtt_wss_disconnect(client, 1000); } @@ -455,7 +473,9 @@ static int aclk_block_till_recon_allowed() { next_connection_attempt = now_realtime_sec() + (recon_delay / MSEC_PER_SEC); last_backoff_value = (float)recon_delay / MSEC_PER_SEC; - netdata_log_info("Wait before attempting to reconnect in %.3f seconds", recon_delay / (float)MSEC_PER_SEC); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "Wait before attempting to reconnect in %.3f seconds", recon_delay / (float)MSEC_PER_SEC); + // we want to wake up from time to time to check netdata_exit while (recon_delay) { @@ -593,7 +613,9 @@ static int aclk_attempt_to_connect(mqtt_wss_client client) return 1; } - netdata_log_info("Attempting connection now"); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "Attempting connection now"); + memset(&base_url, 0, sizeof(url_t)); if (url_parse(aclk_cloud_base_url, &base_url)) { aclk_status = ACLK_STATUS_INVALID_CLOUD_URL; @@ -680,7 +702,9 @@ static int aclk_attempt_to_connect(mqtt_wss_client client) error_report("Can't use encoding=proto without at least \"proto\" capability."); continue; } - netdata_log_info("New ACLK protobuf protocol negotiated successfully (/env response)."); + + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "New ACLK protobuf protocol negotiated successfully (/env response)."); memset(&auth_url, 0, sizeof(url_t)); if (url_parse(aclk_env->auth_endpoint, &auth_url)) { @@ -750,9 +774,9 @@ static int aclk_attempt_to_connect(mqtt_wss_client client) if (!ret) { last_conn_time_mqtt = now_realtime_sec(); - netdata_log_info("ACLK connection successfully established"); + nd_log(NDLS_DAEMON, NDLP_INFO, "ACLK connection successfully established"); aclk_status = ACLK_STATUS_CONNECTED; - netdata_log_access("ACLK CONNECTED"); + nd_log(NDLS_ACCESS, NDLP_INFO, "ACLK CONNECTED"); mqtt_connected_actions(client); return 0; } @@ -798,7 +822,9 @@ void *aclk_main(void *ptr) netdata_thread_disable_cancelability(); #if defined( DISABLE_CLOUD ) || !defined( ENABLE_ACLK ) - netdata_log_info("Killing ACLK thread -> cloud functionality has been disabled"); + nd_log(NDLS_DAEMON, NDLP_INFO, + "Killing ACLK thread -> cloud functionality has been disabled"); + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; return NULL; #endif @@ -857,7 +883,7 @@ void *aclk_main(void *ptr) aclk_stats_upd_online(0); last_disconnect_time = now_realtime_sec(); 
aclk_connected = 0; - netdata_log_access("ACLK DISCONNECTED"); + nd_log(NDLS_ACCESS, NDLP_WARNING, "ACLK DISCONNECTED"); } } while (service_running(SERVICE_ACLK)); @@ -891,7 +917,7 @@ void *aclk_main(void *ptr) return NULL; } -void aclk_host_state_update(RRDHOST *host, int cmd) +void aclk_host_state_update(RRDHOST *host, int cmd, int queryable) { uuid_t node_id; int ret = 0; @@ -924,7 +950,9 @@ void aclk_host_state_update(RRDHOST *host, int cmd) rrdhost_aclk_state_unlock(localhost); create_query->data.bin_payload.topic = ACLK_TOPICID_CREATE_NODE; create_query->data.bin_payload.msg_name = "CreateNodeInstance"; - netdata_log_info("Registering host=%s, hops=%u", host->machine_guid, host->system_info->hops); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "Registering host=%s, hops=%u", host->machine_guid, host->system_info->hops); + aclk_queue_query(create_query); return; } @@ -934,7 +962,7 @@ void aclk_host_state_update(RRDHOST *host, int cmd) node_instance_connection_t node_state_update = { .hops = host->system_info->hops, .live = cmd, - .queryable = 1, + .queryable = queryable, .session_id = aclk_session_newarch }; node_state_update.node_id = mallocz(UUID_STR_LEN); @@ -947,8 +975,9 @@ void aclk_host_state_update(RRDHOST *host, int cmd) query->data.bin_payload.payload = generate_node_instance_connection(&query->data.bin_payload.size, &node_state_update); rrdhost_aclk_state_unlock(localhost); - netdata_log_info("Queuing status update for node=%s, live=%d, hops=%u",(char*)node_state_update.node_id, cmd, - host->system_info->hops); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "Queuing status update for node=%s, live=%d, hops=%u, queryable=%d", + (char*)node_state_update.node_id, cmd, host->system_info->hops, queryable); freez((void*)node_state_update.node_id); query->data.bin_payload.msg_name = "UpdateNodeInstanceConnection"; query->data.bin_payload.topic = ACLK_TOPICID_NODE_CONN; @@ -990,9 +1019,10 @@ void aclk_send_node_instances() node_state_update.claim_id = localhost->aclk_state.claimed_id; query->data.bin_payload.payload = generate_node_instance_connection(&query->data.bin_payload.size, &node_state_update); rrdhost_aclk_state_unlock(localhost); - netdata_log_info("Queuing status update for node=%s, live=%d, hops=%d",(char*)node_state_update.node_id, - list->live, - list->hops); + + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "Queuing status update for node=%s, live=%d, hops=%d, queryable=1", + (char*)node_state_update.node_id, list->live, list->hops); freez((void*)node_state_update.capabilities); freez((void*)node_state_update.node_id); @@ -1014,8 +1044,11 @@ void aclk_send_node_instances() node_instance_creation.claim_id = localhost->aclk_state.claimed_id, create_query->data.bin_payload.payload = generate_node_instance_creation(&create_query->data.bin_payload.size, &node_instance_creation); rrdhost_aclk_state_unlock(localhost); - netdata_log_info("Queuing registration for host=%s, hops=%d",(char*)node_instance_creation.machine_guid, - list->hops); + + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "Queuing registration for host=%s, hops=%d", + (char*)node_instance_creation.machine_guid, list->hops); + freez((void *)node_instance_creation.machine_guid); aclk_queue_query(create_query); } @@ -1292,7 +1325,7 @@ char *aclk_state_json(void) } void add_aclk_host_labels(void) { - DICTIONARY *labels = localhost->rrdlabels; + RRDLABELS *labels = localhost->rrdlabels; #ifdef ENABLE_ACLK rrdlabels_add(labels, "_aclk_available", "true", RRDLABEL_SRC_AUTO|RRDLABEL_SRC_ACLK); @@ -1322,7 +1355,7 @@ void add_aclk_host_labels(void) { void 
aclk_queue_node_info(RRDHOST *host, bool immediate) { - struct aclk_sync_host_config *wc = (struct aclk_sync_host_config *) host->aclk_sync_host_config; + struct aclk_sync_cfg_t *wc = host->aclk_config; if (likely(wc)) wc->node_info_send_time = (host == localhost || immediate) ? 1 : now_realtime_sec(); } diff --git a/aclk/aclk.h b/aclk/aclk.h index 0badc1a628c6f1..72d1a2e119fd2f 100644 --- a/aclk/aclk.h +++ b/aclk/aclk.h @@ -75,7 +75,7 @@ extern struct aclk_shared_state { int mqtt_shutdown_msg_rcvd; } aclk_shared_state; -void aclk_host_state_update(RRDHOST *host, int cmd); +void aclk_host_state_update(RRDHOST *host, int cmd, int queryable); void aclk_send_node_instances(void); void aclk_send_bin_msg(char *msg, size_t msg_len, enum aclk_topics subtopic, const char *msgname); diff --git a/aclk/aclk_otp.c b/aclk/aclk_otp.c index 99b2adea272a88..207ca08cf0d16a 100644 --- a/aclk/aclk_otp.c +++ b/aclk/aclk_otp.c @@ -502,7 +502,7 @@ int aclk_get_mqtt_otp(RSA *p_key, char **mqtt_id, char **mqtt_usr, char **mqtt_p } // Decrypt Challenge / Get response - unsigned char *response_plaintext; + unsigned char *response_plaintext = NULL; int response_plaintext_bytes = private_decrypt(p_key, challenge, challenge_bytes, &response_plaintext); if (response_plaintext_bytes < 0) { netdata_log_error("Couldn't decrypt the challenge received"); diff --git a/aclk/aclk_query.c b/aclk/aclk_query.c index 07d571be178b0e..da5385fdb83523 100644 --- a/aclk/aclk_query.c +++ b/aclk/aclk_query.c @@ -90,6 +90,12 @@ static bool aclk_web_client_interrupt_cb(struct web_client *w __maybe_unused, vo } static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query) { + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_TXT(NDF_SRC_TRANSPORT, "aclk"), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + int retval = 0; BUFFER *local_buffer = NULL; size_t size = 0; @@ -110,9 +116,9 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query) usec_t t; web_client_timeout_checkpoint_set(w, query->timeout); if(web_client_timeout_checkpoint_and_check(w, &t)) { - netdata_log_access("QUERY CANCELED: QUEUE TIME EXCEEDED %llu ms (LIMIT %d ms)", t / USEC_PER_MS, query->timeout); + nd_log(NDLS_ACCESS, NDLP_ERR, "QUERY CANCELED: QUEUE TIME EXCEEDED %llu ms (LIMIT %d ms)", t / USEC_PER_MS, query->timeout); retval = 1; - w->response.code = HTTP_RESP_BACKEND_FETCH_FAILED; + w->response.code = HTTP_RESP_SERVICE_UNAVAILABLE; aclk_http_msg_v2_err(query_thr->client, query->callback_topic, query->msg_id, w->response.code, CLOUD_EC_SND_TIMEOUT, CLOUD_EMSG_SND_TIMEOUT, NULL, 0); goto cleanup; } @@ -217,25 +223,8 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query) // send msg. w->response.code = aclk_http_msg_v2(query_thr->client, query->callback_topic, query->msg_id, t, query->created, w->response.code, local_buffer->buffer, local_buffer->len); - struct timeval tv; - cleanup: - now_monotonic_high_precision_timeval(&tv); - netdata_log_access("%llu: %d '[ACLK]:%d' '%s' (sent/all = %zu/%zu bytes %0.0f%%, prep/sent/total = %0.2f/%0.2f/%0.2f ms) %d '%s'", - w->id - , gettid() - , query_thr->idx - , "DATA" - , sent - , size - , size > sent ? -(((size - sent) / (double)size) * 100.0) : ((size > 0) ? 
(((sent - size ) / (double)size) * 100.0) : 0.0) - , dt_usec(&w->timings.tv_ready, &w->timings.tv_in) / 1000.0 - , dt_usec(&tv, &w->timings.tv_ready) / 1000.0 - , dt_usec(&tv, &w->timings.tv_in) / 1000.0 - , w->response.code - , strip_control_characters((char *)buffer_tostring(w->url_as_received)) - ); - + web_client_log_completed_request(w, false); web_client_release_to_cache(w); pending_req_list_rm(query->msg_id); diff --git a/aclk/aclk_rx_msgs.c b/aclk/aclk_rx_msgs.c index 84ade2b34601a7..0e91e28c04f47c 100644 --- a/aclk/aclk_rx_msgs.c +++ b/aclk/aclk_rx_msgs.c @@ -108,7 +108,7 @@ static inline int aclk_v2_payload_get_query(const char *payload, char **query_ur } start = payload + 4; - if(!(end = strstr(payload, " HTTP/1.1\x0D\x0A"))) { + if(!(end = strstr(payload, HTTP_1_1 HTTP_ENDL))) { errno = 0; netdata_log_error("Doesn't look like HTTP GET request."); return 1; @@ -449,13 +449,13 @@ int stop_streaming_contexts(const char *msg, size_t msg_len) int cancel_pending_req(const char *msg, size_t msg_len) { - struct aclk_cancel_pending_req cmd; + struct aclk_cancel_pending_req cmd = {.request_id = NULL, .trace_id = NULL}; if(parse_cancel_pending_req(msg, msg_len, &cmd)) { error_report("Error parsing CancelPendingReq"); return 1; } - netdata_log_access("ACLK CancelPendingRequest REQ: %s, cloud trace-id: %s", cmd.request_id, cmd.trace_id); + nd_log(NDLS_ACCESS, NDLP_NOTICE, "ACLK CancelPendingRequest REQ: %s, cloud trace-id: %s", cmd.request_id, cmd.trace_id); if (mark_pending_req_cancelled(cmd.request_id)) error_report("CancelPending Request for %s failed. No such pending request.", cmd.request_id); diff --git a/aclk/aclk_tx_msgs.c b/aclk/aclk_tx_msgs.c index 26e20dfb2c8a2a..4102c818d39ee1 100644 --- a/aclk/aclk_tx_msgs.c +++ b/aclk/aclk_tx_msgs.c @@ -194,15 +194,16 @@ int aclk_http_msg_v2(mqtt_wss_client client, const char *topic, const char *msg_ int rc = aclk_send_message_with_bin_payload(client, msg, topic, payload, payload_len); switch (rc) { - case HTTP_RESP_FORBIDDEN: - aclk_http_msg_v2_err(client, topic, msg_id, rc, CLOUD_EC_REQ_REPLY_TOO_BIG, CLOUD_EMSG_REQ_REPLY_TOO_BIG, NULL, 0); - break; - case HTTP_RESP_INTERNAL_SERVER_ERROR: - aclk_http_msg_v2_err(client, topic, msg_id, rc, CLOUD_EC_FAIL_TOPIC, CLOUD_EMSG_FAIL_TOPIC, payload, payload_len); - break; - case HTTP_RESP_BACKEND_FETCH_FAILED: - aclk_http_msg_v2_err(client, topic, msg_id, rc, CLOUD_EC_SND_TIMEOUT, CLOUD_EMSG_SND_TIMEOUT, payload, payload_len); - break; + case HTTP_RESP_FORBIDDEN: + aclk_http_msg_v2_err(client, topic, msg_id, rc, CLOUD_EC_REQ_REPLY_TOO_BIG, CLOUD_EMSG_REQ_REPLY_TOO_BIG, NULL, 0); + break; + case HTTP_RESP_INTERNAL_SERVER_ERROR: + aclk_http_msg_v2_err(client, topic, msg_id, rc, CLOUD_EC_FAIL_TOPIC, CLOUD_EMSG_FAIL_TOPIC, payload, payload_len); + break; + case HTTP_RESP_GATEWAY_TIMEOUT: + case HTTP_RESP_SERVICE_UNAVAILABLE: + aclk_http_msg_v2_err(client, topic, msg_id, rc, CLOUD_EC_SND_TIMEOUT, CLOUD_EMSG_SND_TIMEOUT, payload, payload_len); + break; } return rc ? 
rc : http_code; } diff --git a/aclk/aclk_util.c b/aclk/aclk_util.c index 00920e0690ee13..3bf2e3f188a2c4 100644 --- a/aclk/aclk_util.c +++ b/aclk/aclk_util.c @@ -1,6 +1,9 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "aclk_util.h" + +#ifdef ENABLE_ACLK + #include "aclk_proxy.h" #include "daemon/common.h" @@ -437,6 +440,7 @@ void aclk_set_proxy(char **ohost, int *port, char **uname, char **pwd, enum mqtt freez(proxy); } +#endif /* ENABLE_ACLK */ #if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110 static EVP_ENCODE_CTX *EVP_ENCODE_CTX_new(void) diff --git a/aclk/aclk_util.h b/aclk/aclk_util.h index 6b7e4e9c2b3bbb..38ef5b0bcbf1ce 100644 --- a/aclk/aclk_util.h +++ b/aclk/aclk_util.h @@ -3,6 +3,8 @@ #define ACLK_UTIL_H #include "libnetdata/libnetdata.h" + +#ifdef ENABLE_ACLK #include "mqtt_wss_client.h" #define CLOUD_EC_MALFORMED_NODE_ID 1 @@ -112,6 +114,7 @@ unsigned long int aclk_tbeb_delay(int reset, int base, unsigned long int min, un #define aclk_tbeb_reset(x) aclk_tbeb_delay(1, 0, 0, 0) void aclk_set_proxy(char **ohost, int *port, char **uname, char **pwd, enum mqtt_wss_proxy_type *type); +#endif /* ENABLE_ACLK */ int base64_encode_helper(unsigned char *out, int *outl, const unsigned char *in, int in_len); diff --git a/aclk/https_client.c b/aclk/https_client.c index 623082027dc72c..5385786b82875d 100644 --- a/aclk/https_client.c +++ b/aclk/https_client.c @@ -4,20 +4,12 @@ #include "https_client.h" -#include "mqtt_websockets/c-rbuf/include/ringbuffer.h" - #include "aclk_util.h" #include "daemon/global_statistics.h" #define DEFAULT_CHUNKED_RESPONSE_BUFFER_SIZE (4096) -enum http_parse_state { - HTTP_PARSE_INITIAL = 0, - HTTP_PARSE_HEADERS, - HTTP_PARSE_CONTENT -}; - static const char *http_req_type_to_str(http_req_type_t req) { switch (req) { case HTTP_REQ_GET: @@ -33,39 +25,33 @@ static const char *http_req_type_to_str(http_req_type_t req) { #define TRANSFER_ENCODING_CHUNKED (-2) -typedef struct { - enum http_parse_state state; - int content_length; - int http_code; - - // for chunked data only - char *chunked_response; - size_t chunked_response_size; - size_t chunked_response_written; - - enum chunked_content_state { - CHUNKED_CONTENT_CHUNK_SIZE = 0, - CHUNKED_CONTENT_CHUNK_DATA, - CHUNKED_CONTENT_CHUNK_END_CRLF, - CHUNKED_CONTENT_FINAL_CRLF - } chunked_content_state; - - size_t chunk_size; - size_t chunk_got; -} http_parse_ctx; - #define HTTP_PARSE_CTX_INITIALIZER { .state = HTTP_PARSE_INITIAL, .content_length = -1, .http_code = 0 } -static inline void http_parse_ctx_clear(http_parse_ctx *ctx) { +void http_parse_ctx_create(http_parse_ctx *ctx) +{ ctx->state = HTTP_PARSE_INITIAL; ctx->content_length = -1; ctx->http_code = 0; + ctx->headers = c_rhash_new(0); + ctx->flags = HTTP_PARSE_FLAGS_DEFAULT; +} + +void http_parse_ctx_destroy(http_parse_ctx *ctx) +{ + c_rhash_iter_t iter; + const char *key; + + c_rhash_iter_t_initialize(&iter); + while ( !c_rhash_iter_str_keys(ctx->headers, &iter, &key) ) { + void *val; + c_rhash_get_ptr_by_str(ctx->headers, key, &val); + freez(val); + } + + c_rhash_destroy(ctx->headers); } #define POLL_TO_MS 100 -#define NEED_MORE_DATA 0 -#define PARSE_SUCCESS 1 -#define PARSE_ERROR -1 #define HTTP_LINE_TERM "\x0D\x0A" #define RESP_PROTO "HTTP/1.1 " #define HTTP_KEYVAL_SEPARATOR ": " @@ -76,7 +62,7 @@ static int process_http_hdr(http_parse_ctx *parse_ctx, const char *key, const ch { // currently we care only about specific headers // we can skip the rest - if (!strcmp("content-length", key)) { + if (parse_ctx->content_length 
< 0 && !strcmp("content-length", key)) { if (parse_ctx->content_length == TRANSFER_ENCODING_CHUNKED) { netdata_log_error("Content-length and transfer-encoding: chunked headers are mutually exclusive"); return 1; @@ -85,7 +71,7 @@ static int process_http_hdr(http_parse_ctx *parse_ctx, const char *key, const ch netdata_log_error("Duplicate content-length header"); return 1; } - parse_ctx->content_length = atoi(val); + parse_ctx->content_length = str2u(val); if (parse_ctx->content_length < 0) { netdata_log_error("Invalid content-length %d", parse_ctx->content_length); return 1; @@ -102,9 +88,20 @@ static int process_http_hdr(http_parse_ctx *parse_ctx, const char *key, const ch } return 0; } + char *val_cpy = strdupz(val); + c_rhash_insert_str_ptr(parse_ctx->headers, key, val_cpy); return 0; } +const char *get_http_header_by_name(http_parse_ctx *ctx, const char *name) +{ + const char *ret; + if (c_rhash_get_ptr_by_str(ctx->headers, name, (void**)&ret)) + return NULL; + + return ret; +} + static int parse_http_hdr(rbuf_t buf, http_parse_ctx *parse_ctx) { int idx, idx_end; @@ -169,8 +166,8 @@ static int process_chunked_content(rbuf_t buf, http_parse_ctx *parse_ctx) case CHUNKED_CONTENT_CHUNK_SIZE: if (!rbuf_find_bytes(buf, HTTP_LINE_TERM, strlen(HTTP_LINE_TERM), &idx)) { if (rbuf_bytes_available(buf) >= rbuf_get_capacity(buf)) - return PARSE_ERROR; - return NEED_MORE_DATA; + return HTTP_PARSE_ERROR; + return HTTP_PARSE_NEED_MORE_DATA; } if (idx == 0) { parse_ctx->chunked_content_state = CHUNKED_CONTENT_FINAL_CRLF; @@ -178,7 +175,7 @@ static int process_chunked_content(rbuf_t buf, http_parse_ctx *parse_ctx) } if (idx >= HTTP_HDR_BUFFER_SIZE) { netdata_log_error("Chunk size is too long"); - return PARSE_ERROR; + return HTTP_PARSE_ERROR; } char buf_size[HTTP_HDR_BUFFER_SIZE]; rbuf_pop(buf, buf_size, idx); @@ -186,13 +183,13 @@ static int process_chunked_content(rbuf_t buf, http_parse_ctx *parse_ctx) long chunk_size = strtol(buf_size, NULL, 16); if (chunk_size < 0 || chunk_size == LONG_MAX) { netdata_log_error("Chunk size out of range"); - return PARSE_ERROR; + return HTTP_PARSE_ERROR; } parse_ctx->chunk_size = chunk_size; if (parse_ctx->chunk_size == 0) { if (errno == EINVAL) { netdata_log_error("Invalid chunk size"); - return PARSE_ERROR; + return HTTP_PARSE_ERROR; } parse_ctx->chunked_content_state = CHUNKED_CONTENT_CHUNK_END_CRLF; continue; @@ -204,7 +201,7 @@ static int process_chunked_content(rbuf_t buf, http_parse_ctx *parse_ctx) // fallthrough case CHUNKED_CONTENT_CHUNK_DATA: if (!(bytes_to_copy = rbuf_bytes_available(buf))) - return NEED_MORE_DATA; + return HTTP_PARSE_NEED_MORE_DATA; if (bytes_to_copy > parse_ctx->chunk_size - parse_ctx->chunk_got) bytes_to_copy = parse_ctx->chunk_size - parse_ctx->chunk_got; rbuf_pop(buf, parse_ctx->chunked_response + parse_ctx->chunked_response_written, bytes_to_copy); @@ -217,19 +214,19 @@ static int process_chunked_content(rbuf_t buf, http_parse_ctx *parse_ctx) case CHUNKED_CONTENT_FINAL_CRLF: case CHUNKED_CONTENT_CHUNK_END_CRLF: if (rbuf_bytes_available(buf) < strlen(HTTP_LINE_TERM)) - return NEED_MORE_DATA; + return HTTP_PARSE_NEED_MORE_DATA; char buf_crlf[strlen(HTTP_LINE_TERM)]; rbuf_pop(buf, buf_crlf, strlen(HTTP_LINE_TERM)); if (memcmp(buf_crlf, HTTP_LINE_TERM, strlen(HTTP_LINE_TERM))) { netdata_log_error("CRLF expected"); - return PARSE_ERROR; + return HTTP_PARSE_ERROR; } if (parse_ctx->chunked_content_state == CHUNKED_CONTENT_FINAL_CRLF) { if (parse_ctx->chunked_response_size != parse_ctx->chunked_response_written) netdata_log_error("Chunked 
response size mismatch"); chunked_response_buffer_grow_by(parse_ctx, 1); parse_ctx->chunked_response[parse_ctx->chunked_response_written] = 0; - return PARSE_SUCCESS; + return HTTP_PARSE_SUCCESS; } if (parse_ctx->chunk_size == 0) { parse_ctx->chunked_content_state = CHUNKED_CONTENT_FINAL_CRLF; @@ -241,34 +238,34 @@ static int process_chunked_content(rbuf_t buf, http_parse_ctx *parse_ctx) } while(1); } -static int parse_http_response(rbuf_t buf, http_parse_ctx *parse_ctx) +http_parse_rc parse_http_response(rbuf_t buf, http_parse_ctx *parse_ctx) { int idx; char rc[4]; do { if (parse_ctx->state != HTTP_PARSE_CONTENT && !rbuf_find_bytes(buf, HTTP_LINE_TERM, strlen(HTTP_LINE_TERM), &idx)) - return NEED_MORE_DATA; + return HTTP_PARSE_NEED_MORE_DATA; switch (parse_ctx->state) { case HTTP_PARSE_INITIAL: if (rbuf_memcmp_n(buf, RESP_PROTO, strlen(RESP_PROTO))) { netdata_log_error("Expected response to start with \"%s\"", RESP_PROTO); - return PARSE_ERROR; + return HTTP_PARSE_ERROR; } rbuf_bump_tail(buf, strlen(RESP_PROTO)); if (rbuf_pop(buf, rc, 4) != 4) { netdata_log_error("Expected HTTP status code"); - return PARSE_ERROR; + return HTTP_PARSE_ERROR; } if (rc[3] != ' ') { netdata_log_error("Expected space after HTTP return code"); - return PARSE_ERROR; + return HTTP_PARSE_ERROR; } rc[3] = 0; parse_ctx->http_code = atoi(rc); if (parse_ctx->http_code < 100 || parse_ctx->http_code >= 600) { netdata_log_error("HTTP code not in range 100 to 599"); - return PARSE_ERROR; + return HTTP_PARSE_ERROR; } rbuf_find_bytes(buf, HTTP_LINE_TERM, strlen(HTTP_LINE_TERM), &idx); @@ -284,7 +281,7 @@ static int parse_http_response(rbuf_t buf, http_parse_ctx *parse_ctx) break; } if (parse_http_hdr(buf, parse_ctx)) - return PARSE_ERROR; + return HTTP_PARSE_ERROR; rbuf_find_bytes(buf, HTTP_LINE_TERM, strlen(HTTP_LINE_TERM), &idx); rbuf_bump_tail(buf, idx + strlen(HTTP_LINE_TERM)); break; @@ -294,11 +291,14 @@ static int parse_http_response(rbuf_t buf, http_parse_ctx *parse_ctx) return process_chunked_content(buf, parse_ctx); if (parse_ctx->content_length < 0) - return PARSE_SUCCESS; + return HTTP_PARSE_SUCCESS; + + if (parse_ctx->flags & HTTP_PARSE_FLAG_DONT_WAIT_FOR_CONTENT) + return HTTP_PARSE_SUCCESS; if (rbuf_bytes_available(buf) >= (size_t)parse_ctx->content_length) - return PARSE_SUCCESS; - return NEED_MORE_DATA; + return HTTP_PARSE_SUCCESS; + return HTTP_PARSE_NEED_MORE_DATA; } } while(1); } @@ -486,7 +486,7 @@ static int read_parse_response(https_req_ctx_t *ctx) { } while (ctx->poll_fd.events == 0 && rbuf_bytes_free(ctx->buf_rx) > 0); } while (!(ret = parse_http_response(ctx->buf_rx, &ctx->parse_ctx))); - if (ret != PARSE_SUCCESS) { + if (ret != HTTP_PARSE_SUCCESS) { netdata_log_error("Error parsing HTTP response"); return 1; } @@ -500,7 +500,7 @@ static int handle_http_request(https_req_ctx_t *ctx) { BUFFER *hdr = buffer_create(TX_BUFFER_SIZE, &netdata_buffers_statistics.buffers_aclk); int rc = 0; - http_parse_ctx_clear(&ctx->parse_ctx); + http_parse_ctx_create(&ctx->parse_ctx); // Prepare data to send switch (ctx->request->request_type) { @@ -526,7 +526,7 @@ static int handle_http_request(https_req_ctx_t *ctx) { buffer_strcat(hdr, ctx->request->url); } - buffer_strcat(hdr, " HTTP/1.1\x0D\x0A"); + buffer_strcat(hdr, HTTP_1_1 HTTP_ENDL); //TODO Headers! 
if (ctx->request->request_type != HTTP_REQ_CONNECT) { @@ -661,12 +661,15 @@ int https_request(https_req_t *request, https_req_response_t *response) { ctx->request = &req; if (handle_http_request(ctx)) { netdata_log_error("Failed to CONNECT with proxy"); + http_parse_ctx_destroy(&ctx->parse_ctx); goto exit_sock; } if (ctx->parse_ctx.http_code != 200) { netdata_log_error("Proxy didn't return 200 OK (got %d)", ctx->parse_ctx.http_code); + http_parse_ctx_destroy(&ctx->parse_ctx); goto exit_sock; } + http_parse_ctx_destroy(&ctx->parse_ctx); netdata_log_info("Proxy accepted CONNECT upgrade"); } ctx->request = request; @@ -713,8 +716,10 @@ int https_request(https_req_t *request, https_req_response_t *response) { // The actual request here if (handle_http_request(ctx)) { netdata_log_error("Couldn't process request"); + http_parse_ctx_destroy(&ctx->parse_ctx); goto exit_SSL; } + http_parse_ctx_destroy(&ctx->parse_ctx); response->http_code = ctx->parse_ctx.http_code; if (ctx->parse_ctx.content_length == TRANSFER_ENCODING_CHUNKED) { response->payload_size = ctx->parse_ctx.chunked_response_size; diff --git a/aclk/https_client.h b/aclk/https_client.h index daf4766f87a15a..0b97fbb0253188 100644 --- a/aclk/https_client.h +++ b/aclk/https_client.h @@ -5,6 +5,9 @@ #include "libnetdata/libnetdata.h" +#include "mqtt_websockets/c-rbuf/include/ringbuffer.h" +#include "mqtt_websockets/c_rhash/include/c_rhash.h" + typedef enum http_req_type { HTTP_REQ_GET = 0, HTTP_REQ_POST, @@ -77,4 +80,56 @@ void https_req_response_init(https_req_response_t *res); int https_request(https_req_t *request, https_req_response_t *response); +// we expose the previously internal parser as it is useful also from +// other parts of the code +enum http_parse_state { + HTTP_PARSE_INITIAL = 0, + HTTP_PARSE_HEADERS, + HTTP_PARSE_CONTENT +}; + +typedef uint32_t parse_ctx_flags_t; + +#define HTTP_PARSE_FLAG_DONT_WAIT_FOR_CONTENT ((parse_ctx_flags_t)0x01) + +#define HTTP_PARSE_FLAGS_DEFAULT ((parse_ctx_flags_t)0) + +typedef struct { + parse_ctx_flags_t flags; + + enum http_parse_state state; + int content_length; + int http_code; + + c_rhash headers; + + // for chunked data only + char *chunked_response; + size_t chunked_response_size; + size_t chunked_response_written; + + enum chunked_content_state { + CHUNKED_CONTENT_CHUNK_SIZE = 0, + CHUNKED_CONTENT_CHUNK_DATA, + CHUNKED_CONTENT_CHUNK_END_CRLF, + CHUNKED_CONTENT_FINAL_CRLF + } chunked_content_state; + + size_t chunk_size; + size_t chunk_got; +} http_parse_ctx; + +void http_parse_ctx_create(http_parse_ctx *ctx); +void http_parse_ctx_destroy(http_parse_ctx *ctx); + +typedef enum { + HTTP_PARSE_ERROR = -1, + HTTP_PARSE_NEED_MORE_DATA = 0, + HTTP_PARSE_SUCCESS = 1 +} http_parse_rc; + +http_parse_rc parse_http_response(rbuf_t buf, http_parse_ctx *parse_ctx); + +const char *get_http_header_by_name(http_parse_ctx *ctx, const char *name); + #endif /* NETDATA_HTTPS_CLIENT_H */ diff --git a/aclk/schema-wrappers/alarm_config.cc b/aclk/schema-wrappers/alarm_config.cc index fe0b0517cf6ad2..64d28f3242323d 100644 --- a/aclk/schema-wrappers/alarm_config.cc +++ b/aclk/schema-wrappers/alarm_config.cc @@ -15,28 +15,22 @@ void destroy_aclk_alarm_configuration(struct aclk_alarm_configuration *cfg) freez(cfg->alarm); freez(cfg->tmpl); freez(cfg->on_chart); - freez(cfg->classification); freez(cfg->type); freez(cfg->component); - freez(cfg->os); freez(cfg->hosts); freez(cfg->plugin); freez(cfg->module); freez(cfg->charts); - freez(cfg->families); freez(cfg->lookup); freez(cfg->every); freez(cfg->units); - 
freez(cfg->green); freez(cfg->red); - freez(cfg->calculation_expr); freez(cfg->warning_expr); freez(cfg->critical_expr); - freez(cfg->recipient); freez(cfg->exec); freez(cfg->delay); @@ -44,12 +38,11 @@ void destroy_aclk_alarm_configuration(struct aclk_alarm_configuration *cfg) freez(cfg->info); freez(cfg->options); freez(cfg->host_labels); - freez(cfg->p_db_lookup_dimensions); freez(cfg->p_db_lookup_method); freez(cfg->p_db_lookup_options); - freez(cfg->chart_labels); + freez(cfg->summary); } char *generate_provide_alarm_configuration(size_t *len, struct provide_alarm_configuration *data) @@ -65,14 +58,12 @@ char *generate_provide_alarm_configuration(size_t *len, struct provide_alarm_con cfg->set_template_(data->cfg.tmpl); if(data->cfg.on_chart) cfg->set_on_chart(data->cfg.on_chart); - if (data->cfg.classification) cfg->set_classification(data->cfg.classification); if (data->cfg.type) cfg->set_type(data->cfg.type); if (data->cfg.component) cfg->set_component(data->cfg.component); - if (data->cfg.os) cfg->set_os(data->cfg.os); if (data->cfg.hosts) @@ -83,27 +74,22 @@ char *generate_provide_alarm_configuration(size_t *len, struct provide_alarm_con cfg->set_module(data->cfg.module); if(data->cfg.charts) cfg->set_charts(data->cfg.charts); - if(data->cfg.families) - cfg->set_families(data->cfg.families); if(data->cfg.lookup) cfg->set_lookup(data->cfg.lookup); if(data->cfg.every) cfg->set_every(data->cfg.every); if(data->cfg.units) cfg->set_units(data->cfg.units); - if (data->cfg.green) cfg->set_green(data->cfg.green); if (data->cfg.red) cfg->set_red(data->cfg.red); - if (data->cfg.calculation_expr) cfg->set_calculation_expr(data->cfg.calculation_expr); if (data->cfg.warning_expr) cfg->set_warning_expr(data->cfg.warning_expr); if (data->cfg.critical_expr) cfg->set_critical_expr(data->cfg.critical_expr); - if (data->cfg.recipient) cfg->set_recipient(data->cfg.recipient); if (data->cfg.exec) @@ -131,6 +117,8 @@ char *generate_provide_alarm_configuration(size_t *len, struct provide_alarm_con if (data->cfg.chart_labels) cfg->set_chart_labels(data->cfg.chart_labels); + if (data->cfg.summary) + cfg->set_summary(data->cfg.summary); *len = PROTO_COMPAT_MSG_SIZE(msg); char *bin = (char*)mallocz(*len); diff --git a/aclk/schema-wrappers/alarm_config.h b/aclk/schema-wrappers/alarm_config.h index 4eaa4fd708d989..3c9a5d9a89cb74 100644 --- a/aclk/schema-wrappers/alarm_config.h +++ b/aclk/schema-wrappers/alarm_config.h @@ -24,7 +24,6 @@ struct aclk_alarm_configuration { char *plugin; char *module; char *charts; - char *families; char *lookup; char *every; char *units; @@ -52,6 +51,7 @@ struct aclk_alarm_configuration { int32_t p_update_every; char *chart_labels; + char *summary; }; void destroy_aclk_alarm_configuration(struct aclk_alarm_configuration *cfg); diff --git a/aclk/schema-wrappers/alarm_stream.cc b/aclk/schema-wrappers/alarm_stream.cc index 1538bc9e09420c..29d80e39eb24c2 100644 --- a/aclk/schema-wrappers/alarm_stream.cc +++ b/aclk/schema-wrappers/alarm_stream.cc @@ -66,60 +66,41 @@ static alarms::v1::AlarmStatus aclk_alarm_status_to_proto(enum aclk_alarm_status void destroy_alarm_log_entry(struct alarm_log_entry *entry) { - //freez(entry->node_id); - //freez(entry->claim_id); - freez(entry->chart); freez(entry->name); - freez(entry->family); - freez(entry->config_hash); - freez(entry->timezone); - freez(entry->exec_path); freez(entry->conf_source); freez(entry->command); - freez(entry->value_string); freez(entry->old_value_string); - freez(entry->rendered_info); freez(entry->chart_context); 
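Editor's note (not part of the patch): the alarm_config hunks above remove the `families` member and add `summary` to struct aclk_alarm_configuration. The new member follows the same ownership rule as the other string fields: the producer heap-allocates it and destroy_aclk_alarm_configuration() releases it. A hypothetical caller-side sketch, with include paths assumed:

// Sketch only: fill the new summary field and let the existing destroy helper free it.
#include "libnetdata/libnetdata.h"                 // strdupz()
#include "aclk/schema-wrappers/alarm_config.h"     // assumed include path

static void alarm_config_summary_example(void) {
    struct aclk_alarm_configuration cfg = { 0 };
    cfg.alarm   = strdupz("ram_usage");
    cfg.summary = strdupz("System memory utilization");   // field added by this patch
    destroy_aclk_alarm_configuration(&cfg);                // frees alarm, summary, and the rest
}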
freez(entry->transition_id); freez(entry->chart_name); + freez(entry->summary); } static void fill_alarm_log_entry(struct alarm_log_entry *data, AlarmLogEntry *proto) { proto->set_node_id(data->node_id); proto->set_claim_id(data->claim_id); - proto->set_chart(data->chart); proto->set_name(data->name); - if (data->family) - proto->set_family(data->family); - proto->set_when(data->when); - proto->set_config_hash(data->config_hash); - proto->set_utc_offset(data->utc_offset); proto->set_timezone(data->timezone); - proto->set_exec_path(data->exec_path); proto->set_conf_source(data->conf_source); proto->set_command(data->command); - proto->set_duration(data->duration); proto->set_non_clear_duration(data->non_clear_duration); - - proto->set_status(aclk_alarm_status_to_proto(data->status)); proto->set_old_status(aclk_alarm_status_to_proto(data->old_status)); proto->set_delay(data->delay); proto->set_delay_up_to_timestamp(data->delay_up_to_timestamp); - proto->set_last_repeat(data->last_repeat); proto->set_silenced(data->silenced); @@ -136,6 +117,7 @@ static void fill_alarm_log_entry(struct alarm_log_entry *data, AlarmLogEntry *pr proto->set_event_id(data->event_id); proto->set_transition_id(data->transition_id); proto->set_chart_name(data->chart_name); + proto->set_summary(data->summary); } char *generate_alarm_log_entry(size_t *len, struct alarm_log_entry *data) diff --git a/aclk/schema-wrappers/alarm_stream.h b/aclk/schema-wrappers/alarm_stream.h index 87893e0db6767f..3c81ff4452a433 100644 --- a/aclk/schema-wrappers/alarm_stream.h +++ b/aclk/schema-wrappers/alarm_stream.h @@ -76,7 +76,8 @@ struct alarm_log_entry { char *chart_name; uint64_t event_id; - char *transition_id; + char *transition_id; + char *summary; }; struct send_alarm_checkpoint { diff --git a/aclk/schema-wrappers/node_info.h b/aclk/schema-wrappers/node_info.h index de4ade78aaaa4d..4f57601dff5190 100644 --- a/aclk/schema-wrappers/node_info.h +++ b/aclk/schema-wrappers/node_info.h @@ -39,7 +39,7 @@ struct aclk_node_info { const char *custom_info; const char *machine_guid; - DICTIONARY *host_labels_ptr; + RRDLABELS *host_labels_ptr; struct machine_learning_info ml_info; }; diff --git a/aclk/schema-wrappers/schema_wrapper_utils.cc b/aclk/schema-wrappers/schema_wrapper_utils.cc index 6573e629970f09..96a4b9bf13d002 100644 --- a/aclk/schema-wrappers/schema_wrapper_utils.cc +++ b/aclk/schema-wrappers/schema_wrapper_utils.cc @@ -14,8 +14,8 @@ void set_timeval_from_google_timestamp(const google::protobuf::Timestamp &ts, st tv->tv_usec = ts.nanos()/1000; } -int label_add_to_map_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) { - (void)ls; +int label_add_to_map_callback(const char *name, const char *value, RRDLABEL_SRC ls __maybe_unused, void *data) +{ auto map = (google::protobuf::Map *)data; map->insert({name, value}); return 1; diff --git a/build_external/clean-install-arch-debug.Dockerfile b/build_external/clean-install-arch-debug.Dockerfile index 5a67bfbc39c973..a3080ea69375d1 100644 --- a/build_external/clean-install-arch-debug.Dockerfile +++ b/build_external/clean-install-arch-debug.Dockerfile @@ -48,9 +48,10 @@ RUN find . 
-type f >/opt/netdata/manifest RUN CFLAGS="-Og -g -ggdb -Wall -Wextra -Wformat-signedness -DNETDATA_INTERNAL_CHECKS=1\ -DNETDATA_VERIFY_LOCKS=1 ${EXTRA_CFLAGS}" ./netdata-installer.sh --require-cloud --disable-lto -RUN ln -sf /dev/stdout /var/log/netdata/access.log -RUN ln -sf /dev/stdout /var/log/netdata/debug.log -RUN ln -sf /dev/stderr /var/log/netdata/error.log +RUN ln -sf /dev/stdout /var/log/netdata/access.log && \ + ln -sf /dev/stdout /var/log/netdata/debug.log && \ + ln -sf /dev/stderr /var/log/netdata/error.log && \ + ln -sf /dev/stdout /var/log/netdata/fluentbit.log RUN printf >/opt/netdata/source/gdb_batch '\ set args -D \n\ diff --git a/build_external/clean-install-arch-extras.Dockerfile b/build_external/clean-install-arch-extras.Dockerfile index 8c6f4fbaa21af6..8b18057e9934ce 100644 --- a/build_external/clean-install-arch-extras.Dockerfile +++ b/build_external/clean-install-arch-extras.Dockerfile @@ -48,9 +48,10 @@ RUN find . -type f >/opt/netdata/manifest RUN CFLAGS="-Og -g -ggdb -Wall -Wextra -Wformat-signedness -DNETDATA_INTERNAL_CHECKS=1\ -DNETDATA_VERIFY_LOCKS=1 ${EXTRA_CFLAGS}" ./netdata-installer.sh --require-cloud --disable-lto -RUN ln -sf /dev/stdout /var/log/netdata/access.log -RUN ln -sf /dev/stdout /var/log/netdata/debug.log -RUN ln -sf /dev/stderr /var/log/netdata/error.log +RUN ln -sf /dev/stdout /var/log/netdata/access.log && \ + ln -sf /dev/stdout /var/log/netdata/debug.log && \ + ln -sf /dev/stderr /var/log/netdata/error.log && \ + ln -sf /dev/stdout /var/log/netdata/fluentbit.log RUN rm /var/lib/netdata/registry/netdata.public.unique.id diff --git a/build_external/clean-install-arch.Dockerfile b/build_external/clean-install-arch.Dockerfile index d4d0d47061b8d1..b3c61fa15a5219 100644 --- a/build_external/clean-install-arch.Dockerfile +++ b/build_external/clean-install-arch.Dockerfile @@ -47,8 +47,9 @@ RUN find . -type f >/opt/netdata/manifest RUN CFLAGS="-O1 -ggdb -Wall -Wextra -Wformat-signedness -DNETDATA_INTERNAL_CHECKS=1\ -DNETDATA_VERIFY_LOCKS=1 ${EXTRA_CFLAGS}" ./netdata-installer.sh --disable-lto -RUN ln -sf /dev/stdout /var/log/netdata/access.log -RUN ln -sf /dev/stdout /var/log/netdata/debug.log -RUN ln -sf /dev/stderr /var/log/netdata/error.log +RUN ln -sf /dev/stdout /var/log/netdata/access.log && \ + ln -sf /dev/stdout /var/log/netdata/debug.log && \ + ln -sf /dev/stderr /var/log/netdata/error.log && \ + ln -sf /dev/stdout /var/log/netdata/fluentbit.log CMD ["/usr/sbin/netdata", "-D"] diff --git a/build_external/clean-install.Dockerfile b/build_external/clean-install.Dockerfile index bf63a5599e8444..0ee154e3007965 100644 --- a/build_external/clean-install.Dockerfile +++ b/build_external/clean-install.Dockerfile @@ -29,9 +29,10 @@ RUN find . 
-type f >/opt/netdata/manifest RUN CFLAGS="-O1 -ggdb -Wall -Wextra -Wformat-signedness -DNETDATA_INTERNAL_CHECKS=1\ -DNETDATA_VERIFY_LOCKS=1 ${EXTRA_CFLAGS}" ./netdata-installer.sh --disable-lto -RUN ln -sf /dev/stdout /var/log/netdata/access.log -RUN ln -sf /dev/stdout /var/log/netdata/debug.log -RUN ln -sf /dev/stderr /var/log/netdata/error.log +RUN ln -sf /dev/stdout /var/log/netdata/access.log && \ + ln -sf /dev/stdout /var/log/netdata/debug.log && \ + ln -sf /dev/stderr /var/log/netdata/error.log && \ + ln -sf /dev/stdout /var/log/netdata/fluentbit.log RUN rm /var/lib/netdata/registry/netdata.public.unique.id diff --git a/build_external/scenarios/children-to-localhost/child_netdata.conf b/build_external/scenarios/children-to-localhost/child_netdata.conf new file mode 100644 index 00000000000000..1f8b0a1d6717e3 --- /dev/null +++ b/build_external/scenarios/children-to-localhost/child_netdata.conf @@ -0,0 +1,2 @@ +[db] + mode = dbengine diff --git a/build_external/scenarios/children-to-localhost/child_stream.conf b/build_external/scenarios/children-to-localhost/child_stream.conf index 72a353fe05ea87..a8ed306bb6b627 100644 --- a/build_external/scenarios/children-to-localhost/child_stream.conf +++ b/build_external/scenarios/children-to-localhost/child_stream.conf @@ -1,7 +1,7 @@ [stream] enabled = yes destination = tcp:host.docker.internal - api key = 00000000-0000-0000-0000-000000000000 + api key = 11111111-2222-3333-4444-555555555555 timeout seconds = 60 default port = 19999 send charts matching = * diff --git a/build_external/scenarios/children-to-localhost/docker-compose.yml b/build_external/scenarios/children-to-localhost/docker-compose.yml index 59739f9eba9ff9..dea56fe5268952 100644 --- a/build_external/scenarios/children-to-localhost/docker-compose.yml +++ b/build_external/scenarios/children-to-localhost/docker-compose.yml @@ -5,5 +5,6 @@ services: command: /usr/sbin/netdata -D volumes: - ./child_stream.conf:/etc/netdata/stream.conf:ro + - ./child_netdata.conf:/etc/netdata/netdata.conf:ro extra_hosts: - "host.docker.internal:host-gateway" diff --git a/build_external/scenarios/children-to-localhost/parent_stream.conf b/build_external/scenarios/children-to-localhost/parent_stream.conf index bf85ae258ad832..36a416b912cf21 100644 --- a/build_external/scenarios/children-to-localhost/parent_stream.conf +++ b/build_external/scenarios/children-to-localhost/parent_stream.conf @@ -1,4 +1,4 @@ -[00000000-0000-0000-0000-000000000000] +[11111111-2222-3333-4444-555555555555] enabled = yes allow from = * default history = 3600 diff --git a/claim/claim.c b/claim/claim.c index d81440d2a1b350..774b65ebb17235 100644 --- a/claim/claim.c +++ b/claim/claim.c @@ -47,7 +47,7 @@ char *get_agent_claimid() extern struct registry registry; /* rrd_init() and post_conf_load() must have been called before this function */ -CLAIM_AGENT_RESPONSE claim_agent(const char *claiming_arguments, bool force, const char **msg) +CLAIM_AGENT_RESPONSE claim_agent(const char *claiming_arguments, bool force, const char **msg __maybe_unused) { if (!force || !netdata_cloud_enabled) { netdata_log_error("Refusing to claim agent -> cloud functionality has been disabled"); @@ -323,11 +323,11 @@ static bool check_claim_param(const char *s) { } void claim_reload_all(void) { - error_log_limit_unlimited(); + nd_log_limits_unlimited(); load_claiming_state(); registry_update_cloud_base_url(); rrdpush_send_claimed_id(localhost); - error_log_limit_reset(); + nd_log_limits_reset(); } int api_v2_claim(struct web_client *w, char *url) { diff 
--git a/claim/netdata-claim.sh.in b/claim/netdata-claim.sh.in index cc6c1093220d45..a3db591cdee2ba 100755 --- a/claim/netdata-claim.sh.in +++ b/claim/netdata-claim.sh.in @@ -175,7 +175,7 @@ elif [ -f "${MACHINE_GUID_FILE}" ]; then echo >&2 "netdata.public.unique.id is not readable. Please make sure you have rights to read it (Filename: ${MACHINE_GUID_FILE})." exit 18 else - if mkdir -p "${MACHINE_GUID_FILE%/*}" && /bin/echo -n "$(gen_id)" > "${MACHINE_GUID_FILE}"; then + if mkdir -p "${MACHINE_GUID_FILE%/*}" && echo -n "$(gen_id)" > "${MACHINE_GUID_FILE}"; then ID="$(cat "${MACHINE_GUID_FILE}")" MGUID=$ID else diff --git a/cli/cli.c b/cli/cli.c index 288173b1e571d7..2efa518e63bf02 100644 --- a/cli/cli.c +++ b/cli/cli.c @@ -3,25 +3,18 @@ #include "cli.h" #include "daemon/pipename.h" -void error_int(int is_collector __maybe_unused, const char *prefix __maybe_unused, const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... ) { - FILE *fp = stderr; - +void netdata_logger(ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file, const char *function, unsigned long line, const char *fmt, ... ) { va_list args; - va_start( args, fmt ); - vfprintf(fp, fmt, args ); - va_end( args ); + va_start(args, fmt); + vfprintf(stderr, fmt, args ); + va_end(args); } #ifdef NETDATA_INTERNAL_CHECKS uint64_t debug_flags; -void debug_int( const char *file __maybe_unused , const char *function __maybe_unused , const unsigned long line __maybe_unused, const char *fmt __maybe_unused, ... ) -{ - -} - -void fatal_int( const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt __maybe_unused, ... ) +void netdata_logger_fatal( const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt __maybe_unused, ... 
) { abort(); }; diff --git a/collectors/COLLECTORS.md b/collectors/COLLECTORS.md index aa56ac7024b392..9a349959346b62 100644 --- a/collectors/COLLECTORS.md +++ b/collectors/COLLECTORS.md @@ -41,641 +41,1154 @@ If you don't see the app/service you'd like to monitor in this list: in [Go](https://github.com/netdata/go.d.plugin/blob/master/README.md#how-to-develop-a-collector) or [Python](https://github.com/netdata/netdata/blob/master/docs/guides/python-collector.md) -## Available Collectors - -- [Monitor anything with Netdata](#monitor-anything-with-netdata) - - [Add your application to Netdata](#add-your-application-to-netdata) - - [Available Collectors](#available-collectors) - - [Service and application collectors](#service-and-application-collectors) - - [Generic](#generic) - - [APM (application performance monitoring)](#apm-application-performance-monitoring) - - [Containers and VMs](#containers-and-vms) - - [Data stores](#data-stores) - - [Distributed computing](#distributed-computing) - - [Email](#email) - - [Kubernetes](#kubernetes) - - [Logs](#logs) - - [Messaging](#messaging) - - [Network](#network) - - [Provisioning](#provisioning) - - [Remote devices](#remote-devices) - - [Search](#search) - - [Storage](#storage) - - [Web](#web) - - [System collectors](#system-collectors) - - [Applications](#applications) - - [Disks and filesystems](#disks-and-filesystems) - - [eBPF](#ebpf) - - [Hardware](#hardware) - - [Memory](#memory) - - [Networks](#networks) - - [Operating systems](#operating-systems) - - [Processes](#processes) - - [Resources](#resources) - - [Users](#users) - - [Netdata collectors](#netdata-collectors) - - [Orchestrators](#orchestrators) - - [Third-party collectors](#third-party-collectors) - - [Etc](#etc) - -## Service and application collectors - -The Netdata Agent auto-detects and collects metrics from all of the services and applications below. You can also -configure any of these collectors according to your setup and infrastructure. - -### Generic - -- [Prometheus endpoints](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/README.md): Gathers - metrics from any number of Prometheus endpoints, with support to autodetect more than 600 services and applications. -- [Pandas](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/pandas/README.md): A Python - collector that gathers - metrics from a [pandas](https://pandas.pydata.org/) dataframe. Pandas is a high level data processing library in - Python that can read various formats of data from local files or web endpoints. Custom processing and transformation - logic can also be expressed as part of the collector configuration. - -### APM (application performance monitoring) - -- [Go applications](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/go_expvar/README.md): - Monitor any Go application that exposes its - metrics with the `expvar` package from the Go standard library. -- [Java Spring Boot 2 applications](https://github.com/netdata/go.d.plugin/blob/master/modules/springboot2/README.md): - Monitor running Java Spring Boot 2 applications that expose their metrics with the use of the Spring Boot Actuator. -- [statsd](https://github.com/netdata/netdata/blob/master/collectors/statsd.plugin/README.md): Implement a high - performance `statsd` server for Netdata. -- [phpDaemon](https://github.com/netdata/go.d.plugin/blob/master/modules/phpdaemon/README.md): Collect worker - statistics (total, active, idle), and uptime for web and network applications. 
-- [uWSGI](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/uwsgi/README.md): Monitor - performance metrics exposed by the uWSGI Stats - Server. +## Available Data Collection Integrations + +### APM + +- [Alamos FE2 server](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/alamos_fe2_server.md) + +- [Apache Airflow](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/apache_airflow.md) + +- [Apache Flink](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/apache_flink.md) + +- [Audisto](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/audisto.md) + +- [Dependency-Track](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dependency-track.md) + +- [Go applications (EXPVAR)](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md) + +- [Google Pagespeed](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/google_pagespeed.md) + +- [IBM AIX systems Njmon](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ibm_aix_systems_njmon.md) + +- [JMX](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/jmx.md) + +- [Java Spring-boot 2 applications](https://github.com/netdata/go.d.plugin/blob/master/modules/springboot2/integrations/java_spring-boot_2_applications.md) + +- [NRPE daemon](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nrpe_daemon.md) + +- [Sentry](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sentry.md) + +- [Sysload](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sysload.md) + +- [VSCode](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/vscode.md) + +- [YOURLS URL Shortener](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/yourls_url_shortener.md) + +- [bpftrace variables](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/bpftrace_variables.md) + +- [gpsd](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/gpsd.md) + +- [jolokia](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/jolokia.md) + +- [phpDaemon](https://github.com/netdata/go.d.plugin/blob/master/modules/phpdaemon/integrations/phpdaemon.md) + +### Authentication and Authorization + +- [Fail2ban](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md) + +- [FreeRADIUS](https://github.com/netdata/go.d.plugin/blob/master/modules/freeradius/integrations/freeradius.md) + +- [HashiCorp Vault secrets](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hashicorp_vault_secrets.md) + +- [LDAP](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ldap.md) + +- [OpenLDAP (community)](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openldap_community.md) + +- [OpenLDAP](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/openldap/integrations/openldap.md) + +- [RADIUS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/radius.md) + +- [SSH](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ssh.md) 
+ +- [TACACS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/tacacs.md) + +### Blockchain Servers + +- [Chia](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/chia.md) + +- [Crypto exchanges](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/crypto_exchanges.md) + +- [Cryptowatch](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cryptowatch.md) + +- [Energi Core Wallet](https://github.com/netdata/go.d.plugin/blob/master/modules/energid/integrations/energi_core_wallet.md) + +- [Go-ethereum](https://github.com/netdata/go.d.plugin/blob/master/modules/geth/integrations/go-ethereum.md) + +- [Helium miner (validator)](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/helium_miner_validator.md) + +- [IOTA full node](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/iota_full_node.md) + +- [Sia](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sia.md) + +### CICD Platforms + +- [Concourse](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/concourse.md) + +- [GitLab Runner](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/gitlab_runner.md) + +- [Jenkins](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/jenkins.md) + +- [Puppet](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/puppet/integrations/puppet.md) + +### Cloud Provider Managed + +- [AWS EC2 Compute instances](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_ec2_compute_instances.md) + +- [AWS EC2 Spot Instance](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_ec2_spot_instance.md) + +- [AWS ECS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_ecs.md) + +- [AWS Health events](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_health_events.md) + +- [AWS Quota](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_quota.md) + +- [AWS S3 buckets](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_s3_buckets.md) + +- [AWS SQS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_sqs.md) + +- [AWS instance health](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_instance_health.md) + +- [Akamai Global Traffic Management](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/akamai_global_traffic_management.md) + +- [Akami Cloudmonitor](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/akami_cloudmonitor.md) + +- [Alibaba Cloud](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/alibaba_cloud.md) + +- [ArvanCloud CDN](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/arvancloud_cdn.md) + +- [Azure AD App passwords](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/azure_ad_app_passwords.md) + +- [Azure Elastic Pool SQL](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/azure_elastic_pool_sql.md) + +- [Azure 
Resources](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/azure_resources.md) + +- [Azure SQL](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/azure_sql.md) + +- [Azure Service Bus](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/azure_service_bus.md) + +- [Azure application](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/azure_application.md) + +- [BigQuery](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/bigquery.md) + +- [CloudWatch](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cloudwatch.md) + +- [Dell EMC ECS cluster](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dell_emc_ecs_cluster.md) + +- [DigitalOcean](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/digitalocean.md) + +- [GCP GCE](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/gcp_gce.md) + +- [GCP Quota](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/gcp_quota.md) + +- [Google Cloud Platform](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/google_cloud_platform.md) + +- [Google Stackdriver](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/google_stackdriver.md) + +- [Linode](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/linode.md) + +- [Lustre metadata](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/lustre_metadata.md) + +- [Nextcloud servers](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nextcloud_servers.md) + +- [OpenStack](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openstack.md) + +- [Zerto](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/zerto.md) ### Containers and VMs -- [Docker containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md): Monitor the - health and performance of individual Docker containers using the cgroups collector plugin. -- [DockerD](https://github.com/netdata/go.d.plugin/blob/master/modules/docker/README.md): Collect container health - statistics. -- [Docker Engine](https://github.com/netdata/go.d.plugin/blob/master/modules/docker_engine/README.md): Collect - runtime statistics from the `docker` daemon using the `metrics-address` feature. -- [Docker Hub](https://github.com/netdata/go.d.plugin/blob/master/modules/dockerhub/README.md): Collect statistics - about Docker repositories, such as pulls, starts, status, time since last update, and more. -- [Libvirt](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md): Monitor the health and - performance of individual Libvirt containers - using the cgroups collector plugin. -- [LXC](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md): Monitor the health and - performance of individual LXC containers using - the cgroups collector plugin. -- [LXD](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md): Monitor the health and - performance of individual LXD containers using - the cgroups collector plugin. 
-- [systemd-nspawn](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md): Monitor the - health and performance of individual - systemd-nspawn containers using the cgroups collector plugin. -- [vCenter Server Appliance](https://github.com/netdata/go.d.plugin/blob/master/modules/vcsa/README.md): Monitor - appliance system, components, and software update health statuses via the Health API. -- [vSphere](https://github.com/netdata/go.d.plugin/blob/master/modules/vsphere/README.md): Collect host and virtual - machine performance metrics. -- [Xen/XCP-ng](https://github.com/netdata/netdata/blob/master/collectors/xenstat.plugin/README.md): Collect XenServer - and XCP-ng metrics using `libxenstat`. - -### Data stores - -- [CockroachDB](https://github.com/netdata/go.d.plugin/blob/master/modules/cockroachdb/README.md): Monitor various - database components using `_status/vars` endpoint. -- [Consul](https://github.com/netdata/go.d.plugin/blob/master/modules/consul/README.md): Capture service and unbound - checks status (passing, warning, critical, maintenance). -- [Couchbase](https://github.com/netdata/go.d.plugin/blob/master/modules/couchbase/README.md): Gather per-bucket - metrics from any number of instances of the distributed JSON document database. -- [CouchDB](https://github.com/netdata/go.d.plugin/blob/master/modules/couchdb/README.md): Monitor database health and - performance metrics - (reads/writes, HTTP traffic, replication status, etc). -- [MongoDB](https://github.com/netdata/go.d.plugin/blob/master/modules/mongodb/README.md): Collect server, database, - replication and sharding performance and health metrics. -- [MySQL](https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/README.md): Collect database global, - replication and per user statistics. -- [OracleDB](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/oracledb/README.md): Monitor - database performance and health metrics. -- [Pika](https://github.com/netdata/go.d.plugin/blob/master/modules/pika/README.md): Gather metric, such as clients, - memory usage, queries, and more from the Redis interface-compatible database. -- [Postgres](https://github.com/netdata/go.d.plugin/blob/master/modules/postgres/README.md): Collect database health - and performance metrics. -- [ProxySQL](https://github.com/netdata/go.d.plugin/blob/master/modules/proxysql/README.md): Monitor database backend - and frontend performance metrics. -- [Redis](https://github.com/netdata/go.d.plugin/blob/master/modules/redis/README.md): Monitor status from any - number of database instances by reading the server's response to the `INFO ALL` command. -- [RethinkDB](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/rethinkdbs/README.md): Collect - database server and cluster statistics. -- [Riak KV](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/riakkv/README.md): Collect - database stats from the `/stats` endpoint. -- [Zookeeper](https://github.com/netdata/go.d.plugin/blob/master/modules/zookeeper/README.md): Monitor application - health metrics reading the server's response to the `mntr` command. -- [Memcached](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/memcached/README.md): Collect - memory-caching system performance metrics. 
- -### Distributed computing - -- [BOINC](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/boinc/README.md): Monitor the total - number of tasks, open tasks, and task - states for the distributed computing client. -- [Gearman](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/gearman/README.md): Collect - application summary (queued, running) and per-job - worker statistics (queued, idle, running). - -### Email - -- [Dovecot](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/dovecot/README.md): Collect email - server performance metrics by reading the - server's response to the `EXPORT global` command. -- [EXIM](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/exim/README.md): Uses the `exim` tool - to monitor the queue length of a - mail/message transfer agent (MTA). -- [Postfix](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/postfix/README.md): Uses - the `postqueue` tool to monitor the queue length of a - mail/message transfer agent (MTA). +- [Containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/containers.md) -### Kubernetes +- [Docker Engine](https://github.com/netdata/go.d.plugin/blob/master/modules/docker_engine/integrations/docker_engine.md) + +- [Docker Hub repository](https://github.com/netdata/go.d.plugin/blob/master/modules/dockerhub/integrations/docker_hub_repository.md) + +- [Docker](https://github.com/netdata/go.d.plugin/blob/master/modules/docker/integrations/docker.md) + +- [LXC Containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/lxc_containers.md) + +- [Libvirt Containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/libvirt_containers.md) + +- [NSX-T](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nsx-t.md) + +- [Podman](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/podman.md) + +- [Proxmox Containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/proxmox_containers.md) + +- [Proxmox VE](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/proxmox_ve.md) + +- [VMware vCenter Server](https://github.com/netdata/go.d.plugin/blob/master/modules/vsphere/integrations/vmware_vcenter_server.md) + +- [Virtual Machines](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/virtual_machines.md) + +- [Xen XCP-ng](https://github.com/netdata/netdata/blob/master/collectors/xenstat.plugin/integrations/xen_xcp-ng.md) + +- [cAdvisor](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cadvisor.md) + +- [oVirt Containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/ovirt_containers.md) + +- [vCenter Server Appliance](https://github.com/netdata/go.d.plugin/blob/master/modules/vcsa/integrations/vcenter_server_appliance.md) + +### Databases + +- [4D Server](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/4d_server.md) + +- [AWS RDS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_rds.md) + +- [Cassandra](https://github.com/netdata/go.d.plugin/blob/master/modules/cassandra/integrations/cassandra.md) + +- [ClickHouse](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/clickhouse.md) + +- 
[ClusterControl CMON](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/clustercontrol_cmon.md) + +- [CockroachDB](https://github.com/netdata/go.d.plugin/blob/master/modules/cockroachdb/integrations/cockroachdb.md) + +- [CouchDB](https://github.com/netdata/go.d.plugin/blob/master/modules/couchdb/integrations/couchdb.md) + +- [Couchbase](https://github.com/netdata/go.d.plugin/blob/master/modules/couchbase/integrations/couchbase.md) + +- [HANA](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hana.md) + +- [Hasura GraphQL Server](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hasura_graphql_server.md) + +- [InfluxDB](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/influxdb.md) + +- [Machbase](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/machbase.md) + +- [MariaDB](https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/integrations/mariadb.md) + +- [Memcached (community)](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/memcached_community.md) + +- [Memcached](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/memcached/integrations/memcached.md) + +- [MongoDB](https://github.com/netdata/go.d.plugin/blob/master/modules/mongodb/integrations/mongodb.md) + +- [MySQL](https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/integrations/mysql.md) + +- [ODBC](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/odbc.md) + +- [Oracle DB (community)](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/oracle_db_community.md) + +- [Oracle DB](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/oracledb/integrations/oracle_db.md) + +- [Patroni](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/patroni.md) + +- [Percona MySQL](https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/integrations/percona_mysql.md) + +- [PgBouncer](https://github.com/netdata/go.d.plugin/blob/master/modules/pgbouncer/integrations/pgbouncer.md) + +- [Pgpool-II](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/pgpool-ii.md) + +- [Pika](https://github.com/netdata/go.d.plugin/blob/master/modules/pika/integrations/pika.md) + +- [PostgreSQL](https://github.com/netdata/go.d.plugin/blob/master/modules/postgres/integrations/postgresql.md) + +- [ProxySQL](https://github.com/netdata/go.d.plugin/blob/master/modules/proxysql/integrations/proxysql.md) + +- [Redis](https://github.com/netdata/go.d.plugin/blob/master/modules/redis/integrations/redis.md) + +- [RethinkDB](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md) + +- [RiakKV](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/riakkv/integrations/riakkv.md) + +- [SQL Database agnostic](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sql_database_agnostic.md) + +- [Vertica](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/vertica.md) + +- [Warp10](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/warp10.md) + +- [pgBackRest](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/pgbackrest.md) + +### Distributed Computing Systems + +- 
[BOINC](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/boinc/integrations/boinc.md) + +- [Gearman](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/gearman/integrations/gearman.md) -- [Kubelet](https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubelet/README.md): Monitor one or more - instances of the Kubelet agent and collects metrics on number of pods/containers running, volume of Docker - operations, and more. -- [kube-proxy](https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubeproxy/README.md): Collect - metrics, such as syncing proxy rules and REST client requests, from one or more instances of `kube-proxy`. -- [Service discovery](https://github.com/netdata/agent-service-discovery/blob/master/README.md): Find what services are running on a - cluster's pods, converts that into configuration files, and exports them so they can be monitored by Netdata. - -### Logs - -- [Fluentd](https://github.com/netdata/go.d.plugin/blob/master/modules/fluentd/README.md): Gather application - plugins metrics from an endpoint provided by `in_monitor plugin`. -- [Logstash](https://github.com/netdata/go.d.plugin/blob/master/modules/logstash/README.md): Monitor JVM threads, - memory usage, garbage collection statistics, and more. -- [OpenVPN status logs](https://github.com/netdata/go.d.plugin/blob/master/modules/openvpn_status_log/README.md): Parse - server log files and provide summary (client, traffic) metrics. -- [Squid web server logs](https://github.com/netdata/go.d.plugin/blob/master/modules/squidlog/README.md): Tail Squid - access logs to return the volume of requests, types of requests, bandwidth, and much more. -- [Web server logs (Go version for Apache, NGINX)](https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/README.md): Tail access logs and provide - very detailed web server performance statistics. This module is able to parse 200k+ rows in less than half a second. -- [Web server logs (Apache, NGINX)](https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/README.md): Tail - access log - file and collect web server/caching proxy metrics. - -### Messaging - -- [ActiveMQ](https://github.com/netdata/go.d.plugin/blob/master/modules/activemq/README.md): Collect message broker - queues and topics statistics using the ActiveMQ Console API. -- [Beanstalk](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/beanstalk/README.md): Collect - server and tube-level statistics, such as CPU - usage, jobs rates, commands, and more. -- [Pulsar](https://github.com/netdata/go.d.plugin/blob/master/modules/pulsar/README.md): Collect summary, - namespaces, and topics performance statistics. -- [RabbitMQ](https://github.com/netdata/go.d.plugin/blob/master/modules/rabbitmq/README.md): Collect message - broker overview, system and per virtual host metrics. -- [VerneMQ](https://github.com/netdata/go.d.plugin/blob/master/modules/vernemq/README.md): Monitor MQTT broker - health and performance metrics. It collects all available info for both MQTTv3 and v5 communication - -### Network - -- [Bind 9](https://github.com/netdata/go.d.plugin/blob/master/modules/bind/README.md): Collect nameserver summary - performance statistics via a web interface (`statistics-channels` feature). -- [Chrony](https://github.com/netdata/go.d.plugin/blob/master/modules/chrony/README.md): Monitor the precision and - statistics of a local `chronyd` server. 
-- [CoreDNS](https://github.com/netdata/go.d.plugin/blob/master/modules/coredns/README.md): Measure DNS query round - trip time. -- [Dnsmasq](https://github.com/netdata/go.d.plugin/blob/master/modules/dnsmasq_dhcp/README.md): Automatically - detects all configured `Dnsmasq` DHCP ranges and Monitor their utilization. -- [DNSdist](https://github.com/netdata/go.d.plugin/blob/master/modules/dnsdist/README.md): Collect - load-balancer performance and health metrics. -- [Dnsmasq DNS Forwarder](https://github.com/netdata/go.d.plugin/blob/master/modules/dnsmasq/README.md): Gather - queries, entries, operations, and events for the lightweight DNS forwarder. -- [DNS Query Time](https://github.com/netdata/go.d.plugin/blob/master/modules/dnsquery/README.md): Monitor the round - trip time for DNS queries in milliseconds. -- [Freeradius](https://github.com/netdata/go.d.plugin/blob/master/modules/freeradius/README.md): Collect - server authentication and accounting statistics from the `status server`. -- [Libreswan](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/libreswan/README.md): Collect - bytes-in, bytes-out, and uptime metrics. -- [Icecast](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/icecast/README.md): Monitor the - number of listeners for active sources. -- [ISC Bind (RDNC)](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/bind_rndc/README.md): - Collect nameserver summary performance - statistics using the `rndc` tool. -- [ISC DHCP](https://github.com/netdata/go.d.plugin/blob/master/modules/isc_dhcpd/README.md): Reads a - `dhcpd.leases` file and collects metrics on total active leases, pool active leases, and pool utilization. -- [OpenLDAP](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/openldap/README.md): Provides - statistics information from the OpenLDAP - (`slapd`) server. -- [NSD](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/nsd/README.md): Monitor nameserver - performance metrics using the `nsd-control` - tool. -- [NTP daemon](https://github.com/netdata/go.d.plugin/blob/master/modules/ntpd/README.md): Monitor the system variables - of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol - via a UDP socket. -- [OpenSIPS](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/opensips/README.md): Collect - server health and performance metrics using the - `opensipsctl` tool. -- [OpenVPN](https://github.com/netdata/go.d.plugin/blob/master/modules/openvpn/README.md): Gather server summary - (client, traffic) and per user metrics (traffic, connection time) stats using `management-interface`. -- [Pi-hole](https://github.com/netdata/go.d.plugin/blob/master/modules/pihole/README.md): Monitor basic (DNS - queries, clients, blocklist) and extended (top clients, top permitted, and blocked domains) statistics using the PHP - API. -- [PowerDNS Authoritative Server](https://github.com/netdata/go.d.plugin/blob/master/modules/powerdns/README.md): - Monitor one or more instances of the nameserver software to collect questions, events, and latency metrics. -- [PowerDNS Recursor](https://github.com/netdata/go.d.plugin/blob/master/modules/powerdns/README.md#recursor): - Gather incoming/outgoing questions, drops, timeouts, and cache usage from any number of DNS recursor instances. 
-- [RetroShare](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/retroshare/README.md): Monitor - application bandwidth, peers, and DHT - metrics. -- [Tor](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/tor/README.md): Capture traffic usage - statistics using the Tor control port. -- [Unbound](https://github.com/netdata/go.d.plugin/blob/master/modules/unbound/README.md): Collect DNS resolver - summary and extended system and per thread metrics via the `remote-control` interface. - -### Provisioning - -- [Puppet](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/puppet/README.md): Monitor the - status of Puppet Server and Puppet DB. - -### Remote devices - -- [AM2320](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/am2320/README.md): Monitor sensor - temperature and humidity. -- [Access point](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/ap/README.md): Monitor - client, traffic and signal metrics using the `aw` - tool. -- [APC UPS](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/apcupsd/README.md): Capture status - information using the `apcaccess` tool. -- [Energi Core](https://github.com/netdata/go.d.plugin/blob/master/modules/energid/README.md): Monitor - blockchain indexes, memory usage, network usage, and transactions of wallet instances. -- [UPS/PDU](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/nut/README.md): Read the status of - UPS/PDU devices using the `upsc` tool. -- [SNMP devices](https://github.com/netdata/go.d.plugin/blob/master/modules/snmp/README.md): Gather data using the SNMP - protocol. -- [1-Wire sensors](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/w1sensor/README.md): - Monitor sensor temperature. - -### Search - -- [Elasticsearch](https://github.com/netdata/go.d.plugin/blob/master/modules/elasticsearch/README.md): Collect - dozens of metrics on search engine performance from local nodes and local indices. Includes cluster health and - statistics. -- [Solr](https://github.com/netdata/go.d.plugin/blob/master/modules/solr/README.md): Collect application search - requests, search errors, update requests, and update errors statistics. - -### Storage - -- [Ceph](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/ceph/README.md): Monitor the Ceph - cluster usage and server data consumption. -- [HDFS](https://github.com/netdata/go.d.plugin/blob/master/modules/hdfs/README.md): Monitor health and performance - metrics for filesystem datanodes and namenodes. -- [IPFS](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/ipfs/README.md): Collect file system - bandwidth, peers, and repo metrics. -- [Scaleio](https://github.com/netdata/go.d.plugin/blob/master/modules/scaleio/README.md): Monitor storage system, - storage pools, and SDCS health and performance metrics via VxFlex OS Gateway API. -- [Samba](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/samba/README.md): Collect file - sharing metrics using the `smbstatus` tool. - -### Web - -- [Apache](https://github.com/netdata/go.d.plugin/blob/master/modules/apache/README.md): Collect Apache web - server performance metrics via the `server-status?auto` endpoint. -- [HAProxy](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/haproxy/README.md): Collect - frontend, backend, and health metrics. 
-- [HTTP endpoints](https://github.com/netdata/go.d.plugin/blob/master/modules/httpcheck/README.md): Monitor - any HTTP endpoint's availability and response time. -- [Lighttpd](https://github.com/netdata/go.d.plugin/blob/master/modules/lighttpd/README.md): Collect web server - performance metrics using the `server-status?auto` endpoint. -- [Litespeed](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/litespeed/README.md): Collect - web server data (network, connection, - requests, cache) by reading `.rtreport*` files. -- [Nginx](https://github.com/netdata/go.d.plugin/blob/master/modules/nginx/README.md): Monitor web server - status information by gathering metrics via `ngx_http_stub_status_module`. -- [Nginx VTS](https://github.com/netdata/go.d.plugin/blob/master/modules/nginxvts/README.md): Gathers metrics from - any Nginx deployment with the _virtual host traffic status module_ enabled, including metrics on uptime, memory - usage, and cache, and more. -- [PHP-FPM](https://github.com/netdata/go.d.plugin/blob/master/modules/phpfpm/README.md): Collect application - summary and processes health metrics by scraping the status page (`/status?full`). -- [TCP endpoints](https://github.com/netdata/go.d.plugin/blob/master/modules/portcheck/README.md): Monitor any - TCP endpoint's availability and response time. -- [Spigot Minecraft servers](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/spigotmc/README.md): - Monitor average ticket rate and number - of users. -- [Squid](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/squid/README.md): Monitor client and - server bandwidth/requests by gathering - data from the Cache Manager component. -- [Tengine](https://github.com/netdata/go.d.plugin/blob/master/modules/tengine/README.md): Monitor web server - statistics using information provided by `ngx_http_reqstat_module`. -- [Tomcat](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/tomcat/README.md): Collect web - server performance metrics from the Manager App - (`/manager/status?XML=true`). -- [Traefik](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/traefik/README.md): Uses Traefik's - Health API to provide statistics. -- [Varnish](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/varnish/README.md): Provides HTTP - accelerator global, backends (VBE), and - disks (SMF) statistics using the `varnishstat` tool. -- [x509 check](https://github.com/netdata/go.d.plugin/blob/master/modules/x509check/README.md): Monitor certificate - expiration time. -- [Whois domain expiry](https://github.com/netdata/go.d.plugin/blob/master/modules/whoisquery/README.md): Checks the - remaining time until a given domain is expired. - -## System collectors - -The Netdata Agent can collect these system- and hardware-level metrics using a variety of collectors, some of which -(such as `proc.plugin`) collect multiple types of metrics simultaneously. - -### Applications - -- [Fail2ban](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/fail2ban/README.md): Parses - configuration files to detect all jails, then - uses log files to report ban rates and volume of banned IPs. -- [Monit](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/monit/README.md): Monitor statuses - of targets (service-checks) using the XML - stats interface. 
-- [Windows](https://github.com/netdata/go.d.plugin/blob/master/modules/windows/README.md): Collect CPU, memory, - network, disk, OS, system, and log-in metrics scraping [windows_exporter](https://github.com/prometheus-community/windows_exporter). - -### Disks and filesystems - -- [BCACHE](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Monitor BCACHE statistics - with the `proc.plugin` collector. -- [Block devices](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Gather metrics about - the health and performance of block - devices using the `proc.plugin` collector. -- [Btrfs](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Monitors Btrfs filesystems - with the `proc.plugin` collector. -- [Device mapper](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Gather metrics about - the Linux device mapper with the proc - collector. -- [Disk space](https://github.com/netdata/netdata/blob/master/collectors/diskspace.plugin/README.md): Collect disk space - usage metrics on Linux mount points. -- [Clock synchronization](https://github.com/netdata/netdata/blob/master/collectors/timex.plugin/README.md): Collect the - system clock synchronization status on Linux. -- [Files and directories](https://github.com/netdata/go.d.plugin/blob/master/modules/filecheck/README.md): Gather - metrics about the existence, modification time, and size of files or directories. -- [ioping.plugin](https://github.com/netdata/netdata/blob/master/collectors/ioping.plugin/README.md): Measure disk - read/write latency. -- [NFS file servers and clients](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): - Gather operations, utilization, and space usage - using the `proc.plugin` collector. -- [RAID arrays](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Collect health, disk - status, operation status, and more with the `proc.plugin` collector. -- [Veritas Volume Manager](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Gather - metrics about the Veritas Volume Manager (VVM). -- [ZFS](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Monitor bandwidth and - utilization of ZFS disks/partitions using the proc - collector. 
+### DNS and DHCP Servers + +- [Akamai Edge DNS Traffic](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/akamai_edge_dns_traffic.md) + +- [CoreDNS](https://github.com/netdata/go.d.plugin/blob/master/modules/coredns/integrations/coredns.md) + +- [DNS query](https://github.com/netdata/go.d.plugin/blob/master/modules/dnsquery/integrations/dns_query.md) + +- [DNSBL](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dnsbl.md) + +- [DNSdist](https://github.com/netdata/go.d.plugin/blob/master/modules/dnsdist/integrations/dnsdist.md) + +- [Dnsmasq DHCP](https://github.com/netdata/go.d.plugin/blob/master/modules/dnsmasq_dhcp/integrations/dnsmasq_dhcp.md) + +- [Dnsmasq](https://github.com/netdata/go.d.plugin/blob/master/modules/dnsmasq/integrations/dnsmasq.md) + +- [ISC Bind (RNDC)](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md) + +- [ISC DHCP](https://github.com/netdata/go.d.plugin/blob/master/modules/isc_dhcpd/integrations/isc_dhcp.md) + +- [Name Server Daemon](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md) + +- [NextDNS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nextdns.md) + +- [Pi-hole](https://github.com/netdata/go.d.plugin/blob/master/modules/pihole/integrations/pi-hole.md) + +- [PowerDNS Authoritative Server](https://github.com/netdata/go.d.plugin/blob/master/modules/powerdns/integrations/powerdns_authoritative_server.md) + +- [PowerDNS Recursor](https://github.com/netdata/go.d.plugin/blob/master/modules/powerdns_recursor/integrations/powerdns_recursor.md) + +- [Unbound](https://github.com/netdata/go.d.plugin/blob/master/modules/unbound/integrations/unbound.md) ### eBPF -- [Files](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md): Provides information about - how often a system calls kernel - functions related to file descriptors using the eBPF collector. -- [Virtual file system (VFS)](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md): Monitor - IO, errors, deleted objects, and - more for kernel virtual file systems (VFS) using the eBPF collector. -- [Processes](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md): Monitor threads, task - exits, and errors using the eBPF collector. - -### Hardware - -- [Adaptec RAID](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/adaptec_raid/README.md): - Monitor logical and physical devices health - metrics using the `arcconf` tool. -- [CUPS](https://github.com/netdata/netdata/blob/master/collectors/cups.plugin/README.md): Monitor CUPS. -- [FreeIPMI](https://github.com/netdata/netdata/blob/master/collectors/freeipmi.plugin/README.md): - Uses `libipmimonitoring-dev` or `libipmimonitoring-devel` to - monitor the number of sensors, temperatures, voltages, currents, and more. -- [Hard drive temperature](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/hddtemp/README.md): - Monitor the temperature of storage - devices. -- [HP Smart Storage Arrays](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/hpssa/README.md): - Monitor controller, cache module, logical - and physical drive state, and temperature using the `ssacli` tool. 
-- [MegaRAID controllers](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/megacli/README.md): - Collect adapter, physical drives, and - battery stats using the `megacli` tool. -- [NVIDIA GPU](https://github.com/netdata/go.d.plugin/blob/master/modules/nvidia_smi/README.md): Monitor - performance metrics (memory usage, fan - speed, pcie bandwidth utilization, temperature, and more) using the `nvidia-smi` tool. -- [Sensors](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/sensors/README.md): Reads system - sensors information (temperature, voltage, - electric current, power, and more) from `/sys/devices/`. -- [S.M.A.R.T](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/smartd_log/README.md): Reads - SMART Disk Monitoring daemon logs. - -### Memory - -- [Available memory](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Tracks changes in - available RAM using the `proc.plugin` collector. -- [Committed memory](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Monitor committed - memory using the `proc.plugin` collector. -- [Huge pages](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Gather metrics about - huge pages in Linux and FreeBSD with the - `proc.plugin` collector. -- [KSM](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Measure the amount of merging, - savings, and effectiveness using the - `proc.plugin` collector. -- [Numa](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Gather metrics on the number - of non-uniform memory access (NUMA) events - every second using the `proc.plugin` collector. -- [Page faults](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Collect the number of - memory page faults per second using the - `proc.plugin` collector. -- [RAM](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Collect metrics on system RAM, - available RAM, and more using the - `proc.plugin` collector. -- [SLAB](https://github.com/netdata/netdata/blob/master/collectors/slabinfo.plugin/README.md): Collect kernel SLAB - details on Linux systems. -- [swap](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Monitor the amount of free - and used swap at every second using the - `proc.plugin` collector. -- [Writeback memory](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Collect how much - memory is actively being written to disk at - every second using the `proc.plugin` collector. - -### Networks - -- [Access points](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/ap/README.md): Visualizes - data related to access points. -- [Ping](https://github.com/netdata/go.d.plugin/blob/master/modules/ping/README.md): Measure network latency, jitter and - packet loss between the monitored node - and any number of remote network end points. -- [Netfilter](https://github.com/netdata/netdata/blob/master/collectors/nfacct.plugin/README.md): Collect netfilter - firewall, connection tracker, and accounting - metrics using `libmnl` and `libnetfilter_acct`. -- [Network stack](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Monitor the - networking stack for errors, TCP connection aborts, - bandwidth, and more. 
-- [Network QoS](https://github.com/netdata/netdata/blob/master/collectors/tc.plugin/README.md): Collect traffic QoS - metrics (`tc`) of Linux network interfaces. -- [SYNPROXY](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Monitor entries uses, SYN - packets received, TCP cookies, and more. - -### Operating systems - -- [freebsd.plugin](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/README.md): Collect resource - usage and performance data on FreeBSD systems. -- [macOS](https://github.com/netdata/netdata/blob/master/collectors/macos.plugin/README.md): Collect resource usage and - performance data on macOS systems. - -### Processes - -- [Applications](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md): Gather CPU, disk, - memory, network, eBPF, and other metrics per - application using the `apps.plugin` collector. -- [systemd](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md): Monitor the CPU and - memory usage of systemd services using the - `cgroups.plugin` collector. -- [systemd unit states](https://github.com/netdata/go.d.plugin/blob/master/modules/systemdunits/README.md): See the - state (active, inactive, activating, deactivating, failed) of various systemd unit types. -- [System processes](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Collect metrics - on system load and total processes running - using `/proc/loadavg` and the `proc.plugin` collector. -- [Uptime](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Monitor the uptime of a - system using the `proc.plugin` collector. - -### Resources - -- [CPU frequency](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Monitor CPU - frequency, as set by the `cpufreq` kernel module, - using the `proc.plugin` collector. -- [CPU idle](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Measure CPU idle every - second using the `proc.plugin` collector. -- [CPU performance](https://github.com/netdata/netdata/blob/master/collectors/perf.plugin/README.md): Collect CPU - performance metrics using performance monitoring - units (PMU). -- [CPU throttling](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Gather metrics - about thermal throttling using the `/proc/stat` - module and the `proc.plugin` collector. -- [CPU utilization](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Capture CPU - utilization, both system-wide and per-core, using - the `/proc/stat` module and the `proc.plugin` collector. -- [Entropy](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Monitor the available - entropy on a system using the `proc.plugin` - collector. -- [Interprocess Communication (IPC)](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): - Monitor IPC semaphores and shared memory - using the `proc.plugin` collector. -- [Interrupts](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Monitor interrupts per - second using the `proc.plugin` collector. -- [IdleJitter](https://github.com/netdata/netdata/blob/master/collectors/idlejitter.plugin/README.md): Measure CPU - latency and jitter on all operating systems. 
-- [SoftIRQs](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Collect metrics on - SoftIRQs, both system-wide and per-core, using the - `proc.plugin` collector. -- [SoftNet](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md): Capture SoftNet events per - second, both system-wide and per-core, - using the `proc.plugin` collector. - -### Users - -- [systemd-logind](https://github.com/netdata/go.d.plugin/blob/master/modules/logind/README.md): Monitor active - sessions, users, and seats tracked - by `systemd-logind` or `elogind`. -- [User/group usage](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md): Gather CPU, disk, - memory, network, and other metrics per user - and user group using the `apps.plugin` collector. - -## Netdata collectors - -These collectors are recursive in nature, in that they monitor some function of the Netdata Agent itself. Some -collectors are described only in code and associated charts in Netdata dashboards. - -- [ACLK (code only)](https://github.com/netdata/netdata/blob/master/aclk/legacy/aclk_stats.c): View whether a Netdata - Agent is connected to Netdata Cloud via the [ACLK](https://github.com/netdata/netdata/blob/master/aclk/README.md), the - volume of queries, process times, and more. -- [Alarms](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/alarms/README.md): This collector - creates an - **Alarms** menu with one line plot showing the alarm states of a Netdata Agent over time. -- [Anomalies](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/anomalies/README.md): This - collector uses the - Python PyOD library to perform unsupervised anomaly detection on your Netdata charts and/or dimensions. -- [Exporting (code only)](https://github.com/netdata/netdata/blob/master/exporting/send_internal_metrics.c): Gather - metrics on CPU utilization for - the [exporting engine](https://github.com/netdata/netdata/blob/master/exporting/README.md), and specific metrics for - each enabled - exporting connector. -- [Global statistics (code only)](https://github.com/netdata/netdata/blob/master/daemon/global_statistics.c): See - metrics on the CPU utilization, network traffic, volume of web clients, API responses, database engine usage, and - more. - -## Orchestrators - -Plugin orchestrators organize and run many of the above collectors. - -If you're interested in developing a new collector that you'd like to contribute to Netdata, we highly recommend using -the `go.d.plugin`. - -- [go.d.plugin](https://github.com/netdata/go.d.plugin): An orchestrator for data collection modules written in `go`. -- [python.d.plugin](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md): An - orchestrator for data collection modules written in `python` v2/v3. -- [charts.d.plugin](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/README.md): An - orchestrator for data collection modules written in `bash` v4+. - -## Third-party collectors - -These collectors are developed and maintained by third parties and, unlike the other collectors, are not installed by -default. To use a third-party collector, visit their GitHub/documentation page and follow their installation procedures. - -
-Typical third party Python collector installation instructions
-
-In general, the steps below should be sufficient to use a third-party collector.
-
-1. Download collector code file
-   into [folder expected by Netdata](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#environment-variables).
-2. Download default collector configuration file
-   into [folder expected by Netdata](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#environment-variables).
-3. [Edit configuration file](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure#configure-a-collector)
-   from step 2 if required.
-4. [Enable collector](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure#enable-a-collector-or-its-orchestrator).
-5. [Restart Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md)
-
-For example, below are the steps to enable
-the [Python ClickHouse collector](https://github.com/netdata/community/tree/main/collectors/python.d.plugin/clickhouse).
-
-```bash
-# download python collector script to /usr/libexec/netdata/python.d/
-$ sudo wget https://raw.githubusercontent.com/netdata/community/main/collectors/python.d.plugin/clickhouse/clickhouse.chart.py -O /usr/libexec/netdata/python.d/clickhouse.chart.py
-
-# (optional) download default .conf to /etc/netdata/python.d/
-$ sudo wget https://raw.githubusercontent.com/netdata/community/main/collectors/python.d.plugin/clickhouse/clickhouse.conf -O /etc/netdata/python.d/clickhouse.conf
-
-# enable the collector by adding a new line with "clickhouse: yes" to the /etc/netdata/python.d.conf file
-# (tee -a appends to the file if it already exists, or creates it if not, with the required root privileges)
-$ echo "clickhouse: yes" | sudo tee -a /etc/netdata/python.d.conf
-
-# (optional) edit clickhouse.conf if needed
-$ sudo vi /etc/netdata/python.d/clickhouse.conf
-
-# restart netdata
-# see docs for more information: https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md
-$ sudo systemctl restart netdata
-```
-
-
- -- [CyberPower UPS](https://github.com/HawtDogFlvrWtr/netdata_cyberpwrups_plugin): Polls CyberPower UPS data using - PowerPanel® Personal Linux. -- [Logged-in users](https://github.com/veksh/netdata-numsessions): Collect the number of currently logged-on users. -- [nextcloud](https://github.com/arnowelzel/netdata-nextcloud): Monitor Nextcloud servers. -- [nim-netdata-plugin](https://github.com/FedericoCeratto/nim-netdata-plugin): A helper to create native Netdata - plugins using Nim. -- [Nvidia GPUs](https://github.com/coraxx/netdata_nv_plugin): Monitor Nvidia GPUs. -- [Teamspeak 3](https://github.com/coraxx/netdata_ts3_plugin): Pulls active users and bandwidth from TeamSpeak 3 - servers. -- [SSH](https://github.com/Yaser-Amiri/netdata-ssh-module): Monitor failed authentication requests of an SSH server. -- [ClickHouse](https://github.com/netdata/community/tree/main/collectors/python.d.plugin/clickhouse): - Monitor [ClickHouse](https://clickhouse.com/) database. -- [Ethtool](https://github.com/ghanapunq/netdata_ethtool_plugin): Monitor network interfaces with ethtool. -- [netdata-needrestart](https://github.com/nodiscc/netdata-needrestart) - Check/graph the number of processes/services/kernels that should be restarted after upgrading packages. -- [netdata-debsecan](https://github.com/nodiscc/netdata-debsecan) - Check/graph the number of CVEs in currently installed packages. -- [netdata-logcount](https://github.com/nodiscc/netdata-logcount) - Check/graph the number of syslog messages, by level over time. -- [netdata-apt](https://github.com/nodiscc/netdata-apt) - Check/graph and alert on the number of upgradeable packages, and available distribution upgrades. -- [diskquota](https://github.com/netdata/community/tree/main/collectors/python.d.plugin/diskquota) - Monitors the defined quotas on one or more filesystems depending on configuration. - -## Etc - -- [charts.d example](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/example/README.md): An - example `charts.d` collector. -- [python.d example](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/example/README.md): An - example `python.d` collector. -- [go.d example](https://github.com/netdata/go.d.plugin/blob/master/modules/example/README.md): An - example `go.d` collector. 
+- [eBPF Cachestat](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_cachestat.md) + +- [eBPF DCstat](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_dcstat.md) + +- [eBPF Disk](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_disk.md) + +- [eBPF Filedescriptor](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md) + +- [eBPF Filesystem](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_filesystem.md) + +- [eBPF Hardirq](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_hardirq.md) + +- [eBPF MDflush](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_mdflush.md) + +- [eBPF Mount](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_mount.md) + +- [eBPF OOMkill](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_oomkill.md) + +- [eBPF Process](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_process.md) + +- [eBPF Processes](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_processes.md) + +- [eBPF SHM](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_shm.md) + +- [eBPF SWAP](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_swap.md) + +- [eBPF Socket](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_socket.md) + +- [eBPF SoftIRQ](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_softirq.md) + +- [eBPF Sync](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_sync.md) + +- [eBPF VFS](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_vfs.md) + +### FreeBSD + +- [FreeBSD NFS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/freebsd_nfs.md) + +- [FreeBSD RCTL-RACCT](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/freebsd_rctl-racct.md) + +- [dev.cpu.0.freq](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md) + +- [dev.cpu.temperature](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md) + +- [devstat](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/devstat.md) + +- [getifaddrs](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/getifaddrs.md) + +- [getmntinfo](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/getmntinfo.md) + +- [hw.intrcnt](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/hw.intrcnt.md) + +- [ipfw](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/ipfw.md) + +- [kern.cp_time](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/kern.cp_time.md) + +- [kern.ipc.msq](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/kern.ipc.msq.md) + +- [kern.ipc.sem](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/kern.ipc.sem.md) + +- 
[kern.ipc.shm](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/kern.ipc.shm.md) + +- [net.inet.icmp.stats](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md) + +- [net.inet.ip.stats](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md) + +- [net.inet.tcp.states](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md) + +- [net.inet.tcp.stats](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md) + +- [net.inet.udp.stats](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md) + +- [net.inet6.icmp6.stats](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md) + +- [net.inet6.ip6.stats](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md) + +- [net.isr](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.isr.md) + +- [system.ram](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/system.ram.md) + +- [uptime](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/uptime.md) + +- [vm.loadavg](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.loadavg.md) + +- [vm.stats.sys.v_intr](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md) + +- [vm.stats.sys.v_soft](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md) + +- [vm.stats.sys.v_swtch](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md) + +- [vm.stats.vm.v_pgfaults](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md) + +- [vm.stats.vm.v_swappgs](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md) + +- [vm.swap_info](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.swap_info.md) + +- [vm.vmtotal](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.vmtotal.md) + +- [zfs](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/zfs.md) + +### FTP Servers + +- [ProFTPD](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/proftpd.md) + +### Gaming + +- [BungeeCord](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/bungeecord.md) + +- [CS:GO](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cs:go.md) + +- [Minecraft](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/minecraft.md) + +- [OpenRCT2](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openrct2.md) + +- [SpigotMC](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md) + +- [Steam](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/steam.md) + +### Generic Data Collection + +- [Custom 
Exporter](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/custom_exporter.md) + +- [Excel spreadsheet](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/excel_spreadsheet.md) + +- [Generic Command Line Output](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/generic_command_line_output.md) + +- [JetBrains Floating License Server](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/jetbrains_floating_license_server.md) + +- [OpenWeatherMap](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openweathermap.md) + +- [Pandas](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/pandas/integrations/pandas.md) + +- [Prometheus endpoint](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/prometheus_endpoint.md) + +- [SNMP devices](https://github.com/netdata/go.d.plugin/blob/master/modules/snmp/integrations/snmp_devices.md) + +- [Shell command](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/shell_command.md) + +- [Tankerkoenig API](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/tankerkoenig_api.md) + +- [TwinCAT ADS Web Service](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/twincat_ads_web_service.md) + +### Hardware Devices and Sensors + +- [1-Wire Sensors](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md) + +- [AM2320](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/am2320/integrations/am2320.md) + +- [AMD CPU & GPU](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/amd_cpu_&_gpu.md) + +- [AMD GPU](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/amd_gpu.md) + +- [ARM HWCPipe](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/arm_hwcpipe.md) + +- [CUPS](https://github.com/netdata/netdata/blob/master/collectors/cups.plugin/integrations/cups.md) + +- [HDD temperature](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md) + +- [HP iLO](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hp_ilo.md) + +- [IBM CryptoExpress (CEX) cards](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md) + +- [IBM Z Hardware Management Console](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ibm_z_hardware_management_console.md) + +- [IPMI (By SoundCloud)](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ipmi_by_soundcloud.md) + +- [Intelligent Platform Management Interface (IPMI)](https://github.com/netdata/netdata/blob/master/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md) + +- [Linux Sensors (lm-sensors)](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md) + +- [Linux Sensors (sysfs)](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md) + +- [NVML](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nvml.md) + +- [Nvidia 
GPU](https://github.com/netdata/go.d.plugin/blob/master/modules/nvidia_smi/integrations/nvidia_gpu.md) + +- [Raritan PDU](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/raritan_pdu.md) + +- [S.M.A.R.T.](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md) + +- [ServerTech](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/servertech.md) + +- [Siemens S7 PLC](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/siemens_s7_plc.md) + +- [T-Rex NVIDIA GPU Miner](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md) + +### IoT Devices + +- [Airthings Waveplus air sensor](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/airthings_waveplus_air_sensor.md) + +- [Bobcat Miner 300](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/bobcat_miner_300.md) + +- [Christ Elektronik CLM5IP power panel](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md) + +- [CraftBeerPi](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/craftbeerpi.md) + +- [Dutch Electricity Smart Meter](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dutch_electricity_smart_meter.md) + +- [Elgato Key Light devices.](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/elgato_key_light_devices..md) + +- [Energomera smart power meters](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/energomera_smart_power_meters.md) + +- [Helium hotspot](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/helium_hotspot.md) + +- [Homebridge](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/homebridge.md) + +- [Homey](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/homey.md) + +- [Jarvis Standing Desk](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/jarvis_standing_desk.md) + +- [MP707 USB thermometer](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mp707_usb_thermometer.md) + +- [Modbus protocol](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/modbus_protocol.md) + +- [Monnit Sensors MQTT](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/monnit_sensors_mqtt.md) + +- [Nature Remo E lite devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nature_remo_e_lite_devices.md) + +- [Netatmo sensors](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/netatmo_sensors.md) + +- [OpenHAB](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openhab.md) + +- [Personal Weather Station](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/personal_weather_station.md) + +- [Philips Hue](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/philips_hue.md) + +- [Pimoroni Enviro+](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/pimoroni_enviro+.md) + +- [Powerpal devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/powerpal_devices.md) 
+ +- [Radio Thermostat](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/radio_thermostat.md) + +- [SMA Inverters](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sma_inverters.md) + +- [Salicru EQX inverter](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/salicru_eqx_inverter.md) + +- [Sense Energy](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sense_energy.md) + +- [Shelly humidity sensor](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/shelly_humidity_sensor.md) + +- [Smart meters SML](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/smart_meters_sml.md) + +- [Solar logging stick](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/solar_logging_stick.md) + +- [SolarEdge inverters](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/solaredge_inverters.md) + +- [Solis Ginlong 5G inverters](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/solis_ginlong_5g_inverters.md) + +- [Sunspec Solar Energy](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sunspec_solar_energy.md) + +- [TP-Link P110](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/tp-link_p110.md) + +- [Tado smart heating solution](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/tado_smart_heating_solution.md) + +- [Tesla Powerwall](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/tesla_powerwall.md) + +- [Tesla Wall Connector](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/tesla_wall_connector.md) + +- [Tesla vehicle](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/tesla_vehicle.md) + +- [Xiaomi Mi Flora](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/xiaomi_mi_flora.md) + +- [iqAir AirVisual air quality monitors](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md) + +### Kubernetes + +- [Cilium Agent](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cilium_agent.md) + +- [Cilium Operator](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cilium_operator.md) + +- [Cilium Proxy](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cilium_proxy.md) + +- [Kubelet](https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubelet/integrations/kubelet.md) + +- [Kubeproxy](https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubeproxy/integrations/kubeproxy.md) + +- [Kubernetes Cluster Cloud Cost](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md) + +- [Kubernetes Cluster State](https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_state/integrations/kubernetes_cluster_state.md) + +- [Kubernetes Containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/kubernetes_containers.md) + +- [Rancher](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/rancher.md) + +### Linux Systems + +- [CPU 
performance](https://github.com/netdata/netdata/blob/master/collectors/perf.plugin/integrations/cpu_performance.md) + +- [Disk space](https://github.com/netdata/netdata/blob/master/collectors/diskspace.plugin/integrations/disk_space.md) + +- [Files and directories](https://github.com/netdata/go.d.plugin/blob/master/modules/filecheck/integrations/files_and_directories.md) + +- [OpenRC](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openrc.md) + +#### CPU + +- [Interrupts](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/interrupts.md) + +- [SoftIRQ statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/softirq_statistics.md) + +#### Disk + +- [Disk Statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/disk_statistics.md) + +- [MD RAID](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/md_raid.md) + +##### BTRFS + +- [BTRFS](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/btrfs.md) + +##### NFS + +- [NFS Client](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/nfs_client.md) + +- [NFS Server](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/nfs_server.md) + +##### ZFS + +- [ZFS Adaptive Replacement Cache](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md) + +- [ZFS Pools](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/zfs_pools.md) + +#### Firewall + +- [Conntrack](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/conntrack.md) + +- [Netfilter](https://github.com/netdata/netdata/blob/master/collectors/nfacct.plugin/integrations/netfilter.md) + +- [Synproxy](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/synproxy.md) + +- [nftables](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nftables.md) + +#### IPC + +- [Inter Process Communication](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/inter_process_communication.md) + +#### Kernel + +- [Linux kernel SLAB allocator statistics](https://github.com/netdata/netdata/blob/master/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md) + +- [Power Capping](https://github.com/netdata/netdata/blob/master/collectors/debugfs.plugin/integrations/power_capping.md) + +#### Memory + +- [Kernel Same-Page Merging](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/kernel_same-page_merging.md) + +- [Linux ZSwap](https://github.com/netdata/netdata/blob/master/collectors/debugfs.plugin/integrations/linux_zswap.md) + +- [Memory Statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/memory_statistics.md) + +- [Memory Usage](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/memory_usage.md) + +- [Memory modules (DIMMs)](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/memory_modules_dimms.md) + +- [Non-Uniform Memory Access](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/non-uniform_memory_access.md) + +- [Page types](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/page_types.md) + +- [System Memory 
Fragmentation](https://github.com/netdata/netdata/blob/master/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md) + +- [ZRAM](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/zram.md) + +#### Network + +- [Access Points](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/ap/integrations/access_points.md) + +- [IP Virtual Server](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/ip_virtual_server.md) + +- [IPv6 Socket Statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/ipv6_socket_statistics.md) + +- [InfiniBand](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/infiniband.md) + +- [Network interfaces](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/network_interfaces.md) + +- [Network statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/network_statistics.md) + +- [SCTP Statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/sctp_statistics.md) + +- [Socket statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/socket_statistics.md) + +- [Softnet Statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/softnet_statistics.md) + +- [Wireless network interfaces](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/wireless_network_interfaces.md) + +- [tc QoS classes](https://github.com/netdata/netdata/blob/master/collectors/tc.plugin/integrations/tc_qos_classes.md) + +#### Power Supply + +- [Power Supply](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/power_supply.md) + +#### Pressure + +- [Pressure Stall Information](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/pressure_stall_information.md) + +#### System + +- [Entropy](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/entropy.md) + +- [System Load Average](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/system_load_average.md) + +- [System Uptime](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/system_uptime.md) + +- [System statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/system_statistics.md) + +### Logs Servers + +- [AuthLog](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/authlog.md) + +- [Fluentd](https://github.com/netdata/go.d.plugin/blob/master/modules/fluentd/integrations/fluentd.md) + +- [Graylog Server](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/graylog_server.md) + +- [Logstash](https://github.com/netdata/go.d.plugin/blob/master/modules/logstash/integrations/logstash.md) + +- [journald](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/journald.md) + +- [loki](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/loki.md) + +- [mtail](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mtail.md) + +### macOS Systems + +- [Apple Time Machine](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/apple_time_machine.md) + +- 
[macOS](https://github.com/netdata/netdata/blob/master/collectors/macos.plugin/integrations/macos.md) + +### Mail Servers + +- [DMARC](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dmarc.md) + +- [Dovecot](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/dovecot/integrations/dovecot.md) + +- [Exim](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/exim/integrations/exim.md) + +- [Halon](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/halon.md) + +- [Maildir](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/maildir.md) + +- [Postfix](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/postfix/integrations/postfix.md) + +### Media Services + +- [Discourse](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/discourse.md) + +- [Icecast](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/icecast/integrations/icecast.md) + +- [OBS Studio](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/obs_studio.md) + +- [RetroShare](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/retroshare/integrations/retroshare.md) + +- [SABnzbd](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sabnzbd.md) + +- [Stream](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/stream.md) + +- [Twitch](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/twitch.md) + +- [Zulip](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/zulip.md) + +### Message Brokers + +- [ActiveMQ](https://github.com/netdata/go.d.plugin/blob/master/modules/activemq/integrations/activemq.md) + +- [Apache Pulsar](https://github.com/netdata/go.d.plugin/blob/master/modules/pulsar/integrations/apache_pulsar.md) + +- [Beanstalk](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md) + +- [IBM MQ](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ibm_mq.md) + +- [Kafka Connect](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/kafka_connect.md) + +- [Kafka ZooKeeper](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/kafka_zookeeper.md) + +- [Kafka](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/kafka.md) + +- [MQTT Blackbox](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mqtt_blackbox.md) + +- [RabbitMQ](https://github.com/netdata/go.d.plugin/blob/master/modules/rabbitmq/integrations/rabbitmq.md) + +- [Redis Queue](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/redis_queue.md) + +- [VerneMQ](https://github.com/netdata/go.d.plugin/blob/master/modules/vernemq/integrations/vernemq.md) + +- [XMPP Server](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/xmpp_server.md) + +- [mosquitto](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mosquitto.md) + +### Networking Stack and Network Interfaces + +- [8430FT modem](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/8430ft_modem.md) + +- [A10 ACOS network 
devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/a10_acos_network_devices.md) + +- [Andrews & Arnold line status](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/andrews_&_arnold_line_status.md) + +- [Aruba devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aruba_devices.md) + +- [Bird Routing Daemon](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/bird_routing_daemon.md) + +- [Checkpoint device](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/checkpoint_device.md) + +- [Cisco ACI](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cisco_aci.md) + +- [Citrix NetScaler](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/citrix_netscaler.md) + +- [DDWRT Routers](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ddwrt_routers.md) + +- [FRRouting](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/frrouting.md) + +- [Fortigate firewall](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/fortigate_firewall.md) + +- [Freifunk network](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/freifunk_network.md) + +- [Fritzbox network devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/fritzbox_network_devices.md) + +- [Hitron CGN series CPE](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hitron_cgn_series_cpe.md) + +- [Hitron CODA Cable Modem](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hitron_coda_cable_modem.md) + +- [Huawei devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/huawei_devices.md) + +- [Keepalived](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/keepalived.md) + +- [Meraki dashboard](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/meraki_dashboard.md) + +- [MikroTik devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mikrotik_devices.md) + +- [Mikrotik RouterOS devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mikrotik_routeros_devices.md) + +- [NetFlow](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/netflow.md) + +- [NetMeter](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/netmeter.md) + +- [Open vSwitch](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/open_vswitch.md) + +- [OpenROADM devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openroadm_devices.md) + +- [RIPE Atlas](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ripe_atlas.md) + +- [SONiC NOS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sonic_nos.md) + +- [SmartRG 808AC Cable Modem](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/smartrg_808ac_cable_modem.md) + +- [Starlink (SpaceX)](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/starlink_spacex.md) + +- [Traceroute](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/traceroute.md) 
+ +- [Ubiquiti UFiber OLT](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ubiquiti_ufiber_olt.md) + +- [Zyxel GS1200-8](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/zyxel_gs1200-8.md) + +### Incident Management + +- [OTRS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/otrs.md) + +- [StatusPage](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/statuspage.md) + +### Observability + +- [Collectd](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/collectd.md) + +- [Dynatrace](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dynatrace.md) + +- [Grafana](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/grafana.md) + +- [Hubble](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hubble.md) + +- [Naemon](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/naemon.md) + +- [Nagios](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nagios.md) + +- [New Relic](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/new_relic.md) + +### Other + +- [Example collector](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/example/integrations/example_collector.md) + +- [GitHub API rate limit](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/github_api_rate_limit.md) + +- [GitHub repository](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/github_repository.md) + +- [Netdata Agent alarms](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md) + +- [python.d changefinder](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md) + +- [python.d zscores](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md) + +### Processes and System Services + +- [Applications](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/integrations/applications.md) + +- [Supervisor](https://github.com/netdata/go.d.plugin/blob/master/modules/supervisord/integrations/supervisor.md) + +- [User Groups](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/integrations/user_groups.md) + +- [Users](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/integrations/users.md) + +### Provisioning Systems + +- [BOSH](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/bosh.md) + +- [Cloud Foundry Firehose](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cloud_foundry_firehose.md) + +- [Cloud Foundry](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cloud_foundry.md) + +- [Spacelift](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/spacelift.md) + +### Search Engines + +- [Elasticsearch](https://github.com/netdata/go.d.plugin/blob/master/modules/elasticsearch/integrations/elasticsearch.md) + +- [Meilisearch](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/meilisearch.md) + +- [OpenSearch](https://github.com/netdata/go.d.plugin/blob/master/modules/elasticsearch/integrations/opensearch.md) 
+ +- [Solr](https://github.com/netdata/go.d.plugin/blob/master/modules/solr/integrations/solr.md) + +- [Sphinx](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sphinx.md) + +### Security Systems + +- [Certificate Transparency](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/certificate_transparency.md) + +- [ClamAV daemon](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/clamav_daemon.md) + +- [Clamscan results](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/clamscan_results.md) + +- [Crowdsec](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/crowdsec.md) + +- [Honeypot](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/honeypot.md) + +- [Lynis audit reports](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/lynis_audit_reports.md) + +- [OpenVAS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openvas.md) + +- [SSL Certificate](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ssl_certificate.md) + +- [Suricata](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/suricata.md) + +- [Vault PKI](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/vault_pki.md) + +### Service Discovery / Registry + +- [Consul](https://github.com/netdata/go.d.plugin/blob/master/modules/consul/integrations/consul.md) + +- [Kafka Consumer Lag](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/kafka_consumer_lag.md) + +- [ZooKeeper](https://github.com/netdata/go.d.plugin/blob/master/modules/zookeeper/integrations/zookeeper.md) + +- [etcd](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/etcd.md) + +### Storage, Mount Points and Filesystems + +- [AdaptecRAID](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md) + +- [Altaro Backup](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/altaro_backup.md) + +- [Borg backup](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/borg_backup.md) + +- [CVMFS clients](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cvmfs_clients.md) + +- [Ceph](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/ceph/integrations/ceph.md) + +- [Dell EMC Isilon cluster](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dell_emc_isilon_cluster.md) + +- [Dell EMC ScaleIO](https://github.com/netdata/go.d.plugin/blob/master/modules/scaleio/integrations/dell_emc_scaleio.md) + +- [Dell EMC XtremIO cluster](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dell_emc_xtremio_cluster.md) + +- [Dell PowerMax](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dell_powermax.md) + +- [EOS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/eos.md) + +- [Generic storage enclosure tool](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/generic_storage_enclosure_tool.md) + +- [HDSentinel](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hdsentinel.md) + +- [HP Smart Storage 
Arrays](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md) + +- [Hadoop Distributed File System (HDFS)](https://github.com/netdata/go.d.plugin/blob/master/modules/hdfs/integrations/hadoop_distributed_file_system_hdfs.md) + +- [IBM Spectrum Virtualize](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ibm_spectrum_virtualize.md) + +- [IBM Spectrum](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ibm_spectrum.md) + +- [IPFS](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/ipfs/integrations/ipfs.md) + +- [Lagerist Disk latency](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/lagerist_disk_latency.md) + +- [MegaCLI](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/megacli/integrations/megacli.md) + +- [MogileFS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mogilefs.md) + +- [NVMe devices](https://github.com/netdata/go.d.plugin/blob/master/modules/nvme/integrations/nvme_devices.md) + +- [NetApp Solidfire](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/netapp_solidfire.md) + +- [Netapp ONTAP API](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/netapp_ontap_api.md) + +- [Samba](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/samba/integrations/samba.md) + +- [Starwind VSAN VSphere Edition](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md) + +- [Storidge](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/storidge.md) + +- [Synology ActiveBackup](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/synology_activebackup.md) + +### Synthetic Checks + +- [Blackbox](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/blackbox.md) + +- [Domain expiration date](https://github.com/netdata/go.d.plugin/blob/master/modules/whoisquery/integrations/domain_expiration_date.md) + +- [HTTP Endpoints](https://github.com/netdata/go.d.plugin/blob/master/modules/httpcheck/integrations/http_endpoints.md) + +- [IOPing](https://github.com/netdata/netdata/blob/master/collectors/ioping.plugin/integrations/ioping.md) + +- [Idle OS Jitter](https://github.com/netdata/netdata/blob/master/collectors/idlejitter.plugin/integrations/idle_os_jitter.md) + +- [Monit](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/monit/integrations/monit.md) + +- [Ping](https://github.com/netdata/go.d.plugin/blob/master/modules/ping/integrations/ping.md) + +- [Pingdom](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/pingdom.md) + +- [Site 24x7](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/site_24x7.md) + +- [TCP Endpoints](https://github.com/netdata/go.d.plugin/blob/master/modules/portcheck/integrations/tcp_endpoints.md) + +- [Uptimerobot](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/uptimerobot.md) + +- [X.509 certificate](https://github.com/netdata/go.d.plugin/blob/master/modules/x509check/integrations/x.509_certificate.md) + +### System Clock and NTP + +- [Chrony](https://github.com/netdata/go.d.plugin/blob/master/modules/chrony/integrations/chrony.md) + +- 
[NTPd](https://github.com/netdata/go.d.plugin/blob/master/modules/ntpd/integrations/ntpd.md) + +- [Timex](https://github.com/netdata/netdata/blob/master/collectors/timex.plugin/integrations/timex.md) + +### Systemd + +- [Systemd Services](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/systemd_services.md) + +- [Systemd Units](https://github.com/netdata/go.d.plugin/blob/master/modules/systemdunits/integrations/systemd_units.md) + +- [systemd-logind users](https://github.com/netdata/go.d.plugin/blob/master/modules/logind/integrations/systemd-logind_users.md) + +### Task Queues + +- [Celery](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/celery.md) + +- [Mesos](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mesos.md) + +- [Slurm](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/slurm.md) + +### Telephony Servers + +- [GTP](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/gtp.md) + +- [Kannel](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/kannel.md) + +- [OpenSIPS](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/opensips/integrations/opensips.md) + +### UPS + +- [APC UPS](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md) + +- [Eaton UPS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/eaton_ups.md) + +- [UPS (NUT)](https://github.com/netdata/go.d.plugin/blob/master/modules/upsd/integrations/ups_nut.md) + +### VPNs + +- [Fastd](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/fastd.md) + +- [Libreswan](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/libreswan/integrations/libreswan.md) + +- [OpenVPN status log](https://github.com/netdata/go.d.plugin/blob/master/modules/openvpn_status_log/integrations/openvpn_status_log.md) + +- [OpenVPN](https://github.com/netdata/go.d.plugin/blob/master/modules/openvpn/integrations/openvpn.md) + +- [SoftEther VPN Server](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/softether_vpn_server.md) + +- [Speedify CLI](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/speedify_cli.md) + +- [Tor](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/tor/integrations/tor.md) + +- [WireGuard](https://github.com/netdata/go.d.plugin/blob/master/modules/wireguard/integrations/wireguard.md) + +- [strongSwan](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/strongswan.md) + +### Web Servers and Web Proxies + +- [APIcast](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/apicast.md) + +- [Apache](https://github.com/netdata/go.d.plugin/blob/master/modules/apache/integrations/apache.md) + +- [Clash](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/clash.md) + +- [Cloudflare PCAP](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cloudflare_pcap.md) + +- [Envoy](https://github.com/netdata/go.d.plugin/blob/master/modules/envoy/integrations/envoy.md) + +- [Gobetween](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/gobetween.md) + +- [HAProxy](https://github.com/netdata/go.d.plugin/blob/master/modules/haproxy/integrations/haproxy.md) + +- 
[HHVM](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hhvm.md) + +- [HTTPD](https://github.com/netdata/go.d.plugin/blob/master/modules/apache/integrations/httpd.md) + +- [Lighttpd](https://github.com/netdata/go.d.plugin/blob/master/modules/lighttpd/integrations/lighttpd.md) + +- [Litespeed](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/litespeed/integrations/litespeed.md) + +- [NGINX Plus](https://github.com/netdata/go.d.plugin/blob/master/modules/nginxplus/integrations/nginx_plus.md) + +- [NGINX VTS](https://github.com/netdata/go.d.plugin/blob/master/modules/nginxvts/integrations/nginx_vts.md) + +- [NGINX](https://github.com/netdata/go.d.plugin/blob/master/modules/nginx/integrations/nginx.md) + +- [PHP-FPM](https://github.com/netdata/go.d.plugin/blob/master/modules/phpfpm/integrations/php-fpm.md) + +- [Squid log files](https://github.com/netdata/go.d.plugin/blob/master/modules/squidlog/integrations/squid_log_files.md) + +- [Squid](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/squid/integrations/squid.md) + +- [Tengine](https://github.com/netdata/go.d.plugin/blob/master/modules/tengine/integrations/tengine.md) + +- [Tomcat](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/tomcat/integrations/tomcat.md) + +- [Traefik](https://github.com/netdata/go.d.plugin/blob/master/modules/traefik/integrations/traefik.md) + +- [Varnish](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/varnish/integrations/varnish.md) + +- [Web server log files](https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/integrations/web_server_log_files.md) + +- [uWSGI](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md) + +### Windows Systems + +- [Active Directory](https://github.com/netdata/go.d.plugin/blob/master/modules/windows/integrations/active_directory.md) + +- [HyperV](https://github.com/netdata/go.d.plugin/blob/master/modules/windows/integrations/hyperv.md) + +- [MS Exchange](https://github.com/netdata/go.d.plugin/blob/master/modules/windows/integrations/ms_exchange.md) + +- [MS SQL Server](https://github.com/netdata/go.d.plugin/blob/master/modules/windows/integrations/ms_sql_server.md) + +- [NET Framework](https://github.com/netdata/go.d.plugin/blob/master/modules/windows/integrations/net_framework.md) + +- [Windows](https://github.com/netdata/go.d.plugin/blob/master/modules/windows/integrations/windows.md) diff --git a/collectors/Makefile.am b/collectors/Makefile.am index d477e5b80ec6e5..1bbb2e0ef8e228 100644 --- a/collectors/Makefile.am +++ b/collectors/Makefile.am @@ -15,6 +15,7 @@ SUBDIRS = \ freebsd.plugin \ freeipmi.plugin \ idlejitter.plugin \ + log2journal \ macos.plugin \ nfacct.plugin \ xenstat.plugin \ diff --git a/collectors/all.h b/collectors/all.h index 22b75aaaa74ef7..38241dfa982a81 100644 --- a/collectors/all.h +++ b/collectors/all.h @@ -266,65 +266,76 @@ // IP STACK -#define NETDATA_CHART_PRIO_IP_ERRORS 4100 -#define NETDATA_CHART_PRIO_IP_TCP_CONNABORTS 4210 -#define NETDATA_CHART_PRIO_IP_TCP_SYN_QUEUE 4215 -#define NETDATA_CHART_PRIO_IP_TCP_ACCEPT_QUEUE 4216 -#define NETDATA_CHART_PRIO_IP_TCP_REORDERS 4220 -#define NETDATA_CHART_PRIO_IP_TCP_OFO 4250 -#define NETDATA_CHART_PRIO_IP_TCP_SYNCOOKIES 4260 -#define NETDATA_CHART_PRIO_IP_TCP_MEM 4290 -#define NETDATA_CHART_PRIO_IP_BCAST 4500 -#define NETDATA_CHART_PRIO_IP_BCAST_PACKETS 4510 -#define NETDATA_CHART_PRIO_IP_MCAST 4600 -#define 
NETDATA_CHART_PRIO_IP_MCAST_PACKETS 4610 -#define NETDATA_CHART_PRIO_IP_ECN 4700 +#define NETDATA_CHART_PRIO_IP_TCP_PACKETS 4200 +#define NETDATA_CHART_PRIO_IP_TCP_ERRORS 4210 +#define NETDATA_CHART_PRIO_IP_TCP_ESTABLISHED_CONNS 4220 +#define NETDATA_CHART_PRIO_IP_TCP_OPENS 4220 +#define NETDATA_CHART_PRIO_IP_TCP_HANDSHAKE 4230 +#define NETDATA_CHART_PRIO_IP_TCP_CONNABORTS 4240 +#define NETDATA_CHART_PRIO_IP_TCP_SYN_QUEUE 4250 +#define NETDATA_CHART_PRIO_IP_TCP_ACCEPT_QUEUE 4260 +#define NETDATA_CHART_PRIO_IP_TCP_REORDERS 4270 +#define NETDATA_CHART_PRIO_IP_TCP_OFO 4280 +#define NETDATA_CHART_PRIO_IP_TCP_SYNCOOKIES 4290 +#define NETDATA_CHART_PRIO_IP_TCP_MEM_PRESSURE 4300 +#define NETDATA_CHART_PRIO_IP_SOCKETS 4310 // IPv4 -#define NETDATA_CHART_PRIO_IPV4_SOCKETS 5100 -#define NETDATA_CHART_PRIO_IPV4_PACKETS 5130 -#define NETDATA_CHART_PRIO_IPV4_ERRORS 5150 -#define NETDATA_CHART_PRIO_IPV4_ICMP 5170 -#define NETDATA_CHART_PRIO_IPV4_TCP 5200 -#define NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS 5201 -#define NETDATA_CHART_PRIO_IPV4_TCP_MEM 5290 -#define NETDATA_CHART_PRIO_IPV4_UDP 5300 -#define NETDATA_CHART_PRIO_IPV4_UDP_MEM 5390 -#define NETDATA_CHART_PRIO_IPV4_UDPLITE 5400 +#define NETDATA_CHART_PRIO_IPV4_PACKETS 5000 +#define NETDATA_CHART_PRIO_IPV4_ERRORS 5050 +#define NETDATA_CHART_PRIO_IPV4_BCAST 5100 +#define NETDATA_CHART_PRIO_IPV4_BCAST_PACKETS 5105 +#define NETDATA_CHART_PRIO_IPV4_MCAST 5150 +#define NETDATA_CHART_PRIO_IPV4_MCAST_PACKETS 5155 +#define NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS 5180 +#define NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS_MEM 5185 +#define NETDATA_CHART_PRIO_IPV4_ICMP_PACKETS 5200 +#define NETDATA_CHART_PRIO_IPV4_ICMP_MESSAGES 5205 +#define NETDATA_CHART_PRIO_IPV4_ICMP_ERRORS 5210 +#define NETDATA_CHART_PRIO_IPV4_UDP_PACKETS 5250 +#define NETDATA_CHART_PRIO_IPV4_UDP_ERRORS 5255 +#define NETDATA_CHART_PRIO_IPV4_UDP_SOCKETS 5260 +#define NETDATA_CHART_PRIO_IPV4_UDP_SOCKETS_MEM 5265 +#define NETDATA_CHART_PRIO_IPV4_UDPLITE_PACKETS 5300 +#define NETDATA_CHART_PRIO_IPV4_UDPLITE_ERRORS 5305 +#define NETDATA_CHART_PRIO_IPV4_UDPLITE_SOCKETS 5310 +#define NETDATA_CHART_PRIO_IPV4_ECN 5350 +#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS_IN 5400 +#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS_OUT 5405 +#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS_SOCKETS 5410 +#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS_SOCKETS_MEM 5415 #define NETDATA_CHART_PRIO_IPV4_RAW 5450 -#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS 5460 -#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS_MEM 5470 // IPv6 - -#define NETDATA_CHART_PRIO_IPV6_PACKETS 6200 -#define NETDATA_CHART_PRIO_IPV6_ECT 6210 -#define NETDATA_CHART_PRIO_IPV6_ERRORS 6300 -#define NETDATA_CHART_PRIO_IPV6_FRAGMENTS 6400 -#define NETDATA_CHART_PRIO_IPV6_FRAGSOUT 6401 -#define NETDATA_CHART_PRIO_IPV6_FRAGSIN 6402 -#define NETDATA_CHART_PRIO_IPV6_TCP 6500 -#define NETDATA_CHART_PRIO_IPV6_UDP 6600 -#define NETDATA_CHART_PRIO_IPV6_UDP_PACKETS 6601 -#define NETDATA_CHART_PRIO_IPV6_UDP_ERRORS 6610 -#define NETDATA_CHART_PRIO_IPV6_UDPLITE 6700 -#define NETDATA_CHART_PRIO_IPV6_UDPLITE_PACKETS 6701 -#define NETDATA_CHART_PRIO_IPV6_UDPLITE_ERRORS 6710 -#define NETDATA_CHART_PRIO_IPV6_RAW 6800 -#define NETDATA_CHART_PRIO_IPV6_BCAST 6840 -#define NETDATA_CHART_PRIO_IPV6_MCAST 6850 -#define NETDATA_CHART_PRIO_IPV6_MCAST_PACKETS 6851 -#define NETDATA_CHART_PRIO_IPV6_ICMP 6900 -#define NETDATA_CHART_PRIO_IPV6_ICMP_REDIR 6910 -#define NETDATA_CHART_PRIO_IPV6_ICMP_ERRORS 6920 -#define NETDATA_CHART_PRIO_IPV6_ICMP_ECHOS 6930 -#define NETDATA_CHART_PRIO_IPV6_ICMP_GROUPMEMB 6940 -#define 
NETDATA_CHART_PRIO_IPV6_ICMP_ROUTER 6950 -#define NETDATA_CHART_PRIO_IPV6_ICMP_NEIGHBOR 6960 -#define NETDATA_CHART_PRIO_IPV6_ICMP_LDV2 6970 -#define NETDATA_CHART_PRIO_IPV6_ICMP_TYPES 6980 - +#define NETDATA_CHART_PRIO_IPV6_PACKETS 6000 +#define NETDATA_CHART_PRIO_IPV6_ERRORS 6005 +#define NETDATA_CHART_PRIO_IPV6_BCAST 6050 +#define NETDATA_CHART_PRIO_IPV6_MCAST 6100 +#define NETDATA_CHART_PRIO_IPV6_MCAST_PACKETS 6105 +#define NETDATA_CHART_PRIO_IPV6_TCP_SOCKETS 6140 +#define NETDATA_CHART_PRIO_IPV6_ICMP 6150 +#define NETDATA_CHART_PRIO_IPV6_ICMP_REDIR 6155 +#define NETDATA_CHART_PRIO_IPV6_ICMP_ERRORS 6160 +#define NETDATA_CHART_PRIO_IPV6_ICMP_ECHOS 6165 +#define NETDATA_CHART_PRIO_IPV6_ICMP_GROUPMEMB 6170 +#define NETDATA_CHART_PRIO_IPV6_ICMP_ROUTER 6180 +#define NETDATA_CHART_PRIO_IPV6_ICMP_NEIGHBOR 6185 +#define NETDATA_CHART_PRIO_IPV6_ICMP_LDV2 6190 +#define NETDATA_CHART_PRIO_IPV6_ICMP_TYPES 6195 +#define NETDATA_CHART_PRIO_IPV6_UDP 6200 +#define NETDATA_CHART_PRIO_IPV6_UDP_PACKETS 6205 +#define NETDATA_CHART_PRIO_IPV6_UDP_ERRORS 6210 +#define NETDATA_CHART_PRIO_IPV6_UDP_SOCKETS 6215 +#define NETDATA_CHART_PRIO_IPV6_UDPLITE 6250 +#define NETDATA_CHART_PRIO_IPV6_UDPLITE_PACKETS 6255 +#define NETDATA_CHART_PRIO_IPV6_UDPLITE_ERRORS 6260 +#define NETDATA_CHART_PRIO_IPV6_UDPLITE_SOCKETS 6265 +#define NETDATA_CHART_PRIO_IPV6_ECT 6300 +#define NETDATA_CHART_PRIO_IPV6_FRAGSIN 6350 +#define NETDATA_CHART_PRIO_IPV6_FRAGSOUT 6355 +#define NETDATA_CHART_PRIO_IPV6_FRAGMENTS_SOCKETS 6360 +#define NETDATA_CHART_PRIO_IPV6_RAW_SOCKETS 6400 // Network interfaces @@ -390,6 +401,11 @@ #define NETDATA_CHART_PRIO_STATSD_PRIVATE 90000 // many charts +// Logs Management + +#define NETDATA_CHART_PRIO_LOGS_BASE 95000 // many charts +#define NETDATA_CHART_PRIO_LOGS_STATS_BASE 160000 // logsmanagement stats in "Netdata Monitoring" + // PCI #define NETDATA_CHART_PRIO_PCI_AER 100000 @@ -403,7 +419,8 @@ // [ml] charts #define ML_CHART_PRIO_DIMENSIONS 39181 #define ML_CHART_PRIO_ANOMALY_RATE 39182 -#define ML_CHART_PRIO_DETECTOR_EVENTS 39183 +#define ML_CHART_PRIO_TYPE_ANOMALY_RATE 39183 +#define ML_CHART_PRIO_DETECTOR_EVENTS 39184 // [netdata.ml] charts #define NETDATA_ML_CHART_RUNNING 890001 diff --git a/collectors/apps.plugin/apps_groups.conf b/collectors/apps.plugin/apps_groups.conf index 659bd0f0316260..195536a0abbd91 100644 --- a/collectors/apps.plugin/apps_groups.conf +++ b/collectors/apps.plugin/apps_groups.conf @@ -83,13 +83,15 @@ xenstat.plugin: xenstat.plugin perf.plugin: perf.plugin charts.d.plugin: *charts.d.plugin* python.d.plugin: *python.d.plugin* +systemd-journal.plugin:*systemd-journal.plugin* tc-qos-helper: *tc-qos-helper.sh* fping: fping ioping: ioping go.d.plugin: *go.d.plugin* -slabinfo.plugin: slabinfo.plugin +slabinfo.plugin: *slabinfo.plugin* ebpf.plugin: *ebpf.plugin* debugfs.plugin: *debugfs.plugin* +logs-management.plugin: *logs-management.plugin* # agent-service-discovery agent_sd: agent_sd @@ -136,7 +138,7 @@ modem: ModemManager netmanager: NetworkManager nm* systemd-networkd networkctl netplan connmand wicked* avahi-autoipd networkd-dispatcher firewall: firewalld ufw nft tor: tor -bluetooth: bluetooth bluez bluedevil obexd +bluetooth: bluetooth bluetoothd bluez bluedevil obexd # ----------------------------------------------------------------------------- # high availability and balancers @@ -159,7 +161,7 @@ chat: irssi *vines* *prosody* murmurd # ----------------------------------------------------------------------------- # monitoring -logs: ulogd* syslog* rsyslog* logrotate 
systemd-journald rotatelogs sysklogd metalog +logs: ulogd* syslog* rsyslog* logrotate *systemd-journal* rotatelogs sysklogd metalog nms: snmpd vnstatd smokeping zabbix* munin* mon openhpid tailon nrpe monit: monit splunk: splunkd @@ -209,7 +211,7 @@ proxmox-ve: pve* spiceproxy # ----------------------------------------------------------------------------- # containers & virtual machines -containers: lxc* docker* balena* +containers: lxc* docker* balena* containerd VMs: vbox* VBox* qemu* kvm* libvirt: virtlogd virtqemud virtstoraged virtnetworkd virtlockd virtinterfaced libvirt: virtnodedevd virtproxyd virtsecretd libvirtd @@ -238,7 +240,7 @@ dhcp: *dhcp* dhclient # ----------------------------------------------------------------------------- # name servers and clients -dns: named unbound nsd pdns_server knotd gdnsd yadifad dnsmasq systemd-resolve* pihole* avahi-daemon avahi-dnsconfd +dns: named unbound nsd pdns_server knotd gdnsd yadifad dnsmasq *systemd-resolve* pihole* avahi-daemon avahi-dnsconfd dnsdist: dnsdist # ----------------------------------------------------------------------------- @@ -271,7 +273,7 @@ backup: rsync lsyncd bacula* borg rclone # ----------------------------------------------------------------------------- # cron -cron: cron* atd anacron systemd-cron* incrond +cron: cron* atd anacron *systemd-cron* incrond # ----------------------------------------------------------------------------- # UPS @@ -319,7 +321,7 @@ airflow: *airflow* # ----------------------------------------------------------------------------- # GUI -X: X Xorg xinit xdm Xwayland xsettingsd +X: X Xorg xinit xdm Xwayland xsettingsd touchegg wayland: swaylock swayidle waypipe wayvnc kde: *kdeinit* kdm sddm plasmashell startplasma-* kwin* kwallet* krunner kactivitymanager* gnome: gnome-* gdm gconf* mutter @@ -353,11 +355,11 @@ kswapd: kswapd zswap: zswap kcompactd: kcompactd -system: systemd-* udisks* udevd* *udevd ipv6_addrconf dbus-* rtkit* +system: systemd* udisks* udevd* *udevd ipv6_addrconf dbus-* rtkit* system: mdadm acpid uuidd upowerd elogind* eudev mdev lvmpolld dmeventd system: accounts-daemon rngd haveged rasdaemon irqbalance start-stop-daemon system: supervise-daemon openrc* init runit runsvdir runsv auditd lsmd -system: abrt* nscd rtkit-daemon gpg-agent usbguard* +system: abrt* nscd rtkit-daemon gpg-agent usbguard* boltd geoclue kernel: kworker kthreadd kauditd lockd khelper kdevtmpfs khungtaskd rpciod kernel: fsnotify_mark kthrotld deferwq scsi_* kdmflush oom_reaper kdevtempfs @@ -380,6 +382,7 @@ rabbitmq: *rabbitmq* sidekiq: *sidekiq* java: java ipfs: ipfs +erlang: beam.smp node: node factorio: factorio diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c index d25ae3f9bd2148..ecfea1f6f7095e 100644 --- a/collectors/apps.plugin/apps_plugin.c +++ b/collectors/apps.plugin/apps_plugin.c @@ -265,10 +265,12 @@ struct target { uint32_t idhash; char name[MAX_NAME + 1]; - + char clean_name[MAX_NAME + 1]; // sanitized name used in chart id (need to replace at least dots) uid_t uid; gid_t gid; + bool is_other; + kernel_uint_t minflt; kernel_uint_t cminflt; kernel_uint_t majflt; @@ -782,7 +784,8 @@ static struct target *get_users_target(uid_t uid) { snprintfz(w->name, MAX_NAME, "%s", pw->pw_name); } - netdata_fix_chart_name(w->name); + strncpyz(w->clean_name, w->name, MAX_NAME); + netdata_fix_chart_name(w->clean_name); w->uid = uid; @@ -830,7 +833,8 @@ struct target *get_groups_target(gid_t gid) snprintfz(w->name, MAX_NAME, "%s", gr->gr_name); } - 
netdata_fix_chart_name(w->name); + strncpyz(w->clean_name, w->name, MAX_NAME); + netdata_fix_chart_name(w->clean_name); w->gid = gid; @@ -899,6 +903,14 @@ static struct target *get_apps_groups_target(const char *id, struct target *targ else // copy the id strncpyz(w->name, nid, MAX_NAME); + + // dots are used to distinguish chart type and id in streaming, so we should replace them + strncpyz(w->clean_name, w->name, MAX_NAME); + netdata_fix_chart_name(w->clean_name); + for (char *d = w->clean_name; *d; d++) { + if (*d == '.') + *d = '_'; + } strncpyz(w->compare, nid, MAX_COMPARE_NAME); size_t len = strlen(w->compare); @@ -997,6 +1009,7 @@ static int read_apps_groups_conf(const char *path, const char *file) apps_groups_default_target = get_apps_groups_target("p+!o@w#e$i^r&7*5(-i)l-o_", NULL, "other"); // match nothing if(!apps_groups_default_target) fatal("Cannot create default target"); + apps_groups_default_target->is_other = true; // allow the user to override group 'other' if(apps_groups_default_target->target) @@ -1457,17 +1470,17 @@ static inline int read_proc_pid_limits(struct pid_stat *p, void *ptr) { netdata_log_info( "FDS_LIMITS: PID %d (%s) is using " "%0.2f %% of its fds limits, " - "open fds = %llu (" - "files = %llu, " - "pipes = %llu, " - "sockets = %llu, " - "inotifies = %llu, " - "eventfds = %llu, " - "timerfds = %llu, " - "signalfds = %llu, " - "eventpolls = %llu " - "other = %llu " - "), open fds limit = %llu, " + "open fds = %"PRIu64 "(" + "files = %"PRIu64 ", " + "pipes = %"PRIu64 ", " + "sockets = %"PRIu64", " + "inotifies = %"PRIu64", " + "eventfds = %"PRIu64", " + "timerfds = %"PRIu64", " + "signalfds = %"PRIu64", " + "eventpolls = %"PRIu64" " + "other = %"PRIu64" " + "), open fds limit = %"PRIu64", " "%s, " "original line [%s]", p->pid, p->comm, p->openfds_limits_percent, all_fds, @@ -2460,7 +2473,7 @@ static inline int debug_print_process_and_parents(struct pid_stat *p, usec_t tim for(i = 0; i < indent ;i++) buffer[i] = ' '; buffer[i] = '\0'; - fprintf(stderr, " %s %s%s (%d %s %llu" + fprintf(stderr, " %s %s%s (%d %s %"PRIu64"" , buffer , prefix , p->comm @@ -3431,8 +3444,8 @@ static void calculate_netdata_statistics(void) { // ---------------------------------------------------------------------------- // update chart dimensions -static inline void send_BEGIN(const char *type, const char *id, usec_t usec) { - fprintf(stdout, "BEGIN %s.%s %llu\n", type, id, usec); +static inline void send_BEGIN(const char *type, const char *name,const char *metric, usec_t usec) { + fprintf(stdout, "BEGIN %s.%s_%s %" PRIu64 "\n", type, name, metric, usec); } static inline void send_SET(const char *name, kernel_uint_t value) { @@ -3440,7 +3453,7 @@ static inline void send_SET(const char *name, kernel_uint_t value) { } static inline void send_END(void) { - fprintf(stdout, "END\n"); + fprintf(stdout, "END\n\n"); } void send_resource_usage_to_netdata(usec_t dt) { @@ -3518,11 +3531,11 @@ void send_resource_usage_to_netdata(usec_t dt) { } fprintf(stdout, - "BEGIN netdata.apps_cpu %llu\n" - "SET user = %llu\n" - "SET system = %llu\n" + "BEGIN netdata.apps_cpu %"PRIu64"\n" + "SET user = %"PRIu64"\n" + "SET system = %"PRIu64"\n" "END\n" - "BEGIN netdata.apps_sizes %llu\n" + "BEGIN netdata.apps_sizes %"PRIu64"\n" "SET calls = %zu\n" "SET files = %zu\n" "SET filenames = %zu\n" @@ -3549,7 +3562,7 @@ void send_resource_usage_to_netdata(usec_t dt) { ); fprintf(stdout, - "BEGIN netdata.apps_fix %llu\n" + "BEGIN netdata.apps_fix %"PRIu64"\n" "SET utime = %u\n" "SET stime = %u\n" "SET gtime = %u\n" 
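Note on the format-specifier changes in the apps_plugin.c hunks above and below: the hand-written `%llu` conversions are swapped for the `PRIu64` macros from `<inttypes.h>` when printing 64-bit counters such as `kernel_uint_t` and `usec_t` values. A minimal standalone sketch of that idiom follows; the variable name is illustrative and not taken from apps_plugin.c:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint64_t open_fds = 42;   /* stands in for a kernel_uint_t counter */
        /* PRIu64 expands to the correct length modifier for uint64_t on each
           platform, whereas a hard-coded %llu assumes uint64_t is exactly
           unsigned long long. */
        printf("open fds = %" PRIu64 "\n", open_fds);
        return 0;
    }
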
@@ -3566,7 +3579,7 @@ void send_resource_usage_to_netdata(usec_t dt) { if(include_exited_childs) fprintf(stdout, - "BEGIN netdata.apps_children_fix %llu\n" + "BEGIN netdata.apps_children_fix %"PRIu64"\n" "SET cutime = %u\n" "SET cstime = %u\n" "SET cgtime = %u\n" @@ -3736,249 +3749,118 @@ static void normalize_utilization(struct target *root) { static void send_collected_data_to_netdata(struct target *root, const char *type, usec_t dt) { struct target *w; - send_BEGIN(type, "cpu", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, (kernel_uint_t)(w->utime * utime_fix_ratio) + (kernel_uint_t)(w->stime * stime_fix_ratio) + (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cutime * cutime_fix_ratio) + (kernel_uint_t)(w->cstime * cstime_fix_ratio) + (kernel_uint_t)(w->cgtime * cgtime_fix_ratio)):0ULL)); - } - send_END(); - - send_BEGIN(type, "cpu_user", dt); for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, (kernel_uint_t)(w->utime * utime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cutime * cutime_fix_ratio)):0ULL)); - } - send_END(); - - send_BEGIN(type, "cpu_system", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, (kernel_uint_t)(w->stime * stime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cstime * cstime_fix_ratio)):0ULL)); - } - send_END(); + if (unlikely(!w->exposed)) + continue; - if(show_guest_time) { - send_BEGIN(type, "cpu_guest", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cgtime * cgtime_fix_ratio)):0ULL)); - } + send_BEGIN(type, w->clean_name, "processes", dt); + send_SET("processes", w->processes); send_END(); - } -#ifndef __FreeBSD__ - send_BEGIN(type, "voluntary_ctxt_switches", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, w->status_voluntary_ctxt_switches); - } - send_END(); - - send_BEGIN(type, "involuntary_ctxt_switches", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, w->status_nonvoluntary_ctxt_switches); - } - send_END(); -#endif - - send_BEGIN(type, "threads", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, w->num_threads); - } - send_END(); - - send_BEGIN(type, "processes", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, w->processes); - } - send_END(); - -#ifndef __FreeBSD__ - send_BEGIN(type, "uptime", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, (global_uptime > w->starttime)?(global_uptime - w->starttime):0); - } - send_END(); - - if (enable_detailed_uptime_charts) { - send_BEGIN(type, "uptime_min", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, w->uptime_min); - } + send_BEGIN(type, w->clean_name, "threads", dt); + send_SET("threads", w->num_threads); send_END(); - send_BEGIN(type, "uptime_avg", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, w->uptime_sum / w->processes); - } - send_END(); + if (unlikely(!w->processes && !w->is_other)) + continue; - send_BEGIN(type, "uptime_max", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed 
&& w->processes)) - send_SET(w->name, w->uptime_max); - } + send_BEGIN(type, w->clean_name, "cpu_utilization", dt); + send_SET("user", (kernel_uint_t)(w->utime * utime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cutime * cutime_fix_ratio)) : 0ULL)); + send_SET("system", (kernel_uint_t)(w->stime * stime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cstime * cstime_fix_ratio)) : 0ULL)); send_END(); - } -#endif - - send_BEGIN(type, "mem", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, (w->status_vmrss > w->status_vmshared)?(w->status_vmrss - w->status_vmshared):0ULL); - } - send_END(); - - send_BEGIN(type, "rss", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, w->status_vmrss); - } - send_END(); - - send_BEGIN(type, "vmem", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, w->status_vmsize); - } - send_END(); - -#ifndef __FreeBSD__ - send_BEGIN(type, "swap", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, w->status_vmswap); - } - send_END(); -#endif - - send_BEGIN(type, "minor_faults", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, (kernel_uint_t)(w->minflt * minflt_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cminflt * cminflt_fix_ratio)):0ULL)); - } - send_END(); - - send_BEGIN(type, "major_faults", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, (kernel_uint_t)(w->majflt * majflt_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cmajflt * cmajflt_fix_ratio)):0ULL)); - } - send_END(); #ifndef __FreeBSD__ - send_BEGIN(type, "lreads", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, w->io_logical_bytes_read); - } - send_END(); - - send_BEGIN(type, "lwrites", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, w->io_logical_bytes_written); - } - send_END(); -#endif - - send_BEGIN(type, "preads", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, w->io_storage_bytes_read); - } - send_END(); - - send_BEGIN(type, "pwrites", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, w->io_storage_bytes_written); - } - send_END(); - - if(enable_file_charts) { - send_BEGIN(type, "fds_open_limit", dt); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) - send_SET(w->name, w->max_open_files_percent * 100.0); + if (enable_guest_charts) { + send_BEGIN(type, w->clean_name, "cpu_guest_utilization", dt); + send_SET("guest", (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs ? 
((kernel_uint_t)(w->cgtime * cgtime_fix_ratio)) : 0ULL)); + send_END(); } - send_END(); - send_BEGIN(type, "fds_open", dt); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) - send_SET(w->name, pid_openfds_sum(w)); - } + send_BEGIN(type, w->clean_name, "cpu_context_switches", dt); + send_SET("voluntary", w->status_voluntary_ctxt_switches); + send_SET("involuntary", w->status_nonvoluntary_ctxt_switches); send_END(); - send_BEGIN(type, "fds_files", dt); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) - send_SET(w->name, w->openfds.files); - } + send_BEGIN(type, w->clean_name, "mem_private_usage", dt); + send_SET("mem", (w->status_vmrss > w->status_vmshared)?(w->status_vmrss - w->status_vmshared) : 0ULL); send_END(); +#endif - send_BEGIN(type, "fds_sockets", dt); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) - send_SET(w->name, w->openfds.sockets); - } + send_BEGIN(type, w->clean_name, "mem_usage", dt); + send_SET("rss", w->status_vmrss); send_END(); - send_BEGIN(type, "fds_pipes", dt); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) - send_SET(w->name, w->openfds.pipes); - } + send_BEGIN(type, w->clean_name, "vmem_usage", dt); + send_SET("vmem", w->status_vmsize); send_END(); - send_BEGIN(type, "fds_inotifies", dt); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) - send_SET(w->name, w->openfds.inotifies); - } + send_BEGIN(type, w->clean_name, "mem_page_faults", dt); + send_SET("minor", (kernel_uint_t)(w->minflt * minflt_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cminflt * cminflt_fix_ratio)) : 0ULL)); + send_SET("major", (kernel_uint_t)(w->majflt * majflt_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cmajflt * cmajflt_fix_ratio)) : 0ULL)); send_END(); - send_BEGIN(type, "fds_eventfds", dt); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) - send_SET(w->name, w->openfds.eventfds); - } +#ifndef __FreeBSD__ + send_BEGIN(type, w->clean_name, "swap_usage", dt); + send_SET("swap", w->status_vmswap); send_END(); +#endif - send_BEGIN(type, "fds_timerfds", dt); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) - send_SET(w->name, w->openfds.timerfds); +#ifndef __FreeBSD__ + if (w->processes == 0) { + send_BEGIN(type, w->clean_name, "uptime", dt); + send_SET("uptime", 0); + send_END(); + + if (enable_detailed_uptime_charts) { + send_BEGIN(type, w->clean_name, "uptime_summary", dt); + send_SET("min", 0); + send_SET("avg", 0); + send_SET("max", 0); + send_END(); + } + } else { + send_BEGIN(type, w->clean_name, "uptime", dt); + send_SET("uptime", (global_uptime > w->starttime) ? (global_uptime - w->starttime) : 0); + send_END(); + + if (enable_detailed_uptime_charts) { + send_BEGIN(type, w->clean_name, "uptime_summary", dt); + send_SET("min", w->uptime_min); + send_SET("avg", w->processes > 0 ? 
w->uptime_sum / w->processes : 0);
+ send_SET("max", w->uptime_max);
+ send_END(); + } }
+#endif
- send_BEGIN(type, "fds_signalfds", dt);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes))
- send_SET(w->name, w->openfds.signalfds); - }
+ send_BEGIN(type, w->clean_name, "disk_physical_io", dt);
+ send_SET("reads", w->io_storage_bytes_read);
+ send_SET("writes", w->io_storage_bytes_written); send_END();
- send_BEGIN(type, "fds_eventpolls", dt);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes))
- send_SET(w->name, w->openfds.eventpolls); - }
+#ifndef __FreeBSD__
+ send_BEGIN(type, w->clean_name, "disk_logical_io", dt);
+ send_SET("reads", w->io_logical_bytes_read);
+ send_SET("writes", w->io_logical_bytes_written); send_END(); - -
- send_BEGIN(type, "fds_other", dt);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes))
- send_SET(w->name, w->openfds.other);
+#endif
+ if (enable_file_charts) {
+ send_BEGIN(type, w->clean_name, "fds_open_limit", dt);
+ send_SET("limit", w->max_open_files_percent * 100.0);
+ send_END(); + +
+ send_BEGIN(type, w->clean_name, "fds_open", dt);
+ send_SET("files", w->openfds.files);
+ send_SET("sockets", w->openfds.sockets);
+ send_SET("pipes", w->openfds.pipes);
+ send_SET("inotifies", w->openfds.inotifies);
+ send_SET("event", w->openfds.eventfds);
+ send_SET("timer", w->openfds.timerfds);
+ send_SET("signal", w->openfds.signalfds);
+ send_SET("eventpolls", w->openfds.eventpolls);
+ send_SET("other", w->openfds.other);
+ send_END(); } - send_END(); } } @@ -3986,312 +3868,146 @@ // ---------------------------------------------------------------------------- // generate the charts
-static void send_charts_updates_to_netdata(struct target *root, const char *type, const char *title)
+static void send_charts_updates_to_netdata(struct target *root, const char *type, const char *lbl_name, const char *title) { struct target *w;
- int newly_added = 0;
- for(w = root ; w ; w = w->next) {
- if (w->target) continue; - -
- if(unlikely(w->processes && (debug_enabled || w->debug_enabled))) {
- struct pid_on_target *pid_on_target; - -
- fprintf(stderr, "apps.plugin: target '%s' has aggregated %u process%s:", w->name, w->processes, (w->processes == 1)?"":"es"); - -
- for(pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) {
- fprintf(stderr, " %d", pid_on_target->pid);
+ if (debug_enabled) {
+ for (w = root; w; w = w->next) {
+ if (unlikely(!w->target && w->processes)) {
+ struct pid_on_target *pid_on_target;
+ fprintf(stderr, "apps.plugin: target '%s' has aggregated %u process(es):", w->name, w->processes);
+ for (pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) {
+ fprintf(stderr, " %d", pid_on_target->pid); + }
+ fputc('\n', stderr); }
- - fputc('\n', stderr); }
- - if (!w->exposed && w->processes) {
- newly_added++;
- w->exposed = 1;
- if (debug_enabled || w->debug_enabled)
- debug_log_int("%s just added - regenerating charts.", w->name); - } - } - -
- // nothing more to show
- if(!newly_added && show_guest_time == show_guest_time_old) return; - -
- // we have something new to show
- // update the charts
- fprintf(stdout, "CHART %s.cpu '' '%s CPU Time (100%% = 1 core)' 'percentage' cpu %s.cpu stacked 20001 %d\n", type, title, type, update_every);
- for (w = root; w ; w = w->next) {
- if(unlikely(w->exposed))
- fprintf(stdout, "DIMENSION %s '' absolute 1 
%llu %s\n", w->name, time_factor * RATES_DETAIL / 100, w->hidden ? "hidden" : ""); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.mem '' '%s Real Memory (w/o shared)' 'MiB' mem %s.mem stacked 20003 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.rss '' '%s Resident Set Size (w/shared)' 'MiB' mem %s.rss stacked 20004 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L); } - APPS_PLUGIN_FUNCTIONS(); - fprintf(stdout, "CHART %s.vmem '' '%s Virtual Memory Size' 'MiB' mem %s.vmem stacked 20005 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L); - } - APPS_PLUGIN_FUNCTIONS(); + for (w = root; w; w = w->next) { + if (likely(w->exposed || (!w->processes && !w->is_other))) + continue; - fprintf(stdout, "CHART %s.threads '' '%s Threads' 'threads' processes %s.threads stacked 20006 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); + w->exposed = 1; - fprintf(stdout, "CHART %s.processes '' '%s Processes' 'processes' processes %s.processes stacked 20007 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); + fprintf(stdout, "CHART %s.%s_cpu_utilization '' '%s CPU utilization (100%% = 1 core)' 'percentage' cpu %s.cpu_utilization stacked 20001 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION user '' absolute 1 %llu\n", time_factor * RATES_DETAIL / 100LLU); + fprintf(stdout, "DIMENSION system '' absolute 1 %llu\n", time_factor * RATES_DETAIL / 100LLU); #ifndef __FreeBSD__ - fprintf(stdout, "CHART %s.uptime '' '%s Carried Over Uptime' 'seconds' processes %s.uptime line 20008 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); - - if (enable_detailed_uptime_charts) { - fprintf(stdout, "CHART %s.uptime_min '' '%s Minimum Uptime' 'seconds' processes %s.uptime_min line 20009 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.uptime_avg '' '%s Average Uptime' 'seconds' processes %s.uptime_avg line 20010 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.uptime_max '' '%s Maximum Uptime' 'seconds' processes %s.uptime_max line 20011 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); - } + if 
(enable_guest_charts) { + fprintf(stdout, "CHART %s.%s_cpu_guest_utilization '' '%s CPU guest utilization (100%% = 1 core)' 'percentage' cpu %s.cpu_guest_utilization line 20005 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION guest '' absolute 1 %llu\n", time_factor * RATES_DETAIL / 100LLU); + } + + fprintf(stdout, "CHART %s.%s_cpu_context_switches '' '%s CPU context switches' 'switches/s' cpu %s.cpu_context_switches stacked 20010 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION voluntary '' absolute 1 %llu\n", RATES_DETAIL); + fprintf(stdout, "DIMENSION involuntary '' absolute 1 %llu\n", RATES_DETAIL); + + fprintf(stdout, "CHART %s.%s_mem_private_usage '' '%s memory usage without shared' 'MiB' mem %s.mem_private_usage area 20050 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION mem '' absolute %ld %ld\n", 1L, 1024L); #endif - fprintf(stdout, "CHART %s.cpu_user '' '%s CPU User Time (100%% = 1 core)' 'percentage' cpu %s.cpu_user stacked 20020 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU); - } - APPS_PLUGIN_FUNCTIONS(); + fprintf(stdout, "CHART %s.%s_mem_usage '' '%s memory RSS usage' 'MiB' mem %s.mem_usage area 20055 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION rss '' absolute %ld %ld\n", 1L, 1024L); - fprintf(stdout, "CHART %s.cpu_system '' '%s CPU System Time (100%% = 1 core)' 'percentage' cpu %s.cpu_system stacked 20021 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU); - } - APPS_PLUGIN_FUNCTIONS(); + fprintf(stdout, "CHART %s.%s_mem_page_faults '' '%s memory page faults' 'pgfaults/s' mem %s.mem_page_faults stacked 20060 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION major '' absolute 1 %llu\n", RATES_DETAIL); + fprintf(stdout, "DIMENSION minor '' absolute 1 %llu\n", RATES_DETAIL); - if(show_guest_time) { - fprintf(stdout, "CHART %s.cpu_guest '' '%s CPU Guest Time (100%% = 1 core)' 'percentage' cpu %s.cpu_guest stacked 20022 %d\n", type, title, type, update_every); - for (w = root; w; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU); - } - APPS_PLUGIN_FUNCTIONS(); - } + fprintf(stdout, "CHART %s.%s_vmem_usage '' '%s virtual memory size' 'MiB' mem %s.vmem_usage line 20065 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION vmem '' absolute %ld %ld\n", 1L, 1024L); #ifndef __FreeBSD__ - fprintf(stdout, "CHART %s.voluntary_ctxt_switches '' '%s Voluntary Context Switches' 'switches/s' cpu 
%s.voluntary_ctxt_switches stacked 20023 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.involuntary_ctxt_switches '' '%s Involuntary Context Switches' 'switches/s' cpu %s.involuntary_ctxt_switches stacked 20024 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); - } - APPS_PLUGIN_FUNCTIONS(); + fprintf(stdout, "CHART %s.%s_swap_usage '' '%s swap usage' 'MiB' mem %s.swap_usage area 20065 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION swap '' absolute %ld %ld\n", 1L, 1024L); #endif #ifndef __FreeBSD__ - fprintf(stdout, "CHART %s.swap '' '%s Swap Memory' 'MiB' swap %s.swap stacked 20011 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L); - } - APPS_PLUGIN_FUNCTIONS(); -#endif - - fprintf(stdout, "CHART %s.major_faults '' '%s Major Page Faults (swap read)' 'page faults/s' swap %s.major_faults stacked 20012 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.minor_faults '' '%s Minor Page Faults' 'page faults/s' mem %s.minor_faults stacked 20011 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); - } - APPS_PLUGIN_FUNCTIONS(); - -#ifdef __FreeBSD__ - // FIXME: same metric name as in Linux but different units. 
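
The per-target chart definitions added in this function pair with the send_BEGIN/send_SET/send_END calls earlier in the file: each app group now gets its own chart instance, identified by a CLABEL, instead of one shared chart with a dimension per group. A minimal, self-contained sketch of the plugin-protocol text this produces for a single group; the group name "netdata", the update interval and the divisor below are illustrative stand-ins, not values taken from this patch:

    #include <stdio.h>

    int main(void) {
        // illustrative stand-ins for the plugin's chart type, label name and target fields
        const char *type = "app", *lbl_name = "app_group", *name = "netdata", *clean_name = "netdata";
        int update_every = 1;                    // illustrative
        unsigned long long divisor = 10000ULL;   // stands in for time_factor * RATES_DETAIL / 100

        // chart definition, sent once when the target is first exposed
        printf("CHART %s.%s_cpu_utilization '' 'Apps CPU utilization (100%% = 1 core)' 'percentage' cpu %s.cpu_utilization stacked 20001 %d\n",
               type, clean_name, type, update_every);
        printf("CLABEL '%s' '%s' 1\n", lbl_name, name);
        printf("CLABEL_COMMIT\n");
        printf("DIMENSION user '' absolute 1 %llu\n", divisor);
        printf("DIMENSION system '' absolute 1 %llu\n", divisor);

        // data update, sent on every collection iteration
        printf("BEGIN %s.%s_cpu_utilization\n", type, clean_name);
        printf("SET user = 123456\n");
        printf("SET system = 65432\n");
        printf("END\n");
        return 0;
    }
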
- fprintf(stdout, "CHART %s.preads '' '%s Disk Reads' 'blocks/s' disk %s.preads stacked 20002 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.pwrites '' '%s Disk Writes' 'blocks/s' disk %s.pwrites stacked 20002 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); - } - APPS_PLUGIN_FUNCTIONS(); + fprintf(stdout, "CHART %s.%s_disk_physical_io '' '%s disk physical IO' 'KiB/s' disk %s.disk_physical_io area 20100 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION reads '' absolute 1 %llu\n", 1024LLU * RATES_DETAIL); + fprintf(stdout, "DIMENSION writes '' absolute -1 %llu\n", 1024LLU * RATES_DETAIL); + + fprintf(stdout, "CHART %s.%s_disk_logical_io '' '%s disk logical IO' 'KiB/s' disk %s.disk_logical_io area 20105 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION reads '' absolute 1 %llu\n", 1024LLU * RATES_DETAIL); + fprintf(stdout, "DIMENSION writes '' absolute -1 %llu\n", 1024LLU * RATES_DETAIL); #else - fprintf(stdout, "CHART %s.preads '' '%s Disk Reads' 'KiB/s' disk %s.preads stacked 20002 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.pwrites '' '%s Disk Writes' 'KiB/s' disk %s.pwrites stacked 20002 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.lreads '' '%s Disk Logical Reads' 'KiB/s' disk %s.lreads stacked 20042 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.lwrites '' '%s I/O Logical Writes' 'KiB/s' disk %s.lwrites stacked 20042 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); - } - APPS_PLUGIN_FUNCTIONS(); + fprintf(stdout, "CHART %s.%s_disk_physical_io '' '%s disk physical IO' 'blocks/s' disk %s.disk_physical_block_io area 20100 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION reads '' absolute 1 %llu\n", RATES_DETAIL); + fprintf(stdout, "DIMENSION writes '' absolute -1 %llu\n", RATES_DETAIL); #endif - if(enable_file_charts) { - fprintf(stdout, "CHART %s.fds_open_limit '' '%s Open File Descriptors Limit' '%%' fds %s.fds_open_limit line 20050 %d\n", type, - title, type, update_every); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 100\n", w->name); - } - 
APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.fds_open '' '%s Open File Descriptors' 'fds' fds %s.fds_open stacked 20051 %d\n", type, - title, type, update_every); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.fds_files '' '%s Open Files' 'fds' fds %s.fds_files stacked 20052 %d\n", type, - title, type, update_every); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.fds_sockets '' '%s Open Sockets' 'fds' fds %s.fds_sockets stacked 20053 %d\n", - type, title, type, update_every); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.fds_pipes '' '%s Pipes' 'fds' fds %s.fds_pipes stacked 20054 %d\n", type, - title, type, update_every); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.fds_inotifies '' '%s iNotify File Descriptors' 'fds' fds %s.fds_inotifies stacked 20055 %d\n", type, - title, type, update_every); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); + fprintf(stdout, "CHART %s.%s_processes '' '%s processes' 'processes' processes %s.processes line 20150 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION processes '' absolute 1 1\n"); + + fprintf(stdout, "CHART %s.%s_threads '' '%s threads' 'threads' processes %s.threads line 20155 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION threads '' absolute 1 1\n"); + + if (enable_file_charts) { + fprintf(stdout, "CHART %s.%s_fds_open_limit '' '%s open file descriptors limit' '%%' fds %s.fds_open_limit line 20200 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION limit '' absolute 1 100\n"); + + fprintf(stdout, "CHART %s.%s_fds_open '' '%s open files descriptors' 'fds' fds %s.fds_open stacked 20210 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION files '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION sockets '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION pipes '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION inotifies '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION event '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION timer '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION signal '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION eventpolls '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION other '' absolute 1 1\n"); + } - fprintf(stdout, "CHART %s.fds_eventfds '' '%s Event File Descriptors' 'fds' fds %s.fds_eventfds stacked 20056 %d\n", type, - title, type, update_every); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, 
"DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.fds_timerfds '' '%s Timer File Descriptors' 'fds' fds %s.fds_timerfds stacked 20057 %d\n", type, - title, type, update_every); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.fds_signalfds '' '%s Signal File Descriptors' 'fds' fds %s.fds_signalfds stacked 20058 %d\n", type, - title, type, update_every); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.fds_eventpolls '' '%s Event Poll File Descriptors' 'fds' fds %s.fds_eventpolls stacked 20059 %d\n", type, - title, type, update_every); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); - - fprintf(stdout, "CHART %s.fds_other '' '%s Other File Descriptors' 'fds' fds %s.fds_other stacked 20060 %d\n", type, - title, type, update_every); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - APPS_PLUGIN_FUNCTIONS(); +#ifndef __FreeBSD__ + fprintf(stdout, "CHART %s.%s_uptime '' '%s uptime' 'seconds' uptime %s.uptime line 20250 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION uptime '' absolute 1 1\n"); + + if (enable_detailed_uptime_charts) { + fprintf(stdout, "CHART %s.%s_uptime_summary '' '%s uptime summary' 'seconds' uptime %s.uptime_summary area 20255 %d\n", type, w->clean_name, title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION min '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION avg '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION max '' absolute 1 1\n"); + } +#endif } } - #ifndef __FreeBSD__ static void send_proc_states_count(usec_t dt) { @@ -4310,7 +4026,7 @@ static void send_proc_states_count(usec_t dt) } // send process state count - send_BEGIN("system", "processes_state", dt); + fprintf(stdout, "BEGIN system.processes_state %" PRIu64 "\n", dt); for (proc_state i = PROC_STATUS_RUNNING; i < PROC_STATUS_END; i++) { send_SET(proc_states[i], proc_state_count[i]); } @@ -4575,7 +4291,7 @@ static int check_capabilities() { } #endif -static netdata_mutex_t mutex = NETDATA_MUTEX_INITIALIZER; +static netdata_mutex_t apps_and_stdout_mutex = NETDATA_MUTEX_INITIALIZER; #define PROCESS_FILTER_CATEGORY "category:" #define PROCESS_FILTER_USER "user:" @@ -4629,8 +4345,8 @@ static void get_MemTotal(void) { } static void apps_plugin_function_processes_help(const char *transaction) { - pluginsd_function_result_begin_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600); - fprintf(stdout, "%s", + BUFFER *wb = buffer_create(0, NULL); + buffer_sprintf(wb, "%s", "apps.plugin / processes\n" "\n" "Function `processes` presents all the currently running processes of the system.\n" @@ -4660,7 +4376,9 @@ static void apps_plugin_function_processes_help(const char *transaction) { "\n" "Filters can be combined. 
Each filter can be given only one time.\n" ); - pluginsd_function_result_end_to_stdout(); + + pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb); + buffer_free(wb); } #define add_value_field_llu_with_max(wb, key, value) do { \ @@ -4675,7 +4393,7 @@ static void apps_plugin_function_processes_help(const char *transaction) { buffer_json_add_array_item_double(wb, _tmp); \ } while(0) -static void function_processes(const char *transaction, char *function __maybe_unused, char *line_buffer __maybe_unused, int line_max __maybe_unused, int timeout __maybe_unused) { +static void function_processes(const char *transaction, char *function __maybe_unused, int timeout __maybe_unused, bool *cancelled __maybe_unused) { struct pid_stat *p; char *words[PLUGINSD_MAX_WORDS] = { NULL }; @@ -4696,21 +4414,24 @@ static void function_processes(const char *transaction, char *function __maybe_u if(!category && strncmp(keyword, PROCESS_FILTER_CATEGORY, strlen(PROCESS_FILTER_CATEGORY)) == 0) { category = find_target_by_name(apps_groups_root_target, &keyword[strlen(PROCESS_FILTER_CATEGORY)]); if(!category) { - pluginsd_function_json_error(transaction, HTTP_RESP_BAD_REQUEST, "No category with that name found."); + pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_BAD_REQUEST, + "No category with that name found."); return; } } else if(!user && strncmp(keyword, PROCESS_FILTER_USER, strlen(PROCESS_FILTER_USER)) == 0) { user = find_target_by_name(users_root_target, &keyword[strlen(PROCESS_FILTER_USER)]); if(!user) { - pluginsd_function_json_error(transaction, HTTP_RESP_BAD_REQUEST, "No user with that name found."); + pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_BAD_REQUEST, + "No user with that name found."); return; } } else if(strncmp(keyword, PROCESS_FILTER_GROUP, strlen(PROCESS_FILTER_GROUP)) == 0) { group = find_target_by_name(groups_root_target, &keyword[strlen(PROCESS_FILTER_GROUP)]); if(!group) { - pluginsd_function_json_error(transaction, HTTP_RESP_BAD_REQUEST, "No group with that name found."); + pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_BAD_REQUEST, + "No group with that name found."); return; } } @@ -4736,13 +4457,12 @@ static void function_processes(const char *transaction, char *function __maybe_u else { char msg[PLUGINSD_LINE_MAX]; snprintfz(msg, PLUGINSD_LINE_MAX, "Invalid parameter '%s'", keyword); - pluginsd_function_json_error(transaction, HTTP_RESP_BAD_REQUEST, msg); + pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_BAD_REQUEST, msg); return; } } time_t expires = now_realtime_sec() + update_every; - pluginsd_function_result_begin_to_stdout(transaction, HTTP_RESP_OK, "application/json", expires); unsigned int cpu_divisor = time_factor * RATES_DETAIL / 100; unsigned int memory_divisor = 1024; @@ -5096,13 +4816,13 @@ static void function_processes(const char *transaction, char *function __maybe_u RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "KiB/s", LReads_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); + RRDF_FIELD_OPTS_NONE, NULL); buffer_rrdf_table_add_field(wb, field_id++, "LWrites", "Logical I/O Writes", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "KiB/s", LWrites_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); + RRDF_FIELD_OPTS_NONE, NULL); #endif // I/O calls @@ -5110,12 +4830,12 @@ 
static void function_processes(const char *transaction, char *function __maybe_u RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "calls/s", RCalls_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); + RRDF_FIELD_OPTS_NONE, NULL); buffer_rrdf_table_add_field(wb, field_id++, "WCalls", "I/O Write Calls", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "calls/s", WCalls_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); + RRDF_FIELD_OPTS_NONE, NULL); // minor page faults buffer_rrdf_table_add_field(wb, field_id++, "MinFlt", "Minor Page Faults/s", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, @@ -5153,7 +4873,7 @@ static void function_processes(const char *transaction, char *function __maybe_u RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", TMajFlt_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); + RRDF_FIELD_OPTS_NONE, NULL); // open file descriptors buffer_rrdf_table_add_field(wb, field_id++, "FDsLimitPercent", "Percentage of Open Descriptors vs Limits", @@ -5165,24 +4885,24 @@ static void function_processes(const char *transaction, char *function __maybe_u RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", FDs_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); + RRDF_FIELD_OPTS_NONE, NULL); buffer_rrdf_table_add_field(wb, field_id++, "Files", "Open Files", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", Files_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); + RRDF_FIELD_OPTS_NONE, NULL); buffer_rrdf_table_add_field(wb, field_id++, "Pipes", "Open Pipes", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", Pipes_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); + RRDF_FIELD_OPTS_NONE, NULL); buffer_rrdf_table_add_field(wb, field_id++, "Sockets", "Open Sockets", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", Sockets_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); + RRDF_FIELD_OPTS_NONE, NULL); buffer_rrdf_table_add_field(wb, field_id++, "iNotiFDs", "Open iNotify Descriptors", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", iNotiFDs_max, RRDF_FIELD_SORT_DESCENDING, @@ -5219,12 +4939,12 @@ static void function_processes(const char *transaction, char *function __maybe_u RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0, "processes", Processes_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); + RRDF_FIELD_OPTS_NONE, NULL); buffer_rrdf_table_add_field(wb, field_id++, "Threads", "Threads", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0, "threads", Threads_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); + RRDF_FIELD_OPTS_NONE, NULL); buffer_rrdf_table_add_field(wb, field_id++, "Uptime", "Uptime in 
seconds", RRDF_FIELD_TYPE_DURATION, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_DURATION_S, 2, "seconds", Uptime_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_MAX, @@ -5520,87 +5240,19 @@ static void function_processes(const char *transaction, char *function __maybe_u buffer_json_member_add_time_t(wb, "expires", expires); buffer_json_finalize(wb); - fwrite(buffer_tostring(wb), buffer_strlen(wb), 1, stdout); - buffer_free(wb); + pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", expires, wb); - pluginsd_function_result_end_to_stdout(); + buffer_free(wb); } static bool apps_plugin_exit = false; -static void *reader_main(void *arg __maybe_unused) { - char buffer[PLUGINSD_LINE_MAX + 1]; - - char *s = NULL; - while(!apps_plugin_exit && (s = fgets(buffer, PLUGINSD_LINE_MAX, stdin))) { - - char *words[PLUGINSD_MAX_WORDS] = { NULL }; - size_t num_words = quoted_strings_splitter_pluginsd(buffer, words, PLUGINSD_MAX_WORDS); - - const char *keyword = get_word(words, num_words, 0); - - if(keyword && strcmp(keyword, PLUGINSD_KEYWORD_FUNCTION) == 0) { - char *transaction = get_word(words, num_words, 1); - char *timeout_s = get_word(words, num_words, 2); - char *function = get_word(words, num_words, 3); - - if(!transaction || !*transaction || !timeout_s || !*timeout_s || !function || !*function) { - netdata_log_error("Received incomplete %s (transaction = '%s', timeout = '%s', function = '%s'). Ignoring it.", - keyword, - transaction?transaction:"(unset)", - timeout_s?timeout_s:"(unset)", - function?function:"(unset)"); - } - else { - int timeout = str2i(timeout_s); - if(timeout <= 0) timeout = PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT; - -// internal_error(true, "Received function '%s', transaction '%s', timeout %d", function, transaction, timeout); - - netdata_mutex_lock(&mutex); - - if(strncmp(function, "processes", strlen("processes")) == 0) - function_processes(transaction, function, buffer, PLUGINSD_LINE_MAX + 1, timeout); - else - pluginsd_function_json_error(transaction, HTTP_RESP_NOT_FOUND, "No function with this name found in apps.plugin."); - - fflush(stdout); - netdata_mutex_unlock(&mutex); - -// internal_error(true, "Done with function '%s', transaction '%s', timeout %d", function, transaction, timeout); - } - } - else - netdata_log_error("Received unknown command: %s", keyword?keyword:"(unset)"); - } - - if(!s || feof(stdin) || ferror(stdin)) { - apps_plugin_exit = true; - netdata_log_error("Received error on stdin."); - } - - exit(1); - return NULL; -} - int main(int argc, char **argv) { - // debug_flags = D_PROCFILE; - stderror = stderr; - clocks_init(); + nd_log_initialize_for_external_plugins("apps.plugin"); pagesize = (size_t)sysconf(_SC_PAGESIZE); - // set the name for logging - program_name = "apps.plugin"; - - // disable syslog for apps.plugin - error_log_syslog = 0; - - // set errors flood protection to 100 logs per hour - error_log_errors_per_period = 100; - error_log_throttle_period = 3600; - bool send_resource_usage = true; { const char *s = getenv("NETDATA_INTERNALS_MONITORING"); @@ -5686,10 +5338,17 @@ int main(int argc, char **argv) { all_pids = callocz(sizeof(struct pid_stat *), (size_t) pid_max + 1); - netdata_thread_t reader_thread; - netdata_thread_create(&reader_thread, "APPS_READER", NETDATA_THREAD_OPTION_DONT_LOG, reader_main, NULL); - netdata_mutex_lock(&mutex); + // ------------------------------------------------------------------------ + // the event loop for functions + + struct functions_evloop_globals *wg = + 
functions_evloop_init(1, "APPS", &apps_and_stdout_mutex, &apps_plugin_exit); + + functions_evloop_add_function(wg, "processes", function_processes, PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT); + + // ------------------------------------------------------------------------ + netdata_mutex_lock(&apps_and_stdout_mutex); APPS_PLUGIN_GLOBAL_FUNCTIONS(); usec_t step = update_every * USEC_PER_SEC; @@ -5697,7 +5356,7 @@ int main(int argc, char **argv) { heartbeat_t hb; heartbeat_init(&hb); for(; !apps_plugin_exit ; global_iterations_counter++) { - netdata_mutex_unlock(&mutex); + netdata_mutex_unlock(&apps_and_stdout_mutex); #ifdef NETDATA_PROFILING #warning "compiling for profiling" @@ -5708,17 +5367,15 @@ int main(int argc, char **argv) { #else usec_t dt = heartbeat_next(&hb, step); #endif - netdata_mutex_lock(&mutex); + netdata_mutex_lock(&apps_and_stdout_mutex); struct pollfd pollfd = { .fd = fileno(stdout), .events = POLLERR }; if (unlikely(poll(&pollfd, 1, 0) < 0)) { - netdata_mutex_unlock(&mutex); - netdata_thread_cancel(reader_thread); + netdata_mutex_unlock(&apps_and_stdout_mutex); fatal("Cannot check if a pipe is available"); } if (unlikely(pollfd.revents & POLLERR)) { - netdata_mutex_unlock(&mutex); - netdata_thread_cancel(reader_thread); + netdata_mutex_unlock(&apps_and_stdout_mutex); fatal("Received error on read pipe."); } @@ -5728,8 +5385,7 @@ int main(int argc, char **argv) { if(!collect_data_for_all_processes()) { netdata_log_error("Cannot collect /proc data for running processes. Disabling apps.plugin..."); printf("DISABLE\n"); - netdata_mutex_unlock(&mutex); - netdata_thread_cancel(reader_thread); + netdata_mutex_unlock(&apps_and_stdout_mutex); exit(1); } @@ -5743,21 +5399,18 @@ int main(int argc, char **argv) { send_proc_states_count(dt); #endif - // this is smart enough to show only newly added apps, when needed - send_charts_updates_to_netdata(apps_groups_root_target, "apps", "Apps"); - if(likely(enable_users_charts)) - send_charts_updates_to_netdata(users_root_target, "users", "Users"); + send_charts_updates_to_netdata(apps_groups_root_target, "app", "app_group", "Apps"); + send_collected_data_to_netdata(apps_groups_root_target, "app", dt); - if(likely(enable_groups_charts)) - send_charts_updates_to_netdata(groups_root_target, "groups", "User Groups"); - - send_collected_data_to_netdata(apps_groups_root_target, "apps", dt); - - if(likely(enable_users_charts)) - send_collected_data_to_netdata(users_root_target, "users", dt); + if (enable_users_charts) { + send_charts_updates_to_netdata(users_root_target, "user", "user", "Users"); + send_collected_data_to_netdata(users_root_target, "user", dt); + } - if(likely(enable_groups_charts)) - send_collected_data_to_netdata(groups_root_target, "groups", dt); + if (enable_groups_charts) { + send_charts_updates_to_netdata(groups_root_target, "usergroup", "user_group", "User Groups"); + send_collected_data_to_netdata(groups_root_target, "usergroup", dt); + } fflush(stdout); @@ -5765,5 +5418,5 @@ int main(int argc, char **argv) { debug_log("done Loop No %zu", global_iterations_counter); } - netdata_mutex_unlock(&mutex); + netdata_mutex_unlock(&apps_and_stdout_mutex); } diff --git a/collectors/apps.plugin/integrations/applications.md b/collectors/apps.plugin/integrations/applications.md new file mode 100644 index 00000000000000..e5219fcc2e226e --- /dev/null +++ b/collectors/apps.plugin/integrations/applications.md @@ -0,0 +1,114 @@ + + +# Applications + + + + + +Plugin: apps.plugin +Module: apps + + + +## Overview + +Monitor Applications for optimal 
software performance and resource usage. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per applications group + +These metrics refer to the application group. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| app_group | The name of the group defined in the configuration. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| app.cpu_utilization | user, system | percentage | +| app.cpu_guest_utilization | guest | percentage | +| app.cpu_context_switches | voluntary, involuntary | switches/s | +| app.mem_usage | rss | MiB | +| app.mem_private_usage | mem | MiB | +| app.vmem_usage | vmem | MiB | +| app.mem_page_faults | minor, major | pgfaults/s | +| app.swap_usage | swap | MiB | +| app.disk_physical_io | reads, writes | KiB/s | +| app.disk_logical_io | reads, writes | KiB/s | +| app.processes | processes | processes | +| app.threads | threads | threads | +| app.fds_open_limit | limit | percentage | +| app.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds | +| app.uptime | uptime | seconds | +| app.uptime_summary | min, avg, max | seconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/apps.plugin/integrations/user_groups.md b/collectors/apps.plugin/integrations/user_groups.md new file mode 100644 index 00000000000000..4ccbfc95fb476c --- /dev/null +++ b/collectors/apps.plugin/integrations/user_groups.md @@ -0,0 +1,114 @@ + + +# User Groups + + + + + +Plugin: apps.plugin +Module: groups + + + +## Overview + +This integration monitors resource utilization on a user groups context. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per user group + +These metrics refer to the user group. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| user_group | The name of the user group. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| usergroup.cpu_utilization | user, system | percentage | +| usergroup.cpu_guest_utilization | guest | percentage | +| usergroup.cpu_context_switches | voluntary, involuntary | switches/s | +| usergroup.mem_usage | rss | MiB | +| usergroup.mem_private_usage | mem | MiB | +| usergroup.vmem_usage | vmem | MiB | +| usergroup.mem_page_faults | minor, major | pgfaults/s | +| usergroup.swap_usage | swap | MiB | +| usergroup.disk_physical_io | reads, writes | KiB/s | +| usergroup.disk_logical_io | reads, writes | KiB/s | +| usergroup.processes | processes | processes | +| usergroup.threads | threads | threads | +| usergroup.fds_open_limit | limit | percentage | +| usergroup.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds | +| usergroup.uptime | uptime | seconds | +| usergroup.uptime_summary | min, avg, max | seconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/apps.plugin/integrations/users.md b/collectors/apps.plugin/integrations/users.md new file mode 100644 index 00000000000000..c151fd8a2d45f9 --- /dev/null +++ b/collectors/apps.plugin/integrations/users.md @@ -0,0 +1,114 @@ + + +# Users + + + + + +Plugin: apps.plugin +Module: users + + + +## Overview + +This integration monitors resource utilization on a user context. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per user + +These metrics refer to the user. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| user | The name of the user. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| user.cpu_utilization | user, system | percentage | +| user.cpu_guest_utilization | guest | percentage | +| user.cpu_context_switches | voluntary, involuntary | switches/s | +| user.mem_usage | rss | MiB | +| user.mem_private_usage | mem | MiB | +| user.vmem_usage | vmem | MiB | +| user.mem_page_faults | minor, major | pgfaults/s | +| user.swap_usage | swap | MiB | +| user.disk_physical_io | reads, writes | KiB/s | +| user.disk_logical_io | reads, writes | KiB/s | +| user.processes | processes | processes | +| user.threads | threads | threads | +| user.fds_open_limit | limit | percentage | +| user.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds | +| user.uptime | uptime | seconds | +| user.uptime_summary | min, avg, max | seconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. 
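
Once the collector is running, each detected user is exposed as its own chart instance carrying the `user` label. As a rough illustration, a chart for a hypothetical user named `netdata` could be retrieved from the agent API like this (chart name and host are illustrative):

    curl 'http://localhost:19999/api/v1/data?chart=user.netdata_cpu_utilization&after=-60&format=json'
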
+ +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/apps.plugin/metadata.yaml b/collectors/apps.plugin/metadata.yaml index 9794a5ea2064e4..f5f22853a53c41 100644 --- a/collectors/apps.plugin/metadata.yaml +++ b/collectors/apps.plugin/metadata.yaml @@ -67,160 +67,123 @@ modules: description: "" availability: [] scopes: - - name: global - description: "" - labels: [] + - name: applications group + description: These metrics refer to the application group. + labels: + - name: app_group + description: The name of the group defined in the configuration. metrics: - - name: apps.cpu - description: Apps CPU Time (100% = 1 core) - unit: "percentage" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: apps.cpu_user - description: Apps CPU User Time (100% = 1 core) - unit: "percentage" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: apps.cpu_system - description: Apps CPU System Time (100% = 1 core) - unit: "percentage" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: apps.cpu_guest - description: Apps CPU Guest Time (100% = 1 core) - unit: "percentage" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: apps.mem - description: Apps Real Memory (w/o shared) - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: apps.rss - description: Apps Resident Set Size (w/shared) - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: apps.vmem - description: Apps Virtual Memory Size - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: apps.swap - description: Apps Swap Memory - unit: "MiB" + - name: app.cpu_utilization + description: Apps CPU utilization (100% = 1 core) + unit: percentage chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.major_faults - description: Apps Major Page Faults (swap read) - unit: "page faults/s" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: apps.minor_faults - description: Apps Minor Page Faults (swap read) - unit: "page faults/s" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: apps.preads - description: Apps Disk Reads - unit: "KiB/s" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: apps.pwrites - description: Apps Disk Writes - unit: "KiB/s" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: apps.lreads - description: Apps Disk Logical Reads - unit: "KiB/s" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: apps.lwrites - description: Apps I/O Logical Writes - unit: "KiB/s" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: apps.threads - description: Apps Threads - unit: "threads" - chart_type: stacked + - name: user + - name: system + - name: app.cpu_guest_utilization + description: Apps CPU guest utilization (100% = 1 core) + unit: percentage + chart_type: line dimensions: - - name: a dimension per app group - - name: apps.processes - description: Apps Processes - unit: "processes" + - name: guest + - name: app.cpu_context_switches + description: Apps CPU context switches + unit: switches/s chart_type: stacked dimensions: - - name: a dimension per app 
group - - name: apps.voluntary_ctxt_switches - description: Apps Voluntary Context Switches - unit: "processes" - chart_type: stacked + - name: voluntary + - name: involuntary + - name: app.mem_usage + description: Apps memory RSS usage + unit: MiB + chart_type: line dimensions: - - name: a dimension per app group - - name: apps.involuntary_ctxt_switches - description: Apps Involuntary Context Switches - unit: "processes" + - name: rss + - name: app.mem_private_usage + description: Apps memory usage without shared + unit: MiB chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.uptime - description: Apps Carried Over Uptime - unit: "seconds" + - name: mem + - name: app.vmem_usage + description: Apps virtual memory size + unit: MiB chart_type: line dimensions: - - name: a dimension per app group - - name: apps.uptime_min - description: Apps Minimum Uptime - unit: "seconds" + - name: vmem + - name: app.mem_page_faults + description: Apps memory page faults + unit: pgfaults/s + chart_type: stacked + dimensions: + - name: minor + - name: major + - name: app.swap_usage + description: Apps swap usage + unit: MiB + chart_type: area + dimensions: + - name: swap + - name: app.disk_physical_io + description: Apps disk physical IO + unit: KiB/s + chart_type: area + dimensions: + - name: reads + - name: writes + - name: app.disk_logical_io + description: Apps disk logical IO + unit: KiB/s + chart_type: area + dimensions: + - name: reads + - name: writes + - name: app.processes + description: Apps processes + unit: processes chart_type: line dimensions: - - name: a dimension per app group - - name: apps.uptime_avg - description: Apps Average Uptime - unit: "seconds" + - name: processes + - name: app.threads + description: Apps threads + unit: threads chart_type: line dimensions: - - name: a dimension per app group - - name: apps.uptime_max - description: Apps Maximum Uptime - unit: "seconds" + - name: threads + - name: app.fds_open_limit + description: Apps open file descriptors limit + unit: percentage chart_type: line dimensions: - - name: a dimension per app group - - name: apps.files - description: Apps Open Files - unit: "open files" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: apps.sockets - description: Apps Open Sockets - unit: "open sockets" - chart_type: stacked + - name: limit + - name: app.fds_open + description: Apps open file descriptors + unit: fds + chart_type: stacked + dimensions: + - name: files + - name: sockets + - name: pipes + - name: inotifies + - name: event + - name: timer + - name: signal + - name: eventpolls + - name: other + - name: app.uptime + description: Apps uptime + unit: seconds + chart_type: line dimensions: - - name: a dimension per app group - - name: apps.pipes - description: Apps Open Pipes - unit: "open pipes" - chart_type: stacked + - name: uptime + - name: app.uptime_summary + description: Apps uptime summary + unit: seconds + chart_type: area dimensions: - - name: a dimension per app group + - name: min + - name: avg + - name: max - meta: plugin_name: apps.plugin module_name: groups @@ -289,160 +252,123 @@ modules: description: "" availability: [] scopes: - - name: global - description: "" - labels: [] + - name: user group + description: These metrics refer to the user group. + labels: + - name: user_group + description: The name of the user group. 
metrics: - - name: groups.cpu - description: User Groups CPU Time (100% = 1 core) - unit: "percentage" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.cpu_user - description: User Groups CPU User Time (100% = 1 core) - unit: "percentage" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.cpu_system - description: User Groups CPU System Time (100% = 1 core) - unit: "percentage" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.cpu_guest - description: User Groups CPU Guest Time (100% = 1 core) - unit: "percentage" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.mem - description: User Groups Real Memory (w/o shared) - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.rss - description: User Groups Resident Set Size (w/shared) - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.vmem - description: User Groups Virtual Memory Size - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.swap - description: User Groups Swap Memory - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.major_faults - description: User Groups Major Page Faults (swap read) - unit: "page faults/s" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.minor_faults - description: User Groups Page Faults (swap read) - unit: "page faults/s" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.preads - description: User Groups Disk Reads - unit: "KiB/s" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.pwrites - description: User Groups Disk Writes - unit: "KiB/s" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.lreads - description: User Groups Disk Logical Reads - unit: "KiB/s" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.lwrites - description: User Groups I/O Logical Writes - unit: "KiB/s" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.threads - description: User Groups Threads - unit: "threads" + - name: usergroup.cpu_utilization + description: User Groups CPU utilization (100% = 1 core) + unit: percentage chart_type: stacked dimensions: - - name: a dimension per user group - - name: groups.processes - description: User Groups Processes - unit: "processes" - chart_type: stacked - dimensions: - - name: a dimension per user group - - name: groups.voluntary_ctxt_switches - description: User Groups Voluntary Context Switches - unit: "processes" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: groups.involuntary_ctxt_switches - description: User Groups Involuntary Context Switches - unit: "processes" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: groups.uptime - description: User Groups Carried Over Uptime - unit: "seconds" + - name: user + - name: system + - name: usergroup.cpu_guest_utilization + description: User Groups CPU guest utilization (100% = 1 core) + unit: percentage chart_type: line dimensions: - - name: a dimension per user group - - name: groups.uptime_min - description: User Groups Minimum Uptime - unit: "seconds" + - name: guest + - name: 
usergroup.cpu_context_switches + description: User Groups CPU context switches + unit: switches/s + chart_type: stacked + dimensions: + - name: voluntary + - name: involuntary + - name: usergroup.mem_usage + description: User Groups memory RSS usage + unit: MiB + chart_type: area + dimensions: + - name: rss + - name: usergroup.mem_private_usage + description: User Groups memory usage without shared + unit: MiB + chart_type: area + dimensions: + - name: mem + - name: usergroup.vmem_usage + description: User Groups virtual memory size + unit: MiB chart_type: line dimensions: - - name: a dimension per user group - - name: groups.uptime_avg - description: User Groups Average Uptime - unit: "seconds" + - name: vmem + - name: usergroup.mem_page_faults + description: User Groups memory page faults + unit: pgfaults/s + chart_type: stacked + dimensions: + - name: minor + - name: major + - name: usergroup.swap_usage + description: User Groups swap usage + unit: MiB + chart_type: area + dimensions: + - name: swap + - name: usergroup.disk_physical_io + description: User Groups disk physical IO + unit: KiB/s + chart_type: area + dimensions: + - name: reads + - name: writes + - name: usergroup.disk_logical_io + description: User Groups disk logical IO + unit: KiB/s + chart_type: area + dimensions: + - name: reads + - name: writes + - name: usergroup.processes + description: User Groups processes + unit: processes chart_type: line dimensions: - - name: a dimension per user group - - name: groups.uptime_max - description: User Groups Maximum Uptime - unit: "seconds" + - name: processes + - name: usergroup.threads + description: User Groups threads + unit: threads chart_type: line dimensions: - - name: a dimension per user group - - name: groups.files - description: User Groups Open Files - unit: "open files" - chart_type: stacked + - name: threads + - name: usergroup.fds_open_limit + description: User Groups open file descriptors limit + unit: percentage + chart_type: line dimensions: - - name: a dimension per user group - - name: groups.sockets - description: User Groups Open Sockets - unit: "open sockets" - chart_type: stacked + - name: limit + - name: usergroup.fds_open + description: User Groups open file descriptors + unit: fds + chart_type: stacked + dimensions: + - name: files + - name: sockets + - name: pipes + - name: inotifies + - name: event + - name: timer + - name: signal + - name: eventpolls + - name: other + - name: usergroup.uptime + description: User Groups uptime + unit: seconds + chart_type: line dimensions: - - name: a dimension per user group - - name: groups.pipes - description: User Groups Open Pipes - unit: "open pipes" - chart_type: stacked + - name: uptime + - name: usergroup.uptime_summary + description: User Groups uptime summary + unit: seconds + chart_type: area dimensions: - - name: a dimension per user group + - name: min + - name: avg + - name: max - meta: plugin_name: apps.plugin module_name: users @@ -509,157 +435,120 @@ modules: description: "" availability: [] scopes: - - name: global - description: "" - labels: [] + - name: user + description: These metrics refer to the user. + labels: + - name: user + description: The name of the user. 
metrics: - - name: users.cpu - description: Users CPU Time (100% = 1 core) - unit: "percentage" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.cpu_user - description: Users CPU User Time (100% = 1 core) - unit: "percentage" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.cpu_system - description: Users CPU System Time (100% = 1 core) - unit: "percentage" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.cpu_guest - description: Users CPU Guest Time (100% = 1 core) - unit: "percentage" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.mem - description: Users Real Memory (w/o shared) - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.rss - description: Users Resident Set Size (w/shared) - unit: "MiB" + - name: user.cpu_utilization + description: User CPU utilization (100% = 1 core) + unit: percentage chart_type: stacked dimensions: - - name: a dimension per user - - name: users.vmem - description: Users Virtual Memory Size - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.swap - description: Users Swap Memory - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.major_faults - description: Users Major Page Faults (swap read) - unit: "page faults/s" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.minor_faults - description: Users Page Faults (swap read) - unit: "page faults/s" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.preads - description: Users Disk Reads - unit: "KiB/s" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.pwrites - description: Users Disk Writes - unit: "KiB/s" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.lreads - description: Users Disk Logical Reads - unit: "KiB/s" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.lwrites - description: Users I/O Logical Writes - unit: "KiB/s" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.threads - description: Users Threads - unit: "threads" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.processes - description: Users Processes - unit: "processes" - chart_type: stacked - dimensions: - - name: a dimension per user - - name: users.voluntary_ctxt_switches - description: Users Voluntary Context Switches - unit: "processes" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: users.involuntary_ctxt_switches - description: Users Involuntary Context Switches - unit: "processes" - chart_type: stacked - dimensions: - - name: a dimension per app group - - name: users.uptime - description: Users Carried Over Uptime - unit: "seconds" + - name: user + - name: system + - name: user.cpu_guest_utilization + description: User CPU guest utilization (100% = 1 core) + unit: percentage chart_type: line dimensions: - - name: a dimension per user - - name: users.uptime_min - description: Users Minimum Uptime - unit: "seconds" + - name: guest + - name: user.cpu_context_switches + description: User CPU context switches + unit: switches/s + chart_type: stacked + dimensions: + - name: voluntary + - name: involuntary + - name: user.mem_usage + description: User memory RSS usage + unit: MiB + chart_type: area + dimensions: 
+ - name: rss + - name: user.mem_private_usage + description: User memory usage without shared + unit: MiB + chart_type: area + dimensions: + - name: mem + - name: user.vmem_usage + description: User virtual memory size + unit: MiB chart_type: line dimensions: - - name: a dimension per user - - name: users.uptime_avg - description: Users Average Uptime - unit: "seconds" + - name: vmem + - name: user.mem_page_faults + description: User memory page faults + unit: pgfaults/s + chart_type: stacked + dimensions: + - name: minor + - name: major + - name: user.swap_usage + description: User swap usage + unit: MiB + chart_type: area + dimensions: + - name: swap + - name: user.disk_physical_io + description: User disk physical IO + unit: KiB/s + chart_type: area + dimensions: + - name: reads + - name: writes + - name: user.disk_logical_io + description: User disk logical IO + unit: KiB/s + chart_type: area + dimensions: + - name: reads + - name: writes + - name: user.processes + description: User processes + unit: processes chart_type: line dimensions: - - name: a dimension per user - - name: users.uptime_max - description: Users Maximum Uptime - unit: "seconds" + - name: processes + - name: user.threads + description: User threads + unit: threads chart_type: line dimensions: - - name: a dimension per user - - name: users.files - description: Users Open Files - unit: "open files" - chart_type: stacked + - name: threads + - name: user.fds_open_limit + description: User open file descriptors limit + unit: percentage + chart_type: line dimensions: - - name: a dimension per user - - name: users.sockets - description: Users Open Sockets - unit: "open sockets" - chart_type: stacked + - name: limit + - name: user.fds_open + description: User open file descriptors + unit: fds + chart_type: stacked + dimensions: + - name: files + - name: sockets + - name: pipes + - name: inotifies + - name: event + - name: timer + - name: signal + - name: eventpolls + - name: other + - name: user.uptime + description: User uptime + unit: seconds + chart_type: line dimensions: - - name: a dimension per user - - name: users.pipes - description: Users Open Pipes - unit: "open pipes" - chart_type: stacked + - name: uptime + - name: user.uptime_summary + description: User uptime summary + unit: seconds + chart_type: area dimensions: - - name: a dimension per user + - name: min + - name: avg + - name: max diff --git a/collectors/cgroups.plugin/Makefile.am b/collectors/cgroups.plugin/Makefile.am index 354b9fbdc10644..0f6062420b2d1d 100644 --- a/collectors/cgroups.plugin/Makefile.am +++ b/collectors/cgroups.plugin/Makefile.am @@ -3,11 +3,21 @@ AUTOMAKE_OPTIONS = subdir-objects MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +CLEANFILES = \ + cgroup-name.sh \ + cgroup-network-helper.sh \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + dist_plugins_SCRIPTS = \ cgroup-name.sh \ cgroup-network-helper.sh \ $(NULL) dist_noinst_DATA = \ + cgroup-name.sh.in \ + cgroup-network-helper.sh.in \ README.md \ $(NULL) diff --git a/collectors/cgroups.plugin/README.md b/collectors/cgroups.plugin/README.md index 2e4fff2303da78..ba6a20e5e05c2a 100644 --- a/collectors/cgroups.plugin/README.md +++ b/collectors/cgroups.plugin/README.md @@ -139,10 +139,10 @@ chart instead of `auto` to enable it permanently. For example: You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins. 
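
A minimal sketch of the `netdata.conf` setting described in the paragraph above (surrounding options omitted):

    [global]
        enable zero metrics = yes
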
-### Alarms +### Alerts -CPU and memory limits are watched and used to rise alarms. Memory usage for every cgroup is checked against `ram` -and `ram+swap` limits. CPU usage for every cgroup is checked against `cpuset.cpus` and `cpu.cfs_period_us` + `cpu.cfs_quota_us` pair assigned for the cgroup. Configuration for the alarms is available in `health.d/cgroups.conf` +CPU and memory limits are watched and used to raise alerts. Memory usage for every cgroup is checked against `ram` +and `ram+swap` limits. CPU usage for every cgroup is checked against `cpuset.cpus` and `cpu.cfs_period_us` + `cpu.cfs_quota_us` pair assigned for the cgroup. Configuration for the alerts is available in `health.d/cgroups.conf` file. ## Monitoring systemd services @@ -264,7 +264,7 @@ Network interfaces and cgroups (containers) are self-cleaned. When a network int a few errors in error.log complaining about files it cannot find, but immediately: 1. It will detect this is a removed container or network interface -2. It will freeze/pause all alarms for them +2. It will freeze/pause all alerts for them 3. It will mark their charts as obsolete 4. Obsolete charts are not be offered on new dashboard sessions (so hit F5 and the charts are gone) 5. Existing dashboard sessions will continue to see them, but of course they will not refresh diff --git a/collectors/cgroups.plugin/cgroup-charts.c b/collectors/cgroups.plugin/cgroup-charts.c new file mode 100644 index 00000000000000..a89e8ac4545d3d --- /dev/null +++ b/collectors/cgroups.plugin/cgroup-charts.c @@ -0,0 +1,1526 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "cgroup-internals.h" + +void update_cpu_utilization_chart(struct cgroup *cg) { + RRDSET *chart = cg->st_cpu; + + if (unlikely(!cg->st_cpu)) { + char *title; + char *context; + int prio; + + if (is_cgroup_systemd_service(cg)) { + title = "Systemd Services CPU utilization (100%% = 1 core)"; + context = "systemd.service.cpu.utilization"; + prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD; + } else { + title = k8s_is_kubepod(cg) ? "CPU Usage (100%% = 1000 mCPU)" : "CPU Usage (100%% = 1 core)"; + context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu" : "cgroup.cpu"; + prio = cgroup_containers_chart_priority; + } + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_cpu = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "cpu", + NULL, + "cpu", + context, + title, + "percentage", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ?
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_STACKED); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + + if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { + cg->st_cpu_rd_user = rrddim_add(chart, "user", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL); + cg->st_cpu_rd_system = rrddim_add(chart, "system", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL); + } else { + cg->st_cpu_rd_user = rrddim_add(chart, "user", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL); + cg->st_cpu_rd_system = rrddim_add(chart, "system", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL); + } + } + + rrddim_set_by_pointer(chart, cg->st_cpu_rd_user, (collected_number)cg->cpuacct_stat.user); + rrddim_set_by_pointer(chart, cg->st_cpu_rd_system, (collected_number)cg->cpuacct_stat.system); + rrdset_done(chart); +} + +void update_cpu_utilization_limit_chart(struct cgroup *cg, NETDATA_DOUBLE cpu_limit) { + if (is_cgroup_systemd_service(cg)) + return; + + RRDSET *chart = cg->st_cpu_limit; + + if (unlikely(!cg->st_cpu_limit)) { + char *title = "CPU Usage within the limits"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_limit" : "cgroup.cpu_limit"; + int prio = cgroup_containers_chart_priority - 1; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_cpu_limit = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "cpu_limit", + NULL, + "cpu", + context, + title, + "percentage", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + + if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) + rrddim_add(chart, "used", NULL, 1, system_hz, RRD_ALGORITHM_ABSOLUTE); + else + rrddim_add(chart, "used", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE); + cg->prev_cpu_usage = (NETDATA_DOUBLE)(cg->cpuacct_stat.user + cg->cpuacct_stat.system) * 100; + } + + NETDATA_DOUBLE cpu_usage = 0; + cpu_usage = (NETDATA_DOUBLE)(cg->cpuacct_stat.user + cg->cpuacct_stat.system) * 100; + NETDATA_DOUBLE cpu_used = 100 * (cpu_usage - cg->prev_cpu_usage) / (cpu_limit * cgroup_update_every); + + rrdset_isnot_obsolete___safe_from_collector_thread(chart); + + rrddim_set(chart, "used", (cpu_used > 0) ? (collected_number)cpu_used : 0); + + cg->prev_cpu_usage = cpu_usage; + + rrdsetvar_custom_chart_variable_set(cg->st_cpu, cg->chart_var_cpu_limit, cpu_limit); + rrdset_done(chart); +} + +void update_cpu_throttled_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + RRDSET *chart = cg->st_cpu_nr_throttled; + + if (unlikely(!cg->st_cpu_nr_throttled)) { + char *title = "CPU Throttled Runnable Periods"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.throttled" : "cgroup.throttled"; + int prio = cgroup_containers_chart_priority + 10; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_cpu_nr_throttled = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "throttled", + NULL, + "cpu", + context, + title, + "percentage", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + rrddim_add(chart, "throttled", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set(chart, "throttled", (collected_number)cg->cpuacct_cpu_throttling.nr_throttled_perc); + rrdset_done(chart); +} + +void update_cpu_throttled_duration_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + RRDSET *chart = cg->st_cpu_throttled_time; + + if (unlikely(!cg->st_cpu_throttled_time)) { + char *title = "CPU Throttled Time Duration"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.throttled_duration" : "cgroup.throttled_duration"; + int prio = cgroup_containers_chart_priority + 15; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_cpu_throttled_time = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "throttled_duration", + NULL, + "cpu", + context, + title, + "ms", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + rrddim_add(chart, "duration", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set(chart, "duration", (collected_number)cg->cpuacct_cpu_throttling.throttled_time); + rrdset_done(chart); +} + +void update_cpu_shares_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + RRDSET *chart = cg->st_cpu_shares; + + if (unlikely(!cg->st_cpu_shares)) { + char *title = "CPU Time Relative Share"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_shares" : "cgroup.cpu_shares"; + int prio = cgroup_containers_chart_priority + 20; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_cpu_shares = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "cpu_shares", + NULL, + "cpu", + context, + title, + "shares", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + rrddim_add(chart, "shares", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set(chart, "shares", (collected_number)cg->cpuacct_cpu_shares.shares); + rrdset_done(chart); +} + +void update_cpu_per_core_usage_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + char id[RRD_ID_LENGTH_MAX + 1]; + unsigned int i; + + if (unlikely(!cg->st_cpu_per_core)) { + char *title = k8s_is_kubepod(cg) ? "CPU Usage (100%% = 1000 mCPU) Per Core" : "CPU Usage (100%% = 1 core) Per Core"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_per_core" : "cgroup.cpu_per_core"; + int prio = cgroup_containers_chart_priority + 100; + + char buff[RRD_ID_LENGTH_MAX + 1]; + cg->st_cpu_per_core = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "cpu_per_core", + NULL, + "cpu", + context, + title, + "percentage", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_STACKED); + + rrdset_update_rrdlabels(cg->st_cpu_per_core, cg->chart_labels); + + for (i = 0; i < cg->cpuacct_usage.cpus; i++) { + snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i); + rrddim_add(cg->st_cpu_per_core, id, NULL, 100, 1000000000, RRD_ALGORITHM_INCREMENTAL); + } + } + + for (i = 0; i < cg->cpuacct_usage.cpus; i++) { + snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i); + rrddim_set(cg->st_cpu_per_core, id, (collected_number)cg->cpuacct_usage.cpu_percpu[i]); + } + rrdset_done(cg->st_cpu_per_core); +} + +void update_mem_usage_detailed_chart(struct cgroup *cg) { + RRDSET *chart = cg->st_mem; + + if (unlikely(!cg->st_mem)) { + char *title; + char *context; + int prio; + if (is_cgroup_systemd_service(cg)) { + title = "Systemd Services Memory"; + context = "systemd.service.memory.ram.usage"; + prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 15; + } else { + title = "Memory Usage"; + context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem" : "cgroup.mem"; + prio = cgroup_containers_chart_priority + 220; + } + + char buff[RRD_ID_LENGTH_MAX + 1]; + + chart = cg->st_mem = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "mem", + NULL, + "mem", + context, + title, + "MiB", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_STACKED); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + + if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { + rrddim_add(chart, "cache", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart, "rss", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + + if (cg->memory.detailed_has_swap) + rrddim_add(chart, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + + rrddim_add(chart, "rss_huge", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart, "mapped_file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } else { + rrddim_add(chart, "anon", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart, "kernel_stack", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart, "slab", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart, "sock", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart, "anon_thp", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart, "file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + } + + if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { + rrddim_set(chart, "cache", (collected_number)cg->memory.total_cache); + collected_number rss = (collected_number)(cg->memory.total_rss - cg->memory.total_rss_huge); + if (rss < 0) + rss = 0; + rrddim_set(chart, "rss", rss); + if (cg->memory.detailed_has_swap) + rrddim_set(chart, "swap", (collected_number)cg->memory.total_swap); + rrddim_set(chart, "rss_huge", (collected_number)cg->memory.total_rss_huge); + rrddim_set(chart, "mapped_file", (collected_number)cg->memory.total_mapped_file); + } else { + rrddim_set(chart, "anon", (collected_number)cg->memory.anon); + rrddim_set(chart, "kernel_stack", (collected_number)cg->memory.kernel_stack); + rrddim_set(chart, "slab", (collected_number)cg->memory.slab); + rrddim_set(chart, "sock", (collected_number)cg->memory.sock); + rrddim_set(chart, "anon_thp", (collected_number)cg->memory.anon_thp); + rrddim_set(chart, "file", (collected_number)cg->memory.total_mapped_file); + } + rrdset_done(chart); +} + +void update_mem_writeback_chart(struct cgroup 
*cg) { + RRDSET *chart = cg->st_writeback; + + if (unlikely(!cg->st_writeback)) { + char *title; + char *context; + int prio; + if (is_cgroup_systemd_service(cg)) { + title = "Systemd Services Writeback Memory"; + context = "systemd.service.memory.writeback"; + prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 20; + } else { + title = "Writeback Memory"; + context = k8s_is_kubepod(cg) ? "k8s.cgroup.writeback" : "cgroup.writeback"; + prio = cgroup_containers_chart_priority + 300; + } + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_writeback = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "writeback", + NULL, + "mem", + context, + title, + "MiB", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_AREA); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + if (cg->memory.detailed_has_dirty) + rrddim_add(chart, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart, "writeback", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + + if (cg->memory.detailed_has_dirty) + rrddim_set(chart, "dirty", (collected_number)cg->memory.total_dirty); + rrddim_set(chart, "writeback", (collected_number)cg->memory.total_writeback); + rrdset_done(chart); +} + +void update_mem_activity_chart(struct cgroup *cg) { + RRDSET *chart = cg->st_mem_activity; + + if (unlikely(!cg->st_mem_activity)) { + char *title; + char *context; + int prio; + if (is_cgroup_systemd_service(cg)) { + title = "Systemd Services Memory Paging IO"; + context = "systemd.service.memory.paging.io"; + prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 30; + } else { + title = "Memory Activity"; + context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem_activity" : "cgroup.mem_activity"; + prio = cgroup_containers_chart_priority + 400; + } + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_mem_activity = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "mem_activity", + NULL, + "mem", + context, + title, + "MiB/s", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + // FIXME: systemd just in, out + rrddim_add(chart, "pgpgin", "in", system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(chart, "pgpgout", "out", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set(chart, "pgpgin", (collected_number)cg->memory.total_pgpgin); + rrddim_set(chart, "pgpgout", (collected_number)cg->memory.total_pgpgout); + rrdset_done(chart); +} + +void update_mem_pgfaults_chart(struct cgroup *cg) { + RRDSET *chart = cg->st_pgfaults; + + if (unlikely(!cg->st_pgfaults)) { + char *title; + char *context; + int prio; + if (is_cgroup_systemd_service(cg)) { + title = "Systemd Services Memory Page Faults"; + context = "systemd.service.memory.paging.faults"; + prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 25; + } else { + title = "Memory Page Faults"; + context = k8s_is_kubepod(cg) ? "k8s.cgroup.pgfaults" : "cgroup.pgfaults"; + prio = cgroup_containers_chart_priority + 500; + } + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_pgfaults = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "pgfaults", + NULL, + "mem", + context, + title, + "MiB/s", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + rrddim_add(chart, "pgfault", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(chart, "pgmajfault", "swap", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set(chart, "pgfault", (collected_number)cg->memory.total_pgfault); + rrddim_set(chart, "pgmajfault", (collected_number)cg->memory.total_pgmajfault); + rrdset_done(chart); +} + +void update_mem_usage_limit_chart(struct cgroup *cg, unsigned long long memory_limit) { + if (is_cgroup_systemd_service(cg)) + return; + + RRDSET *chart = cg->st_mem_usage_limit; + + if (unlikely(!cg->st_mem_usage_limit)) { + char *title = "Used RAM within the limits"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem_usage_limit" : "cgroup.mem_usage_limit"; + int prio = cgroup_containers_chart_priority + 200; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_mem_usage_limit = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "mem_usage_limit", + NULL, + "mem", + context, + title, + "MiB", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_STACKED); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + + rrddim_add(chart, "available", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + + rrdset_isnot_obsolete___safe_from_collector_thread(chart); + + rrddim_set(chart, "available", (collected_number)(memory_limit - cg->memory.usage_in_bytes)); + rrddim_set(chart, "used", (collected_number)cg->memory.usage_in_bytes); + rrdset_done(chart); +} + +void update_mem_utilization_chart(struct cgroup *cg, unsigned long long memory_limit) { + if (is_cgroup_systemd_service(cg)) + return; + + RRDSET *chart = cg->st_mem_utilization; + + if (unlikely(!cg->st_mem_utilization)) { + char *title = "Memory Utilization"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem_utilization" : "cgroup.mem_utilization"; + int prio = cgroup_containers_chart_priority + 199; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_mem_utilization = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "mem_utilization", + NULL, + "mem", + context, + title, + "percentage", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_AREA); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + + rrddim_add(chart, "utilization", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrdset_isnot_obsolete___safe_from_collector_thread(chart); + collected_number util = (collected_number)(cg->memory.usage_in_bytes * 100 / memory_limit); + rrddim_set(chart, "utilization", util); + rrdset_done(chart); +} + +void update_mem_failcnt_chart(struct cgroup *cg) { + RRDSET *chart = cg->st_mem_failcnt; + + if (unlikely(!cg->st_mem_failcnt)) { + char *title; + char *context; + int prio; + if (is_cgroup_systemd_service(cg)) { + title = "Systemd Services Memory Limit Failures"; + context = "systemd.service.memory.failcnt"; + prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 10; + } else { + title = "Memory Limit Failures"; + context = k8s_is_kubepod(cg) ? 
"k8s.cgroup.mem_failcnt" : "cgroup.mem_failcnt"; + prio = cgroup_containers_chart_priority + 250; + } + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_mem_failcnt = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "mem_failcnt", + NULL, + "mem", + context, + title, + "count", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + rrddim_add(chart, "failures", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set(chart, "failures", (collected_number)cg->memory.failcnt); + rrdset_done(chart); +} + +void update_mem_usage_chart(struct cgroup *cg) { + RRDSET *chart = cg->st_mem_usage; + + if (unlikely(!cg->st_mem_usage)) { + char *title; + char *context; + int prio; + if (is_cgroup_systemd_service(cg)) { + title = "Systemd Services Used Memory"; + context = "systemd.service.memory.usage"; + prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 5; + } else { + title = "Used Memory"; + context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem_usage" : "cgroup.mem_usage"; + prio = cgroup_containers_chart_priority + 210; + } + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_mem_usage = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "mem_usage", + NULL, + "mem", + context, + title, + "MiB", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_STACKED); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + + cg->st_mem_rd_ram = rrddim_add(chart, "ram", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + cg->st_mem_rd_swap = rrddim_add(chart, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(chart, cg->st_mem_rd_ram, (collected_number)cg->memory.usage_in_bytes); + + if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { + rrddim_set_by_pointer( + chart, + cg->st_mem_rd_swap, + cg->memory.msw_usage_in_bytes > (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) ? + (collected_number)(cg->memory.msw_usage_in_bytes - + (cg->memory.usage_in_bytes + cg->memory.total_inactive_file)) : + 0); + } else { + rrddim_set_by_pointer(chart, cg->st_mem_rd_swap, (collected_number)cg->memory.msw_usage_in_bytes); + } + + rrdset_done(chart); +} + +void update_io_serviced_bytes_chart(struct cgroup *cg) { + RRDSET *chart = cg->st_io; + + if (unlikely(!cg->st_io)) { + char *title; + char *context; + int prio; + if (is_cgroup_systemd_service(cg)) { + title = "Systemd Services Disk Read/Write Bandwidth"; + context = "systemd.service.disk.io"; + prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 35; + } else { + title = "I/O Bandwidth (all disks)"; + context = k8s_is_kubepod(cg) ? "k8s.cgroup.io" : "cgroup.io"; + prio = cgroup_containers_chart_priority + 1200; + } + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_io = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "io", + NULL, + "disk", + context, + title, + "KiB/s", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_AREA); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + cg->st_io_rd_read = rrddim_add(chart, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); + cg->st_io_rd_written = rrddim_add(cg->st_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(chart, cg->st_io_rd_read, (collected_number)cg->io_service_bytes.Read); + rrddim_set_by_pointer(chart, cg->st_io_rd_written, (collected_number)cg->io_service_bytes.Write); + rrdset_done(chart); +} + +void update_io_serviced_ops_chart(struct cgroup *cg) { + RRDSET *chart = cg->st_serviced_ops; + + if (unlikely(!cg->st_serviced_ops)) { + char *title; + char *context; + int prio; + if (is_cgroup_systemd_service(cg)) { + title = "Systemd Services Disk Read/Write Operations"; + context = "systemd.service.disk.iops"; + prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 40; + } else { + title = "Serviced I/O Operations (all disks)"; + context = k8s_is_kubepod(cg) ? "k8s.cgroup.serviced_ops" : "cgroup.serviced_ops"; + prio = cgroup_containers_chart_priority + 1200; + } + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_serviced_ops = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "serviced_ops", + NULL, + "disk", + context, + title, + "operations/s", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + rrddim_add(chart, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(chart, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set(chart, "read", (collected_number)cg->io_serviced.Read); + rrddim_set(chart, "write", (collected_number)cg->io_serviced.Write); + rrdset_done(chart); +} + +void update_throttle_io_serviced_bytes_chart(struct cgroup *cg) { + RRDSET *chart = cg->st_throttle_io; + + if (unlikely(!cg->st_throttle_io)) { + char *title; + char *context; + int prio; + if (is_cgroup_systemd_service(cg)) { + title = "Systemd Services Throttle Disk Read/Write Bandwidth"; + context = "systemd.service.disk.throttle.io"; + prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 45; + } else { + title = "Throttle I/O Bandwidth (all disks)"; + context = k8s_is_kubepod(cg) ? "k8s.cgroup.throttle_io" : "cgroup.throttle_io"; + prio = cgroup_containers_chart_priority + 1200; + } + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_throttle_io = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "throttle_io", + NULL, + "disk", + context, + title, + "KiB/s", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_AREA); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + + cg->st_throttle_io_rd_read = rrddim_add(chart, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); + cg->st_throttle_io_rd_written = rrddim_add(chart, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(chart, cg->st_throttle_io_rd_read, (collected_number)cg->throttle_io_service_bytes.Read); + rrddim_set_by_pointer(chart, cg->st_throttle_io_rd_written, (collected_number)cg->throttle_io_service_bytes.Write); + rrdset_done(chart); +} + +void update_throttle_io_serviced_ops_chart(struct cgroup *cg) { + RRDSET *chart = cg->st_throttle_serviced_ops; + + if (unlikely(!cg->st_throttle_serviced_ops)) { + char *title; + char *context; + int prio; + if (is_cgroup_systemd_service(cg)) { + title = "Systemd Services Throttle Disk Read/Write Operations"; + context = "systemd.service.disk.throttle.iops"; + prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 50; + } else { + title = "Throttle Serviced I/O Operations (all disks)"; + context = k8s_is_kubepod(cg) ? "k8s.cgroup.throttle_serviced_ops" : "cgroup.throttle_serviced_ops"; + prio = cgroup_containers_chart_priority + 1200; + } + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_throttle_serviced_ops = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "throttle_serviced_ops", + NULL, + "disk", + context, + title, + "operations/s", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + rrddim_add(chart, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(chart, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set(chart, "read", (collected_number)cg->throttle_io_serviced.Read); + rrddim_set(chart, "write", (collected_number)cg->throttle_io_serviced.Write); + rrdset_done(chart); +} + +void update_io_queued_ops_chart(struct cgroup *cg) { + RRDSET *chart = cg->st_queued_ops; + + if (unlikely(!cg->st_queued_ops)) { + char *title; + char *context; + int prio; + if (is_cgroup_systemd_service(cg)) { + title = "Systemd Services Queued Disk Read/Write Operations"; + context = "systemd.service.disk.queued_iops"; + prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 55; + } else { + title = "Queued I/O Operations (all disks)"; + context = k8s_is_kubepod(cg) ? "k8s.cgroup.queued_ops" : "cgroup.queued_ops"; + prio = cgroup_containers_chart_priority + 2000; + } + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_queued_ops = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "queued_ops", + NULL, + "disk", + context, + title, + "operations", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + rrddim_add(chart, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart, "write", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set(chart, "read", (collected_number)cg->io_queued.Read); + rrddim_set(chart, "write", (collected_number)cg->io_queued.Write); + rrdset_done(chart); +} + +void update_io_merged_ops_chart(struct cgroup *cg) { + RRDSET *chart = cg->st_merged_ops; + + if (unlikely(!cg->st_merged_ops)) { + char *title; + char *context; + int prio; + if (is_cgroup_systemd_service(cg)) { + title = "Systemd Services Merged Disk Read/Write Operations"; + context = "systemd.service.disk.merged_iops"; + prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 60; + } else { + title = "Merged I/O Operations (all disks)"; + context = k8s_is_kubepod(cg) ? "k8s.cgroup.merged_ops" : "cgroup.merged_ops"; + prio = cgroup_containers_chart_priority + 2100; + } + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_merged_ops = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "merged_ops", + NULL, + "disk", + context, + title, + "operations/s", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + rrddim_add(chart, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(chart, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set(chart, "read", (collected_number)cg->io_merged.Read); + rrddim_set(chart, "write", (collected_number)cg->io_merged.Write); + rrdset_done(chart); +} + +void update_cpu_some_pressure_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->cpu_pressure; + struct pressure_charts *pcs = &res->some; + RRDSET *chart = pcs->share_time.st; + + if (unlikely(!pcs->share_time.st)) { + char *title = "CPU some pressure"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_some_pressure" : "cgroup.cpu_some_pressure"; + int prio = cgroup_containers_chart_priority + 2200; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->share_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "cpu_some_pressure", + NULL, + "cpu", + context, + title, + "percentage", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100)); + rrdset_done(chart); +} + +void update_cpu_some_pressure_stall_time_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->cpu_pressure; + struct pressure_charts *pcs = &res->some; + RRDSET *chart = pcs->total_time.st; + + if (unlikely(!pcs->total_time.st)) { + char *title = "CPU some pressure stall time"; + char *context = + k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_some_pressure_stall_time" : "cgroup.cpu_some_pressure_stall_time"; + int prio = cgroup_containers_chart_priority + 2220; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->total_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "cpu_some_pressure_stall_time", + NULL, + "cpu", + context, + title, + "ms", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total)); + rrdset_done(chart); +} + +void update_cpu_full_pressure_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->cpu_pressure; + struct pressure_charts *pcs = &res->full; + RRDSET *chart = pcs->share_time.st; + + if (unlikely(!pcs->share_time.st)) { + char *title = "CPU full pressure"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_full_pressure" : "cgroup.cpu_full_pressure"; + int prio = cgroup_containers_chart_priority + 2240; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->share_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "cpu_full_pressure", + NULL, + "cpu", + context, + title, + "percentage", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100)); + rrdset_done(chart); +} + +void update_cpu_full_pressure_stall_time_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->cpu_pressure; + struct pressure_charts *pcs = &res->full; + RRDSET *chart = pcs->total_time.st; + + if (unlikely(!pcs->total_time.st)) { + char *title = "CPU full pressure stall time"; + char *context = + k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_full_pressure_stall_time" : "cgroup.cpu_full_pressure_stall_time"; + int prio = cgroup_containers_chart_priority + 2260; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->total_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "cpu_full_pressure_stall_time", + NULL, + "cpu", + context, + title, + "ms", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total)); + rrdset_done(chart); +} + +void update_mem_some_pressure_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->memory_pressure; + struct pressure_charts *pcs = &res->some; + RRDSET *chart = pcs->share_time.st; + + if (unlikely(!pcs->share_time.st)) { + char *title = "Memory some pressure"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.memory_some_pressure" : "cgroup.memory_some_pressure"; + int prio = cgroup_containers_chart_priority + 2300; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->share_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "mem_some_pressure", + NULL, + "mem", + context, + title, + "percentage", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100)); + rrdset_done(chart); +} + +void update_mem_some_pressure_stall_time_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->memory_pressure; + struct pressure_charts *pcs = &res->some; + RRDSET *chart = pcs->total_time.st; + + if (unlikely(!pcs->total_time.st)) { + char *title = "Memory some pressure stall time"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.memory_some_pressure_stall_time" : + "cgroup.memory_some_pressure_stall_time"; + int prio = cgroup_containers_chart_priority + 2320; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->total_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "memory_some_pressure_stall_time", + NULL, + "mem", + context, + title, + "ms", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total)); + rrdset_done(chart); +} + +void update_mem_full_pressure_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->memory_pressure; + struct pressure_charts *pcs = &res->full; + RRDSET *chart = pcs->share_time.st; + + if (unlikely(!pcs->share_time.st)) { + char *title = "Memory full pressure"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.memory_full_pressure" : "cgroup.memory_full_pressure"; + int prio = cgroup_containers_chart_priority + 2340; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->share_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "mem_full_pressure", + NULL, + "mem", + context, + title, + "percentage", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100)); + rrdset_done(chart); +} + +void update_mem_full_pressure_stall_time_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->memory_pressure; + struct pressure_charts *pcs = &res->full; + RRDSET *chart = pcs->total_time.st; + + if (unlikely(!pcs->total_time.st)) { + char *title = "Memory full pressure stall time"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.memory_full_pressure_stall_time" : + "cgroup.memory_full_pressure_stall_time"; + int prio = cgroup_containers_chart_priority + 2360; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->total_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "memory_full_pressure_stall_time", + NULL, + "mem", + context, + title, + "ms", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total)); + rrdset_done(chart); +} + +void update_irq_some_pressure_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->irq_pressure; + struct pressure_charts *pcs = &res->some; + RRDSET *chart = pcs->share_time.st; + + if (unlikely(!pcs->share_time.st)) { + char *title = "IRQ some pressure"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.irq_some_pressure" : "cgroup.irq_some_pressure"; + int prio = cgroup_containers_chart_priority + 2310; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->share_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "irq_some_pressure", + NULL, + "interrupts", + context, + title, + "percentage", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100)); + rrdset_done(chart); +} + +void update_irq_some_pressure_stall_time_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->irq_pressure; + struct pressure_charts *pcs = &res->some; + RRDSET *chart = pcs->total_time.st; + + if (unlikely(!pcs->total_time.st)) { + char *title = "IRQ some pressure stall time"; + char *context = + k8s_is_kubepod(cg) ? "k8s.cgroup.irq_some_pressure_stall_time" : "cgroup.irq_some_pressure_stall_time"; + int prio = cgroup_containers_chart_priority + 2330; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->total_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "irq_some_pressure_stall_time", + NULL, + "interrupts", + context, + title, + "ms", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total)); + rrdset_done(chart); +} + +void update_irq_full_pressure_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->irq_pressure; + struct pressure_charts *pcs = &res->full; + RRDSET *chart = pcs->share_time.st; + + if (unlikely(!pcs->share_time.st)) { + char *title = "IRQ full pressure"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.irq_full_pressure" : "cgroup.irq_full_pressure"; + int prio = cgroup_containers_chart_priority + 2350; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->share_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "irq_full_pressure", + NULL, + "interrupts", + context, + title, + "percentage", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100)); + rrdset_done(chart); +} + +void update_irq_full_pressure_stall_time_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->irq_pressure; + struct pressure_charts *pcs = &res->full; + RRDSET *chart = pcs->total_time.st; + + if (unlikely(!pcs->total_time.st)) { + char *title = "IRQ full pressure stall time"; + char *context = + k8s_is_kubepod(cg) ? "k8s.cgroup.irq_full_pressure_stall_time" : "cgroup.irq_full_pressure_stall_time"; + int prio = cgroup_containers_chart_priority + 2370; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->total_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "irq_full_pressure_stall_time", + NULL, + "interrupts", + context, + title, + "ms", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total)); + rrdset_done(chart); +} + +void update_io_some_pressure_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->io_pressure; + struct pressure_charts *pcs = &res->some; + RRDSET *chart = pcs->share_time.st; + + if (unlikely(!pcs->share_time.st)) { + char *title = "I/O some pressure"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.io_some_pressure" : "cgroup.io_some_pressure"; + int prio = cgroup_containers_chart_priority + 2400; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->share_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "io_some_pressure", + NULL, + "disk", + context, + title, + "percentage", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100)); + rrdset_done(chart); +} + +void update_io_some_pressure_stall_time_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->io_pressure; + struct pressure_charts *pcs = &res->some; + RRDSET *chart = pcs->total_time.st; + + if (unlikely(!pcs->total_time.st)) { + char *title = "I/O some pressure stall time"; + char *context = + k8s_is_kubepod(cg) ? "k8s.cgroup.io_some_pressure_stall_time" : "cgroup.io_some_pressure_stall_time"; + int prio = cgroup_containers_chart_priority + 2420; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->total_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "io_some_pressure_stall_time", + NULL, + "disk", + context, + title, + "ms", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total)); + rrdset_done(chart); +} + +void update_io_full_pressure_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->io_pressure; + struct pressure_charts *pcs = &res->full; + RRDSET *chart = pcs->share_time.st; + + if (unlikely(!pcs->share_time.st)) { + char *title = "I/O full pressure"; + char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.io_full_pressure" : "cgroup.io_full_pressure"; + int prio = cgroup_containers_chart_priority + 2440; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->share_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "io_full_pressure", + NULL, + "disk", + context, + title, + "percentage", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100)); + rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100)); + rrdset_done(chart); +} + +void update_io_full_pressure_stall_time_chart(struct cgroup *cg) { + if (is_cgroup_systemd_service(cg)) + return; + + struct pressure *res = &cg->io_pressure; + struct pressure_charts *pcs = &res->full; + RRDSET *chart = pcs->total_time.st; + + if (unlikely(!pcs->total_time.st)) { + char *title = "I/O full pressure stall time"; + char *context = + k8s_is_kubepod(cg) ? "k8s.cgroup.io_full_pressure_stall_time" : "cgroup.io_full_pressure_stall_time"; + int prio = cgroup_containers_chart_priority + 2460; + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = pcs->total_time.st = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "io_full_pressure_stall_time", + NULL, + "disk", + context, + title, + "ms", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total)); + rrdset_done(chart); +} + +void update_pids_current_chart(struct cgroup *cg) { + RRDSET *chart = cg->st_pids; + + if (unlikely(!cg->st_pids)) { + char *title; + char *context; + int prio; + if (is_cgroup_systemd_service(cg)) { + title = "Systemd Services Number of Processes"; + context = "systemd.service.pids.current"; + prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 70; + } else { + title = "Number of processes"; + context = k8s_is_kubepod(cg) ? "k8s.cgroup.pids_current" : "cgroup.pids_current"; + prio = cgroup_containers_chart_priority + 2150; + } + + char buff[RRD_ID_LENGTH_MAX + 1]; + chart = cg->st_pids = rrdset_create_localhost( + cgroup_chart_type(buff, cg), + "pids_current", + NULL, + "pids", + context, + title, + "pids", + PLUGIN_CGROUPS_NAME, + is_cgroup_systemd_service(cg) ? 
PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME, + prio, + cgroup_update_every, + RRDSET_TYPE_LINE); + + rrdset_update_rrdlabels(chart, cg->chart_labels); + cg->st_pids_rd_pids_current = rrddim_add(chart, "pids", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(chart, cg->st_pids_rd_pids_current, (collected_number)cg->pids.pids_current); + rrdset_done(chart); +} diff --git a/collectors/cgroups.plugin/cgroup-discovery.c b/collectors/cgroups.plugin/cgroup-discovery.c new file mode 100644 index 00000000000000..28c6d96cf93c50 --- /dev/null +++ b/collectors/cgroups.plugin/cgroup-discovery.c @@ -0,0 +1,1243 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "cgroup-internals.h" + +// discovery cgroup thread worker jobs +#define WORKER_DISCOVERY_INIT 0 +#define WORKER_DISCOVERY_FIND 1 +#define WORKER_DISCOVERY_PROCESS 2 +#define WORKER_DISCOVERY_PROCESS_RENAME 3 +#define WORKER_DISCOVERY_PROCESS_NETWORK 4 +#define WORKER_DISCOVERY_PROCESS_FIRST_TIME 5 +#define WORKER_DISCOVERY_UPDATE 6 +#define WORKER_DISCOVERY_CLEANUP 7 +#define WORKER_DISCOVERY_COPY 8 +#define WORKER_DISCOVERY_SHARE 9 +#define WORKER_DISCOVERY_LOCK 10 + +#if WORKER_UTILIZATION_MAX_JOB_TYPES < 11 +#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 11 +#endif + +struct cgroup *discovered_cgroup_root = NULL; + +char cgroup_chart_id_prefix[] = "cgroup_"; +char services_chart_id_prefix[] = "systemd_"; +char *cgroups_rename_script = NULL; + + +// ---------------------------------------------------------------------------- + +static inline void free_pressure(struct pressure *res) { + if (res->some.share_time.st) rrdset_is_obsolete___safe_from_collector_thread(res->some.share_time.st); + if (res->some.total_time.st) rrdset_is_obsolete___safe_from_collector_thread(res->some.total_time.st); + if (res->full.share_time.st) rrdset_is_obsolete___safe_from_collector_thread(res->full.share_time.st); + if (res->full.total_time.st) rrdset_is_obsolete___safe_from_collector_thread(res->full.total_time.st); + freez(res->filename); +} + +static inline void cgroup_free_network_interfaces(struct cgroup *cg) { + while(cg->interfaces) { + struct cgroup_network_interface *i = cg->interfaces; + cg->interfaces = i->next; + + // delete the registration of proc_net_dev rename + netdev_rename_device_del(i->host_device); + + freez((void *)i->host_device); + freez((void *)i->container_device); + freez((void *)i); + } +} + +static inline void cgroup_free(struct cgroup *cg) { + netdata_log_debug(D_CGROUP, "Removing cgroup '%s' with chart id '%s' (was %s and %s)", cg->id, cg->chart_id, (cg->enabled)?"enabled":"disabled", (cg->available)?"available":"not available"); + + cgroup_netdev_delete(cg); + + if(cg->st_cpu) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu); + if(cg->st_cpu_limit) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_limit); + if(cg->st_cpu_per_core) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_per_core); + if(cg->st_cpu_nr_throttled) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_nr_throttled); + if(cg->st_cpu_throttled_time) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_throttled_time); + if(cg->st_cpu_shares) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_shares); + if(cg->st_mem) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem); + if(cg->st_writeback) rrdset_is_obsolete___safe_from_collector_thread(cg->st_writeback); + if(cg->st_mem_activity) 
rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_activity); + if(cg->st_pgfaults) rrdset_is_obsolete___safe_from_collector_thread(cg->st_pgfaults); + if(cg->st_mem_usage) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_usage); + if(cg->st_mem_usage_limit) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_usage_limit); + if(cg->st_mem_utilization) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_utilization); + if(cg->st_mem_failcnt) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_failcnt); + if(cg->st_io) rrdset_is_obsolete___safe_from_collector_thread(cg->st_io); + if(cg->st_serviced_ops) rrdset_is_obsolete___safe_from_collector_thread(cg->st_serviced_ops); + if(cg->st_throttle_io) rrdset_is_obsolete___safe_from_collector_thread(cg->st_throttle_io); + if(cg->st_throttle_serviced_ops) rrdset_is_obsolete___safe_from_collector_thread(cg->st_throttle_serviced_ops); + if(cg->st_queued_ops) rrdset_is_obsolete___safe_from_collector_thread(cg->st_queued_ops); + if(cg->st_merged_ops) rrdset_is_obsolete___safe_from_collector_thread(cg->st_merged_ops); + if(cg->st_pids) rrdset_is_obsolete___safe_from_collector_thread(cg->st_pids); + + freez(cg->filename_cpuset_cpus); + freez(cg->filename_cpu_cfs_period); + freez(cg->filename_cpu_cfs_quota); + freez(cg->filename_memory_limit); + freez(cg->filename_memoryswap_limit); + + cgroup_free_network_interfaces(cg); + + freez(cg->cpuacct_usage.cpu_percpu); + + freez(cg->cpuacct_stat.filename); + freez(cg->cpuacct_usage.filename); + freez(cg->cpuacct_cpu_throttling.filename); + freez(cg->cpuacct_cpu_shares.filename); + + arl_free(cg->memory.arl_base); + freez(cg->memory.filename_detailed); + freez(cg->memory.filename_failcnt); + freez(cg->memory.filename_usage_in_bytes); + freez(cg->memory.filename_msw_usage_in_bytes); + + freez(cg->io_service_bytes.filename); + freez(cg->io_serviced.filename); + + freez(cg->throttle_io_service_bytes.filename); + freez(cg->throttle_io_serviced.filename); + + freez(cg->io_merged.filename); + freez(cg->io_queued.filename); + freez(cg->pids.pids_current_filename); + + free_pressure(&cg->cpu_pressure); + free_pressure(&cg->io_pressure); + free_pressure(&cg->memory_pressure); + free_pressure(&cg->irq_pressure); + + freez(cg->id); + freez(cg->intermediate_id); + freez(cg->chart_id); + freez(cg->name); + + rrdlabels_destroy(cg->chart_labels); + + freez(cg); + + cgroup_root_count--; +} + +// ---------------------------------------------------------------------------- +// add/remove/find cgroup objects + +#define CGROUP_CHARTID_LINE_MAX 1024 + +static inline char *cgroup_chart_id_strdupz(const char *s) { + if(!s || !*s) s = "/"; + + if(*s == '/' && s[1] != '\0') s++; + + char *r = strdupz(s); + netdata_fix_chart_id(r); + + return r; +} + +// TODO: move the code to cgroup_chart_id_strdupz() when the renaming script is fixed +static inline void substitute_dots_in_id(char *s) { + // dots are used to distinguish chart type and id in streaming, so we should replace them + for (char *d = s; *d; d++) { + if (*d == '.') + *d = '-'; + } +} + +// ---------------------------------------------------------------------------- +// parse k8s labels + +char *cgroup_parse_resolved_name_and_labels(RRDLABELS *labels, char *data) { + // the first word, up to the first space is the name + char *name = strsep_skip_consecutive_separators(&data, " "); + + // the rest are key=value pairs separated by comma + while(data) { + char *pair = strsep_skip_consecutive_separators(&data, ","); + rrdlabels_add_pair(labels, 
pair, RRDLABEL_SRC_AUTO | RRDLABEL_SRC_K8S); + } + + return name; +} + +static inline void discovery_rename_cgroup(struct cgroup *cg) { + if (!cg->pending_renames) { + return; + } + cg->pending_renames--; + + netdata_log_debug(D_CGROUP, "looking for the name of cgroup '%s' with chart id '%s'", cg->id, cg->chart_id); + netdata_log_debug(D_CGROUP, "executing command %s \"%s\" for cgroup '%s'", cgroups_rename_script, cg->intermediate_id, cg->chart_id); + pid_t cgroup_pid; + + FILE *fp_child_input, *fp_child_output; + (void)netdata_popen_raw_default_flags_and_environment(&cgroup_pid, &fp_child_input, &fp_child_output, cgroups_rename_script, cg->id, cg->intermediate_id); + if (!fp_child_output) { + collector_error("CGROUP: cannot popen(%s \"%s\", \"r\").", cgroups_rename_script, cg->intermediate_id); + cg->pending_renames = 0; + cg->processed = 1; + return; + } + + char buffer[CGROUP_CHARTID_LINE_MAX + 1]; + char *new_name = fgets(buffer, CGROUP_CHARTID_LINE_MAX, fp_child_output); + int exit_code = netdata_pclose(fp_child_input, fp_child_output, cgroup_pid); + + switch (exit_code) { + case 0: + cg->pending_renames = 0; + break; + + case 3: + cg->pending_renames = 0; + cg->processed = 1; + break; + + default: + break; + } + + if (cg->pending_renames || cg->processed) + return; + if (!new_name || !*new_name || *new_name == '\n') + return; + if (!(new_name = trim(new_name))) + return; + + if (!cg->chart_labels) + cg->chart_labels = rrdlabels_create(); + // read the new labels and remove the obsolete ones + rrdlabels_unmark_all(cg->chart_labels); + char *name = cgroup_parse_resolved_name_and_labels(cg->chart_labels, new_name); + rrdlabels_remove_all_unmarked(cg->chart_labels); + + freez(cg->name); + cg->name = strdupz(name); + + freez(cg->chart_id); + cg->chart_id = cgroup_chart_id_strdupz(name); + + substitute_dots_in_id(cg->chart_id); + cg->hash_chart_id = simple_hash(cg->chart_id); +} + +static void is_cgroup_procs_exist(netdata_ebpf_cgroup_shm_body_t *out, char *id) { + struct stat buf; + + snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_cpuset_base, id); + if (likely(stat(out->path, &buf) == 0)) { + return; + } + + snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_blkio_base, id); + if (likely(stat(out->path, &buf) == 0)) { + return; + } + + snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_memory_base, id); + if (likely(stat(out->path, &buf) == 0)) { + return; + } + + snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_devices_base, id); + if (likely(stat(out->path, &buf) == 0)) { + return; + } + + out->path[0] = '\0'; + out->enabled = 0; +} + +static inline void convert_cgroup_to_systemd_service(struct cgroup *cg) { + char buffer[CGROUP_CHARTID_LINE_MAX + 1]; + cg->options |= CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE; + strncpyz(buffer, cg->id, CGROUP_CHARTID_LINE_MAX); + char *s = buffer; + + // skip to the last slash + size_t len = strlen(s); + while (len--) { + if (unlikely(s[len] == '/')) { + break; + } + } + if (len) { + s = &s[len + 1]; + } + + // remove extension + len = strlen(s); + while (len--) { + if (unlikely(s[len] == '.')) { + break; + } + } + if (len) { + s[len] = '\0'; + } + + freez(cg->name); + cg->name = strdupz(s); + + freez(cg->chart_id); + cg->chart_id = cgroup_chart_id_strdupz(s); + substitute_dots_in_id(cg->chart_id); + cg->hash_chart_id = simple_hash(cg->chart_id); +} + +static inline struct cgroup *discovery_cgroup_add(const char *id) { + netdata_log_debug(D_CGROUP, "adding to list, cgroup with id '%s'", id); + + struct 
cgroup *cg = callocz(1, sizeof(struct cgroup)); + + cg->id = strdupz(id); + cg->hash = simple_hash(cg->id); + + cg->name = strdupz(id); + + cg->intermediate_id = cgroup_chart_id_strdupz(id); + + cg->chart_id = cgroup_chart_id_strdupz(id); + substitute_dots_in_id(cg->chart_id); + cg->hash_chart_id = simple_hash(cg->chart_id); + + if (cgroup_use_unified_cgroups) { + cg->options |= CGROUP_OPTIONS_IS_UNIFIED; + } + + if (!discovered_cgroup_root) + discovered_cgroup_root = cg; + else { + struct cgroup *t; + for (t = discovered_cgroup_root; t->discovered_next; t = t->discovered_next) { + } + t->discovered_next = cg; + } + + return cg; +} + +static inline struct cgroup *discovery_cgroup_find(const char *id) { + netdata_log_debug(D_CGROUP, "searching for cgroup '%s'", id); + + uint32_t hash = simple_hash(id); + + struct cgroup *cg; + for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) { + if(hash == cg->hash && strcmp(id, cg->id) == 0) + break; + } + + netdata_log_debug(D_CGROUP, "cgroup '%s' %s in memory", id, (cg)?"found":"not found"); + return cg; +} + +static int calc_cgroup_depth(const char *id) { + int depth = 0; + const char *s; + for (s = id; *s; s++) { + depth += unlikely(*s == '/'); + } + return depth; +} + +static inline void discovery_find_cgroup_in_dir_callback(const char *dir) { + if (!dir || !*dir) { + dir = "/"; + } + netdata_log_debug(D_CGROUP, "examining cgroup dir '%s'", dir); + + struct cgroup *cg = discovery_cgroup_find(dir); + if (cg) { + cg->available = 1; + return; + } + + if (cgroup_root_count >= cgroup_root_max) { + collector_info("CGROUP: maximum number of cgroups reached (%d). Not adding cgroup '%s'", cgroup_root_count, dir); + return; + } + + if (cgroup_max_depth > 0) { + int depth = calc_cgroup_depth(dir); + if (depth > cgroup_max_depth) { + collector_info("CGROUP: '%s' is too deep (%d, while max is %d)", dir, depth, cgroup_max_depth); + return; + } + } + + cg = discovery_cgroup_add(dir); + cg->available = 1; + cg->first_time_seen = 1; + cg->function_ready = false; + cgroup_root_count++; +} + +static inline int discovery_find_dir_in_subdirs(const char *base, const char *this, void (*callback)(const char *)) { + if(!this) this = base; + netdata_log_debug(D_CGROUP, "searching for directories in '%s' (base '%s')", this?this:"", base); + + size_t dirlen = strlen(this), baselen = strlen(base); + + int ret = -1; + int enabled = -1; + + const char *relative_path = &this[baselen]; + if(!*relative_path) relative_path = "/"; + + DIR *dir = opendir(this); + if(!dir) { + collector_error("CGROUP: cannot read directory '%s'", base); + return ret; + } + ret = 1; + + callback(relative_path); + + struct dirent *de = NULL; + while((de = readdir(dir))) { + if(de->d_type == DT_DIR + && ( + (de->d_name[0] == '.' && de->d_name[1] == '\0') + || (de->d_name[0] == '.' && de->d_name[1] == '.' 
&& de->d_name[2] == '\0') + )) + continue; + + if(de->d_type == DT_DIR) { + if(enabled == -1) { + const char *r = relative_path; + if(*r == '\0') r = "/"; + + // do not decent in directories we are not interested + enabled = matches_search_cgroup_paths(r); + } + + if(enabled) { + char *s = mallocz(dirlen + strlen(de->d_name) + 2); + strcpy(s, this); + strcat(s, "/"); + strcat(s, de->d_name); + int ret2 = discovery_find_dir_in_subdirs(base, s, callback); + if(ret2 > 0) ret += ret2; + freez(s); + } + } + } + + closedir(dir); + return ret; +} + +static inline void discovery_mark_as_unavailable_all_cgroups() { + for (struct cgroup *cg = discovered_cgroup_root; cg; cg = cg->discovered_next) { + cg->available = 0; + } +} + +static inline void discovery_update_filenames_cgroup_v1(struct cgroup *cg) { + char filename[FILENAME_MAX + 1]; + struct stat buf; + + // CPU + if (unlikely(cgroup_enable_cpuacct_stat && !cg->cpuacct_stat.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.stat", cgroup_cpuacct_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->cpuacct_stat.filename = strdupz(filename); + cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat; + snprintfz(filename, FILENAME_MAX, "%s%s/cpuset.cpus", cgroup_cpuset_base, cg->id); + cg->filename_cpuset_cpus = strdupz(filename); + snprintfz(filename, FILENAME_MAX, "%s%s/cpu.cfs_period_us", cgroup_cpuacct_base, cg->id); + cg->filename_cpu_cfs_period = strdupz(filename); + snprintfz(filename, FILENAME_MAX, "%s%s/cpu.cfs_quota_us", cgroup_cpuacct_base, cg->id); + cg->filename_cpu_cfs_quota = strdupz(filename); + } + } + // FIXME: remove usage_percpu + if (unlikely(cgroup_enable_cpuacct_usage && !cg->cpuacct_usage.filename && !is_cgroup_systemd_service(cg))) { + snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.usage_percpu", cgroup_cpuacct_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->cpuacct_usage.filename = strdupz(filename); + cg->cpuacct_usage.enabled = cgroup_enable_cpuacct_usage; + } + } + if (unlikely( + cgroup_enable_cpuacct_cpu_throttling && !cg->cpuacct_cpu_throttling.filename && + !is_cgroup_systemd_service(cg))) { + snprintfz(filename, FILENAME_MAX, "%s%s/cpu.stat", cgroup_cpuacct_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->cpuacct_cpu_throttling.filename = strdupz(filename); + cg->cpuacct_cpu_throttling.enabled = cgroup_enable_cpuacct_cpu_throttling; + } + } + if (unlikely( + cgroup_enable_cpuacct_cpu_shares && !cg->cpuacct_cpu_shares.filename && !is_cgroup_systemd_service(cg))) { + snprintfz(filename, FILENAME_MAX, "%s%s/cpu.shares", cgroup_cpuacct_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->cpuacct_cpu_shares.filename = strdupz(filename); + cg->cpuacct_cpu_shares.enabled = cgroup_enable_cpuacct_cpu_shares; + } + } + + // Memory + if (unlikely( + (cgroup_enable_detailed_memory || cgroup_used_memory) && !cg->memory.filename_detailed && + (cgroup_used_memory || cgroup_enable_systemd_services_detailed_memory || !is_cgroup_systemd_service(cg)))) { + snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_memory_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->memory.filename_detailed = strdupz(filename); + cg->memory.enabled_detailed = + (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES) ? 
CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_AUTO; + } + } + if (unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) { + snprintfz(filename, FILENAME_MAX, "%s%s/memory.usage_in_bytes", cgroup_memory_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->memory.filename_usage_in_bytes = strdupz(filename); + cg->memory.enabled_usage_in_bytes = cgroup_enable_memory; + snprintfz(filename, FILENAME_MAX, "%s%s/memory.limit_in_bytes", cgroup_memory_base, cg->id); + cg->filename_memory_limit = strdupz(filename); + } + } + if (unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) { + snprintfz(filename, FILENAME_MAX, "%s%s/memory.memsw.usage_in_bytes", cgroup_memory_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->memory.filename_msw_usage_in_bytes = strdupz(filename); + cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap; + snprintfz(filename, FILENAME_MAX, "%s%s/memory.memsw.limit_in_bytes", cgroup_memory_base, cg->id); + cg->filename_memoryswap_limit = strdupz(filename); + } + } + if (unlikely(cgroup_enable_memory_failcnt && !cg->memory.filename_failcnt)) { + snprintfz(filename, FILENAME_MAX, "%s%s/memory.failcnt", cgroup_memory_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->memory.filename_failcnt = strdupz(filename); + cg->memory.enabled_failcnt = cgroup_enable_memory_failcnt; + } + } + + // Blkio + if (unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes_recursive", cgroup_blkio_base, cg->id); + if (unlikely(stat(filename, &buf) != -1)) { + cg->io_service_bytes.filename = strdupz(filename); + cg->io_service_bytes.enabled = cgroup_enable_blkio_io; + } else { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes", cgroup_blkio_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->io_service_bytes.filename = strdupz(filename); + cg->io_service_bytes.enabled = cgroup_enable_blkio_io; + } + } + } + if (unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced_recursive", cgroup_blkio_base, cg->id); + if (unlikely(stat(filename, &buf) != -1)) { + cg->io_serviced.filename = strdupz(filename); + cg->io_serviced.enabled = cgroup_enable_blkio_ops; + } else { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced", cgroup_blkio_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->io_serviced.filename = strdupz(filename); + cg->io_serviced.enabled = cgroup_enable_blkio_ops; + } + } + } + if (unlikely(cgroup_enable_blkio_throttle_io && !cg->throttle_io_service_bytes.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes_recursive", cgroup_blkio_base, cg->id); + if (unlikely(stat(filename, &buf) != -1)) { + cg->throttle_io_service_bytes.filename = strdupz(filename); + cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io; + } else { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes", cgroup_blkio_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->throttle_io_service_bytes.filename = strdupz(filename); + cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io; + } + } + } + if (unlikely(cgroup_enable_blkio_throttle_ops && !cg->throttle_io_serviced.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced_recursive", cgroup_blkio_base, cg->id); + if (unlikely(stat(filename, &buf) != -1)) { + 
cg->throttle_io_serviced.filename = strdupz(filename); + cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops; + } else { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced", cgroup_blkio_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->throttle_io_serviced.filename = strdupz(filename); + cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops; + } + } + } + if (unlikely(cgroup_enable_blkio_merged_ops && !cg->io_merged.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged_recursive", cgroup_blkio_base, cg->id); + if (unlikely(stat(filename, &buf) != -1)) { + cg->io_merged.filename = strdupz(filename); + cg->io_merged.enabled = cgroup_enable_blkio_merged_ops; + } else { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged", cgroup_blkio_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->io_merged.filename = strdupz(filename); + cg->io_merged.enabled = cgroup_enable_blkio_merged_ops; + } + } + } + if (unlikely(cgroup_enable_blkio_queued_ops && !cg->io_queued.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued_recursive", cgroup_blkio_base, cg->id); + if (unlikely(stat(filename, &buf) != -1)) { + cg->io_queued.filename = strdupz(filename); + cg->io_queued.enabled = cgroup_enable_blkio_queued_ops; + } else { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued", cgroup_blkio_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->io_queued.filename = strdupz(filename); + cg->io_queued.enabled = cgroup_enable_blkio_queued_ops; + } + } + } + + // Pids + if (unlikely(!cg->pids.pids_current_filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/pids.current", cgroup_pids_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->pids.pids_current_filename = strdupz(filename); + } + } +} + +static inline void discovery_update_filenames_cgroup_v2(struct cgroup *cg) { + char filename[FILENAME_MAX + 1]; + struct stat buf; + + // CPU + if (unlikely((cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_cpu_throttling) && !cg->cpuacct_stat.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/cpu.stat", cgroup_unified_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->cpuacct_stat.filename = strdupz(filename); + cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat; + cg->cpuacct_cpu_throttling.enabled = cgroup_enable_cpuacct_cpu_throttling; + cg->filename_cpuset_cpus = NULL; + cg->filename_cpu_cfs_period = NULL; + snprintfz(filename, FILENAME_MAX, "%s%s/cpu.max", cgroup_unified_base, cg->id); + cg->filename_cpu_cfs_quota = strdupz(filename); + } + } + if (unlikely(cgroup_enable_cpuacct_cpu_shares && !cg->cpuacct_cpu_shares.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/cpu.weight", cgroup_unified_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->cpuacct_cpu_shares.filename = strdupz(filename); + cg->cpuacct_cpu_shares.enabled = cgroup_enable_cpuacct_cpu_shares; + } + } + + // Memory + // FIXME: this if condition! + if (unlikely( + (cgroup_enable_detailed_memory || cgroup_used_memory) && !cg->memory.filename_detailed && + (cgroup_used_memory || cgroup_enable_systemd_services_detailed_memory || !is_cgroup_systemd_service(cg)))) { + snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_unified_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->memory.filename_detailed = strdupz(filename); + cg->memory.enabled_detailed = + (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES) ? 
CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_AUTO; + } + } + + if (unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) { + snprintfz(filename, FILENAME_MAX, "%s%s/memory.current", cgroup_unified_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->memory.filename_usage_in_bytes = strdupz(filename); + cg->memory.enabled_usage_in_bytes = cgroup_enable_memory; + snprintfz(filename, FILENAME_MAX, "%s%s/memory.max", cgroup_unified_base, cg->id); + cg->filename_memory_limit = strdupz(filename); + } + } + + if (unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) { + snprintfz(filename, FILENAME_MAX, "%s%s/memory.swap.current", cgroup_unified_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->memory.filename_msw_usage_in_bytes = strdupz(filename); + cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap; + snprintfz(filename, FILENAME_MAX, "%s%s/memory.swap.max", cgroup_unified_base, cg->id); + cg->filename_memoryswap_limit = strdupz(filename); + } + } + + // Blkio + if (unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/io.stat", cgroup_unified_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->io_service_bytes.filename = strdupz(filename); + cg->io_service_bytes.enabled = cgroup_enable_blkio_io; + } + } + + if (unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/io.stat", cgroup_unified_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->io_serviced.filename = strdupz(filename); + cg->io_serviced.enabled = cgroup_enable_blkio_ops; + } + } + + // PSI + if (unlikely(cgroup_enable_pressure_cpu && !cg->cpu_pressure.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/cpu.pressure", cgroup_unified_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->cpu_pressure.filename = strdupz(filename); + cg->cpu_pressure.some.enabled = cgroup_enable_pressure_cpu; + cg->cpu_pressure.full.enabled = CONFIG_BOOLEAN_NO; + } + } + + if (unlikely((cgroup_enable_pressure_io_some || cgroup_enable_pressure_io_full) && !cg->io_pressure.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/io.pressure", cgroup_unified_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->io_pressure.filename = strdupz(filename); + cg->io_pressure.some.enabled = cgroup_enable_pressure_io_some; + cg->io_pressure.full.enabled = cgroup_enable_pressure_io_full; + } + } + + if (unlikely( + (cgroup_enable_pressure_memory_some || cgroup_enable_pressure_memory_full) && + !cg->memory_pressure.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/memory.pressure", cgroup_unified_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->memory_pressure.filename = strdupz(filename); + cg->memory_pressure.some.enabled = cgroup_enable_pressure_memory_some; + cg->memory_pressure.full.enabled = cgroup_enable_pressure_memory_full; + } + } + + if (unlikely((cgroup_enable_pressure_irq_some || cgroup_enable_pressure_irq_full) && !cg->irq_pressure.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/irq.pressure", cgroup_unified_base, cg->id); + if (likely(stat(filename, &buf) != -1)) { + cg->irq_pressure.filename = strdupz(filename); + cg->irq_pressure.some.enabled = cgroup_enable_pressure_irq_some; + cg->irq_pressure.full.enabled = cgroup_enable_pressure_irq_full; + } + } + + // Pids + if (unlikely(!cg->pids.pids_current_filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/pids.current", cgroup_unified_base, cg->id); + 
if (likely(stat(filename, &buf) != -1)) { + cg->pids.pids_current_filename = strdupz(filename); + } + } +} + +static inline void discovery_update_filenames_all_cgroups() { + for (struct cgroup *cg = discovered_cgroup_root; cg; cg = cg->discovered_next) { + if (unlikely(!cg->available || !cg->enabled || cg->pending_renames)) + continue; + + if (!cgroup_use_unified_cgroups) + discovery_update_filenames_cgroup_v1(cg); + else if (likely(cgroup_unified_exist)) + discovery_update_filenames_cgroup_v2(cg); + } +} + +static inline void discovery_cleanup_all_cgroups() { + struct cgroup *cg = discovered_cgroup_root, *last = NULL; + + for(; cg ;) { + if(!cg->available) { + // enable the first duplicate cgroup + { + struct cgroup *t; + for (t = discovered_cgroup_root; t; t = t->discovered_next) { + if (t != cg && t->available && !t->enabled && t->options & CGROUP_OPTIONS_DISABLED_DUPLICATE && + (is_cgroup_systemd_service(t) == is_cgroup_systemd_service(cg)) && + t->hash_chart_id == cg->hash_chart_id && !strcmp(t->chart_id, cg->chart_id)) { + netdata_log_debug(D_CGROUP, "Enabling duplicate of cgroup '%s' with id '%s', because the original with id '%s' stopped.", t->chart_id, t->id, cg->id); + t->enabled = 1; + t->options &= ~CGROUP_OPTIONS_DISABLED_DUPLICATE; + break; + } + } + } + + if(!last) + discovered_cgroup_root = cg->discovered_next; + else + last->discovered_next = cg->discovered_next; + + cgroup_free(cg); + + if(!last) + cg = discovered_cgroup_root; + else + cg = last->discovered_next; + } + else { + last = cg; + cg = cg->discovered_next; + } + } +} + +static inline void discovery_copy_discovered_cgroups_to_reader() { + netdata_log_debug(D_CGROUP, "copy discovered cgroups to the main group list"); + + struct cgroup *cg; + + for (cg = discovered_cgroup_root; cg; cg = cg->discovered_next) { + cg->next = cg->discovered_next; + } + + cgroup_root = discovered_cgroup_root; +} + +static inline void discovery_share_cgroups_with_ebpf() { + struct cgroup *cg; + int count; + struct stat buf; + + if (shm_mutex_cgroup_ebpf == SEM_FAILED) { + return; + } + sem_wait(shm_mutex_cgroup_ebpf); + + for (cg = cgroup_root, count = 0; cg; cg = cg->next, count++) { + netdata_ebpf_cgroup_shm_body_t *ptr = &shm_cgroup_ebpf.body[count]; + char *prefix = (is_cgroup_systemd_service(cg)) ? 
services_chart_id_prefix : cgroup_chart_id_prefix; + snprintfz(ptr->name, CGROUP_EBPF_NAME_SHARED_LENGTH - 1, "%s%s", prefix, cg->chart_id); + ptr->hash = simple_hash(ptr->name); + ptr->options = cg->options; + ptr->enabled = cg->enabled; + if (cgroup_use_unified_cgroups) { + snprintfz(ptr->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_unified_base, cg->id); + if (likely(stat(ptr->path, &buf) == -1)) { + ptr->path[0] = '\0'; + ptr->enabled = 0; + } + } else { + is_cgroup_procs_exist(ptr, cg->id); + } + + netdata_log_debug(D_CGROUP, "cgroup shared: NAME=%s, ENABLED=%d", ptr->name, ptr->enabled); + } + + shm_cgroup_ebpf.header->cgroup_root_count = count; + sem_post(shm_mutex_cgroup_ebpf); +} + +static inline void discovery_find_all_cgroups_v1() { + if (cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_usage) { + if (discovery_find_dir_in_subdirs(cgroup_cpuacct_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) { + cgroup_enable_cpuacct_stat = cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO; + collector_error("CGROUP: disabled cpu statistics."); + } + } + + if (cgroup_enable_blkio_io || cgroup_enable_blkio_ops || cgroup_enable_blkio_throttle_io || + cgroup_enable_blkio_throttle_ops || cgroup_enable_blkio_merged_ops || cgroup_enable_blkio_queued_ops) { + if (discovery_find_dir_in_subdirs(cgroup_blkio_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) { + cgroup_enable_blkio_io = cgroup_enable_blkio_ops = cgroup_enable_blkio_throttle_io = + cgroup_enable_blkio_throttle_ops = cgroup_enable_blkio_merged_ops = cgroup_enable_blkio_queued_ops = + CONFIG_BOOLEAN_NO; + collector_error("CGROUP: disabled blkio statistics."); + } + } + + if (cgroup_enable_memory || cgroup_enable_detailed_memory || cgroup_enable_swap || cgroup_enable_memory_failcnt) { + if (discovery_find_dir_in_subdirs(cgroup_memory_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) { + cgroup_enable_memory = cgroup_enable_detailed_memory = cgroup_enable_swap = cgroup_enable_memory_failcnt = + CONFIG_BOOLEAN_NO; + collector_error("CGROUP: disabled memory statistics."); + } + } + + if (cgroup_search_in_devices) { + if (discovery_find_dir_in_subdirs(cgroup_devices_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) { + cgroup_search_in_devices = 0; + collector_error("CGROUP: disabled devices statistics."); + } + } +} + +static inline void discovery_find_all_cgroups_v2() { + if (discovery_find_dir_in_subdirs(cgroup_unified_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) { + cgroup_unified_exist = CONFIG_BOOLEAN_NO; + collector_error("CGROUP: disabled unified cgroups statistics."); + } +} + +static int is_digits_only(const char *s) { + do { + if (!isdigit(*s++)) { + return 0; + } + } while (*s); + + return 1; +} + +static int is_cgroup_k8s_container(const char *id) { + // examples: + // https://github.com/netdata/netdata/blob/0fc101679dcd12f1cb8acdd07bb4c85d8e553e53/collectors/cgroups.plugin/cgroup-name.sh#L121-L147 + const char *p = id; + const char *pp = NULL; + int i = 0; + size_t l = 3; // pod + while ((p = strstr(p, "pod"))) { + i++; + p += l; + pp = p; + } + return !(i < 2 || !pp || !(pp = strchr(pp, '/')) || !pp++ || !*pp); +} + +#define TASK_COMM_LEN 16 + +static int k8s_get_container_first_proc_comm(const char *id, char *comm) { + if (!is_cgroup_k8s_container(id)) { + return 1; + } + + static procfile *ff = NULL; + + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/%s/cgroup.procs", cgroup_cpuacct_base, id); + + ff = procfile_reopen(ff, filename, NULL, 
CGROUP_PROCFILE_FLAG); + if (unlikely(!ff)) { + netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot open file '%s'.", filename); + return 1; + } + + ff = procfile_readall(ff); + if (unlikely(!ff)) { + netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot read file '%s'.", filename); + return 1; + } + + unsigned long lines = procfile_lines(ff); + if (likely(lines < 2)) { + return 1; + } + + char *pid = procfile_lineword(ff, 0, 0); + if (!pid || !*pid) { + return 1; + } + + snprintfz(filename, FILENAME_MAX, "%s/proc/%s/comm", netdata_configured_host_prefix, pid); + + ff = procfile_reopen(ff, filename, NULL, PROCFILE_FLAG_DEFAULT); + if (unlikely(!ff)) { + netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot open file '%s'.", filename); + return 1; + } + + ff = procfile_readall(ff); + if (unlikely(!ff)) { + netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot read file '%s'.", filename); + return 1; + } + + lines = procfile_lines(ff); + if (unlikely(lines != 2)) { + return 1; + } + + char *proc_comm = procfile_lineword(ff, 0, 0); + if (!proc_comm || !*proc_comm) { + return 1; + } + + strncpyz(comm, proc_comm, TASK_COMM_LEN); + return 0; +} + +static inline void discovery_process_first_time_seen_cgroup(struct cgroup *cg) { + if (!cg->first_time_seen) { + return; + } + cg->first_time_seen = 0; + + char comm[TASK_COMM_LEN + 1]; + + if (cg->container_orchestrator == CGROUPS_ORCHESTRATOR_UNSET) { + if (strstr(cg->id, "kubepods")) { + cg->container_orchestrator = CGROUPS_ORCHESTRATOR_K8S; + } else { + cg->container_orchestrator = CGROUPS_ORCHESTRATOR_UNKNOWN; + } + } + + if (is_inside_k8s && !k8s_get_container_first_proc_comm(cg->id, comm)) { + // container initialization may take some time when CPU % is high + // seen on GKE: comm is '6' before 'runc:[2:INIT]' (dunno if it could be another number) + if (is_digits_only(comm) || matches_entrypoint_parent_process_comm(comm)) { + cg->first_time_seen = 1; + return; + } + if (!strcmp(comm, "pause")) { + // a container that holds the network namespace for the pod + // we don't need to collect its metrics + cg->processed = 1; + return; + } + } + + if (cgroup_enable_systemd_services && matches_systemd_services_cgroups(cg->id)) { + netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') matches 'cgroups to match as systemd services'", cg->id, cg->chart_id); + convert_cgroup_to_systemd_service(cg); + return; + } + + if (matches_enabled_cgroup_renames(cg->id)) { + netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') matches 'run script to rename cgroups matching', will try to rename it", cg->id, cg->chart_id); + if (is_inside_k8s && is_cgroup_k8s_container(cg->id)) { + // it may take up to a minute for the K8s API to return data for the container + // tested on AWS K8s cluster with 100% CPU utilization + cg->pending_renames = 9; // 1.5 minute + } else { + cg->pending_renames = 2; + } + } +} + +static int discovery_is_cgroup_duplicate(struct cgroup *cg) { + // https://github.com/netdata/netdata/issues/797#issuecomment-241248884 + struct cgroup *c; + for (c = discovered_cgroup_root; c; c = c->discovered_next) { + if (c != cg && c->enabled && (is_cgroup_systemd_service(c) == is_cgroup_systemd_service(cg)) && + c->hash_chart_id == cg->hash_chart_id && !strcmp(c->chart_id, cg->chart_id)) { + collector_error( + "CGROUP: chart id '%s' already exists with id '%s' and is enabled and available. 
Disabling cgroup with id '%s'.", + cg->chart_id, + c->id, + cg->id); + return 1; + } + } + return 0; +} + +// ---------------------------------------------------------------------------- +// cgroup network interfaces + +#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048 + +static inline void read_cgroup_network_interfaces(struct cgroup *cg) { + netdata_log_debug(D_CGROUP, "looking for the network interfaces of cgroup '%s' with chart id '%s'", cg->id, cg->chart_id); + + pid_t cgroup_pid; + char cgroup_identifier[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1]; + + if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { + snprintfz(cgroup_identifier, CGROUP_NETWORK_INTERFACE_MAX_LINE, "%s%s", cgroup_cpuacct_base, cg->id); + } + else { + snprintfz(cgroup_identifier, CGROUP_NETWORK_INTERFACE_MAX_LINE, "%s%s", cgroup_unified_base, cg->id); + } + + netdata_log_debug(D_CGROUP, "executing cgroup_identifier %s --cgroup '%s' for cgroup '%s'", cgroups_network_interface_script, cgroup_identifier, cg->id); + FILE *fp_child_input, *fp_child_output; + (void)netdata_popen_raw_default_flags_and_environment(&cgroup_pid, &fp_child_input, &fp_child_output, cgroups_network_interface_script, "--cgroup", cgroup_identifier); + if(!fp_child_output) { + collector_error("CGROUP: cannot popen(%s --cgroup \"%s\", \"r\").", cgroups_network_interface_script, cgroup_identifier); + return; + } + + char *s; + char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1]; + while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp_child_output))) { + trim(s); + + if(*s && *s != '\n') { + char *t = s; + while(*t && *t != ' ') t++; + if(*t == ' ') { + *t = '\0'; + t++; + } + + if(!*s) { + collector_error("CGROUP: empty host interface returned by script"); + continue; + } + + if(!*t) { + collector_error("CGROUP: empty guest interface returned by script"); + continue; + } + + struct cgroup_network_interface *i = callocz(1, sizeof(struct cgroup_network_interface)); + i->host_device = strdupz(s); + i->container_device = strdupz(t); + i->next = cg->interfaces; + cg->interfaces = i; + + collector_info("CGROUP: cgroup '%s' has network interface '%s' as '%s'", cg->id, i->host_device, i->container_device); + + // register a device rename to proc_net_dev.c + netdev_rename_device_add(i->host_device, i->container_device, cg->chart_id, cg->chart_labels, + k8s_is_kubepod(cg) ? "k8s." 
: "", cgroup_netdev_get(cg)); + } + } + + netdata_pclose(fp_child_input, fp_child_output, cgroup_pid); + // netdata_log_debug(D_CGROUP, "closed cgroup_identifier for cgroup '%s'", cg->id); +} + +static inline void discovery_process_cgroup(struct cgroup *cg) { + if (!cg->available || cg->processed) { + return; + } + + if (cg->first_time_seen) { + worker_is_busy(WORKER_DISCOVERY_PROCESS_FIRST_TIME); + discovery_process_first_time_seen_cgroup(cg); + if (unlikely(cg->first_time_seen || cg->processed)) { + return; + } + } + + if (cg->pending_renames) { + worker_is_busy(WORKER_DISCOVERY_PROCESS_RENAME); + discovery_rename_cgroup(cg); + if (unlikely(cg->pending_renames || cg->processed)) { + return; + } + } + + cg->processed = 1; + + if ((strlen(cg->chart_id) + strlen(cgroup_chart_id_prefix)) >= RRD_ID_LENGTH_MAX) { + collector_info("cgroup '%s' (chart id '%s') disabled because chart_id exceeds the limit (RRD_ID_LENGTH_MAX)", cg->id, cg->chart_id); + return; + } + + if (is_cgroup_systemd_service(cg)) { + if (discovery_is_cgroup_duplicate(cg)) { + cg->enabled = 0; + cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE; + return; + } + if (!cg->chart_labels) + cg->chart_labels = rrdlabels_create(); + rrdlabels_add(cg->chart_labels, "service_name", cg->name, RRDLABEL_SRC_AUTO); + cg->enabled = 1; + return; + } + + if (!(cg->enabled = matches_enabled_cgroup_names(cg->name))) { + netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') disabled by 'enable by default cgroups names matching'", cg->id, cg->name); + return; + } + + if (!(cg->enabled = matches_enabled_cgroup_paths(cg->id))) { + netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') disabled by 'enable by default cgroups matching'", cg->id, cg->name); + return; + } + + if (discovery_is_cgroup_duplicate(cg)) { + cg->enabled = 0; + cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE; + return; + } + + if (!cg->chart_labels) + cg->chart_labels = rrdlabels_create(); + + if (!k8s_is_kubepod(cg)) { + rrdlabels_add(cg->chart_labels, "cgroup_name", cg->name, RRDLABEL_SRC_AUTO); + if (!rrdlabels_exist(cg->chart_labels, "image")) + rrdlabels_add(cg->chart_labels, "image", "", RRDLABEL_SRC_AUTO); + } + + worker_is_busy(WORKER_DISCOVERY_PROCESS_NETWORK); + read_cgroup_network_interfaces(cg); +} + +static inline void discovery_find_all_cgroups() { + netdata_log_debug(D_CGROUP, "searching for cgroups"); + + worker_is_busy(WORKER_DISCOVERY_INIT); + discovery_mark_as_unavailable_all_cgroups(); + + worker_is_busy(WORKER_DISCOVERY_FIND); + if (!cgroup_use_unified_cgroups) { + discovery_find_all_cgroups_v1(); + } else { + discovery_find_all_cgroups_v2(); + } + + for (struct cgroup *cg = discovered_cgroup_root; cg; cg = cg->discovered_next) { + worker_is_busy(WORKER_DISCOVERY_PROCESS); + discovery_process_cgroup(cg); + } + + worker_is_busy(WORKER_DISCOVERY_UPDATE); + discovery_update_filenames_all_cgroups(); + + worker_is_busy(WORKER_DISCOVERY_LOCK); + uv_mutex_lock(&cgroup_root_mutex); + + worker_is_busy(WORKER_DISCOVERY_CLEANUP); + discovery_cleanup_all_cgroups(); + + worker_is_busy(WORKER_DISCOVERY_COPY); + discovery_copy_discovered_cgroups_to_reader(); + + uv_mutex_unlock(&cgroup_root_mutex); + + worker_is_busy(WORKER_DISCOVERY_SHARE); + discovery_share_cgroups_with_ebpf(); + + netdata_log_debug(D_CGROUP, "done searching for cgroups"); +} + +void cgroup_discovery_worker(void *ptr) +{ + UNUSED(ptr); + + worker_register("CGROUPSDISC"); + worker_register_job_name(WORKER_DISCOVERY_INIT, "init"); + worker_register_job_name(WORKER_DISCOVERY_FIND, "find"); + 
worker_register_job_name(WORKER_DISCOVERY_PROCESS, "process"); + worker_register_job_name(WORKER_DISCOVERY_PROCESS_RENAME, "rename"); + worker_register_job_name(WORKER_DISCOVERY_PROCESS_NETWORK, "network"); + worker_register_job_name(WORKER_DISCOVERY_PROCESS_FIRST_TIME, "new"); + worker_register_job_name(WORKER_DISCOVERY_UPDATE, "update"); + worker_register_job_name(WORKER_DISCOVERY_CLEANUP, "cleanup"); + worker_register_job_name(WORKER_DISCOVERY_COPY, "copy"); + worker_register_job_name(WORKER_DISCOVERY_SHARE, "share"); + worker_register_job_name(WORKER_DISCOVERY_LOCK, "lock"); + + entrypoint_parent_process_comm = simple_pattern_create( + " runc:[* " // http://terenceli.github.io/%E6%8A%80%E6%9C%AF/2021/12/28/runc-internals-3) + " exe ", // https://github.com/falcosecurity/falco/blob/9d41b0a151b83693929d3a9c84f7c5c85d070d3a/rules/falco_rules.yaml#L1961 + NULL, + SIMPLE_PATTERN_EXACT, true); + + service_register(SERVICE_THREAD_TYPE_LIBUV, NULL, NULL, NULL, false); + + while (service_running(SERVICE_COLLECTORS)) { + worker_is_idle(); + + uv_mutex_lock(&discovery_thread.mutex); + uv_cond_wait(&discovery_thread.cond_var, &discovery_thread.mutex); + uv_mutex_unlock(&discovery_thread.mutex); + + if (unlikely(!service_running(SERVICE_COLLECTORS))) + break; + + discovery_find_all_cgroups(); + } + collector_info("discovery thread stopped"); + worker_unregister(); + service_exits(); + __atomic_store_n(&discovery_thread.exited,1,__ATOMIC_RELAXED); +} diff --git a/collectors/cgroups.plugin/cgroup-internals.h b/collectors/cgroups.plugin/cgroup-internals.h new file mode 100644 index 00000000000000..a6980224066b43 --- /dev/null +++ b/collectors/cgroups.plugin/cgroup-internals.h @@ -0,0 +1,514 @@ +#include "sys_fs_cgroup.h" + +#ifndef NETDATA_CGROUP_INTERNALS_H +#define NETDATA_CGROUP_INTERNALS_H 1 + +#ifdef NETDATA_INTERNAL_CHECKS +#define CGROUP_PROCFILE_FLAG PROCFILE_FLAG_DEFAULT +#else +#define CGROUP_PROCFILE_FLAG PROCFILE_FLAG_NO_ERROR_ON_FILE_IO +#endif + +struct blkio { + int updated; + int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + int delay_counter; + + char *filename; + + unsigned long long Read; + unsigned long long Write; +/* + unsigned long long Sync; + unsigned long long Async; + unsigned long long Total; +*/ +}; + +struct pids { + char *pids_current_filename; + int pids_current_updated; + unsigned long long pids_current; +}; + +// https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt +struct memory { + ARL_BASE *arl_base; + ARL_ENTRY *arl_dirty; + ARL_ENTRY *arl_swap; + + int updated_detailed; + int updated_usage_in_bytes; + int updated_msw_usage_in_bytes; + int updated_failcnt; + + int enabled_detailed; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + int enabled_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + int enabled_msw_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + int enabled_failcnt; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + + int delay_counter_detailed; + int delay_counter_failcnt; + + char *filename_detailed; + char *filename_usage_in_bytes; + char *filename_msw_usage_in_bytes; + char *filename_failcnt; + + int detailed_has_dirty; + int detailed_has_swap; + + // detailed metrics +/* + unsigned long long cache; + unsigned long long rss; + unsigned long long rss_huge; + unsigned long long mapped_file; + unsigned long long writeback; + unsigned long long dirty; + unsigned long long swap; + unsigned long long pgpgin; + unsigned long long pgpgout; + unsigned long long pgfault; + unsigned long long pgmajfault; + unsigned long 
long inactive_anon; + unsigned long long active_anon; + unsigned long long inactive_file; + unsigned long long active_file; + unsigned long long unevictable; + unsigned long long hierarchical_memory_limit; +*/ + //unified cgroups metrics + unsigned long long anon; + unsigned long long kernel_stack; + unsigned long long slab; + unsigned long long sock; + // unsigned long long shmem; + unsigned long long anon_thp; + //unsigned long long file_writeback; + //unsigned long long file_dirty; + //unsigned long long file; + + unsigned long long total_cache; + unsigned long long total_rss; + unsigned long long total_rss_huge; + unsigned long long total_mapped_file; + unsigned long long total_writeback; + unsigned long long total_dirty; + unsigned long long total_swap; + unsigned long long total_pgpgin; + unsigned long long total_pgpgout; + unsigned long long total_pgfault; + unsigned long long total_pgmajfault; +/* + unsigned long long total_inactive_anon; + unsigned long long total_active_anon; +*/ + + unsigned long long total_inactive_file; + +/* + unsigned long long total_active_file; + unsigned long long total_unevictable; +*/ + + // single file metrics + unsigned long long usage_in_bytes; + unsigned long long msw_usage_in_bytes; + unsigned long long failcnt; +}; + +// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt +struct cpuacct_stat { + int updated; + int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + + char *filename; + + unsigned long long user; // v1, v2(user_usec) + unsigned long long system; // v1, v2(system_usec) +}; + +// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt +struct cpuacct_usage { + int updated; + int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + + char *filename; + + unsigned int cpus; + unsigned long long *cpu_percpu; +}; + +// represents cpuacct/cpu.stat, for v2 'cpuacct_stat' is used for 'user_usec', 'system_usec' +struct cpuacct_cpu_throttling { + int updated; + int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + + char *filename; + + unsigned long long nr_periods; + unsigned long long nr_throttled; + unsigned long long throttled_time; + + unsigned long long nr_throttled_perc; +}; + +// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/sec-cpu#sect-cfs +// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_monitoring_and_updating_the_kernel/using-cgroups-v2-to-control-distribution-of-cpu-time-for-applications_managing-monitoring-and-updating-the-kernel#proc_controlling-distribution-of-cpu-time-for-applications-by-adjusting-cpu-weight_using-cgroups-v2-to-control-distribution-of-cpu-time-for-applications +struct cpuacct_cpu_shares { + int updated; + int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + + char *filename; + + unsigned long long shares; +}; + +struct cgroup_network_interface { + const char *host_device; + const char *container_device; + struct cgroup_network_interface *next; +}; + +enum cgroups_container_orchestrator { + CGROUPS_ORCHESTRATOR_UNSET, + CGROUPS_ORCHESTRATOR_UNKNOWN, + CGROUPS_ORCHESTRATOR_K8S +}; + + +// *** WARNING *** The fields are not thread safe. Take care of safe usage. 
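+// Describes one discovered cgroup: its identity and hashes, the resolved name and chart labels, the per-controller data-file paths (cpuacct, memory, blkio, pids, pressure), and the RRD chart/dimension handles updated on each collection iteration.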
+struct cgroup { + uint32_t options; + + int first_time_seen; // first time seen by the discoverer + int processed; // the discoverer is done processing a cgroup (resolved name, set 'enabled' option) + + char available; // found in the filesystem + char enabled; // enabled in the config + + bool function_ready; // true after the first iteration of chart creation/update + + char pending_renames; + + char *id; + uint32_t hash; + + char *intermediate_id; // TODO: remove it when the renaming script is fixed + + char *chart_id; + uint32_t hash_chart_id; + + // 'cgroup_name' label value. + // by default this is the *id (path), later changed to the resolved name (cgroup-name.sh) or systemd service name. + char *name; + + RRDLABELS *chart_labels; + + int container_orchestrator; + + struct cpuacct_stat cpuacct_stat; + struct cpuacct_usage cpuacct_usage; + struct cpuacct_cpu_throttling cpuacct_cpu_throttling; + struct cpuacct_cpu_shares cpuacct_cpu_shares; + + struct memory memory; + + struct blkio io_service_bytes; // bytes + struct blkio io_serviced; // operations + + struct blkio throttle_io_service_bytes; // bytes + struct blkio throttle_io_serviced; // operations + + struct blkio io_merged; // operations + struct blkio io_queued; // operations + + struct pids pids; + + struct cgroup_network_interface *interfaces; + + struct pressure cpu_pressure; + struct pressure io_pressure; + struct pressure memory_pressure; + struct pressure irq_pressure; + + // Cpu + RRDSET *st_cpu; + RRDDIM *st_cpu_rd_user; + RRDDIM *st_cpu_rd_system; + + RRDSET *st_cpu_limit; + RRDSET *st_cpu_per_core; + RRDSET *st_cpu_nr_throttled; + RRDSET *st_cpu_throttled_time; + RRDSET *st_cpu_shares; + + // Memory + RRDSET *st_mem; + RRDDIM *st_mem_rd_ram; + RRDDIM *st_mem_rd_swap; + + RRDSET *st_mem_utilization; + RRDSET *st_writeback; + RRDSET *st_mem_activity; + RRDSET *st_pgfaults; + RRDSET *st_mem_usage; + RRDSET *st_mem_usage_limit; + RRDSET *st_mem_failcnt; + + // Blkio + RRDSET *st_io; + RRDDIM *st_io_rd_read; + RRDDIM *st_io_rd_written; + + RRDSET *st_serviced_ops; + + RRDSET *st_throttle_io; + RRDDIM *st_throttle_io_rd_read; + RRDDIM *st_throttle_io_rd_written; + + RRDSET *st_throttle_serviced_ops; + + RRDSET *st_queued_ops; + RRDSET *st_merged_ops; + + // Pids + RRDSET *st_pids; + RRDDIM *st_pids_rd_pids_current; + + // per cgroup chart variables + char *filename_cpuset_cpus; + unsigned long long cpuset_cpus; + + char *filename_cpu_cfs_period; + unsigned long long cpu_cfs_period; + + char *filename_cpu_cfs_quota; + unsigned long long cpu_cfs_quota; + + const RRDSETVAR_ACQUIRED *chart_var_cpu_limit; + NETDATA_DOUBLE prev_cpu_usage; + + char *filename_memory_limit; + unsigned long long memory_limit; + const RRDSETVAR_ACQUIRED *chart_var_memory_limit; + + char *filename_memoryswap_limit; + unsigned long long memoryswap_limit; + const RRDSETVAR_ACQUIRED *chart_var_memoryswap_limit; + + const DICTIONARY_ITEM *cgroup_netdev_link; + + struct cgroup *next; + struct cgroup *discovered_next; + +}; + +struct discovery_thread { + uv_thread_t thread; + uv_mutex_t mutex; + uv_cond_t cond_var; + int exited; +}; + +extern struct discovery_thread discovery_thread; + +extern char *cgroups_rename_script; +extern char cgroup_chart_id_prefix[]; +extern char services_chart_id_prefix[]; +extern uv_mutex_t cgroup_root_mutex; + +void cgroup_discovery_worker(void *ptr); + +extern int is_inside_k8s; +extern long system_page_size; +extern int cgroup_enable_cpuacct_stat; +extern int cgroup_enable_cpuacct_usage; +extern int 
cgroup_enable_cpuacct_cpu_throttling; +extern int cgroup_enable_cpuacct_cpu_shares; +extern int cgroup_enable_memory; +extern int cgroup_enable_detailed_memory; +extern int cgroup_enable_memory_failcnt; +extern int cgroup_enable_swap; +extern int cgroup_enable_blkio_io; +extern int cgroup_enable_blkio_ops; +extern int cgroup_enable_blkio_throttle_io; +extern int cgroup_enable_blkio_throttle_ops; +extern int cgroup_enable_blkio_merged_ops; +extern int cgroup_enable_blkio_queued_ops; +extern int cgroup_enable_pressure_cpu; +extern int cgroup_enable_pressure_io_some; +extern int cgroup_enable_pressure_io_full; +extern int cgroup_enable_pressure_memory_some; +extern int cgroup_enable_pressure_memory_full; +extern int cgroup_enable_pressure_irq_some; +extern int cgroup_enable_pressure_irq_full; +extern int cgroup_enable_systemd_services; +extern int cgroup_enable_systemd_services_detailed_memory; +extern int cgroup_used_memory; +extern int cgroup_use_unified_cgroups; +extern int cgroup_unified_exist; +extern int cgroup_search_in_devices; +extern int cgroup_check_for_new_every; +extern int cgroup_update_every; +extern int cgroup_containers_chart_priority; +extern int cgroup_recheck_zero_blkio_every_iterations; +extern int cgroup_recheck_zero_mem_failcnt_every_iterations; +extern int cgroup_recheck_zero_mem_detailed_every_iterations; +extern char *cgroup_cpuacct_base; +extern char *cgroup_cpuset_base; +extern char *cgroup_blkio_base; +extern char *cgroup_memory_base; +extern char *cgroup_pids_base; +extern char *cgroup_devices_base; +extern char *cgroup_unified_base; +extern int cgroup_root_count; +extern int cgroup_root_max; +extern int cgroup_max_depth; +extern SIMPLE_PATTERN *enabled_cgroup_paths; +extern SIMPLE_PATTERN *enabled_cgroup_names; +extern SIMPLE_PATTERN *search_cgroup_paths; +extern SIMPLE_PATTERN *enabled_cgroup_renames; +extern SIMPLE_PATTERN *systemd_services_cgroups; +extern SIMPLE_PATTERN *entrypoint_parent_process_comm; +extern char *cgroups_network_interface_script; +extern int cgroups_check; +extern uint32_t Read_hash; +extern uint32_t Write_hash; +extern uint32_t user_hash; +extern uint32_t system_hash; +extern uint32_t user_usec_hash; +extern uint32_t system_usec_hash; +extern uint32_t nr_periods_hash; +extern uint32_t nr_throttled_hash; +extern uint32_t throttled_time_hash; +extern uint32_t throttled_usec_hash; +extern struct cgroup *cgroup_root; + +extern netdata_ebpf_cgroup_shm_t shm_cgroup_ebpf; +extern int shm_fd_cgroup_ebpf; +extern sem_t *shm_mutex_cgroup_ebpf; + +enum cgroups_type { CGROUPS_AUTODETECT_FAIL, CGROUPS_V1, CGROUPS_V2 }; + +enum cgroups_systemd_setting { + SYSTEMD_CGROUP_ERR, + SYSTEMD_CGROUP_LEGACY, + SYSTEMD_CGROUP_HYBRID, + SYSTEMD_CGROUP_UNIFIED +}; + +struct cgroups_systemd_config_setting { + char *name; + enum cgroups_systemd_setting setting; +}; + +extern struct cgroups_systemd_config_setting cgroups_systemd_options[]; + +static inline int matches_enabled_cgroup_paths(char *id) { + return simple_pattern_matches(enabled_cgroup_paths, id); +} + +static inline int matches_enabled_cgroup_names(char *name) { + return simple_pattern_matches(enabled_cgroup_names, name); +} + +static inline int matches_enabled_cgroup_renames(char *id) { + return simple_pattern_matches(enabled_cgroup_renames, id); +} + +static inline int matches_systemd_services_cgroups(char *id) { + return simple_pattern_matches(systemd_services_cgroups, id); +} + +static inline int matches_search_cgroup_paths(const char *dir) { + return simple_pattern_matches(search_cgroup_paths, dir); 
+} + +static inline int matches_entrypoint_parent_process_comm(const char *comm) { + return simple_pattern_matches(entrypoint_parent_process_comm, comm); +} + +static inline int is_cgroup_systemd_service(struct cgroup *cg) { + return (int)(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE); +} + +static inline int k8s_is_kubepod(struct cgroup *cg) { + return cg->container_orchestrator == CGROUPS_ORCHESTRATOR_K8S; +} + +static inline char *cgroup_chart_type(char *buffer, struct cgroup *cg) { + buffer[0] = '\0'; + + if (cg->chart_id[0] == '\0' || (cg->chart_id[0] == '/' && cg->chart_id[1] == '\0')) + strncpy(buffer, "cgroup_root", RRD_ID_LENGTH_MAX); + else if (is_cgroup_systemd_service(cg)) + snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s%s", services_chart_id_prefix, cg->chart_id); + else + snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s%s", cgroup_chart_id_prefix, cg->chart_id); + + return buffer; +} + +#define RRDFUNCTIONS_CGTOP_HELP "View running containers" + +int cgroup_function_cgroup_top(BUFFER *wb, int timeout, const char *function, void *collector_data, + rrd_function_result_callback_t result_cb, void *result_cb_data, + rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, + rrd_function_register_canceller_cb_t register_canceller_cb, void *register_canceller_cb_data); +int cgroup_function_systemd_top(BUFFER *wb, int timeout, const char *function, void *collector_data, + rrd_function_result_callback_t result_cb, void *result_cb_data, + rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, + rrd_function_register_canceller_cb_t register_canceller_cb, void *register_canceller_cb_data); + +void cgroup_netdev_link_init(void); +const DICTIONARY_ITEM *cgroup_netdev_get(struct cgroup *cg); +void cgroup_netdev_delete(struct cgroup *cg); + +void update_cpu_utilization_chart(struct cgroup *cg); +void update_cpu_utilization_limit_chart(struct cgroup *cg, NETDATA_DOUBLE cpu_limit); +void update_cpu_throttled_chart(struct cgroup *cg); +void update_cpu_throttled_duration_chart(struct cgroup *cg); +void update_cpu_shares_chart(struct cgroup *cg); +void update_cpu_per_core_usage_chart(struct cgroup *cg); + +void update_mem_usage_limit_chart(struct cgroup *cg, unsigned long long memory_limit); +void update_mem_utilization_chart(struct cgroup *cg, unsigned long long memory_limit); +void update_mem_usage_detailed_chart(struct cgroup *cg); +void update_mem_writeback_chart(struct cgroup *cg); +void update_mem_activity_chart(struct cgroup *cg); +void update_mem_pgfaults_chart(struct cgroup *cg); +void update_mem_failcnt_chart(struct cgroup *cg); +void update_mem_usage_chart(struct cgroup *cg); + +void update_io_serviced_bytes_chart(struct cgroup *cg); +void update_io_serviced_ops_chart(struct cgroup *cg); +void update_throttle_io_serviced_bytes_chart(struct cgroup *cg); +void update_throttle_io_serviced_ops_chart(struct cgroup *cg); +void update_io_queued_ops_chart(struct cgroup *cg); +void update_io_merged_ops_chart(struct cgroup *cg); + +void update_pids_current_chart(struct cgroup *cg); + +void update_cpu_some_pressure_chart(struct cgroup *cg); +void update_cpu_some_pressure_stall_time_chart(struct cgroup *cg); +void update_cpu_full_pressure_chart(struct cgroup *cg); +void update_cpu_full_pressure_stall_time_chart(struct cgroup *cg); + +void update_mem_some_pressure_chart(struct cgroup *cg); +void update_mem_some_pressure_stall_time_chart(struct cgroup *cg); +void update_mem_full_pressure_chart(struct cgroup *cg); +void update_mem_full_pressure_stall_time_chart(struct 
cgroup *cg); + +void update_irq_some_pressure_chart(struct cgroup *cg); +void update_irq_some_pressure_stall_time_chart(struct cgroup *cg); +void update_irq_full_pressure_chart(struct cgroup *cg); +void update_irq_full_pressure_stall_time_chart(struct cgroup *cg); + +void update_io_some_pressure_chart(struct cgroup *cg); +void update_io_some_pressure_stall_time_chart(struct cgroup *cg); +void update_io_full_pressure_chart(struct cgroup *cg); +void update_io_full_pressure_stall_time_chart(struct cgroup *cg); + +#endif // NETDATA_CGROUP_INTERNALS_H \ No newline at end of file diff --git a/collectors/cgroups.plugin/cgroup-name.sh b/collectors/cgroups.plugin/cgroup-name.sh.in similarity index 90% rename from collectors/cgroups.plugin/cgroup-name.sh rename to collectors/cgroups.plugin/cgroup-name.sh.in index 6edd9d9f0444de..0f8b63256b5356 100755 --- a/collectors/cgroups.plugin/cgroup-name.sh +++ b/collectors/cgroups.plugin/cgroup-name.sh.in @@ -3,48 +3,115 @@ # netdata # real-time performance and health monitoring, done right! -# (C) 2016 Costa Tsaousis +# (C) 2023 Netdata Inc. # SPDX-License-Identifier: GPL-3.0-or-later # # Script to find a better name for cgroups # -export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" +export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin:@sbindir_POST@" export LC_ALL=C +cmd_line="'${0}' $(printf "'%s' " "${@}")" + # ----------------------------------------------------------------------------- +# logging PROGRAM_NAME="$(basename "${0}")" -logdate() { - date "+%Y-%m-%d %H:%M:%S" +# these should be the same with syslog() priorities +NDLP_EMERG=0 # system is unusable +NDLP_ALERT=1 # action must be taken immediately +NDLP_CRIT=2 # critical conditions +NDLP_ERR=3 # error conditions +NDLP_WARN=4 # warning conditions +NDLP_NOTICE=5 # normal but significant condition +NDLP_INFO=6 # informational +NDLP_DEBUG=7 # debug-level messages + +# the max (numerically) log level we will log +LOG_LEVEL=$NDLP_INFO + +set_log_min_priority() { + case "${NETDATA_LOG_LEVEL,,}" in + "emerg" | "emergency") + LOG_LEVEL=$NDLP_EMERG + ;; + + "alert") + LOG_LEVEL=$NDLP_ALERT + ;; + + "crit" | "critical") + LOG_LEVEL=$NDLP_CRIT + ;; + + "err" | "error") + LOG_LEVEL=$NDLP_ERR + ;; + + "warn" | "warning") + LOG_LEVEL=$NDLP_WARN + ;; + + "notice") + LOG_LEVEL=$NDLP_NOTICE + ;; + + "info") + LOG_LEVEL=$NDLP_INFO + ;; + + "debug") + LOG_LEVEL=$NDLP_DEBUG + ;; + esac } -log() { - local status="${1}" - shift +set_log_min_priority - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" +log() { + local level="${1}" + shift 1 + + [[ -n "$level" && -n "$LOG_LEVEL" && "$level" -gt "$LOG_LEVEL" ]] && return + + systemd-cat-native --log-as-netdata --newline="--NEWLINE--" <&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" +log() { + local level="${1}" + shift 1 + + [[ -n "$level" && -n "$LOG_LEVEL" && "$level" -gt "$LOG_LEVEL" ]] && return + + systemd-cat-native --log-as-netdata --newline="--NEWLINE--" <&2 "BASH version 4 or later is required (this is ${BASH_VERSION})." 
+ exit 1 +fi # ----------------------------------------------------------------------------- # parse the arguments @@ -81,7 +152,10 @@ do case "${1}" in --cgroup) cgroup="${2}"; shift 1;; --pid|-p) pid="${2}"; shift 1;; - --debug|debug) debug=1;; + --debug|debug) + debug=1 + LOG_LEVEL=$NDLP_DEBUG + ;; *) fatal "Cannot understand argument '${1}'";; esac diff --git a/collectors/cgroups.plugin/cgroup-network.c b/collectors/cgroups.plugin/cgroup-network.c index a490df3945ae6d..79c78732a56fb6 100644 --- a/collectors/cgroups.plugin/cgroup-network.c +++ b/collectors/cgroups.plugin/cgroup-network.c @@ -10,10 +10,16 @@ #include #endif -char environment_variable2[FILENAME_MAX + 50] = ""; +char env_netdata_host_prefix[FILENAME_MAX + 50] = ""; +char env_netdata_log_method[FILENAME_MAX + 50] = ""; +char env_netdata_log_format[FILENAME_MAX + 50] = ""; +char env_netdata_log_level[FILENAME_MAX + 50] = ""; char *environment[] = { "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin", - environment_variable2, + env_netdata_host_prefix, + env_netdata_log_method, + env_netdata_log_format, + env_netdata_log_level, NULL }; @@ -286,7 +292,8 @@ int switch_namespace(const char *prefix, pid_t pid) { pid_t read_pid_from_cgroup_file(const char *filename) { int fd = open(filename, procfile_open_flags); if(fd == -1) { - collector_error("Cannot open pid_from_cgroup() file '%s'.", filename); + if (errno != ENOENT) + collector_error("Cannot open pid_from_cgroup() file '%s'.", filename); return 0; } @@ -646,12 +653,11 @@ void usage(void) { } int main(int argc, char **argv) { - stderror = stderr; pid_t pid = 0; - program_name = argv[0]; program_version = VERSION; - error_log_syslog = 0; + clocks_init(); + nd_log_initialize_for_external_plugins("cgroup-network"); // since cgroup-network runs as root, prevent it from opening symbolic links procfile_open_flags = O_RDONLY|O_NOFOLLOW; @@ -669,7 +675,20 @@ int main(int argc, char **argv) { // build a safe environment for our script // the first environment variable is a fixed PATH= - snprintfz(environment_variable2, sizeof(environment_variable2) - 1, "NETDATA_HOST_PREFIX=%s", netdata_configured_host_prefix); + snprintfz(env_netdata_host_prefix, sizeof(env_netdata_host_prefix) - 1, "NETDATA_HOST_PREFIX=%s", netdata_configured_host_prefix); + + char *s; + + s = getenv("NETDATA_LOG_METHOD"); + snprintfz(env_netdata_log_method, sizeof(env_netdata_log_method) - 1, "NETDATA_LOG_METHOD=%s", nd_log_method_for_external_plugins(s)); + + s = getenv("NETDATA_LOG_FORMAT"); + if (s) + snprintfz(env_netdata_log_format, sizeof(env_netdata_log_format) - 1, "NETDATA_LOG_FORMAT=%s", s); + + s = getenv("NETDATA_LOG_LEVEL"); + if (s) + snprintfz(env_netdata_log_level, sizeof(env_netdata_log_level) - 1, "NETDATA_LOG_LEVEL=%s", s); // ------------------------------------------------------------------------ diff --git a/collectors/cgroups.plugin/cgroup-top.c b/collectors/cgroups.plugin/cgroup-top.c new file mode 100644 index 00000000000000..0e64b908d83340 --- /dev/null +++ b/collectors/cgroups.plugin/cgroup-top.c @@ -0,0 +1,519 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "cgroup-internals.h" + +struct cgroup_netdev_link { + size_t read_slot; + NETDATA_DOUBLE received[2]; + NETDATA_DOUBLE sent[2]; +}; + +static DICTIONARY *cgroup_netdev_link_dict = NULL; + +void cgroup_netdev_link_init(void) { + cgroup_netdev_link_dict = dictionary_create_advanced(DICT_OPTION_FIXED_SIZE|DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, sizeof(struct cgroup_netdev_link)); +} + +const DICTIONARY_ITEM 
*cgroup_netdev_get(struct cgroup *cg) { + if(cg->cgroup_netdev_link) + return cg->cgroup_netdev_link; + + + struct cgroup_netdev_link t = { + .read_slot = 0, + .received = { NAN, NAN }, + .sent = { NAN, NAN }, + }; + + cg->cgroup_netdev_link = dictionary_set_and_acquire_item(cgroup_netdev_link_dict, cg->id, &t, sizeof(struct cgroup_netdev_link)); + return dictionary_acquired_item_dup(cgroup_netdev_link_dict, cg->cgroup_netdev_link); +} + +void cgroup_netdev_delete(struct cgroup *cg) { + if(cg->cgroup_netdev_link) { + dictionary_acquired_item_release(cgroup_netdev_link_dict, cg->cgroup_netdev_link); + dictionary_del(cgroup_netdev_link_dict, cg->id); + dictionary_garbage_collect(cgroup_netdev_link_dict); + } +} + +void cgroup_netdev_release(const DICTIONARY_ITEM *link) { + if(link) + dictionary_acquired_item_release(cgroup_netdev_link_dict, link); +} + +const void *cgroup_netdev_dup(const DICTIONARY_ITEM *link) { + return dictionary_acquired_item_dup(cgroup_netdev_link_dict, link); +} + +void cgroup_netdev_reset_all(void) { + struct cgroup_netdev_link *t; + dfe_start_read(cgroup_netdev_link_dict, t) { + if(t->read_slot >= 1) { + t->read_slot = 0; + t->received[1] = NAN; + t->sent[1] = NAN; + } + else { + t->read_slot = 1; + t->received[0] = NAN; + t->sent[0] = NAN; + } + } + dfe_done(t); +} + +void cgroup_netdev_add_bandwidth(const DICTIONARY_ITEM *link, NETDATA_DOUBLE received, NETDATA_DOUBLE sent) { + if(!link) + return; + + struct cgroup_netdev_link *t = dictionary_acquired_item_value(link); + + size_t slot = (t->read_slot) ? 0 : 1; + + if(isnan(t->received[slot])) + t->received[slot] = received; + else + t->received[slot] += received; + + if(isnan(t->sent[slot])) + t->sent[slot] = sent; + else + t->sent[slot] += sent; +} + +void cgroup_netdev_get_bandwidth(struct cgroup *cg, NETDATA_DOUBLE *received, NETDATA_DOUBLE *sent) { + if(!cg->cgroup_netdev_link) { + *received = NAN; + *sent = NAN; + return; + } + + struct cgroup_netdev_link *t = dictionary_acquired_item_value(cg->cgroup_netdev_link); + + size_t slot = (t->read_slot) ? 
1 : 0; + + *received = t->received[slot]; + *sent = t->sent[slot]; +} + +int cgroup_function_cgroup_top(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused, + void *collector_data __maybe_unused, + rrd_function_result_callback_t result_cb, void *result_cb_data, + rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, + rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused, + void *register_canceller_cb_data __maybe_unused) { + + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + + buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost)); + buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_string(wb, "type", "table"); + buffer_json_member_add_time_t(wb, "update_every", 1); + buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_CGTOP_HELP); + buffer_json_member_add_array(wb, "data"); + + double max_pids = 0.0; + double max_cpu = 0.0; + double max_ram = 0.0; + double max_disk_io_read = 0.0; + double max_disk_io_written = 0.0; + double max_net_received = 0.0; + double max_net_sent = 0.0; + + RRDDIM *rd = NULL; + + uv_mutex_lock(&cgroup_root_mutex); + + for(struct cgroup *cg = cgroup_root; cg ; cg = cg->next) { + if(unlikely(!cg->enabled || cg->pending_renames || !cg->function_ready || is_cgroup_systemd_service(cg))) + continue; + + buffer_json_add_array_item_array(wb); + + buffer_json_add_array_item_string(wb, cg->name); // Name + + if(k8s_is_kubepod(cg)) + buffer_json_add_array_item_string(wb, "k8s"); // Kind + else + buffer_json_add_array_item_string(wb, "cgroup"); // Kind + + double pids_current = rrddim_get_last_stored_value(cg->st_pids_rd_pids_current, &max_pids, 1.0); + + double cpu = NAN; + if (cg->st_cpu_rd_user && cg->st_cpu_rd_system) { + cpu = cg->st_cpu_rd_user->collector.last_stored_value + cg->st_cpu_rd_system->collector.last_stored_value; + max_cpu = MAX(max_cpu, cpu); + } + + double ram = rrddim_get_last_stored_value(cg->st_mem_rd_ram, &max_ram, 1.0); + + rd = cg->st_throttle_io_rd_read ? cg->st_throttle_io_rd_read : cg->st_io_rd_read; + double disk_io_read = rrddim_get_last_stored_value(rd, &max_disk_io_read, 1024.0); + rd = cg->st_throttle_io_rd_written ? 
cg->st_throttle_io_rd_written : cg->st_io_rd_written; + double disk_io_written = rrddim_get_last_stored_value(rd, &max_disk_io_written, 1024.0); + + NETDATA_DOUBLE received, sent; + cgroup_netdev_get_bandwidth(cg, &received, &sent); + if (!isnan(received) && !isnan(sent)) { + received /= 1000.0; + sent /= 1000.0; + max_net_received = MAX(max_net_received, received); + max_net_sent = MAX(max_net_sent, sent); + } + + buffer_json_add_array_item_double(wb, pids_current); + buffer_json_add_array_item_double(wb, cpu); + buffer_json_add_array_item_double(wb, ram); + buffer_json_add_array_item_double(wb, disk_io_read); + buffer_json_add_array_item_double(wb, disk_io_written); + buffer_json_add_array_item_double(wb, received); + buffer_json_add_array_item_double(wb, sent); + + buffer_json_array_close(wb); + } + + uv_mutex_unlock(&cgroup_root_mutex); + + buffer_json_array_close(wb); // data + buffer_json_member_add_object(wb, "columns"); + { + size_t field_id = 0; + + // Node + buffer_rrdf_table_add_field(wb, field_id++, "Name", "CGROUP Name", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY | RRDF_FIELD_OPTS_FULL_WIDTH, + NULL); + + // Kind + buffer_rrdf_table_add_field(wb, field_id++, "Kind", "CGROUP Kind", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + // PIDs + buffer_rrdf_table_add_field(wb, field_id++, "PIDs", "Number of Processes Currently in the CGROUP", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "pids", max_pids, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + // CPU + buffer_rrdf_table_add_field(wb, field_id++, "CPU", "CPU Usage", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "%", max_cpu, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + // RAM + buffer_rrdf_table_add_field(wb, field_id++, "RAM", "RAM Usage", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "MiB", max_ram, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + // Disk IO Reads + buffer_rrdf_table_add_field(wb, field_id++, "Reads", "Disk Read Data", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "MiB", max_disk_io_read, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + // Disk IO Writes + buffer_rrdf_table_add_field(wb, field_id++, "Writes", "Disk Written Data", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "MiB", max_disk_io_written, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + // Network Received + buffer_rrdf_table_add_field(wb, field_id++, "Received", "Network Traffic Received", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "Mbps", max_net_received, RRDF_FIELD_SORT_DESCENDING, NULL, + 
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + // Network Sent + buffer_rrdf_table_add_field(wb, field_id++, "Sent", "Network Traffic Sent ", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "Mbps", max_net_sent, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + } + buffer_json_object_close(wb); // columns + buffer_json_member_add_string(wb, "default_sort_column", "CPU"); + + buffer_json_member_add_object(wb, "charts"); + { + buffer_json_member_add_object(wb, "CPU"); + { + buffer_json_member_add_string(wb, "name", "CPU"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "CPU"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "Memory"); + { + buffer_json_member_add_string(wb, "name", "Memory"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "RAM"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "Traffic"); + { + buffer_json_member_add_string(wb, "name", "Traffic"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Received"); + buffer_json_add_array_item_string(wb, "Sent"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); // charts + + buffer_json_member_add_array(wb, "default_charts"); + { + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "CPU"); + buffer_json_add_array_item_string(wb, "Name"); + buffer_json_array_close(wb); + + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "Memory"); + buffer_json_add_array_item_string(wb, "Name"); + buffer_json_array_close(wb); + } + buffer_json_array_close(wb); + + buffer_json_member_add_object(wb, "group_by"); + { + buffer_json_member_add_object(wb, "Kind"); + { + buffer_json_member_add_string(wb, "name", "Kind"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Kind"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); // group_by + + buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1); + buffer_json_finalize(wb); + + int response = HTTP_RESP_OK; + if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) { + buffer_flush(wb); + response = HTTP_RESP_CLIENT_CLOSED_REQUEST; + } + + if(result_cb) + result_cb(wb, response, result_cb_data); + + return response; +} + +int cgroup_function_systemd_top(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused, + void *collector_data __maybe_unused, + rrd_function_result_callback_t result_cb, void *result_cb_data, + rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, + rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused, + void *register_canceller_cb_data __maybe_unused) { + + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + + buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost)); + buffer_json_member_add_uint64(wb, "status", 
HTTP_RESP_OK); + buffer_json_member_add_string(wb, "type", "table"); + buffer_json_member_add_time_t(wb, "update_every", 1); + buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_CGTOP_HELP); + buffer_json_member_add_array(wb, "data"); + + double max_pids = 0.0; + double max_cpu = 0.0; + double max_ram = 0.0; + double max_disk_io_read = 0.0; + double max_disk_io_written = 0.0; + + RRDDIM *rd = NULL; + + uv_mutex_lock(&cgroup_root_mutex); + + for(struct cgroup *cg = cgroup_root; cg ; cg = cg->next) { + if(unlikely(!cg->enabled || cg->pending_renames || !cg->function_ready || !is_cgroup_systemd_service(cg))) + continue; + + buffer_json_add_array_item_array(wb); + + buffer_json_add_array_item_string(wb, cg->name); + + double pids_current = rrddim_get_last_stored_value(cg->st_pids_rd_pids_current, &max_pids, 1.0); + + double cpu = NAN; + if (cg->st_cpu_rd_user && cg->st_cpu_rd_system) { + cpu = cg->st_cpu_rd_user->collector.last_stored_value + cg->st_cpu_rd_system->collector.last_stored_value; + max_cpu = MAX(max_cpu, cpu); + } + + double ram = rrddim_get_last_stored_value(cg->st_mem_rd_ram, &max_ram, 1.0); + + rd = cg->st_throttle_io_rd_read ? cg->st_throttle_io_rd_read : cg->st_io_rd_read; + double disk_io_read = rrddim_get_last_stored_value(rd, &max_disk_io_read, 1024.0); + rd = cg->st_throttle_io_rd_written ? cg->st_throttle_io_rd_written : cg->st_io_rd_written; + double disk_io_written = rrddim_get_last_stored_value(rd, &max_disk_io_written, 1024.0); + + buffer_json_add_array_item_double(wb, pids_current); + buffer_json_add_array_item_double(wb, cpu); + buffer_json_add_array_item_double(wb, ram); + buffer_json_add_array_item_double(wb, disk_io_read); + buffer_json_add_array_item_double(wb, disk_io_written); + + buffer_json_array_close(wb); + } + + uv_mutex_unlock(&cgroup_root_mutex); + + buffer_json_array_close(wb); // data + buffer_json_member_add_object(wb, "columns"); + { + size_t field_id = 0; + + // Node + buffer_rrdf_table_add_field(wb, field_id++, "Name", "Systemd Service Name", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY | RRDF_FIELD_OPTS_FULL_WIDTH, + NULL); + + // PIDs + buffer_rrdf_table_add_field(wb, field_id++, "PIDs", "Number of Processes Currently in the CGROUP", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "pids", max_pids, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + // CPU + buffer_rrdf_table_add_field(wb, field_id++, "CPU", "CPU Usage", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "%", max_cpu, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + // RAM + buffer_rrdf_table_add_field(wb, field_id++, "RAM", "RAM Usage", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "MiB", max_ram, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + // Disk IO Reads + buffer_rrdf_table_add_field(wb, field_id++, "Reads", "Disk Read Data", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "MiB", max_disk_io_read, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, 
RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + // Disk IO Writes + buffer_rrdf_table_add_field(wb, field_id++, "Writes", "Disk Written Data", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "MiB", max_disk_io_written, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + } + + buffer_json_object_close(wb); // columns + buffer_json_member_add_string(wb, "default_sort_column", "CPU"); + + buffer_json_member_add_object(wb, "charts"); + { + buffer_json_member_add_object(wb, "CPU"); + { + buffer_json_member_add_string(wb, "name", "CPU"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "CPU"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "Memory"); + { + buffer_json_member_add_string(wb, "name", "Memory"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "RAM"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); // charts + + buffer_json_member_add_array(wb, "default_charts"); + { + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "CPU"); + buffer_json_add_array_item_string(wb, "Name"); + buffer_json_array_close(wb); + + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "Memory"); + buffer_json_add_array_item_string(wb, "Name"); + buffer_json_array_close(wb); + } + buffer_json_array_close(wb); + + buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1); + buffer_json_finalize(wb); + + int response = HTTP_RESP_OK; + if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) { + buffer_flush(wb); + response = HTTP_RESP_CLIENT_CLOSED_REQUEST; + } + + if(result_cb) + result_cb(wb, response, result_cb_data); + + return response; +} diff --git a/collectors/cgroups.plugin/integrations/containers.md b/collectors/cgroups.plugin/integrations/containers.md new file mode 100644 index 00000000000000..6273d1e918e560 --- /dev/null +++ b/collectors/cgroups.plugin/integrations/containers.md @@ -0,0 +1,169 @@ + + +# Containers + + + + + +Plugin: cgroups.plugin +Module: /sys/fs/cgroup + + + +## Overview + +Monitor Containers for performance, resource usage, and health status. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per cgroup + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| container_name | The container name or group path if name resolution fails. | +| image | Docker/Podman container image name. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.cpu_limit | used | percentage | +| cgroup.cpu | user, system | percentage | +| cgroup.cpu_per_core | a dimension per core | percentage | +| cgroup.throttled | throttled | percentage | +| cgroup.throttled_duration | duration | ms | +| cgroup.cpu_shares | shares | shares | +| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB | +| cgroup.writeback | dirty, writeback | MiB | +| cgroup.mem_activity | in, out | MiB/s | +| cgroup.pgfaults | pgfault, swap | MiB/s | +| cgroup.mem_usage | ram, swap | MiB | +| cgroup.mem_usage_limit | available, used | MiB | +| cgroup.mem_utilization | utilization | percentage | +| cgroup.mem_failcnt | failures | count | +| cgroup.io | read, write | KiB/s | +| cgroup.serviced_ops | read, write | operations/s | +| cgroup.throttle_io | read, write | KiB/s | +| cgroup.throttle_serviced_ops | read, write | operations/s | +| cgroup.queued_ops | read, write | operations | +| cgroup.merged_ops | read, write | operations/s | +| cgroup.cpu_some_pressure | some10, some60, some300 | percentage | +| cgroup.cpu_some_pressure_stall_time | time | ms | +| cgroup.cpu_full_pressure | some10, some60, some300 | percentage | +| cgroup.cpu_full_pressure_stall_time | time | ms | +| cgroup.memory_some_pressure | some10, some60, some300 | percentage | +| cgroup.memory_some_pressure_stall_time | time | ms | +| cgroup.memory_full_pressure | some10, some60, some300 | percentage | +| cgroup.memory_full_pressure_stall_time | time | ms | +| cgroup.io_some_pressure | some10, some60, some300 | percentage | +| cgroup.io_some_pressure_stall_time | time | ms | +| cgroup.io_full_pressure | some10, some60, some300 | percentage | +| cgroup.io_full_pressure_stall_time | time | ms | +| cgroup.pids_current | pids | pids | + +### Per cgroup network device + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| container_name | The container name or group path if name resolution fails. | +| image | Docker/Podman container image name. | +| device | The name of the host network interface linked to the container's network interface. | +| container_device | Container network interface name. | +| interface_type | Network interface type. Always "virtual" for the containers. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.net_net | received, sent | kilobits/s | +| cgroup.net_packets | received, sent, multicast | pps | +| cgroup.net_errors | inbound, outbound | errors/s | +| cgroup.net_drops | inbound, outbound | errors/s | +| cgroup.net_fifo | receive, transmit | errors/s | +| cgroup.net_compressed | receive, sent | pps | +| cgroup.net_events | frames, collisions, carrier | events/s | +| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state | +| cgroup.net_carrier | up, down | state | +| cgroup.net_mtu | mtu | octets | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes | +| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization | +| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute | +| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/cgroups.plugin/integrations/kubernetes_containers.md b/collectors/cgroups.plugin/integrations/kubernetes_containers.md new file mode 100644 index 00000000000000..9be32a12a1493c --- /dev/null +++ b/collectors/cgroups.plugin/integrations/kubernetes_containers.md @@ -0,0 +1,183 @@ + + +# Kubernetes Containers + + + + + +Plugin: cgroups.plugin +Module: /sys/fs/cgroup + + + +## Overview + +Monitor Containers for performance, resource usage, and health status. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per k8s cgroup + +These metrics refer to the Pod container. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| k8s_node_name | Node name. The value of _pod.spec.nodeName_. | +| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. | +| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. 
| +| k8s_controller_name | Controller name.The value of _pod.OwnerReferences.Controller.Name_. | +| k8s_pod_name | Pod name. The value of _pod.metadata.name_. | +| k8s_container_name | Container name. The value of _pod.spec.containers.name_. | +| k8s_kind | Instance kind: "pod" or "container". | +| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). | +| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| k8s.cgroup.cpu_limit | used | percentage | +| k8s.cgroup.cpu | user, system | percentage | +| k8s.cgroup.cpu_per_core | a dimension per core | percentage | +| k8s.cgroup.throttled | throttled | percentage | +| k8s.cgroup.throttled_duration | duration | ms | +| k8s.cgroup.cpu_shares | shares | shares | +| k8s.cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB | +| k8s.cgroup.writeback | dirty, writeback | MiB | +| k8s.cgroup.mem_activity | in, out | MiB/s | +| k8s.cgroup.pgfaults | pgfault, swap | MiB/s | +| k8s.cgroup.mem_usage | ram, swap | MiB | +| k8s.cgroup.mem_usage_limit | available, used | MiB | +| k8s.cgroup.mem_utilization | utilization | percentage | +| k8s.cgroup.mem_failcnt | failures | count | +| k8s.cgroup.io | read, write | KiB/s | +| k8s.cgroup.serviced_ops | read, write | operations/s | +| k8s.cgroup.throttle_io | read, write | KiB/s | +| k8s.cgroup.throttle_serviced_ops | read, write | operations/s | +| k8s.cgroup.queued_ops | read, write | operations | +| k8s.cgroup.merged_ops | read, write | operations/s | +| k8s.cgroup.cpu_some_pressure | some10, some60, some300 | percentage | +| k8s.cgroup.cpu_some_pressure_stall_time | time | ms | +| k8s.cgroup.cpu_full_pressure | some10, some60, some300 | percentage | +| k8s.cgroup.cpu_full_pressure_stall_time | time | ms | +| k8s.cgroup.memory_some_pressure | some10, some60, some300 | percentage | +| k8s.cgroup.memory_some_pressure_stall_time | time | ms | +| k8s.cgroup.memory_full_pressure | some10, some60, some300 | percentage | +| k8s.cgroup.memory_full_pressure_stall_time | time | ms | +| k8s.cgroup.io_some_pressure | some10, some60, some300 | percentage | +| k8s.cgroup.io_some_pressure_stall_time | time | ms | +| k8s.cgroup.io_full_pressure | some10, some60, some300 | percentage | +| k8s.cgroup.io_full_pressure_stall_time | time | ms | +| k8s.cgroup.pids_current | pids | pids | + +### Per k8s cgroup network device + +These metrics refer to the Pod container network interface. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| device | The name of the host network interface linked to the container's network interface. | +| container_device | Container network interface name. | +| interface_type | Network interface type. Always "virtual" for the containers. | +| k8s_node_name | Node name. The value of _pod.spec.nodeName_. | +| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. | +| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. | +| k8s_controller_name | Controller name.The value of _pod.OwnerReferences.Controller.Name_. | +| k8s_pod_name | Pod name. The value of _pod.metadata.name_. | +| k8s_container_name | Container name. The value of _pod.spec.containers.name_. | +| k8s_kind | Instance kind: "pod" or "container". | +| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). | +| k8s_cluster_id | Cluster ID. 
The value of kube-system namespace _namespace.metadata.uid_. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| k8s.cgroup.net_net | received, sent | kilobits/s | +| k8s.cgroup.net_packets | received, sent, multicast | pps | +| k8s.cgroup.net_errors | inbound, outbound | errors/s | +| k8s.cgroup.net_drops | inbound, outbound | errors/s | +| k8s.cgroup.net_fifo | receive, transmit | errors/s | +| k8s.cgroup.net_compressed | receive, sent | pps | +| k8s.cgroup.net_events | frames, collisions, carrier | events/s | +| k8s.cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state | +| k8s.cgroup.net_carrier | up, down | state | +| k8s.cgroup.net_mtu | mtu | octets | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ k8s_cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | k8s.cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes | +| [ k8s_cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | k8s.cgroup.mem_usage | cgroup memory utilization | +| [ k8s_cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute | +| [ k8s_cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/cgroups.plugin/integrations/libvirt_containers.md b/collectors/cgroups.plugin/integrations/libvirt_containers.md new file mode 100644 index 00000000000000..fed4546984623c --- /dev/null +++ b/collectors/cgroups.plugin/integrations/libvirt_containers.md @@ -0,0 +1,169 @@ + + +# Libvirt Containers + + + + + +Plugin: cgroups.plugin +Module: /sys/fs/cgroup + + + +## Overview + +Monitor Libvirt for performance, resource usage, and health status. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per cgroup + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| container_name | The container name or group path if name resolution fails. | +| image | Docker/Podman container image name. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.cpu_limit | used | percentage | +| cgroup.cpu | user, system | percentage | +| cgroup.cpu_per_core | a dimension per core | percentage | +| cgroup.throttled | throttled | percentage | +| cgroup.throttled_duration | duration | ms | +| cgroup.cpu_shares | shares | shares | +| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB | +| cgroup.writeback | dirty, writeback | MiB | +| cgroup.mem_activity | in, out | MiB/s | +| cgroup.pgfaults | pgfault, swap | MiB/s | +| cgroup.mem_usage | ram, swap | MiB | +| cgroup.mem_usage_limit | available, used | MiB | +| cgroup.mem_utilization | utilization | percentage | +| cgroup.mem_failcnt | failures | count | +| cgroup.io | read, write | KiB/s | +| cgroup.serviced_ops | read, write | operations/s | +| cgroup.throttle_io | read, write | KiB/s | +| cgroup.throttle_serviced_ops | read, write | operations/s | +| cgroup.queued_ops | read, write | operations | +| cgroup.merged_ops | read, write | operations/s | +| cgroup.cpu_some_pressure | some10, some60, some300 | percentage | +| cgroup.cpu_some_pressure_stall_time | time | ms | +| cgroup.cpu_full_pressure | some10, some60, some300 | percentage | +| cgroup.cpu_full_pressure_stall_time | time | ms | +| cgroup.memory_some_pressure | some10, some60, some300 | percentage | +| cgroup.memory_some_pressure_stall_time | time | ms | +| cgroup.memory_full_pressure | some10, some60, some300 | percentage | +| cgroup.memory_full_pressure_stall_time | time | ms | +| cgroup.io_some_pressure | some10, some60, some300 | percentage | +| cgroup.io_some_pressure_stall_time | time | ms | +| cgroup.io_full_pressure | some10, some60, some300 | percentage | +| cgroup.io_full_pressure_stall_time | time | ms | +| cgroup.pids_current | pids | pids | + +### Per cgroup network device + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| container_name | The container name or group path if name resolution fails. | +| image | Docker/Podman container image name. | +| device | The name of the host network interface linked to the container's network interface. | +| container_device | Container network interface name. | +| interface_type | Network interface type. Always "virtual" for the containers. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.net_net | received, sent | kilobits/s | +| cgroup.net_packets | received, sent, multicast | pps | +| cgroup.net_errors | inbound, outbound | errors/s | +| cgroup.net_drops | inbound, outbound | errors/s | +| cgroup.net_fifo | receive, transmit | errors/s | +| cgroup.net_compressed | receive, sent | pps | +| cgroup.net_events | frames, collisions, carrier | events/s | +| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state | +| cgroup.net_carrier | up, down | state | +| cgroup.net_mtu | mtu | octets | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes | +| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization | +| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute | +| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/cgroups.plugin/integrations/lxc_containers.md b/collectors/cgroups.plugin/integrations/lxc_containers.md new file mode 100644 index 00000000000000..3f05ffd5fe204b --- /dev/null +++ b/collectors/cgroups.plugin/integrations/lxc_containers.md @@ -0,0 +1,169 @@ + + +# LXC Containers + + + + + +Plugin: cgroups.plugin +Module: /sys/fs/cgroup + + + +## Overview + +Monitor LXC Containers for performance, resource usage, and health status. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per cgroup + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| container_name | The container name or group path if name resolution fails. | +| image | Docker/Podman container image name. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.cpu_limit | used | percentage | +| cgroup.cpu | user, system | percentage | +| cgroup.cpu_per_core | a dimension per core | percentage | +| cgroup.throttled | throttled | percentage | +| cgroup.throttled_duration | duration | ms | +| cgroup.cpu_shares | shares | shares | +| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB | +| cgroup.writeback | dirty, writeback | MiB | +| cgroup.mem_activity | in, out | MiB/s | +| cgroup.pgfaults | pgfault, swap | MiB/s | +| cgroup.mem_usage | ram, swap | MiB | +| cgroup.mem_usage_limit | available, used | MiB | +| cgroup.mem_utilization | utilization | percentage | +| cgroup.mem_failcnt | failures | count | +| cgroup.io | read, write | KiB/s | +| cgroup.serviced_ops | read, write | operations/s | +| cgroup.throttle_io | read, write | KiB/s | +| cgroup.throttle_serviced_ops | read, write | operations/s | +| cgroup.queued_ops | read, write | operations | +| cgroup.merged_ops | read, write | operations/s | +| cgroup.cpu_some_pressure | some10, some60, some300 | percentage | +| cgroup.cpu_some_pressure_stall_time | time | ms | +| cgroup.cpu_full_pressure | some10, some60, some300 | percentage | +| cgroup.cpu_full_pressure_stall_time | time | ms | +| cgroup.memory_some_pressure | some10, some60, some300 | percentage | +| cgroup.memory_some_pressure_stall_time | time | ms | +| cgroup.memory_full_pressure | some10, some60, some300 | percentage | +| cgroup.memory_full_pressure_stall_time | time | ms | +| cgroup.io_some_pressure | some10, some60, some300 | percentage | +| cgroup.io_some_pressure_stall_time | time | ms | +| cgroup.io_full_pressure | some10, some60, some300 | percentage | +| cgroup.io_full_pressure_stall_time | time | ms | +| cgroup.pids_current | pids | pids | + +### Per cgroup network device + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| container_name | The container name or group path if name resolution fails. | +| image | Docker/Podman container image name. | +| device | The name of the host network interface linked to the container's network interface. | +| container_device | Container network interface name. | +| interface_type | Network interface type. Always "virtual" for the containers. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.net_net | received, sent | kilobits/s | +| cgroup.net_packets | received, sent, multicast | pps | +| cgroup.net_errors | inbound, outbound | errors/s | +| cgroup.net_drops | inbound, outbound | errors/s | +| cgroup.net_fifo | receive, transmit | errors/s | +| cgroup.net_compressed | receive, sent | pps | +| cgroup.net_events | frames, collisions, carrier | events/s | +| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state | +| cgroup.net_carrier | up, down | state | +| cgroup.net_mtu | mtu | octets | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes | +| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization | +| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute | +| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/cgroups.plugin/integrations/ovirt_containers.md b/collectors/cgroups.plugin/integrations/ovirt_containers.md new file mode 100644 index 00000000000000..5771aeea1b797d --- /dev/null +++ b/collectors/cgroups.plugin/integrations/ovirt_containers.md @@ -0,0 +1,169 @@ + + +# oVirt Containers + + + + + +Plugin: cgroups.plugin +Module: /sys/fs/cgroup + + + +## Overview + +Monitor oVirt for performance, resource usage, and health status. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per cgroup + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| container_name | The container name or group path if name resolution fails. | +| image | Docker/Podman container image name. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.cpu_limit | used | percentage | +| cgroup.cpu | user, system | percentage | +| cgroup.cpu_per_core | a dimension per core | percentage | +| cgroup.throttled | throttled | percentage | +| cgroup.throttled_duration | duration | ms | +| cgroup.cpu_shares | shares | shares | +| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB | +| cgroup.writeback | dirty, writeback | MiB | +| cgroup.mem_activity | in, out | MiB/s | +| cgroup.pgfaults | pgfault, swap | MiB/s | +| cgroup.mem_usage | ram, swap | MiB | +| cgroup.mem_usage_limit | available, used | MiB | +| cgroup.mem_utilization | utilization | percentage | +| cgroup.mem_failcnt | failures | count | +| cgroup.io | read, write | KiB/s | +| cgroup.serviced_ops | read, write | operations/s | +| cgroup.throttle_io | read, write | KiB/s | +| cgroup.throttle_serviced_ops | read, write | operations/s | +| cgroup.queued_ops | read, write | operations | +| cgroup.merged_ops | read, write | operations/s | +| cgroup.cpu_some_pressure | some10, some60, some300 | percentage | +| cgroup.cpu_some_pressure_stall_time | time | ms | +| cgroup.cpu_full_pressure | some10, some60, some300 | percentage | +| cgroup.cpu_full_pressure_stall_time | time | ms | +| cgroup.memory_some_pressure | some10, some60, some300 | percentage | +| cgroup.memory_some_pressure_stall_time | time | ms | +| cgroup.memory_full_pressure | some10, some60, some300 | percentage | +| cgroup.memory_full_pressure_stall_time | time | ms | +| cgroup.io_some_pressure | some10, some60, some300 | percentage | +| cgroup.io_some_pressure_stall_time | time | ms | +| cgroup.io_full_pressure | some10, some60, some300 | percentage | +| cgroup.io_full_pressure_stall_time | time | ms | +| cgroup.pids_current | pids | pids | + +### Per cgroup network device + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| container_name | The container name or group path if name resolution fails. | +| image | Docker/Podman container image name. | +| device | The name of the host network interface linked to the container's network interface. | +| container_device | Container network interface name. | +| interface_type | Network interface type. Always "virtual" for the containers. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.net_net | received, sent | kilobits/s | +| cgroup.net_packets | received, sent, multicast | pps | +| cgroup.net_errors | inbound, outbound | errors/s | +| cgroup.net_drops | inbound, outbound | errors/s | +| cgroup.net_fifo | receive, transmit | errors/s | +| cgroup.net_compressed | receive, sent | pps | +| cgroup.net_events | frames, collisions, carrier | events/s | +| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state | +| cgroup.net_carrier | up, down | state | +| cgroup.net_mtu | mtu | octets | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes | +| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization | +| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute | +| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/cgroups.plugin/integrations/proxmox_containers.md b/collectors/cgroups.plugin/integrations/proxmox_containers.md new file mode 100644 index 00000000000000..1804a40ca0a7af --- /dev/null +++ b/collectors/cgroups.plugin/integrations/proxmox_containers.md @@ -0,0 +1,169 @@ + + +# Proxmox Containers + + + + + +Plugin: cgroups.plugin +Module: /sys/fs/cgroup + + + +## Overview + +Monitor Proxmox for performance, resource usage, and health status. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per cgroup + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| container_name | The container name or group path if name resolution fails. | +| image | Docker/Podman container image name. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.cpu_limit | used | percentage | +| cgroup.cpu | user, system | percentage | +| cgroup.cpu_per_core | a dimension per core | percentage | +| cgroup.throttled | throttled | percentage | +| cgroup.throttled_duration | duration | ms | +| cgroup.cpu_shares | shares | shares | +| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB | +| cgroup.writeback | dirty, writeback | MiB | +| cgroup.mem_activity | in, out | MiB/s | +| cgroup.pgfaults | pgfault, swap | MiB/s | +| cgroup.mem_usage | ram, swap | MiB | +| cgroup.mem_usage_limit | available, used | MiB | +| cgroup.mem_utilization | utilization | percentage | +| cgroup.mem_failcnt | failures | count | +| cgroup.io | read, write | KiB/s | +| cgroup.serviced_ops | read, write | operations/s | +| cgroup.throttle_io | read, write | KiB/s | +| cgroup.throttle_serviced_ops | read, write | operations/s | +| cgroup.queued_ops | read, write | operations | +| cgroup.merged_ops | read, write | operations/s | +| cgroup.cpu_some_pressure | some10, some60, some300 | percentage | +| cgroup.cpu_some_pressure_stall_time | time | ms | +| cgroup.cpu_full_pressure | some10, some60, some300 | percentage | +| cgroup.cpu_full_pressure_stall_time | time | ms | +| cgroup.memory_some_pressure | some10, some60, some300 | percentage | +| cgroup.memory_some_pressure_stall_time | time | ms | +| cgroup.memory_full_pressure | some10, some60, some300 | percentage | +| cgroup.memory_full_pressure_stall_time | time | ms | +| cgroup.io_some_pressure | some10, some60, some300 | percentage | +| cgroup.io_some_pressure_stall_time | time | ms | +| cgroup.io_full_pressure | some10, some60, some300 | percentage | +| cgroup.io_full_pressure_stall_time | time | ms | +| cgroup.pids_current | pids | pids | + +### Per cgroup network device + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| container_name | The container name or group path if name resolution fails. | +| image | Docker/Podman container image name. | +| device | The name of the host network interface linked to the container's network interface. | +| container_device | Container network interface name. | +| interface_type | Network interface type. Always "virtual" for the containers. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.net_net | received, sent | kilobits/s | +| cgroup.net_packets | received, sent, multicast | pps | +| cgroup.net_errors | inbound, outbound | errors/s | +| cgroup.net_drops | inbound, outbound | errors/s | +| cgroup.net_fifo | receive, transmit | errors/s | +| cgroup.net_compressed | receive, sent | pps | +| cgroup.net_events | frames, collisions, carrier | events/s | +| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state | +| cgroup.net_carrier | up, down | state | +| cgroup.net_mtu | mtu | octets | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes | +| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization | +| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute | +| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/cgroups.plugin/integrations/systemd_services.md b/collectors/cgroups.plugin/integrations/systemd_services.md new file mode 100644 index 00000000000000..0ce9063669b998 --- /dev/null +++ b/collectors/cgroups.plugin/integrations/systemd_services.md @@ -0,0 +1,112 @@ + + +# Systemd Services + + + + + +Plugin: cgroups.plugin +Module: /sys/fs/cgroup + + + +## Overview + +Monitor Containers for performance, resource usage, and health status. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. 
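To make the scope and label model described above concrete, here is a small illustrative Python sketch. It is not part of this PR, and the sample data, service names, and values are invented; it only shows how samples of the `systemd.service.cpu.utilization` context from the table below collapse into one instance per unique label set (here, per `service_name`).

```python
from collections import defaultdict

# Invented samples for illustration: (context, labels, value).
# The context and the "service_name" label mirror the tables below;
# the service names and values are made up.
samples = [
    ("systemd.service.cpu.utilization", {"service_name": "nginx.service"}, 1.2),
    ("systemd.service.cpu.utilization", {"service_name": "sshd.service"}, 0.1),
    ("systemd.service.cpu.utilization", {"service_name": "nginx.service"}, 1.5),
]

# An instance (scope) is identified by its full label set, so samples with
# identical labels collapse into the same per-service series.
instances = defaultdict(list)
for context, labels, value in samples:
    key = (context, tuple(sorted(labels.items())))
    instances[key].append(value)

for (context, labels), values in sorted(instances.items()):
    print(context, dict(labels), values)
# -> two instances: nginx.service (two points) and sshd.service (one point)
```

Each key in `instances` corresponds to one instance of the "Per systemd service" scope documented below.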
+ + + +### Per systemd service + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| service_name | Service name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| systemd.service.cpu.utilization | user, system | percentage | +| systemd.service.memory.usage | ram, swap | MiB | +| systemd.service.memory.failcnt | fail | failures/s | +| systemd.service.memory.ram.usage | rss, cache, mapped_file, rss_huge | MiB | +| systemd.service.memory.writeback | writeback, dirty | MiB | +| systemd.service.memory.paging.faults | minor, major | MiB/s | +| systemd.service.memory.paging.io | in, out | MiB/s | +| systemd.service.disk.io | read, write | KiB/s | +| systemd.service.disk.iops | read, write | operations/s | +| systemd.service.disk.throttle.io | read, write | KiB/s | +| systemd.service.disk.throttle.iops | read, write | operations/s | +| systemd.service.disk.queued_iops | read, write | operations/s | +| systemd.service.disk.merged_iops | read, write | operations/s | +| systemd.service.pids.current | pids | pids | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/cgroups.plugin/integrations/virtual_machines.md b/collectors/cgroups.plugin/integrations/virtual_machines.md new file mode 100644 index 00000000000000..6a64923c47d8b1 --- /dev/null +++ b/collectors/cgroups.plugin/integrations/virtual_machines.md @@ -0,0 +1,169 @@ + + +# Virtual Machines + + + + + +Plugin: cgroups.plugin +Module: /sys/fs/cgroup + + + +## Overview + +Monitor Virtual Machines for performance, resource usage, and health status. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per cgroup + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| container_name | The container name or group path if name resolution fails. | +| image | Docker/Podman container image name. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.cpu_limit | used | percentage | +| cgroup.cpu | user, system | percentage | +| cgroup.cpu_per_core | a dimension per core | percentage | +| cgroup.throttled | throttled | percentage | +| cgroup.throttled_duration | duration | ms | +| cgroup.cpu_shares | shares | shares | +| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB | +| cgroup.writeback | dirty, writeback | MiB | +| cgroup.mem_activity | in, out | MiB/s | +| cgroup.pgfaults | pgfault, swap | MiB/s | +| cgroup.mem_usage | ram, swap | MiB | +| cgroup.mem_usage_limit | available, used | MiB | +| cgroup.mem_utilization | utilization | percentage | +| cgroup.mem_failcnt | failures | count | +| cgroup.io | read, write | KiB/s | +| cgroup.serviced_ops | read, write | operations/s | +| cgroup.throttle_io | read, write | KiB/s | +| cgroup.throttle_serviced_ops | read, write | operations/s | +| cgroup.queued_ops | read, write | operations | +| cgroup.merged_ops | read, write | operations/s | +| cgroup.cpu_some_pressure | some10, some60, some300 | percentage | +| cgroup.cpu_some_pressure_stall_time | time | ms | +| cgroup.cpu_full_pressure | some10, some60, some300 | percentage | +| cgroup.cpu_full_pressure_stall_time | time | ms | +| cgroup.memory_some_pressure | some10, some60, some300 | percentage | +| cgroup.memory_some_pressure_stall_time | time | ms | +| cgroup.memory_full_pressure | some10, some60, some300 | percentage | +| cgroup.memory_full_pressure_stall_time | time | ms | +| cgroup.io_some_pressure | some10, some60, some300 | percentage | +| cgroup.io_some_pressure_stall_time | time | ms | +| cgroup.io_full_pressure | some10, some60, some300 | percentage | +| cgroup.io_full_pressure_stall_time | time | ms | +| cgroup.pids_current | pids | pids | + +### Per cgroup network device + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| container_name | The container name or group path if name resolution fails. | +| image | Docker/Podman container image name. | +| device | The name of the host network interface linked to the container's network interface. | +| container_device | Container network interface name. | +| interface_type | Network interface type. Always "virtual" for the containers. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.net_net | received, sent | kilobits/s | +| cgroup.net_packets | received, sent, multicast | pps | +| cgroup.net_errors | inbound, outbound | errors/s | +| cgroup.net_drops | inbound, outbound | errors/s | +| cgroup.net_fifo | receive, transmit | errors/s | +| cgroup.net_compressed | receive, sent | pps | +| cgroup.net_events | frames, collisions, carrier | events/s | +| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state | +| cgroup.net_carrier | up, down | state | +| cgroup.net_mtu | mtu | octets | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes | +| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization | +| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute | +| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/cgroups.plugin/metadata.yaml b/collectors/cgroups.plugin/metadata.yaml index b342d30a3bf926..a1abbb5a94fe15 100644 --- a/collectors/cgroups.plugin/metadata.yaml +++ b/collectors/cgroups.plugin/metadata.yaml @@ -86,9 +86,9 @@ modules: description: "" labels: - name: container_name - description: TBD + description: The container name or group path if name resolution fails. - name: image - description: TBD + description: Docker/Podman container image name. metrics: - name: cgroup.cpu_limit description: CPU Usage within the limits @@ -310,17 +310,25 @@ modules: chart_type: line dimensions: - name: time + - name: cgroup.pids_current + description: Number of processes + unit: "pids" + chart_type: line + dimensions: + - name: pids - name: cgroup network device description: "" labels: - name: container_name - description: TBD + description: The container name or group path if name resolution fails. - name: image - description: TBD + description: Docker/Podman container image name. - name: device - description: TBD + description: "The name of the host network interface linked to the container's network interface." + - name: container_device + description: Container network interface name. - name: interface_type - description: TBD + description: 'Network interface type. Always "virtual" for the containers.' 
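Note on the new `cgroup.pids_current` chart added above: the value comes from the cgroup's `pids.current` control file, which holds a single integer (the number of processes currently in the cgroup). A minimal, self-contained sketch of that read follows; the paths and the `user.slice` group are illustrative assumptions, not taken from this patch, which resolves the real base path from the mounted hierarchies.

```c
/* Sketch only: read a cgroup's pids.current the way a collector would.
 * The path below assumes a cgroup v2 unified hierarchy mounted at
 * /sys/fs/cgroup; on cgroup v1 the file lives under the pids controller,
 * e.g. /sys/fs/cgroup/pids/<group>/pids.current. */
#include <stdio.h>

static int read_single_number(const char *path, unsigned long long *value) {
    FILE *fp = fopen(path, "r");
    if (!fp)
        return -1;                      /* file missing: metric not collected */
    int ok = (fscanf(fp, "%llu", value) == 1);
    fclose(fp);
    return ok ? 0 : -1;
}

int main(void) {
    unsigned long long pids_current = 0;
    const char *path = "/sys/fs/cgroup/user.slice/pids.current"; /* example group */
    if (read_single_number(path, &pids_current) == 0)
        printf("pids.current = %llu\n", pids_current);
    else
        fprintf(stderr, "cannot read %s\n", path);
    return 0;
}
```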
metrics: - name: cgroup.net_net description: Bandwidth @@ -406,7 +414,7 @@ modules: link: https://kubernetes.io/ icon_filename: kubernetes.svg categories: - - data-collection.containers-and-vms + #- data-collection.containers-and-vms - data-collection.kubernetes keywords: - k8s @@ -445,30 +453,26 @@ availability: [] scopes: - name: k8s cgroup - description: "" + description: These metrics refer to the Pod container. labels: + - name: k8s_node_name + description: 'Node name. The value of _pod.spec.nodeName_.' - name: k8s_namespace - description: TBD - - name: k8s_pod_name - description: TBD - - name: k8s_pod_uid - description: TBD + description: 'Namespace name. The value of _pod.metadata.namespace_.' - name: k8s_controller_kind - description: TBD + description: 'Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_.' - name: k8s_controller_name - description: TBD - - name: k8s_node_name - description: TBD + description: 'Controller name. The value of _pod.OwnerReferences.Controller.Name_.' - name: k8s_pod_name - description: TBD + description: 'Pod name. The value of _pod.metadata.name_.' - name: k8s_container_name - description: TBD - - name: k8s_container_id - description: TBD + description: 'Container name. The value of _pod.spec.containers.name_.' - name: k8s_kind - description: TBD + description: 'Instance kind: "pod" or "container".' - name: k8s_qos_class - description: TBD + description: 'QoS class (guaranteed, burstable, besteffort).' - name: k8s_cluster_id - description: TBD + description: 'Cluster ID. The value of kube-system namespace _namespace.metadata.uid_.' metrics: - name: k8s.cgroup.cpu_limit description: CPU Usage within the limits @@ -690,35 +694,39 @@ chart_type: line dimensions: - name: time + - name: k8s.cgroup.pids_current + description: Number of processes + unit: "pids" + chart_type: line + dimensions: + - name: pids - name: k8s cgroup network device - description: "" + description: These metrics refer to the Pod container network interface. labels: - name: device - description: TBD + description: "The name of the host network interface linked to the container's network interface." + - name: container_device + description: Container network interface name. - name: interface_type - description: TBD + description: 'Network interface type. Always "virtual" for the containers.' + - name: k8s_node_name + description: 'Node name. The value of _pod.spec.nodeName_.' - name: k8s_namespace - description: TBD - - name: k8s_pod_name - description: TBD - - name: k8s_pod_uid - description: TBD + description: 'Namespace name. The value of _pod.metadata.namespace_.' - name: k8s_controller_kind - description: TBD + description: 'Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_.' - name: k8s_controller_name - description: TBD - - name: k8s_node_name - description: TBD + description: 'Controller name. The value of _pod.OwnerReferences.Controller.Name_.' - name: k8s_pod_name - description: TBD + description: 'Pod name. The value of _pod.metadata.name_.' - name: k8s_container_name - description: TBD - - name: k8s_container_id - description: TBD + description: 'Container name. The value of _pod.spec.containers.name_.' - name: k8s_kind - description: TBD + description: 'Instance kind: "pod" or "container".' - name: k8s_qos_class - description: TBD + description: 'QoS class (guaranteed, burstable, besteffort).' - name: k8s_cluster_id - description: TBD + description: 'Cluster ID. 
The value of kube-system namespace _namespace.metadata.uid_.' metrics: - name: k8s.cgroup.net_net description: Bandwidth @@ -821,154 +829,110 @@ modules: description: "" availability: [] scopes: - - name: global + - name: systemd service description: "" - labels: [] + labels: + - name: service_name + description: Service name metrics: - - name: services.cpu + - name: systemd.service.cpu.utilization description: Systemd Services CPU utilization (100% = 1 core) - unit: "percentage" + unit: percentage chart_type: stacked dimensions: - - name: a dimension per systemd service - - name: services.mem_usage + - name: user + - name: system + - name: systemd.service.memory.usage description: Systemd Services Used Memory - unit: "MiB" + unit: MiB chart_type: stacked dimensions: - - name: a dimension per systemd service - - name: services.mem_rss - description: Systemd Services RSS Memory - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per systemd service - - name: services.mem_mapped - description: Systemd Services Mapped Memory - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per systemd service - - name: services.mem_cache - description: Systemd Services Cache Memory - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per systemd service - - name: services.mem_writeback - description: Systemd Services Writeback Memory - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per systemd service - - name: services.mem_pgfault - description: Systemd Services Memory Minor Page Faults - unit: "MiB/s" - chart_type: stacked - dimensions: - - name: a dimension per systemd service - - name: services.mem_pgmajfault - description: Systemd Services Memory Major Page Faults - unit: "MiB/s" - chart_type: stacked - dimensions: - - name: a dimension per systemd service - - name: services.mem_pgpgin - description: Systemd Services Memory Charging Activity - unit: "MiB/s" - chart_type: stacked - dimensions: - - name: a dimension per systemd service - - name: services.mem_pgpgout - description: Systemd Services Memory Uncharging Activity - unit: "MiB/s" - chart_type: stacked - dimensions: - - name: a dimension per systemd service - - name: services.mem_failcnt + - name: ram + - name: swap + - name: systemd.service.memory.failcnt description: Systemd Services Memory Limit Failures - unit: "failures" - chart_type: stacked - dimensions: - - name: a dimension per systemd service - - name: services.swap_usage - description: Systemd Services Swap Memory Used - unit: "MiB" - chart_type: stacked - dimensions: - - name: a dimension per systemd service - - name: services.io_read - description: Systemd Services Disk Read Bandwidth - unit: "KiB/s" - chart_type: stacked + unit: failures/s + chart_type: line dimensions: - - name: a dimension per systemd service - - name: services.io_write - description: Systemd Services Disk Write Bandwidth - unit: "KiB/s" + - name: fail + - name: systemd.service.memory.ram.usage + description: Systemd Services Memory + unit: MiB chart_type: stacked dimensions: - - name: a dimension per systemd service - - name: services.io_ops_read - description: Systemd Services Disk Read Operations - unit: "operations/s" + - name: rss + - name: cache + - name: mapped_file + - name: rss_huge + - name: systemd.service.memory.writeback + description: Systemd Services Writeback Memory + unit: MiB chart_type: stacked dimensions: - - name: a dimension per systemd service - - name: services.io_ops_write - description: Systemd Services 
Disk Write Operations - unit: "operations/s" - chart_type: stacked + - name: writeback + - name: dirty + - name: systemd.service.memory.paging.faults + description: Systemd Services Memory Minor and Major Page Faults + unit: MiB/s + chart_type: area dimensions: - - name: a dimension per systemd service - - name: services.throttle_io_read - description: Systemd Services Throttle Disk Read Bandwidth - unit: "KiB/s" - chart_type: stacked + - name: minor + - name: major + - name: systemd.service.memory.paging.io + description: Systemd Services Memory Paging IO + unit: MiB/s + chart_type: area dimensions: - - name: a dimension per systemd service - - name: services.services.throttle_io_write - description: Systemd Services Throttle Disk Write Bandwidth - unit: "KiB/s" - chart_type: stacked + - name: in + - name: out + - name: systemd.service.disk.io + description: Systemd Services Disk Read/Write Bandwidth + unit: KiB/s + chart_type: area dimensions: - - name: a dimension per systemd service - - name: services.throttle_io_ops_read - description: Systemd Services Throttle Disk Read Operations - unit: "operations/s" - chart_type: stacked + - name: read + - name: write + - name: systemd.service.disk.iops + description: Systemd Services Disk Read/Write Operations + unit: operations/s + chart_type: line dimensions: - - name: a dimension per systemd service - - name: throttle_io_ops_write - description: Systemd Services Throttle Disk Write Operations - unit: "operations/s" - chart_type: stacked + - name: read + - name: write + - name: systemd.service.disk.throttle.io + description: Systemd Services Throttle Disk Read/Write Bandwidth + unit: KiB/s + chart_type: area dimensions: - - name: a dimension per systemd service - - name: services.queued_io_ops_read - description: Systemd Services Queued Disk Read Operations - unit: "operations/s" - chart_type: stacked + - name: read + - name: write + - name: systemd.service.disk.throttle.iops + description: Systemd Services Throttle Disk Read/Write Operations + unit: operations/s + chart_type: line dimensions: - - name: a dimension per systemd service - - name: services.queued_io_ops_write - description: Systemd Services Queued Disk Write Operations - unit: "operations/s" - chart_type: stacked + - name: read + - name: write + - name: systemd.service.disk.queued_iops + description: Systemd Services Queued Disk Read/Write Operations + unit: operations/s + chart_type: line dimensions: - - name: a dimension per systemd service - - name: services.merged_io_ops_read - description: Systemd Services Merged Disk Read Operations - unit: "operations/s" - chart_type: stacked + - name: read + - name: write + - name: systemd.service.disk.merged_iops + description: Systemd Services Merged Disk Read/Write Operations + unit: operations/s + chart_type: line dimensions: - - name: a dimension per systemd service - - name: services.merged_io_ops_write - description: Systemd Services Merged Disk Write Operations - unit: "operations/s" - chart_type: stacked + - name: read + - name: write + - name: systemd.service.pids.current + description: Systemd Services Number of Processes + unit: pids + chart_type: line dimensions: - - name: a dimension per systemd service + - name: pids - <<: *module meta: <<: *meta diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c index 9c7488c827dc6d..705edf6f748fc2 100644 --- a/collectors/cgroups.plugin/sys_fs_cgroup.c +++ b/collectors/cgroups.plugin/sys_fs_cgroup.c @@ -1,148 +1,98 @@ // 
SPDX-License-Identifier: GPL-3.0-or-later -#include "sys_fs_cgroup.h" - -#define PLUGIN_CGROUPS_NAME "cgroups.plugin" -#define PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME "systemd" -#define PLUGIN_CGROUPS_MODULE_CGROUPS_NAME "/sys/fs/cgroup" - -#ifdef NETDATA_INTERNAL_CHECKS -#define CGROUP_PROCFILE_FLAG PROCFILE_FLAG_DEFAULT -#else -#define CGROUP_PROCFILE_FLAG PROCFILE_FLAG_NO_ERROR_ON_FILE_IO -#endif +#include "cgroup-internals.h" // main cgroups thread worker jobs #define WORKER_CGROUPS_LOCK 0 #define WORKER_CGROUPS_READ 1 #define WORKER_CGROUPS_CHART 2 -// discovery cgroup thread worker jobs -#define WORKER_DISCOVERY_INIT 0 -#define WORKER_DISCOVERY_FIND 1 -#define WORKER_DISCOVERY_PROCESS 2 -#define WORKER_DISCOVERY_PROCESS_RENAME 3 -#define WORKER_DISCOVERY_PROCESS_NETWORK 4 -#define WORKER_DISCOVERY_PROCESS_FIRST_TIME 5 -#define WORKER_DISCOVERY_UPDATE 6 -#define WORKER_DISCOVERY_CLEANUP 7 -#define WORKER_DISCOVERY_COPY 8 -#define WORKER_DISCOVERY_SHARE 9 -#define WORKER_DISCOVERY_LOCK 10 - -#if WORKER_UTILIZATION_MAX_JOB_TYPES < 11 -#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 11 -#endif - // ---------------------------------------------------------------------------- // cgroup globals +unsigned long long host_ram_total = 0; +int is_inside_k8s = 0; +long system_page_size = 4096; // system will be queried via sysconf() in configuration() +int cgroup_enable_cpuacct_stat = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO; +int cgroup_enable_cpuacct_cpu_throttling = CONFIG_BOOLEAN_YES; +int cgroup_enable_cpuacct_cpu_shares = CONFIG_BOOLEAN_NO; +int cgroup_enable_memory = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_detailed_memory = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_swap = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_blkio_io = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_blkio_ops = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_blkio_throttle_io = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_blkio_throttle_ops = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_blkio_merged_ops = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_pressure_cpu = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_pressure_io_some = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_pressure_io_full = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_pressure_memory_some = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_pressure_memory_full = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_pressure_irq_some = CONFIG_BOOLEAN_NO; +int cgroup_enable_pressure_irq_full = CONFIG_BOOLEAN_AUTO; +int cgroup_enable_systemd_services = CONFIG_BOOLEAN_YES; +int cgroup_enable_systemd_services_detailed_memory = CONFIG_BOOLEAN_NO; +int cgroup_used_memory = CONFIG_BOOLEAN_YES; +int cgroup_use_unified_cgroups = CONFIG_BOOLEAN_NO; +int cgroup_unified_exist = CONFIG_BOOLEAN_AUTO; +int cgroup_search_in_devices = 1; +int cgroup_check_for_new_every = 10; +int cgroup_update_every = 1; +int cgroup_containers_chart_priority = NETDATA_CHART_PRIO_CGROUPS_CONTAINERS; +int cgroup_recheck_zero_blkio_every_iterations = 10; +int cgroup_recheck_zero_mem_failcnt_every_iterations = 10; +int cgroup_recheck_zero_mem_detailed_every_iterations = 10; +char *cgroup_cpuacct_base = NULL; +char *cgroup_cpuset_base = NULL; +char *cgroup_blkio_base = NULL; +char *cgroup_memory_base = NULL; +char *cgroup_devices_base = NULL; +char *cgroup_pids_base = NULL; +char *cgroup_unified_base = NULL; +int cgroup_root_count = 0; +int cgroup_root_max = 1000; +int cgroup_max_depth = 0; +SIMPLE_PATTERN 
*enabled_cgroup_paths = NULL; +SIMPLE_PATTERN *enabled_cgroup_names = NULL; +SIMPLE_PATTERN *search_cgroup_paths = NULL; +SIMPLE_PATTERN *enabled_cgroup_renames = NULL; +SIMPLE_PATTERN *systemd_services_cgroups = NULL; +SIMPLE_PATTERN *entrypoint_parent_process_comm = NULL; +char *cgroups_network_interface_script = NULL; +int cgroups_check = 0; +uint32_t Read_hash = 0; +uint32_t Write_hash = 0; +uint32_t user_hash = 0; +uint32_t system_hash = 0; +uint32_t user_usec_hash = 0; +uint32_t system_usec_hash = 0; +uint32_t nr_periods_hash = 0; +uint32_t nr_throttled_hash = 0; +uint32_t throttled_time_hash = 0; +uint32_t throttled_usec_hash = 0; -static char cgroup_chart_id_prefix[] = "cgroup_"; - -static int is_inside_k8s = 0; - -static long system_page_size = 4096; // system will be queried via sysconf() in configuration() - -static int cgroup_enable_cpuacct_stat = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_cpuacct_cpu_throttling = CONFIG_BOOLEAN_YES; -static int cgroup_enable_cpuacct_cpu_shares = CONFIG_BOOLEAN_NO; -static int cgroup_enable_memory = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_detailed_memory = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_swap = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_blkio_io = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_blkio_ops = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_blkio_throttle_io = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_blkio_throttle_ops = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_blkio_merged_ops = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_pressure_cpu = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_pressure_io_some = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_pressure_io_full = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_pressure_memory_some = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_pressure_memory_full = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_pressure_irq_some = CONFIG_BOOLEAN_NO; -static int cgroup_enable_pressure_irq_full = CONFIG_BOOLEAN_AUTO; - -static int cgroup_enable_systemd_services = CONFIG_BOOLEAN_YES; -static int cgroup_enable_systemd_services_detailed_memory = CONFIG_BOOLEAN_NO; -static int cgroup_used_memory = CONFIG_BOOLEAN_YES; - -static int cgroup_use_unified_cgroups = CONFIG_BOOLEAN_NO; -static int cgroup_unified_exist = CONFIG_BOOLEAN_AUTO; - -static int cgroup_search_in_devices = 1; - -static int cgroup_check_for_new_every = 10; -static int cgroup_update_every = 1; -static int cgroup_containers_chart_priority = NETDATA_CHART_PRIO_CGROUPS_CONTAINERS; - -static int cgroup_recheck_zero_blkio_every_iterations = 10; -static int cgroup_recheck_zero_mem_failcnt_every_iterations = 10; -static int cgroup_recheck_zero_mem_detailed_every_iterations = 10; - -static char *cgroup_cpuacct_base = NULL; -static char *cgroup_cpuset_base = NULL; -static char *cgroup_blkio_base = NULL; -static char *cgroup_memory_base = NULL; -static char *cgroup_devices_base = NULL; -static char *cgroup_unified_base = NULL; - -static int cgroup_root_count = 0; -static int cgroup_root_max = 1000; -static int cgroup_max_depth = 0; - -static SIMPLE_PATTERN *enabled_cgroup_paths = NULL; -static SIMPLE_PATTERN *enabled_cgroup_names = NULL; -static SIMPLE_PATTERN *search_cgroup_paths = NULL; -static SIMPLE_PATTERN *enabled_cgroup_renames = NULL; -static SIMPLE_PATTERN *systemd_services_cgroups = NULL; - -static 
SIMPLE_PATTERN *entrypoint_parent_process_comm = NULL; - -static char *cgroups_rename_script = NULL; -static char *cgroups_network_interface_script = NULL; - -static int cgroups_check = 0; - -static uint32_t Read_hash = 0; -static uint32_t Write_hash = 0; -static uint32_t user_hash = 0; -static uint32_t system_hash = 0; -static uint32_t user_usec_hash = 0; -static uint32_t system_usec_hash = 0; -static uint32_t nr_periods_hash = 0; -static uint32_t nr_throttled_hash = 0; -static uint32_t throttled_time_hash = 0; -static uint32_t throttled_usec_hash = 0; - -enum cgroups_type { CGROUPS_AUTODETECT_FAIL, CGROUPS_V1, CGROUPS_V2 }; - -enum cgroups_systemd_setting { - SYSTEMD_CGROUP_ERR, - SYSTEMD_CGROUP_LEGACY, - SYSTEMD_CGROUP_HYBRID, - SYSTEMD_CGROUP_UNIFIED -}; - -struct cgroups_systemd_config_setting { - char *name; - enum cgroups_systemd_setting setting; -}; +// *** WARNING *** The fields are not thread safe. Take care of safe usage. +struct cgroup *cgroup_root = NULL; +uv_mutex_t cgroup_root_mutex; -static struct cgroups_systemd_config_setting cgroups_systemd_options[] = { - { .name = "legacy", .setting = SYSTEMD_CGROUP_LEGACY }, - { .name = "hybrid", .setting = SYSTEMD_CGROUP_HYBRID }, - { .name = "unified", .setting = SYSTEMD_CGROUP_UNIFIED }, - { .name = NULL, .setting = SYSTEMD_CGROUP_ERR }, +struct cgroups_systemd_config_setting cgroups_systemd_options[] = { + { .name = "legacy", .setting = SYSTEMD_CGROUP_LEGACY }, + { .name = "hybrid", .setting = SYSTEMD_CGROUP_HYBRID }, + { .name = "unified", .setting = SYSTEMD_CGROUP_UNIFIED }, + { .name = NULL, .setting = SYSTEMD_CGROUP_ERR }, }; // Shared memory with information from detected cgroups netdata_ebpf_cgroup_shm_t shm_cgroup_ebpf = {NULL, NULL}; -static int shm_fd_cgroup_ebpf = -1; +int shm_fd_cgroup_ebpf = -1; sem_t *shm_mutex_cgroup_ebpf = SEM_FAILED; +struct discovery_thread discovery_thread; + + /* on Fed systemd is not in PATH for some reason */ #define SYSTEMD_CMD_RHEL "/usr/lib/systemd/systemd --version" #define SYSTEMD_HIERARCHY_STRING "default-hierarchy=" @@ -361,54 +311,70 @@ void read_cgroup_plugin_configuration() { cgroup_enable_pressure_memory_full = CONFIG_BOOLEAN_NO; mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuacct"); - if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuacct"); - if(!mi) { + if (!mi) + mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuacct"); + if (!mi) { collector_error("CGROUP: cannot find cpuacct mountinfo. Assuming default: /sys/fs/cgroup/cpuacct"); s = "/sys/fs/cgroup/cpuacct"; - } - else s = mi->mount_point; + } else + s = mi->mount_point; set_cgroup_base_path(filename, s); cgroup_cpuacct_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/cpuacct", filename); mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuset"); - if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuset"); - if(!mi) { + if (!mi) + mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuset"); + if (!mi) { collector_error("CGROUP: cannot find cpuset mountinfo. 
Assuming default: /sys/fs/cgroup/cpuset"); s = "/sys/fs/cgroup/cpuset"; - } - else s = mi->mount_point; + } else + s = mi->mount_point; set_cgroup_base_path(filename, s); cgroup_cpuset_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/cpuset", filename); mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "blkio"); - if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "blkio"); - if(!mi) { + if (!mi) + mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "blkio"); + if (!mi) { collector_error("CGROUP: cannot find blkio mountinfo. Assuming default: /sys/fs/cgroup/blkio"); s = "/sys/fs/cgroup/blkio"; - } - else s = mi->mount_point; + } else + s = mi->mount_point; set_cgroup_base_path(filename, s); cgroup_blkio_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/blkio", filename); mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "memory"); - if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "memory"); - if(!mi) { + if (!mi) + mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "memory"); + if (!mi) { collector_error("CGROUP: cannot find memory mountinfo. Assuming default: /sys/fs/cgroup/memory"); s = "/sys/fs/cgroup/memory"; - } - else s = mi->mount_point; + } else + s = mi->mount_point; set_cgroup_base_path(filename, s); cgroup_memory_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/memory", filename); mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "devices"); - if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "devices"); - if(!mi) { + if (!mi) + mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "devices"); + if (!mi) { collector_error("CGROUP: cannot find devices mountinfo. Assuming default: /sys/fs/cgroup/devices"); s = "/sys/fs/cgroup/devices"; - } - else s = mi->mount_point; + } else + s = mi->mount_point; set_cgroup_base_path(filename, s); cgroup_devices_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/devices", filename); + + mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "pids"); + if (!mi) + mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "pids"); + if (!mi) { + collector_error("CGROUP: cannot find pids mountinfo. Assuming default: /sys/fs/cgroup/pids"); + s = "/sys/fs/cgroup/pids"; + } else + s = mi->mount_point; + set_cgroup_base_path(filename, s); + cgroup_pids_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/pids", filename); } else { //cgroup_enable_cpuacct_stat = @@ -428,22 +394,19 @@ void read_cgroup_plugin_configuration() { cgroup_used_memory = CONFIG_BOOLEAN_NO; //unified cgroups use different values //TODO: can there be more than 1 cgroup2 mount point? - mi = mountinfo_find_by_filesystem_super_option(root, "cgroup2", "rw"); //there is no cgroup2 specific super option - for now use 'rw' option - if(mi) - netdata_log_debug(D_CGROUP, "found unified cgroup root using super options, with path: '%s'", mi->mount_point); - if(!mi) { + //there is no cgroup2 specific super option - for now use 'rw' option + mi = mountinfo_find_by_filesystem_super_option(root, "cgroup2", "rw"); + if (!mi) { mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup2", "cgroup"); - if(mi) - netdata_log_debug(D_CGROUP, "found unified cgroup root using mountsource info, with path: '%s'", mi->mount_point); } - if(!mi) { + if (!mi) { collector_error("CGROUP: cannot find cgroup2 mountinfo. 
Assuming default: /sys/fs/cgroup"); s = "/sys/fs/cgroup"; - } - else s = mi->mount_point; + } else + s = mi->mount_point; + set_cgroup_base_path(filename, s); cgroup_unified_base = config_get("plugin:cgroups", "path to unified cgroups", filename); - netdata_log_debug(D_CGROUP, "using cgroup root: '%s'", cgroup_unified_base); } cgroup_root_max = (int)config_get_number("plugin:cgroups", "max cgroups to allow", cgroup_root_max); @@ -620,422 +583,6 @@ void netdata_cgroup_ebpf_initialize_shm() shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME); } -// ---------------------------------------------------------------------------- -// cgroup objects - -struct blkio { - int updated; - int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - int delay_counter; - - char *filename; - - unsigned long long Read; - unsigned long long Write; -/* - unsigned long long Sync; - unsigned long long Async; - unsigned long long Total; -*/ -}; - -// https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt -struct memory { - ARL_BASE *arl_base; - ARL_ENTRY *arl_dirty; - ARL_ENTRY *arl_swap; - - int updated_detailed; - int updated_usage_in_bytes; - int updated_msw_usage_in_bytes; - int updated_failcnt; - - int enabled_detailed; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - int enabled_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - int enabled_msw_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - int enabled_failcnt; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - - int delay_counter_detailed; - int delay_counter_failcnt; - - char *filename_detailed; - char *filename_usage_in_bytes; - char *filename_msw_usage_in_bytes; - char *filename_failcnt; - - int detailed_has_dirty; - int detailed_has_swap; - - // detailed metrics -/* - unsigned long long cache; - unsigned long long rss; - unsigned long long rss_huge; - unsigned long long mapped_file; - unsigned long long writeback; - unsigned long long dirty; - unsigned long long swap; - unsigned long long pgpgin; - unsigned long long pgpgout; - unsigned long long pgfault; - unsigned long long pgmajfault; - unsigned long long inactive_anon; - unsigned long long active_anon; - unsigned long long inactive_file; - unsigned long long active_file; - unsigned long long unevictable; - unsigned long long hierarchical_memory_limit; -*/ - //unified cgroups metrics - unsigned long long anon; - unsigned long long kernel_stack; - unsigned long long slab; - unsigned long long sock; - unsigned long long shmem; - unsigned long long anon_thp; - //unsigned long long file_writeback; - //unsigned long long file_dirty; - //unsigned long long file; - - unsigned long long total_cache; - unsigned long long total_rss; - unsigned long long total_rss_huge; - unsigned long long total_mapped_file; - unsigned long long total_writeback; - unsigned long long total_dirty; - unsigned long long total_swap; - unsigned long long total_pgpgin; - unsigned long long total_pgpgout; - unsigned long long total_pgfault; - unsigned long long total_pgmajfault; -/* - unsigned long long total_inactive_anon; - unsigned long long total_active_anon; -*/ - - unsigned long long total_inactive_file; - -/* - unsigned long long total_active_file; - unsigned long long total_unevictable; -*/ - - // single file metrics - unsigned long long usage_in_bytes; - unsigned long long msw_usage_in_bytes; - unsigned long long failcnt; -}; - -// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt -struct cpuacct_stat { - int updated; - int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - - 
char *filename; - - unsigned long long user; // v1, v2(user_usec) - unsigned long long system; // v1, v2(system_usec) -}; - -// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt -struct cpuacct_usage { - int updated; - int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - - char *filename; - - unsigned int cpus; - unsigned long long *cpu_percpu; -}; - -// represents cpuacct/cpu.stat, for v2 'cpuacct_stat' is used for 'user_usec', 'system_usec' -struct cpuacct_cpu_throttling { - int updated; - int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - - char *filename; - - unsigned long long nr_periods; - unsigned long long nr_throttled; - unsigned long long throttled_time; - - unsigned long long nr_throttled_perc; -}; - -// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/sec-cpu#sect-cfs -// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_monitoring_and_updating_the_kernel/using-cgroups-v2-to-control-distribution-of-cpu-time-for-applications_managing-monitoring-and-updating-the-kernel#proc_controlling-distribution-of-cpu-time-for-applications-by-adjusting-cpu-weight_using-cgroups-v2-to-control-distribution-of-cpu-time-for-applications -struct cpuacct_cpu_shares { - int updated; - int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - - char *filename; - - unsigned long long shares; -}; - -struct cgroup_network_interface { - const char *host_device; - const char *container_device; - struct cgroup_network_interface *next; -}; - -enum cgroups_container_orchestrator { - CGROUPS_ORCHESTRATOR_UNSET, - CGROUPS_ORCHESTRATOR_UNKNOWN, - CGROUPS_ORCHESTRATOR_K8S -}; - -// *** WARNING *** The fields are not thread safe. Take care of safe usage. 
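The thread-safety warning just above also applies to the new shared globals (`cgroup_root`, `cgroup_root_mutex`) introduced earlier in this hunk: with the `static` qualifiers dropped so other compilation units (see `cgroup-internals.h`) can reach them, the list is touched by both the discovery thread and the chart thread, and every traversal or mutation has to hold the mutex. A minimal sketch of that locking pattern, assuming the libuv mutex the patch declares, follows; `cgroup_stub` and `count_available_cgroups` are illustrative stand-ins, not code from the plugin.

```c
/* Sketch only: serialize access to a shared cgroup list with a libuv mutex. */
#include <uv.h>
#include <stddef.h>

struct cgroup_stub {
    int available;                 /* found in the filesystem */
    struct cgroup_stub *next;
};

static struct cgroup_stub *cgroup_root = NULL;  /* shared list head */
static uv_mutex_t cgroup_root_mutex;            /* guards every access */

/* chart-thread side: walk the list only while holding the lock */
static size_t count_available_cgroups(void) {
    size_t n = 0;
    uv_mutex_lock(&cgroup_root_mutex);
    for (struct cgroup_stub *cg = cgroup_root; cg; cg = cg->next)
        if (cg->available)
            n++;
    uv_mutex_unlock(&cgroup_root_mutex);
    return n;
}

int main(void) {
    uv_mutex_init(&cgroup_root_mutex);
    (void)count_available_cgroups();
    uv_mutex_destroy(&cgroup_root_mutex);
    return 0;
}
```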
-struct cgroup { - uint32_t options; - - int first_time_seen; // first time seen by the discoverer - int processed; // the discoverer is done processing a cgroup (resolved name, set 'enabled' option) - - char available; // found in the filesystem - char enabled; // enabled in the config - - char pending_renames; - char *intermediate_id; // TODO: remove it when the renaming script is fixed - - char *id; - uint32_t hash; - - char *chart_id; - uint32_t hash_chart; - - char *chart_title; - - DICTIONARY *chart_labels; - - int container_orchestrator; - - struct cpuacct_stat cpuacct_stat; - struct cpuacct_usage cpuacct_usage; - struct cpuacct_cpu_throttling cpuacct_cpu_throttling; - struct cpuacct_cpu_shares cpuacct_cpu_shares; - - struct memory memory; - - struct blkio io_service_bytes; // bytes - struct blkio io_serviced; // operations - - struct blkio throttle_io_service_bytes; // bytes - struct blkio throttle_io_serviced; // operations - - struct blkio io_merged; // operations - struct blkio io_queued; // operations - - struct cgroup_network_interface *interfaces; - - struct pressure cpu_pressure; - struct pressure io_pressure; - struct pressure memory_pressure; - struct pressure irq_pressure; - - // per cgroup charts - RRDSET *st_cpu; - RRDSET *st_cpu_limit; - RRDSET *st_cpu_per_core; - RRDSET *st_cpu_nr_throttled; - RRDSET *st_cpu_throttled_time; - RRDSET *st_cpu_shares; - - RRDSET *st_mem; - RRDSET *st_mem_utilization; - RRDSET *st_writeback; - RRDSET *st_mem_activity; - RRDSET *st_pgfaults; - RRDSET *st_mem_usage; - RRDSET *st_mem_usage_limit; - RRDSET *st_mem_failcnt; - - RRDSET *st_io; - RRDSET *st_serviced_ops; - RRDSET *st_throttle_io; - RRDSET *st_throttle_serviced_ops; - RRDSET *st_queued_ops; - RRDSET *st_merged_ops; - - // per cgroup chart variables - char *filename_cpuset_cpus; - unsigned long long cpuset_cpus; - - char *filename_cpu_cfs_period; - unsigned long long cpu_cfs_period; - - char *filename_cpu_cfs_quota; - unsigned long long cpu_cfs_quota; - - const RRDSETVAR_ACQUIRED *chart_var_cpu_limit; - NETDATA_DOUBLE prev_cpu_usage; - - char *filename_memory_limit; - unsigned long long memory_limit; - const RRDSETVAR_ACQUIRED *chart_var_memory_limit; - - char *filename_memoryswap_limit; - unsigned long long memoryswap_limit; - const RRDSETVAR_ACQUIRED *chart_var_memoryswap_limit; - - // services - RRDDIM *rd_cpu; - RRDDIM *rd_mem_usage; - RRDDIM *rd_mem_failcnt; - RRDDIM *rd_swap_usage; - - RRDDIM *rd_mem_detailed_cache; - RRDDIM *rd_mem_detailed_rss; - RRDDIM *rd_mem_detailed_mapped; - RRDDIM *rd_mem_detailed_writeback; - RRDDIM *rd_mem_detailed_pgpgin; - RRDDIM *rd_mem_detailed_pgpgout; - RRDDIM *rd_mem_detailed_pgfault; - RRDDIM *rd_mem_detailed_pgmajfault; - - RRDDIM *rd_io_service_bytes_read; - RRDDIM *rd_io_serviced_read; - RRDDIM *rd_throttle_io_read; - RRDDIM *rd_throttle_io_serviced_read; - RRDDIM *rd_io_queued_read; - RRDDIM *rd_io_merged_read; - - RRDDIM *rd_io_service_bytes_write; - RRDDIM *rd_io_serviced_write; - RRDDIM *rd_throttle_io_write; - RRDDIM *rd_throttle_io_serviced_write; - RRDDIM *rd_io_queued_write; - RRDDIM *rd_io_merged_write; - - struct cgroup *next; - struct cgroup *discovered_next; - -} *cgroup_root = NULL; - -uv_mutex_t cgroup_root_mutex; - -struct cgroup *discovered_cgroup_root = NULL; - -struct discovery_thread { - uv_thread_t thread; - uv_mutex_t mutex; - uv_cond_t cond_var; - int start_discovery; - int exited; -} discovery_thread; - -// --------------------------------------------------------------------------------------------- - -static 
inline int matches_enabled_cgroup_paths(char *id) { - return simple_pattern_matches(enabled_cgroup_paths, id); -} - -static inline int matches_enabled_cgroup_names(char *name) { - return simple_pattern_matches(enabled_cgroup_names, name); -} - -static inline int matches_enabled_cgroup_renames(char *id) { - return simple_pattern_matches(enabled_cgroup_renames, id); -} - -static inline int matches_systemd_services_cgroups(char *id) { - return simple_pattern_matches(systemd_services_cgroups, id); -} - -static inline int matches_search_cgroup_paths(const char *dir) { - return simple_pattern_matches(search_cgroup_paths, dir); -} - -static inline int matches_entrypoint_parent_process_comm(const char *comm) { - return simple_pattern_matches(entrypoint_parent_process_comm, comm); -} - -static inline int is_cgroup_systemd_service(struct cgroup *cg) { - return (cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE); -} - -// --------------------------------------------------------------------------------------------- -static int k8s_is_kubepod(struct cgroup *cg) { - return cg->container_orchestrator == CGROUPS_ORCHESTRATOR_K8S; -} - -static int k8s_is_container(const char *id) { - // examples: - // https://github.com/netdata/netdata/blob/0fc101679dcd12f1cb8acdd07bb4c85d8e553e53/collectors/cgroups.plugin/cgroup-name.sh#L121-L147 - const char *p = id; - const char *pp = NULL; - int i = 0; - size_t l = 3; // pod - while ((p = strstr(p, "pod"))) { - i++; - p += l; - pp = p; - } - return !(i < 2 || !pp || !(pp = strchr(pp, '/')) || !pp++ || !*pp); -} - -#define TASK_COMM_LEN 16 - -static int k8s_get_container_first_proc_comm(const char *id, char *comm) { - if (!k8s_is_container(id)) { - return 1; - } - - static procfile *ff = NULL; - - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/%s/cgroup.procs", cgroup_cpuacct_base, id); - - ff = procfile_reopen(ff, filename, NULL, CGROUP_PROCFILE_FLAG); - if (unlikely(!ff)) { - netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot open file '%s'.", filename); - return 1; - } - - ff = procfile_readall(ff); - if (unlikely(!ff)) { - netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot read file '%s'.", filename); - return 1; - } - - unsigned long lines = procfile_lines(ff); - if (likely(lines < 2)) { - return 1; - } - - char *pid = procfile_lineword(ff, 0, 0); - if (!pid || !*pid) { - return 1; - } - - snprintfz(filename, FILENAME_MAX, "%s/proc/%s/comm", netdata_configured_host_prefix, pid); - - ff = procfile_reopen(ff, filename, NULL, PROCFILE_FLAG_DEFAULT); - if (unlikely(!ff)) { - netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot open file '%s'.", filename); - return 1; - } - - ff = procfile_readall(ff); - if (unlikely(!ff)) { - netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot read file '%s'.", filename); - return 1; - } - - lines = procfile_lines(ff); - if (unlikely(lines != 2)) { - return 1; - } - - char *proc_comm = procfile_lineword(ff, 0, 0); - if (!proc_comm || !*proc_comm) { - return 1; - } - - strncpyz(comm, proc_comm, TASK_COMM_LEN); - return 0; -} - // --------------------------------------------------------------------------------------------- static unsigned long long calc_delta(unsigned long long curr, unsigned long long prev) { @@ -1049,16 +596,7 @@ static unsigned long long calc_percentage(unsigned long long value, unsigned lon if (total == 0) { return 0; } - return (NETDATA_DOUBLE)value / (NETDATA_DOUBLE)total * 100; -} - -static int 
calc_cgroup_depth(const char *id) { - int depth = 0; - const char *s; - for (s = id; *s; s++) { - depth += unlikely(*s == '/'); - } - return depth; + return (unsigned long long)((NETDATA_DOUBLE)value / (NETDATA_DOUBLE)total * 100); } // ---------------------------------------------------------------------------- @@ -1622,6 +1160,15 @@ static inline void cgroup_read_memory(struct memory *mem, char parent_cg_is_unif } } +static void cgroup_read_pids_current(struct pids *pids) { + pids->pids_current_updated = 0; + + if (unlikely(!pids->pids_current_filename)) + return; + + pids->pids_current_updated = !read_single_number_file(pids->pids_current_filename, &pids->pids_current); +} + static inline void read_cgroup(struct cgroup *cg) { netdata_log_debug(D_CGROUP, "reading metrics for cgroups '%s'", cg->id); if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { @@ -1636,6 +1183,7 @@ static inline void read_cgroup(struct cgroup *cg) { cgroup_read_blkio(&cg->throttle_io_serviced); cgroup_read_blkio(&cg->io_merged); cgroup_read_blkio(&cg->io_queued); + cgroup_read_pids_current(&cg->pids); } else { //TODO: io_service_bytes and io_serviced use same file merge into 1 function @@ -1648,6 +1196,7 @@ static inline void read_cgroup(struct cgroup *cg) { cgroup2_read_pressure(&cg->memory_pressure); cgroup2_read_pressure(&cg->irq_pressure); cgroup_read_memory(&cg->memory, 1); + cgroup_read_pids_current(&cg->pids); } } @@ -1662,2092 +1211,182 @@ static inline void read_all_discovered_cgroups(struct cgroup *root) { } } -// ---------------------------------------------------------------------------- -// cgroup network interfaces +// update CPU and memory limits -#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048 -static inline void read_cgroup_network_interfaces(struct cgroup *cg) { - netdata_log_debug(D_CGROUP, "looking for the network interfaces of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, cg->chart_title); +static inline void update_cpu_limits(char **filename, unsigned long long *value, struct cgroup *cg) { + if(*filename) { + int ret = -1; - pid_t cgroup_pid; - char cgroup_identifier[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1]; + if(value == &cg->cpuset_cpus) { + unsigned long ncpus = read_cpuset_cpus(*filename, get_system_cpus()); + if(ncpus) { + *value = ncpus; + ret = 0; + } + } + else if(value == &cg->cpu_cfs_period || value == &cg->cpu_cfs_quota) { + ret = read_single_number_file(*filename, value); + } + else ret = -1; - if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { - snprintfz(cgroup_identifier, CGROUP_NETWORK_INTERFACE_MAX_LINE, "%s%s", cgroup_cpuacct_base, cg->id); - } - else { - snprintfz(cgroup_identifier, CGROUP_NETWORK_INTERFACE_MAX_LINE, "%s%s", cgroup_unified_base, cg->id); + if(ret) { + collector_error("Cannot refresh cgroup %s cpu limit by reading '%s'. 
Will not update its limit anymore.", cg->id, *filename); + freez(*filename); + *filename = NULL; + } } +} - netdata_log_debug(D_CGROUP, "executing cgroup_identifier %s --cgroup '%s' for cgroup '%s'", cgroups_network_interface_script, cgroup_identifier, cg->id); - FILE *fp_child_input, *fp_child_output; - (void)netdata_popen_raw_default_flags_and_environment(&cgroup_pid, &fp_child_input, &fp_child_output, cgroups_network_interface_script, "--cgroup", cgroup_identifier); - if(!fp_child_output) { - collector_error("CGROUP: cannot popen(%s --cgroup \"%s\", \"r\").", cgroups_network_interface_script, cgroup_identifier); - return; - } +static inline void update_cpu_limits2(struct cgroup *cg) { + if(cg->filename_cpu_cfs_quota){ + static procfile *ff = NULL; - char *s; - char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1]; - while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp_child_output))) { - trim(s); - - if(*s && *s != '\n') { - char *t = s; - while(*t && *t != ' ') t++; - if(*t == ' ') { - *t = '\0'; - t++; - } + ff = procfile_reopen(ff, cg->filename_cpu_cfs_quota, NULL, CGROUP_PROCFILE_FLAG); + if(unlikely(!ff)) { + goto cpu_limits2_err; + } - if(!*s) { - collector_error("CGROUP: empty host interface returned by script"); - continue; - } + ff = procfile_readall(ff); + if(unlikely(!ff)) { + goto cpu_limits2_err; + } - if(!*t) { - collector_error("CGROUP: empty guest interface returned by script"); - continue; - } + unsigned long lines = procfile_lines(ff); - struct cgroup_network_interface *i = callocz(1, sizeof(struct cgroup_network_interface)); - i->host_device = strdupz(s); - i->container_device = strdupz(t); - i->next = cg->interfaces; - cg->interfaces = i; + if (unlikely(lines < 1)) { + collector_error("CGROUP: file '%s' should have 1 lines.", cg->filename_cpu_cfs_quota); + return; + } - collector_info("CGROUP: cgroup '%s' has network interface '%s' as '%s'", cg->id, i->host_device, i->container_device); + cg->cpu_cfs_period = str2ull(procfile_lineword(ff, 0, 1), NULL); + cg->cpuset_cpus = get_system_cpus(); - // register a device rename to proc_net_dev.c - netdev_rename_device_add( - i->host_device, i->container_device, cg->chart_id, cg->chart_labels, k8s_is_kubepod(cg) ? "k8s." : ""); + char *s = "max\n\0"; + if(strcmp(s, procfile_lineword(ff, 0, 0)) == 0){ + cg->cpu_cfs_quota = cg->cpu_cfs_period * cg->cpuset_cpus; + } else { + cg->cpu_cfs_quota = str2ull(procfile_lineword(ff, 0, 0), NULL); } - } + netdata_log_debug(D_CGROUP, "CPU limits values: %llu %llu %llu", cg->cpu_cfs_period, cg->cpuset_cpus, cg->cpu_cfs_quota); + return; + +cpu_limits2_err: + collector_error("Cannot refresh cgroup %s cpu limit by reading '%s'. 
Will not update its limit anymore.", cg->id, cg->filename_cpu_cfs_quota); + freez(cg->filename_cpu_cfs_quota); + cg->filename_cpu_cfs_quota = NULL; - netdata_pclose(fp_child_input, fp_child_output, cgroup_pid); - // netdata_log_debug(D_CGROUP, "closed cgroup_identifier for cgroup '%s'", cg->id); + } } -static inline void free_cgroup_network_interfaces(struct cgroup *cg) { - while(cg->interfaces) { - struct cgroup_network_interface *i = cg->interfaces; - cg->interfaces = i->next; +static inline int update_memory_limits(struct cgroup *cg) { + char **filename = &cg->filename_memory_limit; + const RRDSETVAR_ACQUIRED **chart_var = &cg->chart_var_memory_limit; + unsigned long long *value = &cg->memory_limit; - // delete the registration of proc_net_dev rename - netdev_rename_device_del(i->host_device); + if(*filename) { + if(unlikely(!*chart_var)) { + *chart_var = rrdsetvar_custom_chart_variable_add_and_acquire(cg->st_mem_usage, "memory_limit"); + if(!*chart_var) { + collector_error("Cannot create cgroup %s chart variable '%s'. Will not update its limit anymore.", cg->id, "memory_limit"); + freez(*filename); + *filename = NULL; + } + } - freez((void *)i->host_device); - freez((void *)i->container_device); - freez((void *)i); + if(*filename && *chart_var) { + if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { + if(read_single_number_file(*filename, value)) { + collector_error("Cannot refresh cgroup %s memory limit by reading '%s'. Will not update its limit anymore.", cg->id, *filename); + freez(*filename); + *filename = NULL; + } + else { + rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value) / (1024.0 * 1024.0)); + return 1; + } + } else { + char buffer[30 + 1]; + int ret = read_file(*filename, buffer, 30); + if(ret) { + collector_error("Cannot refresh cgroup %s memory limit by reading '%s'. 
Will not update its limit anymore.", cg->id, *filename); + freez(*filename); + *filename = NULL; + return 0; + } + char *s = "max\n\0"; + if(strcmp(s, buffer) == 0){ + *value = UINT64_MAX; + rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value) / (1024.0 * 1024.0)); + return 1; + } + *value = str2ull(buffer, NULL); + rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value) / (1024.0 * 1024.0)); + return 1; + } + } } + return 0; } // ---------------------------------------------------------------------------- -// add/remove/find cgroup objects +// generate charts -#define CGROUP_CHARTID_LINE_MAX 1024 +void update_cgroup_systemd_services_charts() { + for (struct cgroup *cg = cgroup_root; cg; cg = cg->next) { + if (unlikely(!cg->enabled || cg->pending_renames || !is_cgroup_systemd_service(cg))) + continue; -static inline char *cgroup_title_strdupz(const char *s) { - if(!s || !*s) s = "/"; - - if(*s == '/' && s[1] != '\0') s++; - - char *r = strdupz(s); - netdata_fix_chart_name(r); - - return r; -} - -static inline char *cgroup_chart_id_strdupz(const char *s) { - if(!s || !*s) s = "/"; - - if(*s == '/' && s[1] != '\0') s++; - - char *r = strdupz(s); - netdata_fix_chart_id(r); - - return r; -} - -// TODO: move the code to cgroup_chart_id_strdupz() when the renaming script is fixed -static inline void substitute_dots_in_id(char *s) { - // dots are used to distinguish chart type and id in streaming, so we should replace them - for (char *d = s; *d; d++) { - if (*d == '.') - *d = '-'; - } -} - -// ---------------------------------------------------------------------------- -// parse k8s labels - -char *cgroup_parse_resolved_name_and_labels(DICTIONARY *labels, char *data) { - // the first word, up to the first space is the name - char *name = strsep_skip_consecutive_separators(&data, " "); - - // the rest are key=value pairs separated by comma - while(data) { - char *pair = strsep_skip_consecutive_separators(&data, ","); - rrdlabels_add_pair(labels, pair, RRDLABEL_SRC_AUTO| RRDLABEL_SRC_K8S); - } - - return name; -} - -// ---------------------------------------------------------------------------- - -static inline void free_pressure(struct pressure *res) { - if (res->some.share_time.st) rrdset_is_obsolete(res->some.share_time.st); - if (res->some.total_time.st) rrdset_is_obsolete(res->some.total_time.st); - if (res->full.share_time.st) rrdset_is_obsolete(res->full.share_time.st); - if (res->full.total_time.st) rrdset_is_obsolete(res->full.total_time.st); - freez(res->filename); -} - -static inline void cgroup_free(struct cgroup *cg) { - netdata_log_debug(D_CGROUP, "Removing cgroup '%s' with chart id '%s' (was %s and %s)", cg->id, cg->chart_id, (cg->enabled)?"enabled":"disabled", (cg->available)?"available":"not available"); - - if(cg->st_cpu) rrdset_is_obsolete(cg->st_cpu); - if(cg->st_cpu_limit) rrdset_is_obsolete(cg->st_cpu_limit); - if(cg->st_cpu_per_core) rrdset_is_obsolete(cg->st_cpu_per_core); - if(cg->st_cpu_nr_throttled) rrdset_is_obsolete(cg->st_cpu_nr_throttled); - if(cg->st_cpu_throttled_time) rrdset_is_obsolete(cg->st_cpu_throttled_time); - if(cg->st_cpu_shares) rrdset_is_obsolete(cg->st_cpu_shares); - if(cg->st_mem) rrdset_is_obsolete(cg->st_mem); - if(cg->st_writeback) rrdset_is_obsolete(cg->st_writeback); - if(cg->st_mem_activity) rrdset_is_obsolete(cg->st_mem_activity); - if(cg->st_pgfaults) rrdset_is_obsolete(cg->st_pgfaults); - if(cg->st_mem_usage) rrdset_is_obsolete(cg->st_mem_usage); - 
if(cg->st_mem_usage_limit) rrdset_is_obsolete(cg->st_mem_usage_limit); - if(cg->st_mem_utilization) rrdset_is_obsolete(cg->st_mem_utilization); - if(cg->st_mem_failcnt) rrdset_is_obsolete(cg->st_mem_failcnt); - if(cg->st_io) rrdset_is_obsolete(cg->st_io); - if(cg->st_serviced_ops) rrdset_is_obsolete(cg->st_serviced_ops); - if(cg->st_throttle_io) rrdset_is_obsolete(cg->st_throttle_io); - if(cg->st_throttle_serviced_ops) rrdset_is_obsolete(cg->st_throttle_serviced_ops); - if(cg->st_queued_ops) rrdset_is_obsolete(cg->st_queued_ops); - if(cg->st_merged_ops) rrdset_is_obsolete(cg->st_merged_ops); - - freez(cg->filename_cpuset_cpus); - freez(cg->filename_cpu_cfs_period); - freez(cg->filename_cpu_cfs_quota); - freez(cg->filename_memory_limit); - freez(cg->filename_memoryswap_limit); - - free_cgroup_network_interfaces(cg); - - freez(cg->cpuacct_usage.cpu_percpu); - - freez(cg->cpuacct_stat.filename); - freez(cg->cpuacct_usage.filename); - freez(cg->cpuacct_cpu_throttling.filename); - freez(cg->cpuacct_cpu_shares.filename); - - arl_free(cg->memory.arl_base); - freez(cg->memory.filename_detailed); - freez(cg->memory.filename_failcnt); - freez(cg->memory.filename_usage_in_bytes); - freez(cg->memory.filename_msw_usage_in_bytes); - - freez(cg->io_service_bytes.filename); - freez(cg->io_serviced.filename); - - freez(cg->throttle_io_service_bytes.filename); - freez(cg->throttle_io_serviced.filename); - - freez(cg->io_merged.filename); - freez(cg->io_queued.filename); - - free_pressure(&cg->cpu_pressure); - free_pressure(&cg->io_pressure); - free_pressure(&cg->memory_pressure); - free_pressure(&cg->irq_pressure); - - freez(cg->id); - freez(cg->intermediate_id); - freez(cg->chart_id); - freez(cg->chart_title); - - rrdlabels_destroy(cg->chart_labels); - - freez(cg); - - cgroup_root_count--; -} - -// ---------------------------------------------------------------------------- - -static inline void discovery_rename_cgroup(struct cgroup *cg) { - if (!cg->pending_renames) { - return; - } - cg->pending_renames--; - - netdata_log_debug(D_CGROUP, "looking for the name of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, cg->chart_title); - netdata_log_debug(D_CGROUP, "executing command %s \"%s\" for cgroup '%s'", cgroups_rename_script, cg->intermediate_id, cg->chart_id); - pid_t cgroup_pid; - - FILE *fp_child_input, *fp_child_output; - (void)netdata_popen_raw_default_flags_and_environment(&cgroup_pid, &fp_child_input, &fp_child_output, cgroups_rename_script, cg->id, cg->intermediate_id); - if (!fp_child_output) { - collector_error("CGROUP: cannot popen(%s \"%s\", \"r\").", cgroups_rename_script, cg->intermediate_id); - cg->pending_renames = 0; - cg->processed = 1; - return; - } - - char buffer[CGROUP_CHARTID_LINE_MAX + 1]; - char *new_name = fgets(buffer, CGROUP_CHARTID_LINE_MAX, fp_child_output); - int exit_code = netdata_pclose(fp_child_input, fp_child_output, cgroup_pid); - - switch (exit_code) { - case 0: - cg->pending_renames = 0; - break; - - case 3: - cg->pending_renames = 0; - cg->processed = 1; - break; - } - - if (cg->pending_renames || cg->processed) - return; - if (!new_name || !*new_name || *new_name == '\n') - return; - if (!(new_name = trim(new_name))) - return; - - char *name = new_name; - - if (!cg->chart_labels) - cg->chart_labels = rrdlabels_create(); - // read the new labels and remove the obsolete ones - rrdlabels_unmark_all(cg->chart_labels); - name = cgroup_parse_resolved_name_and_labels(cg->chart_labels, new_name); - rrdlabels_remove_all_unmarked(cg->chart_labels); - - 
freez(cg->chart_title); - cg->chart_title = cgroup_title_strdupz(name); - - freez(cg->chart_id); - cg->chart_id = cgroup_chart_id_strdupz(name); - - substitute_dots_in_id(cg->chart_id); - cg->hash_chart = simple_hash(cg->chart_id); -} - -static void is_cgroup_procs_exist(netdata_ebpf_cgroup_shm_body_t *out, char *id) { - struct stat buf; - - snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_cpuset_base, id); - if (likely(stat(out->path, &buf) == 0)) { - return; - } - - snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_blkio_base, id); - if (likely(stat(out->path, &buf) == 0)) { - return; - } - - snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_memory_base, id); - if (likely(stat(out->path, &buf) == 0)) { - return; - } - - snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_devices_base, id); - if (likely(stat(out->path, &buf) == 0)) { - return; - } - - out->path[0] = '\0'; - out->enabled = 0; -} - -static inline void convert_cgroup_to_systemd_service(struct cgroup *cg) { - char buffer[CGROUP_CHARTID_LINE_MAX + 1]; - cg->options |= CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE; - strncpyz(buffer, cg->id, CGROUP_CHARTID_LINE_MAX); - char *s = buffer; - - // skip to the last slash - size_t len = strlen(s); - while (len--) { - if (unlikely(s[len] == '/')) { - break; - } - } - if (len) { - s = &s[len + 1]; - } - - // remove extension - len = strlen(s); - while (len--) { - if (unlikely(s[len] == '.')) { - break; - } - } - if (len) { - s[len] = '\0'; - } - - freez(cg->chart_title); - cg->chart_title = cgroup_title_strdupz(s); -} - -static inline struct cgroup *discovery_cgroup_add(const char *id) { - netdata_log_debug(D_CGROUP, "adding to list, cgroup with id '%s'", id); - - struct cgroup *cg = callocz(1, sizeof(struct cgroup)); - cg->id = strdupz(id); - cg->hash = simple_hash(cg->id); - cg->chart_title = cgroup_title_strdupz(id); - cg->intermediate_id = cgroup_chart_id_strdupz(id); - cg->chart_id = cgroup_chart_id_strdupz(id); - substitute_dots_in_id(cg->chart_id); - cg->hash_chart = simple_hash(cg->chart_id); - if (cgroup_use_unified_cgroups) { - cg->options |= CGROUP_OPTIONS_IS_UNIFIED; - } - - if (!discovered_cgroup_root) - discovered_cgroup_root = cg; - else { - struct cgroup *t; - for (t = discovered_cgroup_root; t->discovered_next; t = t->discovered_next) { - } - t->discovered_next = cg; - } - - return cg; -} - -static inline struct cgroup *discovery_cgroup_find(const char *id) { - netdata_log_debug(D_CGROUP, "searching for cgroup '%s'", id); - - uint32_t hash = simple_hash(id); - - struct cgroup *cg; - for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) { - if(hash == cg->hash && strcmp(id, cg->id) == 0) - break; - } - - netdata_log_debug(D_CGROUP, "cgroup '%s' %s in memory", id, (cg)?"found":"not found"); - return cg; -} - -static inline void discovery_find_cgroup_in_dir_callback(const char *dir) { - if (!dir || !*dir) { - dir = "/"; - } - netdata_log_debug(D_CGROUP, "examining cgroup dir '%s'", dir); - - struct cgroup *cg = discovery_cgroup_find(dir); - if (cg) { - cg->available = 1; - return; - } - - if (cgroup_root_count >= cgroup_root_max) { - collector_info("CGROUP: maximum number of cgroups reached (%d). 
Not adding cgroup '%s'", cgroup_root_count, dir); - return; - } - - if (cgroup_max_depth > 0) { - int depth = calc_cgroup_depth(dir); - if (depth > cgroup_max_depth) { - collector_info("CGROUP: '%s' is too deep (%d, while max is %d)", dir, depth, cgroup_max_depth); - return; - } - } - - cg = discovery_cgroup_add(dir); - cg->available = 1; - cg->first_time_seen = 1; - cgroup_root_count++; -} - -static inline int discovery_find_dir_in_subdirs(const char *base, const char *this, void (*callback)(const char *)) { - if(!this) this = base; - netdata_log_debug(D_CGROUP, "searching for directories in '%s' (base '%s')", this?this:"", base); - - size_t dirlen = strlen(this), baselen = strlen(base); - - int ret = -1; - int enabled = -1; - - const char *relative_path = &this[baselen]; - if(!*relative_path) relative_path = "/"; - - DIR *dir = opendir(this); - if(!dir) { - collector_error("CGROUP: cannot read directory '%s'", base); - return ret; - } - ret = 1; - - callback(relative_path); - - struct dirent *de = NULL; - while((de = readdir(dir))) { - if(de->d_type == DT_DIR - && ( - (de->d_name[0] == '.' && de->d_name[1] == '\0') - || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0') - )) - continue; - - if(de->d_type == DT_DIR) { - if(enabled == -1) { - const char *r = relative_path; - if(*r == '\0') r = "/"; - - // do not decent in directories we are not interested - enabled = matches_search_cgroup_paths(r); - } - - if(enabled) { - char *s = mallocz(dirlen + strlen(de->d_name) + 2); - strcpy(s, this); - strcat(s, "/"); - strcat(s, de->d_name); - int ret2 = discovery_find_dir_in_subdirs(base, s, callback); - if(ret2 > 0) ret += ret2; - freez(s); - } - } - } - - closedir(dir); - return ret; -} - -static inline void discovery_mark_all_cgroups_as_unavailable() { - netdata_log_debug(D_CGROUP, "marking all cgroups as not available"); - struct cgroup *cg; - for (cg = discovered_cgroup_root; cg; cg = cg->discovered_next) { - cg->available = 0; - } -} - -static inline void discovery_update_filenames() { - struct cgroup *cg; - struct stat buf; - for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) { - if(unlikely(!cg->available || !cg->enabled || cg->pending_renames)) - continue; - - netdata_log_debug(D_CGROUP, "checking paths for cgroup '%s'", cg->id); - - // check for newly added cgroups - // and update the filenames they read - char filename[FILENAME_MAX + 1]; - if(!cgroup_use_unified_cgroups) { - if(unlikely(cgroup_enable_cpuacct_stat && !cg->cpuacct_stat.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.stat", cgroup_cpuacct_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->cpuacct_stat.filename = strdupz(filename); - cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat; - snprintfz(filename, FILENAME_MAX, "%s%s/cpuset.cpus", cgroup_cpuset_base, cg->id); - cg->filename_cpuset_cpus = strdupz(filename); - snprintfz(filename, FILENAME_MAX, "%s%s/cpu.cfs_period_us", cgroup_cpuacct_base, cg->id); - cg->filename_cpu_cfs_period = strdupz(filename); - snprintfz(filename, FILENAME_MAX, "%s%s/cpu.cfs_quota_us", cgroup_cpuacct_base, cg->id); - cg->filename_cpu_cfs_quota = strdupz(filename); - netdata_log_debug(D_CGROUP, "cpuacct.stat filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_stat.filename); - } - else - netdata_log_debug(D_CGROUP, "cpuacct.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_cpuacct_usage && !cg->cpuacct_usage.filename && !is_cgroup_systemd_service(cg))) { - snprintfz(filename, 
FILENAME_MAX, "%s%s/cpuacct.usage_percpu", cgroup_cpuacct_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->cpuacct_usage.filename = strdupz(filename); - cg->cpuacct_usage.enabled = cgroup_enable_cpuacct_usage; - netdata_log_debug(D_CGROUP, "cpuacct.usage_percpu filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_usage.filename); - } - else - netdata_log_debug(D_CGROUP, "cpuacct.usage_percpu file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - if(unlikely(cgroup_enable_cpuacct_cpu_throttling && !cg->cpuacct_cpu_throttling.filename && !is_cgroup_systemd_service(cg))) { - snprintfz(filename, FILENAME_MAX, "%s%s/cpu.stat", cgroup_cpuacct_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->cpuacct_cpu_throttling.filename = strdupz(filename); - cg->cpuacct_cpu_throttling.enabled = cgroup_enable_cpuacct_cpu_throttling; - netdata_log_debug(D_CGROUP, "cpu.stat filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_cpu_throttling.filename); - } - else - netdata_log_debug(D_CGROUP, "cpu.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - if (unlikely( - cgroup_enable_cpuacct_cpu_shares && !cg->cpuacct_cpu_shares.filename && - !is_cgroup_systemd_service(cg))) { - snprintfz(filename, FILENAME_MAX, "%s%s/cpu.shares", cgroup_cpuacct_base, cg->id); - if (likely(stat(filename, &buf) != -1)) { - cg->cpuacct_cpu_shares.filename = strdupz(filename); - cg->cpuacct_cpu_shares.enabled = cgroup_enable_cpuacct_cpu_shares; - netdata_log_debug( - D_CGROUP, "cpu.shares filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_cpu_shares.filename); - } else - netdata_log_debug(D_CGROUP, "cpu.shares file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely((cgroup_enable_detailed_memory || cgroup_used_memory) && !cg->memory.filename_detailed && (cgroup_used_memory || cgroup_enable_systemd_services_detailed_memory || !is_cgroup_systemd_service(cg)))) { - snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_memory_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->memory.filename_detailed = strdupz(filename); - cg->memory.enabled_detailed = (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_AUTO; - netdata_log_debug(D_CGROUP, "memory.stat filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_detailed); - } - else - netdata_log_debug(D_CGROUP, "memory.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) { - snprintfz(filename, FILENAME_MAX, "%s%s/memory.usage_in_bytes", cgroup_memory_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->memory.filename_usage_in_bytes = strdupz(filename); - cg->memory.enabled_usage_in_bytes = cgroup_enable_memory; - netdata_log_debug(D_CGROUP, "memory.usage_in_bytes filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_usage_in_bytes); - snprintfz(filename, FILENAME_MAX, "%s%s/memory.limit_in_bytes", cgroup_memory_base, cg->id); - cg->filename_memory_limit = strdupz(filename); - } - else - netdata_log_debug(D_CGROUP, "memory.usage_in_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) { - snprintfz(filename, FILENAME_MAX, "%s%s/memory.memsw.usage_in_bytes", cgroup_memory_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->memory.filename_msw_usage_in_bytes = strdupz(filename); - cg->memory.enabled_msw_usage_in_bytes 
= cgroup_enable_swap; - snprintfz(filename, FILENAME_MAX, "%s%s/memory.memsw.limit_in_bytes", cgroup_memory_base, cg->id); - cg->filename_memoryswap_limit = strdupz(filename); - netdata_log_debug(D_CGROUP, "memory.msw_usage_in_bytes filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_msw_usage_in_bytes); - } - else - netdata_log_debug(D_CGROUP, "memory.msw_usage_in_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_memory_failcnt && !cg->memory.filename_failcnt)) { - snprintfz(filename, FILENAME_MAX, "%s%s/memory.failcnt", cgroup_memory_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->memory.filename_failcnt = strdupz(filename); - cg->memory.enabled_failcnt = cgroup_enable_memory_failcnt; - netdata_log_debug(D_CGROUP, "memory.failcnt filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_failcnt); - } - else - netdata_log_debug(D_CGROUP, "memory.failcnt file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes_recursive", cgroup_blkio_base, cg->id); - if (unlikely(stat(filename, &buf) != -1)) { - cg->io_service_bytes.filename = strdupz(filename); - cg->io_service_bytes.enabled = cgroup_enable_blkio_io; - netdata_log_debug(D_CGROUP, "blkio.io_service_bytes_recursive filename for cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename); - } else { - netdata_log_debug(D_CGROUP, "blkio.io_service_bytes_recursive file for cgroup '%s': '%s' does not exist.", cg->id, filename); - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes", cgroup_blkio_base, cg->id); - if (likely(stat(filename, &buf) != -1)) { - cg->io_service_bytes.filename = strdupz(filename); - cg->io_service_bytes.enabled = cgroup_enable_blkio_io; - netdata_log_debug(D_CGROUP, "blkio.io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename); - } else { - netdata_log_debug(D_CGROUP, "blkio.io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - } - } - - if (unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced_recursive", cgroup_blkio_base, cg->id); - if (unlikely(stat(filename, &buf) != -1)) { - cg->io_serviced.filename = strdupz(filename); - cg->io_serviced.enabled = cgroup_enable_blkio_ops; - netdata_log_debug(D_CGROUP, "blkio.io_serviced_recursive filename for cgroup '%s': '%s'", cg->id, cg->io_serviced.filename); - } else { - netdata_log_debug(D_CGROUP, "blkio.io_serviced_recursive file for cgroup '%s': '%s' does not exist.", cg->id, filename); - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced", cgroup_blkio_base, cg->id); - if (likely(stat(filename, &buf) != -1)) { - cg->io_serviced.filename = strdupz(filename); - cg->io_serviced.enabled = cgroup_enable_blkio_ops; - netdata_log_debug(D_CGROUP, "blkio.io_serviced filename for cgroup '%s': '%s'", cg->id, cg->io_serviced.filename); - } else { - netdata_log_debug(D_CGROUP, "blkio.io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - } - } - - if (unlikely(cgroup_enable_blkio_throttle_io && !cg->throttle_io_service_bytes.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes_recursive", cgroup_blkio_base, cg->id); - if (unlikely(stat(filename, &buf) != -1)) { - cg->throttle_io_service_bytes.filename = strdupz(filename); - 
cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io; - netdata_log_debug(D_CGROUP,"blkio.throttle.io_service_bytes_recursive filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_service_bytes.filename); - } else { - netdata_log_debug(D_CGROUP, "blkio.throttle.io_service_bytes_recursive file for cgroup '%s': '%s' does not exist.", cg->id, filename); - snprintfz( - filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes", cgroup_blkio_base, cg->id); - if (likely(stat(filename, &buf) != -1)) { - cg->throttle_io_service_bytes.filename = strdupz(filename); - cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io; - netdata_log_debug(D_CGROUP, "blkio.throttle.io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_service_bytes.filename); - } else { - netdata_log_debug(D_CGROUP, "blkio.throttle.io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - } - } - - if (unlikely(cgroup_enable_blkio_throttle_ops && !cg->throttle_io_serviced.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced_recursive", cgroup_blkio_base, cg->id); - if (unlikely(stat(filename, &buf) != -1)) { - cg->throttle_io_serviced.filename = strdupz(filename); - cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops; - netdata_log_debug(D_CGROUP, "blkio.throttle.io_serviced_recursive filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_serviced.filename); - } else { - netdata_log_debug(D_CGROUP, "blkio.throttle.io_serviced_recursive file for cgroup '%s': '%s' does not exist.", cg->id, filename); - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced", cgroup_blkio_base, cg->id); - if (likely(stat(filename, &buf) != -1)) { - cg->throttle_io_serviced.filename = strdupz(filename); - cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops; - netdata_log_debug(D_CGROUP, "blkio.throttle.io_serviced filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_serviced.filename); - } else { - netdata_log_debug(D_CGROUP, "blkio.throttle.io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - } - } - - if (unlikely(cgroup_enable_blkio_merged_ops && !cg->io_merged.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged_recursive", cgroup_blkio_base, cg->id); - if (unlikely(stat(filename, &buf) != -1)) { - cg->io_merged.filename = strdupz(filename); - cg->io_merged.enabled = cgroup_enable_blkio_merged_ops; - netdata_log_debug(D_CGROUP, "blkio.io_merged_recursive filename for cgroup '%s': '%s'", cg->id, cg->io_merged.filename); - } else { - netdata_log_debug(D_CGROUP, "blkio.io_merged_recursive file for cgroup '%s': '%s' does not exist.", cg->id, filename); - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged", cgroup_blkio_base, cg->id); - if (likely(stat(filename, &buf) != -1)) { - cg->io_merged.filename = strdupz(filename); - cg->io_merged.enabled = cgroup_enable_blkio_merged_ops; - netdata_log_debug(D_CGROUP, "blkio.io_merged filename for cgroup '%s': '%s'", cg->id, cg->io_merged.filename); - } else { - netdata_log_debug(D_CGROUP, "blkio.io_merged file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - } - } - - if (unlikely(cgroup_enable_blkio_queued_ops && !cg->io_queued.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued_recursive", cgroup_blkio_base, cg->id); - if (unlikely(stat(filename, &buf) != -1)) { - cg->io_queued.filename = strdupz(filename); - cg->io_queued.enabled = 
cgroup_enable_blkio_queued_ops; - netdata_log_debug(D_CGROUP, "blkio.io_queued_recursive filename for cgroup '%s': '%s'", cg->id, cg->io_queued.filename); - } else { - netdata_log_debug(D_CGROUP, "blkio.io_queued_recursive file for cgroup '%s': '%s' does not exist.", cg->id, filename); - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued", cgroup_blkio_base, cg->id); - if (likely(stat(filename, &buf) != -1)) { - cg->io_queued.filename = strdupz(filename); - cg->io_queued.enabled = cgroup_enable_blkio_queued_ops; - netdata_log_debug(D_CGROUP, "blkio.io_queued filename for cgroup '%s': '%s'", cg->id, cg->io_queued.filename); - } else { - netdata_log_debug(D_CGROUP, "blkio.io_queued file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - } - } - } - else if(likely(cgroup_unified_exist)) { - if(unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/io.stat", cgroup_unified_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->io_service_bytes.filename = strdupz(filename); - cg->io_service_bytes.enabled = cgroup_enable_blkio_io; - netdata_log_debug(D_CGROUP, "io.stat filename for unified cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename); - } else - netdata_log_debug(D_CGROUP, "io.stat file for unified cgroup '%s': '%s' does not exist.", cg->id, filename); - } - if (unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/io.stat", cgroup_unified_base, cg->id); - if (likely(stat(filename, &buf) != -1)) { - cg->io_serviced.filename = strdupz(filename); - cg->io_serviced.enabled = cgroup_enable_blkio_ops; - netdata_log_debug(D_CGROUP, "io.stat filename for unified cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename); - } else - netdata_log_debug(D_CGROUP, "io.stat file for unified cgroup '%s': '%s' does not exist.", cg->id, filename); - } - if (unlikely( - (cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_cpu_throttling) && - !cg->cpuacct_stat.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/cpu.stat", cgroup_unified_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->cpuacct_stat.filename = strdupz(filename); - cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat; - cg->cpuacct_cpu_throttling.enabled = cgroup_enable_cpuacct_cpu_throttling; - cg->filename_cpuset_cpus = NULL; - cg->filename_cpu_cfs_period = NULL; - snprintfz(filename, FILENAME_MAX, "%s%s/cpu.max", cgroup_unified_base, cg->id); - cg->filename_cpu_cfs_quota = strdupz(filename); - netdata_log_debug(D_CGROUP, "cpu.stat filename for unified cgroup '%s': '%s'", cg->id, cg->cpuacct_stat.filename); - } - else - netdata_log_debug(D_CGROUP, "cpu.stat file for unified cgroup '%s': '%s' does not exist.", cg->id, filename); - } - if (unlikely(cgroup_enable_cpuacct_cpu_shares && !cg->cpuacct_cpu_shares.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/cpu.weight", cgroup_unified_base, cg->id); - if (likely(stat(filename, &buf) != -1)) { - cg->cpuacct_cpu_shares.filename = strdupz(filename); - cg->cpuacct_cpu_shares.enabled = cgroup_enable_cpuacct_cpu_shares; - netdata_log_debug(D_CGROUP, "cpu.weight filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_cpu_shares.filename); - } else - netdata_log_debug(D_CGROUP, "cpu.weight file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely((cgroup_enable_detailed_memory || cgroup_used_memory) && !cg->memory.filename_detailed && (cgroup_used_memory || cgroup_enable_systemd_services_detailed_memory || 
!is_cgroup_systemd_service(cg)))) { - snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_unified_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->memory.filename_detailed = strdupz(filename); - cg->memory.enabled_detailed = (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_AUTO; - netdata_log_debug(D_CGROUP, "memory.stat filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_detailed); - } - else - netdata_log_debug(D_CGROUP, "memory.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) { - snprintfz(filename, FILENAME_MAX, "%s%s/memory.current", cgroup_unified_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->memory.filename_usage_in_bytes = strdupz(filename); - cg->memory.enabled_usage_in_bytes = cgroup_enable_memory; - netdata_log_debug(D_CGROUP, "memory.current filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_usage_in_bytes); - snprintfz(filename, FILENAME_MAX, "%s%s/memory.max", cgroup_unified_base, cg->id); - cg->filename_memory_limit = strdupz(filename); - } - else - netdata_log_debug(D_CGROUP, "memory.current file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) { - snprintfz(filename, FILENAME_MAX, "%s%s/memory.swap.current", cgroup_unified_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->memory.filename_msw_usage_in_bytes = strdupz(filename); - cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap; - snprintfz(filename, FILENAME_MAX, "%s%s/memory.swap.max", cgroup_unified_base, cg->id); - cg->filename_memoryswap_limit = strdupz(filename); - netdata_log_debug(D_CGROUP, "memory.swap.current filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_msw_usage_in_bytes); - } - else - netdata_log_debug(D_CGROUP, "memory.swap file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if (unlikely(cgroup_enable_pressure_cpu && !cg->cpu_pressure.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/cpu.pressure", cgroup_unified_base, cg->id); - if (likely(stat(filename, &buf) != -1)) { - cg->cpu_pressure.filename = strdupz(filename); - cg->cpu_pressure.some.enabled = cgroup_enable_pressure_cpu; - cg->cpu_pressure.full.enabled = CONFIG_BOOLEAN_NO; - netdata_log_debug(D_CGROUP, "cpu.pressure filename for cgroup '%s': '%s'", cg->id, cg->cpu_pressure.filename); - } else { - netdata_log_debug(D_CGROUP, "cpu.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename); - } - } - - if (unlikely((cgroup_enable_pressure_io_some || cgroup_enable_pressure_io_full) && !cg->io_pressure.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/io.pressure", cgroup_unified_base, cg->id); - if (likely(stat(filename, &buf) != -1)) { - cg->io_pressure.filename = strdupz(filename); - cg->io_pressure.some.enabled = cgroup_enable_pressure_io_some; - cg->io_pressure.full.enabled = cgroup_enable_pressure_io_full; - netdata_log_debug(D_CGROUP, "io.pressure filename for cgroup '%s': '%s'", cg->id, cg->io_pressure.filename); - } else { - netdata_log_debug(D_CGROUP, "io.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename); - } - } - - if (unlikely((cgroup_enable_pressure_memory_some || cgroup_enable_pressure_memory_full) && !cg->memory_pressure.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/memory.pressure", cgroup_unified_base, cg->id); - if (likely(stat(filename, 
&buf) != -1)) { - cg->memory_pressure.filename = strdupz(filename); - cg->memory_pressure.some.enabled = cgroup_enable_pressure_memory_some; - cg->memory_pressure.full.enabled = cgroup_enable_pressure_memory_full; - netdata_log_debug(D_CGROUP, "memory.pressure filename for cgroup '%s': '%s'", cg->id, cg->memory_pressure.filename); - } else { - netdata_log_debug(D_CGROUP, "memory.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename); - } - } - - if (unlikely((cgroup_enable_pressure_irq_some || cgroup_enable_pressure_irq_full) && !cg->irq_pressure.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/irq.pressure", cgroup_unified_base, cg->id); - if (likely(stat(filename, &buf) != -1)) { - cg->irq_pressure.filename = strdupz(filename); - cg->irq_pressure.some.enabled = cgroup_enable_pressure_irq_some; - cg->irq_pressure.full.enabled = cgroup_enable_pressure_irq_full; - netdata_log_debug(D_CGROUP, "irq.pressure filename for cgroup '%s': '%s'", cg->id, cg->irq_pressure.filename); - } else { - netdata_log_debug(D_CGROUP, "irq.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename); - } - } - } - } -} - -static inline void discovery_cleanup_all_cgroups() { - struct cgroup *cg = discovered_cgroup_root, *last = NULL; - - for(; cg ;) { - if(!cg->available) { - // enable the first duplicate cgroup - { - struct cgroup *t; - for(t = discovered_cgroup_root; t ; t = t->discovered_next) { - if(t != cg && t->available && !t->enabled && t->options & CGROUP_OPTIONS_DISABLED_DUPLICATE && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) { - netdata_log_debug(D_CGROUP, "Enabling duplicate of cgroup '%s' with id '%s', because the original with id '%s' stopped.", t->chart_id, t->id, cg->id); - t->enabled = 1; - t->options &= ~CGROUP_OPTIONS_DISABLED_DUPLICATE; - break; - } - } - } - - if(!last) - discovered_cgroup_root = cg->discovered_next; - else - last->discovered_next = cg->discovered_next; - - cgroup_free(cg); - - if(!last) - cg = discovered_cgroup_root; - else - cg = last->discovered_next; - } - else { - last = cg; - cg = cg->discovered_next; - } - } -} - -static inline void discovery_copy_discovered_cgroups_to_reader() { - netdata_log_debug(D_CGROUP, "copy discovered cgroups to the main group list"); - - struct cgroup *cg; - - for (cg = discovered_cgroup_root; cg; cg = cg->discovered_next) { - cg->next = cg->discovered_next; - } - - cgroup_root = discovered_cgroup_root; -} - -static inline void discovery_share_cgroups_with_ebpf() { - struct cgroup *cg; - int count; - struct stat buf; - - if (shm_mutex_cgroup_ebpf == SEM_FAILED) { - return; - } - sem_wait(shm_mutex_cgroup_ebpf); - - for (cg = cgroup_root, count = 0; cg; cg = cg->next, count++) { - netdata_ebpf_cgroup_shm_body_t *ptr = &shm_cgroup_ebpf.body[count]; - char *prefix = (is_cgroup_systemd_service(cg)) ? 
"" : "cgroup_"; - snprintfz(ptr->name, CGROUP_EBPF_NAME_SHARED_LENGTH - 1, "%s%s", prefix, cg->chart_title); - ptr->hash = simple_hash(ptr->name); - ptr->options = cg->options; - ptr->enabled = cg->enabled; - if (cgroup_use_unified_cgroups) { - snprintfz(ptr->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_unified_base, cg->id); - if (likely(stat(ptr->path, &buf) == -1)) { - ptr->path[0] = '\0'; - ptr->enabled = 0; - } - } else { - is_cgroup_procs_exist(ptr, cg->id); - } - - netdata_log_debug(D_CGROUP, "cgroup shared: NAME=%s, ENABLED=%d", ptr->name, ptr->enabled); - } - - shm_cgroup_ebpf.header->cgroup_root_count = count; - sem_post(shm_mutex_cgroup_ebpf); -} - -static inline void discovery_find_all_cgroups_v1() { - if (cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_usage) { - if (discovery_find_dir_in_subdirs(cgroup_cpuacct_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) { - cgroup_enable_cpuacct_stat = cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO; - collector_error("CGROUP: disabled cpu statistics."); - } - } - - if (cgroup_enable_blkio_io || cgroup_enable_blkio_ops || cgroup_enable_blkio_throttle_io || - cgroup_enable_blkio_throttle_ops || cgroup_enable_blkio_merged_ops || cgroup_enable_blkio_queued_ops) { - if (discovery_find_dir_in_subdirs(cgroup_blkio_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) { - cgroup_enable_blkio_io = cgroup_enable_blkio_ops = cgroup_enable_blkio_throttle_io = - cgroup_enable_blkio_throttle_ops = cgroup_enable_blkio_merged_ops = cgroup_enable_blkio_queued_ops = - CONFIG_BOOLEAN_NO; - collector_error("CGROUP: disabled blkio statistics."); - } - } - - if (cgroup_enable_memory || cgroup_enable_detailed_memory || cgroup_enable_swap || cgroup_enable_memory_failcnt) { - if (discovery_find_dir_in_subdirs(cgroup_memory_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) { - cgroup_enable_memory = cgroup_enable_detailed_memory = cgroup_enable_swap = cgroup_enable_memory_failcnt = - CONFIG_BOOLEAN_NO; - collector_error("CGROUP: disabled memory statistics."); - } - } - - if (cgroup_search_in_devices) { - if (discovery_find_dir_in_subdirs(cgroup_devices_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) { - cgroup_search_in_devices = 0; - collector_error("CGROUP: disabled devices statistics."); - } - } -} - -static inline void discovery_find_all_cgroups_v2() { - if (discovery_find_dir_in_subdirs(cgroup_unified_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) { - cgroup_unified_exist = CONFIG_BOOLEAN_NO; - collector_error("CGROUP: disabled unified cgroups statistics."); - } -} - -static int is_digits_only(const char *s) { - do { - if (!isdigit(*s++)) { - return 0; - } - } while (*s); - - return 1; -} - -static inline void discovery_process_first_time_seen_cgroup(struct cgroup *cg) { - if (!cg->first_time_seen) { - return; - } - cg->first_time_seen = 0; - - char comm[TASK_COMM_LEN + 1]; - - if (cg->container_orchestrator == CGROUPS_ORCHESTRATOR_UNSET) { - if (strstr(cg->id, "kubepods")) { - cg->container_orchestrator = CGROUPS_ORCHESTRATOR_K8S; - } else { - cg->container_orchestrator = CGROUPS_ORCHESTRATOR_UNKNOWN; - } - } - - if (is_inside_k8s && !k8s_get_container_first_proc_comm(cg->id, comm)) { - // container initialization may take some time when CPU % is high - // seen on GKE: comm is '6' before 'runc:[2:INIT]' (dunno if it could be another number) - if (is_digits_only(comm) || matches_entrypoint_parent_process_comm(comm)) { - cg->first_time_seen = 1; - return; - } - if (!strcmp(comm, "pause")) { - // a container 
that holds the network namespace for the pod - // we don't need to collect its metrics - cg->processed = 1; - return; - } - } - - if (cgroup_enable_systemd_services && matches_systemd_services_cgroups(cg->id)) { - netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') matches 'cgroups to match as systemd services'", cg->id, cg->chart_title); - convert_cgroup_to_systemd_service(cg); - return; - } - - if (matches_enabled_cgroup_renames(cg->id)) { - netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') matches 'run script to rename cgroups matching', will try to rename it", cg->id, cg->chart_title); - if (is_inside_k8s && k8s_is_container(cg->id)) { - // it may take up to a minute for the K8s API to return data for the container - // tested on AWS K8s cluster with 100% CPU utilization - cg->pending_renames = 9; // 1.5 minute - } else { - cg->pending_renames = 2; - } - } -} - -static int discovery_is_cgroup_duplicate(struct cgroup *cg) { - // https://github.com/netdata/netdata/issues/797#issuecomment-241248884 - struct cgroup *c; - for (c = discovered_cgroup_root; c; c = c->discovered_next) { - if (c != cg && c->enabled && c->hash_chart == cg->hash_chart && !strcmp(c->chart_id, cg->chart_id)) { - collector_error("CGROUP: chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.", cg->chart_id, c->id, cg->id); - return 1; - } - } - return 0; -} - -static inline void discovery_process_cgroup(struct cgroup *cg) { - if (!cg) { - netdata_log_debug(D_CGROUP, "discovery_process_cgroup() received NULL"); - return; - } - if (!cg->available || cg->processed) { - return; - } - - if (cg->first_time_seen) { - worker_is_busy(WORKER_DISCOVERY_PROCESS_FIRST_TIME); - discovery_process_first_time_seen_cgroup(cg); - if (unlikely(cg->first_time_seen || cg->processed)) { - return; - } - } - - if (cg->pending_renames) { - worker_is_busy(WORKER_DISCOVERY_PROCESS_RENAME); - discovery_rename_cgroup(cg); - if (unlikely(cg->pending_renames || cg->processed)) { - return; - } - } - - cg->processed = 1; - - if ((strlen(cg->chart_id) + strlen(cgroup_chart_id_prefix)) >= RRD_ID_LENGTH_MAX) { - collector_info("cgroup '%s' (chart id '%s') disabled because chart_id exceeds the limit (RRD_ID_LENGTH_MAX)", cg->id, cg->chart_id); - return; - } - - if (is_cgroup_systemd_service(cg)) { - cg->enabled = 1; - return; - } - - if (!(cg->enabled = matches_enabled_cgroup_names(cg->chart_title))) { - netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') disabled by 'enable by default cgroups names matching'", cg->id, cg->chart_title); - return; - } - - if (!(cg->enabled = matches_enabled_cgroup_paths(cg->id))) { - netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') disabled by 'enable by default cgroups matching'", cg->id, cg->chart_title); - return; - } - - if (discovery_is_cgroup_duplicate(cg)) { - cg->enabled = 0; - cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE; - return; - } - - if (!cg->chart_labels) - cg->chart_labels = rrdlabels_create(); - - if (!k8s_is_kubepod(cg)) { - rrdlabels_add(cg->chart_labels, "cgroup_name", cg->chart_id, RRDLABEL_SRC_AUTO); - if (!dictionary_get(cg->chart_labels, "image")) { - rrdlabels_add(cg->chart_labels, "image", "", RRDLABEL_SRC_AUTO); - } - } - - worker_is_busy(WORKER_DISCOVERY_PROCESS_NETWORK); - read_cgroup_network_interfaces(cg); -} - -static inline void discovery_find_all_cgroups() { - netdata_log_debug(D_CGROUP, "searching for cgroups"); - - worker_is_busy(WORKER_DISCOVERY_INIT); - discovery_mark_all_cgroups_as_unavailable(); - - 
worker_is_busy(WORKER_DISCOVERY_FIND); - if (!cgroup_use_unified_cgroups) { - discovery_find_all_cgroups_v1(); - } else { - discovery_find_all_cgroups_v2(); - } - - struct cgroup *cg; - for (cg = discovered_cgroup_root; cg; cg = cg->discovered_next) { - worker_is_busy(WORKER_DISCOVERY_PROCESS); - discovery_process_cgroup(cg); - } - - worker_is_busy(WORKER_DISCOVERY_UPDATE); - discovery_update_filenames(); - - worker_is_busy(WORKER_DISCOVERY_LOCK); - uv_mutex_lock(&cgroup_root_mutex); - - worker_is_busy(WORKER_DISCOVERY_CLEANUP); - discovery_cleanup_all_cgroups(); - - worker_is_busy(WORKER_DISCOVERY_COPY); - discovery_copy_discovered_cgroups_to_reader(); - - uv_mutex_unlock(&cgroup_root_mutex); - - worker_is_busy(WORKER_DISCOVERY_SHARE); - discovery_share_cgroups_with_ebpf(); - - netdata_log_debug(D_CGROUP, "done searching for cgroups"); -} - -static void cgroup_discovery_cleanup(void *ptr) { - UNUSED(ptr); - - discovery_thread.exited = 1; - worker_unregister(); - service_exits(); -} - -void cgroup_discovery_worker(void *ptr) -{ - UNUSED(ptr); - - netdata_thread_cleanup_push(cgroup_discovery_cleanup, ptr); - - worker_register("CGROUPSDISC"); - worker_register_job_name(WORKER_DISCOVERY_INIT, "init"); - worker_register_job_name(WORKER_DISCOVERY_FIND, "find"); - worker_register_job_name(WORKER_DISCOVERY_PROCESS, "process"); - worker_register_job_name(WORKER_DISCOVERY_PROCESS_RENAME, "rename"); - worker_register_job_name(WORKER_DISCOVERY_PROCESS_NETWORK, "network"); - worker_register_job_name(WORKER_DISCOVERY_PROCESS_FIRST_TIME, "new"); - worker_register_job_name(WORKER_DISCOVERY_UPDATE, "update"); - worker_register_job_name(WORKER_DISCOVERY_CLEANUP, "cleanup"); - worker_register_job_name(WORKER_DISCOVERY_COPY, "copy"); - worker_register_job_name(WORKER_DISCOVERY_SHARE, "share"); - worker_register_job_name(WORKER_DISCOVERY_LOCK, "lock"); - - entrypoint_parent_process_comm = simple_pattern_create( - " runc:[* " // http://terenceli.github.io/%E6%8A%80%E6%9C%AF/2021/12/28/runc-internals-3) - " exe ", // https://github.com/falcosecurity/falco/blob/9d41b0a151b83693929d3a9c84f7c5c85d070d3a/rules/falco_rules.yaml#L1961 - NULL, - SIMPLE_PATTERN_EXACT, true); - - while (service_running(SERVICE_COLLECTORS)) { - worker_is_idle(); - - uv_mutex_lock(&discovery_thread.mutex); - while (!discovery_thread.start_discovery && service_running(SERVICE_COLLECTORS)) - uv_cond_wait(&discovery_thread.cond_var, &discovery_thread.mutex); - discovery_thread.start_discovery = 0; - uv_mutex_unlock(&discovery_thread.mutex); - - if (unlikely(!service_running(SERVICE_COLLECTORS))) - break; - - discovery_find_all_cgroups(); - } - - netdata_thread_cleanup_pop(1); -} - -// ---------------------------------------------------------------------------- -// generate charts - -#define CHART_TITLE_MAX 300 - -void update_systemd_services_charts( - int update_every - , int do_cpu - , int do_mem_usage - , int do_mem_detailed - , int do_mem_failcnt - , int do_swap_usage - , int do_io - , int do_io_ops - , int do_throttle_io - , int do_throttle_ops - , int do_queued_ops - , int do_merged_ops -) { - static RRDSET - *st_cpu = NULL, - *st_mem_usage = NULL, - *st_mem_failcnt = NULL, - *st_swap_usage = NULL, - - *st_mem_detailed_cache = NULL, - *st_mem_detailed_rss = NULL, - *st_mem_detailed_mapped = NULL, - *st_mem_detailed_writeback = NULL, - *st_mem_detailed_pgfault = NULL, - *st_mem_detailed_pgmajfault = NULL, - *st_mem_detailed_pgpgin = NULL, - *st_mem_detailed_pgpgout = NULL, - - *st_io_read = NULL, - *st_io_serviced_read = NULL, - 
*st_throttle_io_read = NULL, - *st_throttle_ops_read = NULL, - *st_queued_ops_read = NULL, - *st_merged_ops_read = NULL, - - *st_io_write = NULL, - *st_io_serviced_write = NULL, - *st_throttle_io_write = NULL, - *st_throttle_ops_write = NULL, - *st_queued_ops_write = NULL, - *st_merged_ops_write = NULL; - - // create the charts - - if (unlikely(do_cpu && !st_cpu)) { - char title[CHART_TITLE_MAX + 1]; - snprintfz(title, CHART_TITLE_MAX, "Systemd Services CPU utilization (100%% = 1 core)"); - - st_cpu = rrdset_create_localhost( - "services" - , "cpu" - , NULL - , "cpu" - , "services.cpu" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD - , update_every - , RRDSET_TYPE_STACKED - ); - } - - if (unlikely(do_mem_usage && !st_mem_usage)) { - st_mem_usage = rrdset_create_localhost( - "services" - , "mem_usage" - , NULL - , "mem" - , "services.mem_usage" - , "Systemd Services Used Memory" - , "MiB" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 10 - , update_every - , RRDSET_TYPE_STACKED - ); - } - - if(likely(do_mem_detailed)) { - if(unlikely(!st_mem_detailed_rss)) { - st_mem_detailed_rss = rrdset_create_localhost( - "services" - , "mem_rss" - , NULL - , "mem" - , "services.mem_rss" - , "Systemd Services RSS Memory" - , "MiB" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 20 - , update_every - , RRDSET_TYPE_STACKED - ); - } - - if(unlikely(!st_mem_detailed_mapped)) { - st_mem_detailed_mapped = rrdset_create_localhost( - "services" - , "mem_mapped" - , NULL - , "mem" - , "services.mem_mapped" - , "Systemd Services Mapped Memory" - , "MiB" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 30 - , update_every - , RRDSET_TYPE_STACKED - ); - } - - if(unlikely(!st_mem_detailed_cache)) { - st_mem_detailed_cache = rrdset_create_localhost( - "services" - , "mem_cache" - , NULL - , "mem" - , "services.mem_cache" - , "Systemd Services Cache Memory" - , "MiB" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 40 - , update_every - , RRDSET_TYPE_STACKED - ); - } - - if(unlikely(!st_mem_detailed_writeback)) { - st_mem_detailed_writeback = rrdset_create_localhost( - "services" - , "mem_writeback" - , NULL - , "mem" - , "services.mem_writeback" - , "Systemd Services Writeback Memory" - , "MiB" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 50 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - - if(unlikely(!st_mem_detailed_pgfault)) { - st_mem_detailed_pgfault = rrdset_create_localhost( - "services" - , "mem_pgfault" - , NULL - , "mem" - , "services.mem_pgfault" - , "Systemd Services Memory Minor Page Faults" - , "MiB/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 60 - , update_every - , RRDSET_TYPE_STACKED - ); - } - - if(unlikely(!st_mem_detailed_pgmajfault)) { - st_mem_detailed_pgmajfault = rrdset_create_localhost( - "services" - , "mem_pgmajfault" - , NULL - , "mem" - , "services.mem_pgmajfault" - , "Systemd Services Memory Major Page Faults" - , "MiB/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 70 - , update_every - , RRDSET_TYPE_STACKED - ); - } - - if(unlikely(!st_mem_detailed_pgpgin)) { - st_mem_detailed_pgpgin = 
rrdset_create_localhost( - "services" - , "mem_pgpgin" - , NULL - , "mem" - , "services.mem_pgpgin" - , "Systemd Services Memory Charging Activity" - , "MiB/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 80 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - - if(unlikely(!st_mem_detailed_pgpgout)) { - st_mem_detailed_pgpgout = rrdset_create_localhost( - "services" - , "mem_pgpgout" - , NULL - , "mem" - , "services.mem_pgpgout" - , "Systemd Services Memory Uncharging Activity" - , "MiB/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 90 - , update_every - , RRDSET_TYPE_STACKED - ); - } - } - - if(unlikely(do_mem_failcnt && !st_mem_failcnt)) { - st_mem_failcnt = rrdset_create_localhost( - "services" - , "mem_failcnt" - , NULL - , "mem" - , "services.mem_failcnt" - , "Systemd Services Memory Limit Failures" - , "failures" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 110 - , update_every - , RRDSET_TYPE_STACKED - ); - } - - if (do_swap_usage && !st_swap_usage) { - st_swap_usage = rrdset_create_localhost( - "services" - , "swap_usage" - , NULL - , "swap" - , "services.swap_usage" - , "Systemd Services Swap Memory Used" - , "MiB" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 100 - , update_every - , RRDSET_TYPE_STACKED - ); - } - - if(likely(do_io)) { - if(unlikely(!st_io_read)) { - st_io_read = rrdset_create_localhost( - "services" - , "io_read" - , NULL - , "disk" - , "services.io_read" - , "Systemd Services Disk Read Bandwidth" - , "KiB/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 120 - , update_every - , RRDSET_TYPE_STACKED - ); - } - - if(unlikely(!st_io_write)) { - st_io_write = rrdset_create_localhost( - "services" - , "io_write" - , NULL - , "disk" - , "services.io_write" - , "Systemd Services Disk Write Bandwidth" - , "KiB/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 130 - , update_every - , RRDSET_TYPE_STACKED - ); - } - } - - if(likely(do_io_ops)) { - if(unlikely(!st_io_serviced_read)) { - st_io_serviced_read = rrdset_create_localhost( - "services" - , "io_ops_read" - , NULL - , "disk" - , "services.io_ops_read" - , "Systemd Services Disk Read Operations" - , "operations/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 140 - , update_every - , RRDSET_TYPE_STACKED - ); - } - - if(unlikely(!st_io_serviced_write)) { - st_io_serviced_write = rrdset_create_localhost( - "services" - , "io_ops_write" - , NULL - , "disk" - , "services.io_ops_write" - , "Systemd Services Disk Write Operations" - , "operations/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 150 - , update_every - , RRDSET_TYPE_STACKED - ); - } - } - - if(likely(do_throttle_io)) { - if(unlikely(!st_throttle_io_read)) { - - st_throttle_io_read = rrdset_create_localhost( - "services" - , "throttle_io_read" - , NULL - , "disk" - , "services.throttle_io_read" - , "Systemd Services Throttle Disk Read Bandwidth" - , "KiB/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 160 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - - if(unlikely(!st_throttle_io_write)) { - st_throttle_io_write = 
rrdset_create_localhost( - "services" - , "throttle_io_write" - , NULL - , "disk" - , "services.throttle_io_write" - , "Systemd Services Throttle Disk Write Bandwidth" - , "KiB/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 170 - , update_every - , RRDSET_TYPE_STACKED - ); - } - } - - if(likely(do_throttle_ops)) { - if(unlikely(!st_throttle_ops_read)) { - st_throttle_ops_read = rrdset_create_localhost( - "services" - , "throttle_io_ops_read" - , NULL - , "disk" - , "services.throttle_io_ops_read" - , "Systemd Services Throttle Disk Read Operations" - , "operations/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 180 - , update_every - , RRDSET_TYPE_STACKED - ); - } - - if(unlikely(!st_throttle_ops_write)) { - st_throttle_ops_write = rrdset_create_localhost( - "services" - , "throttle_io_ops_write" - , NULL - , "disk" - , "services.throttle_io_ops_write" - , "Systemd Services Throttle Disk Write Operations" - , "operations/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 190 - , update_every - , RRDSET_TYPE_STACKED - ); - } - } - - if(likely(do_queued_ops)) { - if(unlikely(!st_queued_ops_read)) { - st_queued_ops_read = rrdset_create_localhost( - "services" - , "queued_io_ops_read" - , NULL - , "disk" - , "services.queued_io_ops_read" - , "Systemd Services Queued Disk Read Operations" - , "operations/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 200 - , update_every - , RRDSET_TYPE_STACKED - ); - } - - if(unlikely(!st_queued_ops_write)) { - - st_queued_ops_write = rrdset_create_localhost( - "services" - , "queued_io_ops_write" - , NULL - , "disk" - , "services.queued_io_ops_write" - , "Systemd Services Queued Disk Write Operations" - , "operations/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 210 - , update_every - , RRDSET_TYPE_STACKED - ); - } - } - - if(likely(do_merged_ops)) { - if(unlikely(!st_merged_ops_read)) { - st_merged_ops_read = rrdset_create_localhost( - "services" - , "merged_io_ops_read" - , NULL - , "disk" - , "services.merged_io_ops_read" - , "Systemd Services Merged Disk Read Operations" - , "operations/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 220 - , update_every - , RRDSET_TYPE_STACKED - ); - } - - if(unlikely(!st_merged_ops_write)) { - st_merged_ops_write = rrdset_create_localhost( - "services" - , "merged_io_ops_write" - , NULL - , "disk" - , "services.merged_io_ops_write" - , "Systemd Services Merged Disk Write Operations" - , "operations/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 230 - , update_every - , RRDSET_TYPE_STACKED - ); - } - } - - // update the values - struct cgroup *cg; - for(cg = cgroup_root; cg ; cg = cg->next) { - if(unlikely(!cg->enabled || cg->pending_renames || !is_cgroup_systemd_service(cg))) - continue; - - if(likely(do_cpu && cg->cpuacct_stat.updated)) { - if(unlikely(!cg->rd_cpu)){ - - - if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { - cg->rd_cpu = rrddim_add(st_cpu, cg->chart_id, cg->chart_title, 100, system_hz, RRD_ALGORITHM_INCREMENTAL); - } else { - cg->rd_cpu = rrddim_add(st_cpu, cg->chart_id, cg->chart_title, 100, 1000000, RRD_ALGORITHM_INCREMENTAL); - } - } - - rrddim_set_by_pointer(st_cpu, cg->rd_cpu, 
cg->cpuacct_stat.user + cg->cpuacct_stat.system); - } - - if(likely(do_mem_usage && cg->memory.updated_usage_in_bytes)) { - if(unlikely(!cg->rd_mem_usage)) - cg->rd_mem_usage = rrddim_add(st_mem_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_set_by_pointer(st_mem_usage, cg->rd_mem_usage, cg->memory.usage_in_bytes); - } - - if(likely(do_mem_detailed && cg->memory.updated_detailed)) { - if(unlikely(!cg->rd_mem_detailed_rss)) - cg->rd_mem_detailed_rss = rrddim_add(st_mem_detailed_rss, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_set_by_pointer(st_mem_detailed_rss, cg->rd_mem_detailed_rss, cg->memory.total_rss); - - if(unlikely(!cg->rd_mem_detailed_mapped)) - cg->rd_mem_detailed_mapped = rrddim_add(st_mem_detailed_mapped, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_set_by_pointer(st_mem_detailed_mapped, cg->rd_mem_detailed_mapped, cg->memory.total_mapped_file); - - if(unlikely(!cg->rd_mem_detailed_cache)) - cg->rd_mem_detailed_cache = rrddim_add(st_mem_detailed_cache, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_set_by_pointer(st_mem_detailed_cache, cg->rd_mem_detailed_cache, cg->memory.total_cache); - - if(unlikely(!cg->rd_mem_detailed_writeback)) - cg->rd_mem_detailed_writeback = rrddim_add(st_mem_detailed_writeback, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_set_by_pointer(st_mem_detailed_writeback, cg->rd_mem_detailed_writeback, cg->memory.total_writeback); - - if(unlikely(!cg->rd_mem_detailed_pgfault)) - cg->rd_mem_detailed_pgfault = rrddim_add(st_mem_detailed_pgfault, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_mem_detailed_pgfault, cg->rd_mem_detailed_pgfault, cg->memory.total_pgfault); - - if(unlikely(!cg->rd_mem_detailed_pgmajfault)) - cg->rd_mem_detailed_pgmajfault = rrddim_add(st_mem_detailed_pgmajfault, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_mem_detailed_pgmajfault, cg->rd_mem_detailed_pgmajfault, cg->memory.total_pgmajfault); - - if(unlikely(!cg->rd_mem_detailed_pgpgin)) - cg->rd_mem_detailed_pgpgin = rrddim_add(st_mem_detailed_pgpgin, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_mem_detailed_pgpgin, cg->rd_mem_detailed_pgpgin, cg->memory.total_pgpgin); - - if(unlikely(!cg->rd_mem_detailed_pgpgout)) - cg->rd_mem_detailed_pgpgout = rrddim_add(st_mem_detailed_pgpgout, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_mem_detailed_pgpgout, cg->rd_mem_detailed_pgpgout, cg->memory.total_pgpgout); - } - - if(likely(do_mem_failcnt && cg->memory.updated_failcnt)) { - if(unlikely(!cg->rd_mem_failcnt)) - cg->rd_mem_failcnt = rrddim_add(st_mem_failcnt, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_mem_failcnt, cg->rd_mem_failcnt, cg->memory.failcnt); - } - - if(likely(do_swap_usage && cg->memory.updated_msw_usage_in_bytes)) { - if(unlikely(!cg->rd_swap_usage)) - cg->rd_swap_usage = rrddim_add(st_swap_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { - rrddim_set_by_pointer( - st_swap_usage, - cg->rd_swap_usage, - cg->memory.msw_usage_in_bytes > (cg->memory.usage_in_bytes + 
cg->memory.total_inactive_file) ? - cg->memory.msw_usage_in_bytes - (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) : 0); - } else { - rrddim_set_by_pointer(st_swap_usage, cg->rd_swap_usage, cg->memory.msw_usage_in_bytes); - } - } - - if(likely(do_io && cg->io_service_bytes.updated)) { - if(unlikely(!cg->rd_io_service_bytes_read)) - cg->rd_io_service_bytes_read = rrddim_add(st_io_read, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_io_read, cg->rd_io_service_bytes_read, cg->io_service_bytes.Read); - - if(unlikely(!cg->rd_io_service_bytes_write)) - cg->rd_io_service_bytes_write = rrddim_add(st_io_write, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_io_write, cg->rd_io_service_bytes_write, cg->io_service_bytes.Write); - } - - if(likely(do_io_ops && cg->io_serviced.updated)) { - if(unlikely(!cg->rd_io_serviced_read)) - cg->rd_io_serviced_read = rrddim_add(st_io_serviced_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_io_serviced_read, cg->rd_io_serviced_read, cg->io_serviced.Read); - - if(unlikely(!cg->rd_io_serviced_write)) - cg->rd_io_serviced_write = rrddim_add(st_io_serviced_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_io_serviced_write, cg->rd_io_serviced_write, cg->io_serviced.Write); - } - - if(likely(do_throttle_io && cg->throttle_io_service_bytes.updated)) { - if(unlikely(!cg->rd_throttle_io_read)) - cg->rd_throttle_io_read = rrddim_add(st_throttle_io_read, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_throttle_io_read, cg->rd_throttle_io_read, cg->throttle_io_service_bytes.Read); - - if(unlikely(!cg->rd_throttle_io_write)) - cg->rd_throttle_io_write = rrddim_add(st_throttle_io_write, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_throttle_io_write, cg->rd_throttle_io_write, cg->throttle_io_service_bytes.Write); - } - - if(likely(do_throttle_ops && cg->throttle_io_serviced.updated)) { - if(unlikely(!cg->rd_throttle_io_serviced_read)) - cg->rd_throttle_io_serviced_read = rrddim_add(st_throttle_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_throttle_ops_read, cg->rd_throttle_io_serviced_read, cg->throttle_io_serviced.Read); - - if(unlikely(!cg->rd_throttle_io_serviced_write)) - cg->rd_throttle_io_serviced_write = rrddim_add(st_throttle_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_throttle_ops_write, cg->rd_throttle_io_serviced_write, cg->throttle_io_serviced.Write); - } - - if(likely(do_queued_ops && cg->io_queued.updated)) { - if(unlikely(!cg->rd_io_queued_read)) - cg->rd_io_queued_read = rrddim_add(st_queued_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_queued_ops_read, cg->rd_io_queued_read, cg->io_queued.Read); - - if(unlikely(!cg->rd_io_queued_write)) - cg->rd_io_queued_write = rrddim_add(st_queued_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_queued_ops_write, cg->rd_io_queued_write, cg->io_queued.Write); - } - - if(likely(do_merged_ops && cg->io_merged.updated)) { - if(unlikely(!cg->rd_io_merged_read)) - cg->rd_io_merged_read = rrddim_add(st_merged_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - 
rrddim_set_by_pointer(st_merged_ops_read, cg->rd_io_merged_read, cg->io_merged.Read); - - if(unlikely(!cg->rd_io_merged_write)) - cg->rd_io_merged_write = rrddim_add(st_merged_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_merged_ops_write, cg->rd_io_merged_write, cg->io_merged.Write); - } - } - - // complete the iteration - if(likely(do_cpu)) - rrdset_done(st_cpu); - - if(likely(do_mem_usage)) - rrdset_done(st_mem_usage); - - if(unlikely(do_mem_detailed)) { - rrdset_done(st_mem_detailed_cache); - rrdset_done(st_mem_detailed_rss); - rrdset_done(st_mem_detailed_mapped); - rrdset_done(st_mem_detailed_writeback); - rrdset_done(st_mem_detailed_pgfault); - rrdset_done(st_mem_detailed_pgmajfault); - rrdset_done(st_mem_detailed_pgpgin); - rrdset_done(st_mem_detailed_pgpgout); - } - - if(likely(do_mem_failcnt)) - rrdset_done(st_mem_failcnt); - - if(likely(do_swap_usage)) - rrdset_done(st_swap_usage); - - if(likely(do_io)) { - rrdset_done(st_io_read); - rrdset_done(st_io_write); - } - - if(likely(do_io_ops)) { - rrdset_done(st_io_serviced_read); - rrdset_done(st_io_serviced_write); - } - - if(likely(do_throttle_io)) { - rrdset_done(st_throttle_io_read); - rrdset_done(st_throttle_io_write); - } - - if(likely(do_throttle_ops)) { - rrdset_done(st_throttle_ops_read); - rrdset_done(st_throttle_ops_write); - } - - if(likely(do_queued_ops)) { - rrdset_done(st_queued_ops_read); - rrdset_done(st_queued_ops_write); - } - - if(likely(do_merged_ops)) { - rrdset_done(st_merged_ops_read); - rrdset_done(st_merged_ops_write); - } -} - -static inline char *cgroup_chart_type(char *buffer, const char *id, size_t len) { - if(buffer[0]) return buffer; - - if(id[0] == '\0' || (id[0] == '/' && id[1] == '\0')) - strncpy(buffer, "cgroup_root", len); - else - snprintfz(buffer, len, "%s%s", cgroup_chart_id_prefix, id); - - netdata_fix_chart_id(buffer); - return buffer; -} - -static inline void update_cpu_limits(char **filename, unsigned long long *value, struct cgroup *cg) { - if(*filename) { - int ret = -1; - - if(value == &cg->cpuset_cpus) { - unsigned long ncpus = read_cpuset_cpus(*filename, get_system_cpus()); - if(ncpus) { - *value = ncpus; - ret = 0; - } + if (likely(cg->cpuacct_stat.updated)) { + update_cpu_utilization_chart(cg); } - else if(value == &cg->cpu_cfs_period) { - ret = read_single_number_file(*filename, value); + if (likely(cg->memory.updated_msw_usage_in_bytes)) { + update_mem_usage_chart(cg); } - else if(value == &cg->cpu_cfs_quota) { - ret = read_single_number_file(*filename, value); + if (likely(cg->memory.updated_failcnt)) { + update_mem_failcnt_chart(cg); } - else ret = -1; - - if(ret) { - collector_error("Cannot refresh cgroup %s cpu limit by reading '%s'. 
Will not update its limit anymore.", cg->id, *filename); - freez(*filename); - *filename = NULL; + if (likely(cg->memory.updated_detailed)) { + update_mem_usage_detailed_chart(cg); + update_mem_writeback_chart(cg); + update_mem_pgfaults_chart(cg); + if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { + update_mem_activity_chart(cg); + } } - } -} - -static inline void update_cpu_limits2(struct cgroup *cg) { - if(cg->filename_cpu_cfs_quota){ - static procfile *ff = NULL; - - ff = procfile_reopen(ff, cg->filename_cpu_cfs_quota, NULL, CGROUP_PROCFILE_FLAG); - if(unlikely(!ff)) { - goto cpu_limits2_err; + if (likely(cg->io_service_bytes.updated)) { + update_io_serviced_bytes_chart(cg); } - - ff = procfile_readall(ff); - if(unlikely(!ff)) { - goto cpu_limits2_err; + if (likely(cg->io_serviced.updated)) { + update_io_serviced_ops_chart(cg); } - - unsigned long lines = procfile_lines(ff); - - if (unlikely(lines < 1)) { - collector_error("CGROUP: file '%s' should have 1 lines.", cg->filename_cpu_cfs_quota); - return; + if (likely(cg->throttle_io_service_bytes.updated)) { + update_throttle_io_serviced_bytes_chart(cg); } - - cg->cpu_cfs_period = str2ull(procfile_lineword(ff, 0, 1), NULL); - cg->cpuset_cpus = get_system_cpus(); - - char *s = "max\n\0"; - if(strcmp(s, procfile_lineword(ff, 0, 0)) == 0){ - cg->cpu_cfs_quota = cg->cpu_cfs_period * cg->cpuset_cpus; - } else { - cg->cpu_cfs_quota = str2ull(procfile_lineword(ff, 0, 0), NULL); + if (likely(cg->throttle_io_serviced.updated)) { + update_throttle_io_serviced_ops_chart(cg); } - netdata_log_debug(D_CGROUP, "CPU limits values: %llu %llu %llu", cg->cpu_cfs_period, cg->cpuset_cpus, cg->cpu_cfs_quota); - return; - -cpu_limits2_err: - collector_error("Cannot refresh cgroup %s cpu limit by reading '%s'. Will not update its limit anymore.", cg->id, cg->filename_cpu_cfs_quota); - freez(cg->filename_cpu_cfs_quota); - cg->filename_cpu_cfs_quota = NULL; - - } -} - -static inline int update_memory_limits(char **filename, const RRDSETVAR_ACQUIRED **chart_var, unsigned long long *value, const char *chart_var_name, struct cgroup *cg) { - if(*filename) { - if(unlikely(!*chart_var)) { - *chart_var = rrdsetvar_custom_chart_variable_add_and_acquire(cg->st_mem_usage, chart_var_name); - if(!*chart_var) { - collector_error("Cannot create cgroup %s chart variable '%s'. Will not update its limit anymore.", cg->id, chart_var_name); - freez(*filename); - *filename = NULL; - } + if (likely(cg->io_queued.updated)) { + update_io_queued_ops_chart(cg); + } + if (likely(cg->io_merged.updated)) { + update_io_merged_ops_chart(cg); } - if(*filename && *chart_var) { - if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { - if(read_single_number_file(*filename, value)) { - collector_error("Cannot refresh cgroup %s memory limit by reading '%s'. Will not update its limit anymore.", cg->id, *filename); - freez(*filename); - *filename = NULL; - } - else { - rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value / (1024 * 1024))); - return 1; - } - } else { - char buffer[30 + 1]; - int ret = read_file(*filename, buffer, 30); - if(ret) { - collector_error("Cannot refresh cgroup %s memory limit by reading '%s'. 
Will not update its limit anymore.", cg->id, *filename); - freez(*filename); - *filename = NULL; - return 0; - } - char *s = "max\n\0"; - if(strcmp(s, buffer) == 0){ - *value = UINT64_MAX; - rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value / (1024 * 1024))); - return 1; - } - *value = str2ull(buffer, NULL); - rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value / (1024 * 1024))); - return 1; - } + if (likely(cg->pids.pids_current_updated)) { + update_pids_current_chart(cg); } + + cg->function_ready = true; } - return 0; } -void update_cgroup_charts(int update_every) { - netdata_log_debug(D_CGROUP, "updating cgroups charts"); - - char type[RRD_ID_LENGTH_MAX + 1]; - char title[CHART_TITLE_MAX + 1]; - - int services_do_cpu = 0, - services_do_mem_usage = 0, - services_do_mem_detailed = 0, - services_do_mem_failcnt = 0, - services_do_swap_usage = 0, - services_do_io = 0, - services_do_io_ops = 0, - services_do_throttle_io = 0, - services_do_throttle_ops = 0, - services_do_queued_ops = 0, - services_do_merged_ops = 0; - - struct cgroup *cg; - for(cg = cgroup_root; cg ; cg = cg->next) { - if(unlikely(!cg->enabled || cg->pending_renames)) - continue; - - if(likely(cgroup_enable_systemd_services && is_cgroup_systemd_service(cg))) { - if(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES) services_do_cpu++; - - if(cgroup_enable_systemd_services_detailed_memory && cg->memory.updated_detailed && cg->memory.enabled_detailed) services_do_mem_detailed++; - if(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES) services_do_mem_usage++; - if(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES) services_do_mem_failcnt++; - if(cg->memory.updated_msw_usage_in_bytes && cg->memory.enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_YES) services_do_swap_usage++; - - if(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES) services_do_io++; - if(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES) services_do_io_ops++; - if(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES) services_do_throttle_io++; - if(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES) services_do_throttle_ops++; - if(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES) services_do_queued_ops++; - if(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES) services_do_merged_ops++; +void update_cgroup_charts() { + for (struct cgroup *cg = cgroup_root; cg; cg = cg->next) { + if(unlikely(!cg->enabled || cg->pending_renames || is_cgroup_systemd_service(cg))) continue; - } - - type[0] = '\0'; - - if(likely(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_cpu)) { - snprintfz( - title, - CHART_TITLE_MAX, - k8s_is_kubepod(cg) ? "CPU Usage (100%% = 1000 mCPU)" : "CPU Usage (100%% = 1 core)"); - - cg->st_cpu = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "cpu" - , NULL - , "cpu" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.cpu" : "cgroup.cpu" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority - , update_every - , RRDSET_TYPE_STACKED - ); - - rrdset_update_rrdlabels(cg->st_cpu, cg->chart_labels); - - if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { - rrddim_add(cg->st_cpu, "user", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_cpu, "system", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL); - } - else { - rrddim_add(cg->st_cpu, "user", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_cpu, "system", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL); - } - } - rrddim_set(cg->st_cpu, "user", cg->cpuacct_stat.user); - rrddim_set(cg->st_cpu, "system", cg->cpuacct_stat.system); - rrdset_done(cg->st_cpu); + if (likely(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES)) { + update_cpu_utilization_chart(cg); if(likely(cg->filename_cpuset_cpus || cg->filename_cpu_cfs_period || cg->filename_cpu_cfs_quota)) { if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { @@ -3769,8 +1408,7 @@ void update_cgroup_charts(int update_every) { if(cg->filename_cpu_cfs_quota) freez(cg->filename_cpu_cfs_quota); cg->filename_cpu_cfs_quota = NULL; } - } - else { + } else { NETDATA_DOUBLE value = 0, quota = 0; if(likely( ((!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) && (cg->filename_cpuset_cpus || (cg->filename_cpu_cfs_period && cg->filename_cpu_cfs_quota))) @@ -3784,49 +1422,10 @@ void update_cgroup_charts(int update_every) { value = (NETDATA_DOUBLE)cg->cpuset_cpus * 100; } if(likely(value)) { - if(unlikely(!cg->st_cpu_limit)) { - snprintfz(title, CHART_TITLE_MAX, "CPU Usage within the limits"); - - cg->st_cpu_limit = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "cpu_limit" - , NULL - , "cpu" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.cpu_limit" : "cgroup.cpu_limit" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority - 1 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_update_rrdlabels(cg->st_cpu_limit, cg->chart_labels); - - if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) - rrddim_add(cg->st_cpu_limit, "used", NULL, 1, system_hz, RRD_ALGORITHM_ABSOLUTE); - else - rrddim_add(cg->st_cpu_limit, "used", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE); - cg->prev_cpu_usage = (NETDATA_DOUBLE)(cg->cpuacct_stat.user + cg->cpuacct_stat.system) * 100; - } - - NETDATA_DOUBLE cpu_usage = 0; - cpu_usage = (NETDATA_DOUBLE)(cg->cpuacct_stat.user + cg->cpuacct_stat.system) * 100; - NETDATA_DOUBLE cpu_used = 100 * (cpu_usage - cg->prev_cpu_usage) / (value * update_every); - - rrdset_isnot_obsolete(cg->st_cpu_limit); - - rrddim_set(cg->st_cpu_limit, "used", (cpu_used > 0)?cpu_used:0); - - cg->prev_cpu_usage = cpu_usage; - - rrdsetvar_custom_chart_variable_set(cg->st_cpu, cg->chart_var_cpu_limit, value); - rrdset_done(cg->st_cpu_limit); - } - else { - if(unlikely(cg->st_cpu_limit)) { - rrdset_is_obsolete(cg->st_cpu_limit); + update_cpu_utilization_limit_chart(cg, value); + } else { + if (unlikely(cg->st_cpu_limit)) { + rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_limit); cg->st_cpu_limit = NULL; } rrdsetvar_custom_chart_variable_set(cg->st_cpu, cg->chart_var_cpu_limit, NAN); @@ -3836,1056 +1435,137 @@ void update_cgroup_charts(int update_every) { } if (likely(cg->cpuacct_cpu_throttling.updated && cg->cpuacct_cpu_throttling.enabled == CONFIG_BOOLEAN_YES)) { - if (unlikely(!cg->st_cpu_nr_throttled)) { - snprintfz(title, CHART_TITLE_MAX, "CPU Throttled Runnable Periods"); - - cg->st_cpu_nr_throttled = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "throttled" - , NULL - , "cpu" - , k8s_is_kubepod(cg) ? "k8s.cgroup.throttled" : "cgroup.throttled" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 10 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_update_rrdlabels(cg->st_cpu_nr_throttled, cg->chart_labels); - rrddim_add(cg->st_cpu_nr_throttled, "throttled", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } else { - rrddim_set(cg->st_cpu_nr_throttled, "throttled", cg->cpuacct_cpu_throttling.nr_throttled_perc); - rrdset_done(cg->st_cpu_nr_throttled); - } - - if (unlikely(!cg->st_cpu_throttled_time)) { - snprintfz(title, CHART_TITLE_MAX, "CPU Throttled Time Duration"); - - cg->st_cpu_throttled_time = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "throttled_duration" - , NULL - , "cpu" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.throttled_duration" : "cgroup.throttled_duration" - , title - , "ms" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 15 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_update_rrdlabels(cg->st_cpu_throttled_time, cg->chart_labels); - rrddim_add(cg->st_cpu_throttled_time, "duration", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL); - } else { - rrddim_set(cg->st_cpu_throttled_time, "duration", cg->cpuacct_cpu_throttling.throttled_time); - rrdset_done(cg->st_cpu_throttled_time); - } + update_cpu_throttled_chart(cg); + update_cpu_throttled_duration_chart(cg); } if (likely(cg->cpuacct_cpu_shares.updated && cg->cpuacct_cpu_shares.enabled == CONFIG_BOOLEAN_YES)) { - if (unlikely(!cg->st_cpu_shares)) { - snprintfz(title, CHART_TITLE_MAX, "CPU Time Relative Share"); - - cg->st_cpu_shares = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "cpu_shares" - , NULL - , "cpu" - , k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_shares" : "cgroup.cpu_shares" - , title - , "shares" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 20 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_update_rrdlabels(cg->st_cpu_shares, cg->chart_labels); - rrddim_add(cg->st_cpu_shares, "shares", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } else { - rrddim_set(cg->st_cpu_shares, "shares", cg->cpuacct_cpu_shares.shares); - rrdset_done(cg->st_cpu_shares); - } + update_cpu_shares_chart(cg); } - if(likely(cg->cpuacct_usage.updated && cg->cpuacct_usage.enabled == CONFIG_BOOLEAN_YES)) { - char id[RRD_ID_LENGTH_MAX + 1]; - unsigned int i; - - if(unlikely(!cg->st_cpu_per_core)) { - snprintfz( - title, - CHART_TITLE_MAX, - k8s_is_kubepod(cg) ? "CPU Usage (100%% = 1000 mCPU) Per Core" : - "CPU Usage (100%% = 1 core) Per Core"); - - cg->st_cpu_per_core = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "cpu_per_core" - , NULL - , "cpu" - , k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_per_core" : "cgroup.cpu_per_core" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 100 - , update_every - , RRDSET_TYPE_STACKED - ); - - rrdset_update_rrdlabels(cg->st_cpu_per_core, cg->chart_labels); - - for(i = 0; i < cg->cpuacct_usage.cpus; i++) { - snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i); - rrddim_add(cg->st_cpu_per_core, id, NULL, 100, 1000000000, RRD_ALGORITHM_INCREMENTAL); - } - } - - for(i = 0; i < cg->cpuacct_usage.cpus ;i++) { - snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i); - rrddim_set(cg->st_cpu_per_core, id, cg->cpuacct_usage.cpu_percpu[i]); - } - rrdset_done(cg->st_cpu_per_core); + if (likely(cg->cpuacct_usage.updated && cg->cpuacct_usage.enabled == CONFIG_BOOLEAN_YES)) { + update_cpu_per_core_usage_chart(cg); } - if(likely(cg->memory.updated_detailed && cg->memory.enabled_detailed == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_mem)) { - snprintfz(title, CHART_TITLE_MAX, "Memory Usage"); - - cg->st_mem = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "mem" - , NULL - , "mem" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.mem" : "cgroup.mem" - , title - , "MiB" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 220 - , update_every - , RRDSET_TYPE_STACKED - ); - - rrdset_update_rrdlabels(cg->st_mem, cg->chart_labels); - - if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { - rrddim_add(cg->st_mem, "cache", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(cg->st_mem, "rss", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - if(cg->memory.detailed_has_swap) - rrddim_add(cg->st_mem, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_add(cg->st_mem, "rss_huge", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(cg->st_mem, "mapped_file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } else { - rrddim_add(cg->st_mem, "anon", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(cg->st_mem, "kernel_stack", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(cg->st_mem, "slab", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(cg->st_mem, "sock", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(cg->st_mem, "anon_thp", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(cg->st_mem, "file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - } - - if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { - rrddim_set(cg->st_mem, "cache", cg->memory.total_cache); - rrddim_set(cg->st_mem, "rss", (cg->memory.total_rss > cg->memory.total_rss_huge)?(cg->memory.total_rss - cg->memory.total_rss_huge):0); - - if(cg->memory.detailed_has_swap) - rrddim_set(cg->st_mem, "swap", cg->memory.total_swap); - - rrddim_set(cg->st_mem, "rss_huge", cg->memory.total_rss_huge); - rrddim_set(cg->st_mem, "mapped_file", cg->memory.total_mapped_file); - } else { - rrddim_set(cg->st_mem, "anon", cg->memory.anon); - rrddim_set(cg->st_mem, "kernel_stack", cg->memory.kernel_stack); - rrddim_set(cg->st_mem, "slab", cg->memory.slab); - rrddim_set(cg->st_mem, "sock", cg->memory.sock); - rrddim_set(cg->st_mem, "anon_thp", cg->memory.anon_thp); - rrddim_set(cg->st_mem, "file", cg->memory.total_mapped_file); - } - rrdset_done(cg->st_mem); - - if(unlikely(!cg->st_writeback)) { - snprintfz(title, CHART_TITLE_MAX, "Writeback Memory"); - - cg->st_writeback = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "writeback" - , NULL - , "mem" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.writeback" : "cgroup.writeback" - , title - , "MiB" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 300 - , update_every - , RRDSET_TYPE_AREA - ); - - rrdset_update_rrdlabels(cg->st_writeback, cg->chart_labels); - - if(cg->memory.detailed_has_dirty) - rrddim_add(cg->st_writeback, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_add(cg->st_writeback, "writeback", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - - if(cg->memory.detailed_has_dirty) - rrddim_set(cg->st_writeback, "dirty", cg->memory.total_dirty); - - rrddim_set(cg->st_writeback, "writeback", cg->memory.total_writeback); - rrdset_done(cg->st_writeback); + if (likely(cg->memory.updated_detailed && cg->memory.enabled_detailed == CONFIG_BOOLEAN_YES)) { + update_mem_usage_detailed_chart(cg); + update_mem_writeback_chart(cg); if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { - if(unlikely(!cg->st_mem_activity)) { - snprintfz(title, CHART_TITLE_MAX, "Memory Activity"); - - cg->st_mem_activity = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "mem_activity" - , NULL - , "mem" - , k8s_is_kubepod(cg) ? "k8s.cgroup.mem_activity" : "cgroup.mem_activity" - , title - , "MiB/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 400 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_update_rrdlabels(cg->st_mem_activity, cg->chart_labels); - - rrddim_add(cg->st_mem_activity, "pgpgin", "in", system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_mem_activity, "pgpgout", "out", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - } - - rrddim_set(cg->st_mem_activity, "pgpgin", cg->memory.total_pgpgin); - rrddim_set(cg->st_mem_activity, "pgpgout", cg->memory.total_pgpgout); - rrdset_done(cg->st_mem_activity); + update_mem_activity_chart(cg); } - if(unlikely(!cg->st_pgfaults)) { - snprintfz(title, CHART_TITLE_MAX, "Memory Page Faults"); - - cg->st_pgfaults = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "pgfaults" - , NULL - , "mem" - , k8s_is_kubepod(cg) ? "k8s.cgroup.pgfaults" : "cgroup.pgfaults" - , title - , "MiB/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 500 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_update_rrdlabels(cg->st_pgfaults, cg->chart_labels); - - rrddim_add(cg->st_pgfaults, "pgfault", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_pgfaults, "pgmajfault", "swap", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - } - - rrddim_set(cg->st_pgfaults, "pgfault", cg->memory.total_pgfault); - rrddim_set(cg->st_pgfaults, "pgmajfault", cg->memory.total_pgmajfault); - rrdset_done(cg->st_pgfaults); + update_mem_pgfaults_chart(cg); } - if(likely(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_mem_usage)) { - snprintfz(title, CHART_TITLE_MAX, "Used Memory"); - - cg->st_mem_usage = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "mem_usage" - , NULL - , "mem" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.mem_usage" : "cgroup.mem_usage" - , title - , "MiB" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 210 - , update_every - , RRDSET_TYPE_STACKED - ); - - rrdset_update_rrdlabels(cg->st_mem_usage, cg->chart_labels); - - rrddim_add(cg->st_mem_usage, "ram", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(cg->st_mem_usage, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - - rrddim_set(cg->st_mem_usage, "ram", cg->memory.usage_in_bytes); - if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { - rrddim_set( - cg->st_mem_usage, - "swap", - cg->memory.msw_usage_in_bytes > (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) ? - cg->memory.msw_usage_in_bytes - (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) : 0); - } else { - rrddim_set(cg->st_mem_usage, "swap", cg->memory.msw_usage_in_bytes); - } - rrdset_done(cg->st_mem_usage); - - if (likely(update_memory_limits(&cg->filename_memory_limit, &cg->chart_var_memory_limit, &cg->memory_limit, "memory_limit", cg))) { - static unsigned long long ram_total = 0; - - if(unlikely(!ram_total)) { - procfile *ff = NULL; - - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/meminfo"); - ff = procfile_open(config_get("plugin:cgroups", "meminfo filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); - - if(likely(ff)) - ff = procfile_readall(ff); - if(likely(ff && procfile_lines(ff) && !strncmp(procfile_word(ff, 0), "MemTotal", 8))) - ram_total = str2ull(procfile_word(ff, 1), NULL) * 1024; - else { - collector_error("Cannot read file %s. Will not update cgroup %s RAM limit anymore.", filename, cg->id); - freez(cg->filename_memory_limit); - cg->filename_memory_limit = NULL; - } - - procfile_close(ff); - } + if (likely(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES)) { + update_mem_usage_chart(cg); - if(likely(ram_total)) { - unsigned long long memory_limit = ram_total; + // FIXME: this if should be only for unlimited charts + if(likely(host_ram_total)) { + // FIXME: do we need to update mem limits on every data collection? + if (likely(update_memory_limits(cg))) { - if(unlikely(cg->memory_limit < ram_total)) + unsigned long long memory_limit = host_ram_total; + if (unlikely(cg->memory_limit < host_ram_total)) memory_limit = cg->memory_limit; - if(unlikely(!cg->st_mem_usage_limit)) { - snprintfz(title, CHART_TITLE_MAX, "Used RAM within the limits"); - - cg->st_mem_usage_limit = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "mem_usage_limit" - , NULL - , "mem" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.mem_usage_limit": "cgroup.mem_usage_limit" - , title - , "MiB" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 200 - , update_every - , RRDSET_TYPE_STACKED - ); - - rrdset_update_rrdlabels(cg->st_mem_usage_limit, cg->chart_labels); - - rrddim_add(cg->st_mem_usage_limit, "available", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(cg->st_mem_usage_limit, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - - rrdset_isnot_obsolete(cg->st_mem_usage_limit); - - rrddim_set(cg->st_mem_usage_limit, "available", memory_limit - cg->memory.usage_in_bytes); - rrddim_set(cg->st_mem_usage_limit, "used", cg->memory.usage_in_bytes); - rrdset_done(cg->st_mem_usage_limit); - - if (unlikely(!cg->st_mem_utilization)) { - snprintfz(title, CHART_TITLE_MAX, "Memory Utilization"); - - cg->st_mem_utilization = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "mem_utilization" - , NULL - , "mem" - , k8s_is_kubepod(cg) ? "k8s.cgroup.mem_utilization" : "cgroup.mem_utilization" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 199 - , update_every - , RRDSET_TYPE_AREA - ); - - rrdset_update_rrdlabels(cg->st_mem_utilization, cg->chart_labels); - - rrddim_add(cg->st_mem_utilization, "utilization", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + update_mem_usage_limit_chart(cg, memory_limit); + update_mem_utilization_chart(cg, memory_limit); + } else { + if (unlikely(cg->st_mem_usage_limit)) { + rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_usage_limit); + cg->st_mem_usage_limit = NULL; } - if (memory_limit) { - rrdset_isnot_obsolete(cg->st_mem_utilization); - - rrddim_set( - cg->st_mem_utilization, "utilization", cg->memory.usage_in_bytes * 100 / memory_limit); - rrdset_done(cg->st_mem_utilization); + if (unlikely(cg->st_mem_utilization)) { + rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_utilization); + cg->st_mem_utilization = NULL; } } } - else { - if(unlikely(cg->st_mem_usage_limit)) { - rrdset_is_obsolete(cg->st_mem_usage_limit); - cg->st_mem_usage_limit = NULL; - } - - if(unlikely(cg->st_mem_utilization)) { - rrdset_is_obsolete(cg->st_mem_utilization); - cg->st_mem_utilization = NULL; - } - } - - update_memory_limits(&cg->filename_memoryswap_limit, &cg->chart_var_memoryswap_limit, &cg->memoryswap_limit, "memory_and_swap_limit", cg); } - if(likely(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_mem_failcnt)) { - snprintfz(title, CHART_TITLE_MAX, "Memory Limit Failures"); - - cg->st_mem_failcnt = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "mem_failcnt" - , NULL - , "mem" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.mem_failcnt" : "cgroup.mem_failcnt" - , title - , "count" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 250 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_update_rrdlabels(cg->st_mem_failcnt, cg->chart_labels); - - rrddim_add(cg->st_mem_failcnt, "failures", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - rrddim_set(cg->st_mem_failcnt, "failures", cg->memory.failcnt); - rrdset_done(cg->st_mem_failcnt); + if (likely(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES)) { + update_mem_failcnt_chart(cg); } - if(likely(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_io)) { - snprintfz(title, CHART_TITLE_MAX, "I/O Bandwidth (all disks)"); - - cg->st_io = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "io" - , NULL - , "disk" - , k8s_is_kubepod(cg) ? "k8s.cgroup.io" : "cgroup.io" - , title - , "KiB/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 1200 - , update_every - , RRDSET_TYPE_AREA - ); - - rrdset_update_rrdlabels(cg->st_io, cg->chart_labels); - - rrddim_add(cg->st_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); - } - - rrddim_set(cg->st_io, "read", cg->io_service_bytes.Read); - rrddim_set(cg->st_io, "write", cg->io_service_bytes.Write); - rrdset_done(cg->st_io); + if (likely(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) { + update_io_serviced_bytes_chart(cg); } - if(likely(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_serviced_ops)) { - snprintfz(title, CHART_TITLE_MAX, "Serviced I/O Operations (all disks)"); - - cg->st_serviced_ops = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "serviced_ops" - , NULL - , "disk" - , k8s_is_kubepod(cg) ? "k8s.cgroup.serviced_ops" : "cgroup.serviced_ops" - , title - , "operations/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 1200 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_update_rrdlabels(cg->st_serviced_ops, cg->chart_labels); - - rrddim_add(cg->st_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - rrddim_set(cg->st_serviced_ops, "read", cg->io_serviced.Read); - rrddim_set(cg->st_serviced_ops, "write", cg->io_serviced.Write); - rrdset_done(cg->st_serviced_ops); + if (likely(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES)) { + update_io_serviced_ops_chart(cg); } - if(likely(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_throttle_io)) { - snprintfz(title, CHART_TITLE_MAX, "Throttle I/O Bandwidth (all disks)"); - - cg->st_throttle_io = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "throttle_io" - , NULL - , "disk" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.throttle_io" : "cgroup.throttle_io" - , title - , "KiB/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 1200 - , update_every - , RRDSET_TYPE_AREA - ); - - rrdset_update_rrdlabels(cg->st_throttle_io, cg->chart_labels); - - rrddim_add(cg->st_throttle_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_throttle_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); - } - - rrddim_set(cg->st_throttle_io, "read", cg->throttle_io_service_bytes.Read); - rrddim_set(cg->st_throttle_io, "write", cg->throttle_io_service_bytes.Write); - rrdset_done(cg->st_throttle_io); + if (likely(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) { + update_throttle_io_serviced_bytes_chart(cg); } - if(likely(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_throttle_serviced_ops)) { - snprintfz(title, CHART_TITLE_MAX, "Throttle Serviced I/O Operations (all disks)"); - - cg->st_throttle_serviced_ops = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "throttle_serviced_ops" - , NULL - , "disk" - , k8s_is_kubepod(cg) ? "k8s.cgroup.throttle_serviced_ops" : "cgroup.throttle_serviced_ops" - , title - , "operations/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 1200 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_update_rrdlabels(cg->st_throttle_serviced_ops, cg->chart_labels); - - rrddim_add(cg->st_throttle_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_throttle_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - rrddim_set(cg->st_throttle_serviced_ops, "read", cg->throttle_io_serviced.Read); - rrddim_set(cg->st_throttle_serviced_ops, "write", cg->throttle_io_serviced.Write); - rrdset_done(cg->st_throttle_serviced_ops); + if (likely(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES)) { + update_throttle_io_serviced_ops_chart(cg); } - if(likely(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_queued_ops)) { - snprintfz(title, CHART_TITLE_MAX, "Queued I/O Operations (all disks)"); - - cg->st_queued_ops = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "queued_ops" - , NULL - , "disk" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.queued_ops" : "cgroup.queued_ops" - , title - , "operations" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2000 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_update_rrdlabels(cg->st_queued_ops, cg->chart_labels); - - rrddim_add(cg->st_queued_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(cg->st_queued_ops, "write", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE); - } - - rrddim_set(cg->st_queued_ops, "read", cg->io_queued.Read); - rrddim_set(cg->st_queued_ops, "write", cg->io_queued.Write); - rrdset_done(cg->st_queued_ops); + if (likely(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES)) { + update_io_queued_ops_chart(cg); } - if(likely(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_merged_ops)) { - snprintfz(title, CHART_TITLE_MAX, "Merged I/O Operations (all disks)"); - - cg->st_merged_ops = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "merged_ops" - , NULL - , "disk" - , k8s_is_kubepod(cg) ? "k8s.cgroup.merged_ops" : "cgroup.merged_ops" - , title - , "operations/s" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2100 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_update_rrdlabels(cg->st_merged_ops, cg->chart_labels); - - rrddim_add(cg->st_merged_ops, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_merged_ops, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); - } + if (likely(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES)) { + update_io_merged_ops_chart(cg); + } - rrddim_set(cg->st_merged_ops, "read", cg->io_merged.Read); - rrddim_set(cg->st_merged_ops, "write", cg->io_merged.Write); - rrdset_done(cg->st_merged_ops); + if (likely(cg->pids.pids_current_updated)) { + update_pids_current_chart(cg); } if (cg->options & CGROUP_OPTIONS_IS_UNIFIED) { - struct pressure *res = &cg->cpu_pressure; - - if (likely(res->updated && res->some.enabled)) { - struct pressure_charts *pcs; - pcs = &res->some; - - if (unlikely(!pcs->share_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "CPU some pressure"); - chart = pcs->share_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "cpu_some_pressure" - , NULL - , "cpu" - , k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_some_pressure" : "cgroup.cpu_some_pressure" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2200 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels); - pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - } - - if (unlikely(!pcs->total_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "CPU some pressure stall time"); - chart = pcs->total_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "cpu_some_pressure_stall_time" - , NULL - , "cpu" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.cpu_some_pressure_stall_time" : "cgroup.cpu_some_pressure_stall_time" - , title - , "ms" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2220 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels); - pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - update_pressure_charts(pcs); - } - if (likely(res->updated && res->full.enabled)) { - struct pressure_charts *pcs; - pcs = &res->full; - - if (unlikely(!pcs->share_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "CPU full pressure"); - chart = pcs->share_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "cpu_full_pressure" - , NULL - , "cpu" - , k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_full_pressure" : "cgroup.cpu_full_pressure" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2240 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels); - pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - } - - if (unlikely(!pcs->total_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "CPU full pressure stall time"); - chart = pcs->total_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "cpu_full_pressure_stall_time" - , NULL - , "cpu" - , k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_full_pressure_stall_time" : "cgroup.cpu_full_pressure_stall_time" - , title - , "ms" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2260 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels); - pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - update_pressure_charts(pcs); - } - - res = &cg->memory_pressure; - - if (likely(res->updated && res->some.enabled)) { - struct pressure_charts *pcs; - pcs = &res->some; - - if (unlikely(!pcs->share_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "Memory some pressure"); - chart = pcs->share_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "mem_some_pressure" - , NULL - , "mem" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.memory_some_pressure" : "cgroup.memory_some_pressure" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2300 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels); - pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - } - - if (unlikely(!pcs->total_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "Memory some pressure stall time"); - chart = pcs->total_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "memory_some_pressure_stall_time" - , NULL - , "mem" - , k8s_is_kubepod(cg) ? "k8s.cgroup.memory_some_pressure_stall_time" : "cgroup.memory_some_pressure_stall_time" - , title - , "ms" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2320 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels); - pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - update_pressure_charts(pcs); - } - - if (likely(res->updated && res->full.enabled)) { - struct pressure_charts *pcs; - pcs = &res->full; - - if (unlikely(!pcs->share_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "Memory full pressure"); - - chart = pcs->share_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "mem_full_pressure" - , NULL - , "mem" - , k8s_is_kubepod(cg) ? "k8s.cgroup.memory_full_pressure" : "cgroup.memory_full_pressure" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2340 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels); - pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - } - - if (unlikely(!pcs->total_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "Memory full pressure stall time"); - chart = pcs->total_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "memory_full_pressure_stall_time" - , NULL - , "mem" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.memory_full_pressure_stall_time" : "cgroup.memory_full_pressure_stall_time" - , title - , "ms" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2360 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels); - pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - update_pressure_charts(pcs); - } - - res = &cg->irq_pressure; - - if (likely(res->updated && res->some.enabled)) { - struct pressure_charts *pcs; - pcs = &res->some; - - if (unlikely(!pcs->share_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "IRQ some pressure"); - chart = pcs->share_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "irq_some_pressure" - , NULL - , "interrupts" - , k8s_is_kubepod(cg) ? "k8s.cgroup.irq_some_pressure" : "cgroup.irq_some_pressure" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2310 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels); - pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - } - - if (unlikely(!pcs->total_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "IRQ some pressure stall time"); - chart = pcs->total_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "irq_some_pressure_stall_time" - , NULL - , "interrupts" - , k8s_is_kubepod(cg) ? "k8s.cgroup.irq_some_pressure_stall_time" : "cgroup.irq_some_pressure_stall_time" - , title - , "ms" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2330 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels); - pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - update_pressure_charts(pcs); + if (likely(cg->cpu_pressure.updated)) { + if (cg->cpu_pressure.some.enabled) { + update_cpu_some_pressure_chart(cg); + update_cpu_some_pressure_stall_time_chart(cg); + } + if (cg->cpu_pressure.full.enabled) { + update_cpu_full_pressure_chart(cg); + update_cpu_full_pressure_stall_time_chart(cg); + } } - if (likely(res->updated && res->full.enabled)) { - struct pressure_charts *pcs; - pcs = &res->full; - - if (unlikely(!pcs->share_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "IRQ full pressure"); - - chart = pcs->share_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "irq_full_pressure" - , NULL - , "interrupts" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.irq_full_pressure" : "cgroup.irq_full_pressure" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2350 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels); - pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + if (likely(cg->memory_pressure.updated)) { + if (cg->memory_pressure.some.enabled) { + update_mem_some_pressure_chart(cg); + update_mem_some_pressure_stall_time_chart(cg); } - - if (unlikely(!pcs->total_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "IRQ full pressure stall time"); - chart = pcs->total_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "irq_full_pressure_stall_time" - , NULL - , "interrupts" - , k8s_is_kubepod(cg) ? "k8s.cgroup.irq_full_pressure_stall_time" : "cgroup.irq_full_pressure_stall_time" - , title - , "ms" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2370 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels); - pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + if (cg->memory_pressure.full.enabled) { + update_mem_full_pressure_chart(cg); + update_mem_full_pressure_stall_time_chart(cg); } - - update_pressure_charts(pcs); } - res = &cg->io_pressure; - - if (likely(res->updated && res->some.enabled)) { - struct pressure_charts *pcs; - pcs = &res->some; - - if (unlikely(!pcs->share_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "I/O some pressure"); - chart = pcs->share_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "io_some_pressure" - , NULL - , "disk" - , k8s_is_kubepod(cg) ? "k8s.cgroup.io_some_pressure" : "cgroup.io_some_pressure" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2400 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels); - pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + if (likely(cg->irq_pressure.updated)) { + if (cg->irq_pressure.some.enabled) { + update_irq_some_pressure_chart(cg); + update_irq_some_pressure_stall_time_chart(cg); } - - if (unlikely(!pcs->total_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "I/O some pressure stall time"); - chart = pcs->total_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "io_some_pressure_stall_time" - , NULL - , "disk" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.io_some_pressure_stall_time" : "cgroup.io_some_pressure_stall_time" - , title - , "ms" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2420 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels); - pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + if (cg->irq_pressure.full.enabled) { + update_irq_full_pressure_chart(cg); + update_irq_full_pressure_stall_time_chart(cg); } - - update_pressure_charts(pcs); } - if (likely(res->updated && res->full.enabled)) { - struct pressure_charts *pcs; - pcs = &res->full; - - if (unlikely(!pcs->share_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "I/O full pressure"); - chart = pcs->share_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "io_full_pressure" - , NULL - , "disk" - , k8s_is_kubepod(cg) ? "k8s.cgroup.io_full_pressure" : "cgroup.io_full_pressure" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2440 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels); - pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + if (likely(cg->io_pressure.updated)) { + if (cg->io_pressure.some.enabled) { + update_io_some_pressure_chart(cg); + update_io_some_pressure_stall_time_chart(cg); } - - if (unlikely(!pcs->total_time.st)) { - RRDSET *chart; - snprintfz(title, CHART_TITLE_MAX, "I/O full pressure stall time"); - chart = pcs->total_time.st = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "io_full_pressure_stall_time" - , NULL - , "disk" - , k8s_is_kubepod(cg) ? 
"k8s.cgroup.io_full_pressure_stall_time" : "cgroup.io_full_pressure_stall_time" - , title - , "ms" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME - , cgroup_containers_chart_priority + 2460 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels); - pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + if (cg->io_pressure.full.enabled) { + update_io_full_pressure_chart(cg); + update_io_full_pressure_stall_time_chart(cg); } - - update_pressure_charts(pcs); } } - } - - if(likely(cgroup_enable_systemd_services)) - update_systemd_services_charts(update_every, services_do_cpu, services_do_mem_usage, services_do_mem_detailed - , services_do_mem_failcnt, services_do_swap_usage, services_do_io - , services_do_io_ops, services_do_throttle_io, services_do_throttle_ops - , services_do_queued_ops, services_do_merged_ops - ); - netdata_log_debug(D_CGROUP, "done updating cgroups charts"); + cg->function_ready = true; + } } // ---------------------------------------------------------------------------- @@ -4901,19 +1581,15 @@ static void cgroup_main_cleanup(void *ptr) { usec_t max = 2 * USEC_PER_SEC, step = 50000; - if (!discovery_thread.exited) { - collector_info("stopping discovery thread worker"); - uv_mutex_lock(&discovery_thread.mutex); - discovery_thread.start_discovery = 1; - uv_cond_signal(&discovery_thread.cond_var); - uv_mutex_unlock(&discovery_thread.mutex); - } - - collector_info("waiting for discovery thread to finish..."); - - while (!discovery_thread.exited && max > 0) { - max -= step; - sleep_usec(step); + if (!__atomic_load_n(&discovery_thread.exited, __ATOMIC_RELAXED)) { + collector_info("waiting for discovery thread to finish..."); + while (!__atomic_load_n(&discovery_thread.exited, __ATOMIC_RELAXED) && max > 0) { + uv_mutex_lock(&discovery_thread.mutex); + uv_cond_signal(&discovery_thread.cond_var); + uv_mutex_unlock(&discovery_thread.mutex); + max -= step; + sleep_usec(step); + } } if (shm_mutex_cgroup_ebpf != SEM_FAILED) { @@ -4932,6 +1608,22 @@ static void cgroup_main_cleanup(void *ptr) { static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; } +void cgroup_read_host_total_ram() { + procfile *ff = NULL; + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/meminfo"); + + ff = procfile_open( + config_get("plugin:cgroups", "meminfo filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); + + if (likely((ff = procfile_readall(ff)) && procfile_lines(ff) && !strncmp(procfile_word(ff, 0), "MemTotal", 8))) + host_ram_total = str2ull(procfile_word(ff, 1), NULL) * 1024; + else + collector_error("Cannot read file %s. Will not create RAM limit charts.", filename); + + procfile_close(ff); +} + void *cgroups_main(void *ptr) { worker_register("CGROUPS"); worker_register_job_name(WORKER_CGROUPS_LOCK, "lock"); @@ -4946,6 +1638,9 @@ void *cgroups_main(void *ptr) { } read_cgroup_plugin_configuration(); + + cgroup_read_host_total_ram(); + netdata_cgroup_ebpf_initialize_shm(); if (uv_mutex_init(&cgroup_root_mutex)) { @@ -4953,8 +1648,6 @@ void *cgroups_main(void *ptr) { goto exit; } - // dispatch a discovery worker thread - discovery_thread.start_discovery = 0; discovery_thread.exited = 0; if (uv_mutex_init(&discovery_thread.mutex)) { @@ -4971,23 +1664,35 @@ void *cgroups_main(void *ptr) { collector_error("CGROUP: cannot create thread worker. 
uv_thread_create(): %s", uv_strerror(error)); goto exit; } - uv_thread_set_name_np(discovery_thread.thread, "PLUGIN[cgroups]"); + + uv_thread_set_name_np(discovery_thread.thread, "P[cgroups]"); + + // we register this only on localhost + // for the other nodes, the origin server should register it + rrd_collector_started(); // this creates a collector that runs for as long as netdata runs + cgroup_netdev_link_init(); + rrd_function_add(localhost, NULL, "containers-vms", 10, RRDFUNCTIONS_CGTOP_HELP, true, cgroup_function_cgroup_top, NULL); + rrd_function_add(localhost, NULL, "systemd-services", 10, RRDFUNCTIONS_CGTOP_HELP, true, cgroup_function_systemd_top, NULL); heartbeat_t hb; heartbeat_init(&hb); usec_t step = cgroup_update_every * USEC_PER_SEC; usec_t find_every = cgroup_check_for_new_every * USEC_PER_SEC, find_dt = 0; + netdata_thread_disable_cancelability(); + while(service_running(SERVICE_COLLECTORS)) { worker_is_idle(); usec_t hb_dt = heartbeat_next(&hb, step); - if(unlikely(!service_running(SERVICE_COLLECTORS))) break; + if (unlikely(!service_running(SERVICE_COLLECTORS))) + break; find_dt += hb_dt; if (unlikely(find_dt >= find_every || (!is_inside_k8s && cgroups_check))) { + uv_mutex_lock(&discovery_thread.mutex); uv_cond_signal(&discovery_thread.cond_var); - discovery_thread.start_discovery = 1; + uv_mutex_unlock(&discovery_thread.mutex); find_dt = 0; cgroups_check = 0; } @@ -4997,18 +1702,28 @@ void *cgroups_main(void *ptr) { worker_is_busy(WORKER_CGROUPS_READ); read_all_discovered_cgroups(cgroup_root); - if(unlikely(!service_running(SERVICE_COLLECTORS))) break; + + if (unlikely(!service_running(SERVICE_COLLECTORS))) { + uv_mutex_unlock(&cgroup_root_mutex); + break; + } worker_is_busy(WORKER_CGROUPS_CHART); - update_cgroup_charts(cgroup_update_every); - if(unlikely(!service_running(SERVICE_COLLECTORS))) break; + + update_cgroup_charts(); + if (cgroup_enable_systemd_services) + update_cgroup_systemd_services_charts(); + + if (unlikely(!service_running(SERVICE_COLLECTORS))) { + uv_mutex_unlock(&cgroup_root_mutex); + break; + } worker_is_idle(); uv_mutex_unlock(&cgroup_root_mutex); } exit: - worker_unregister(); netdata_thread_cleanup_pop(1); return NULL; } diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.h b/collectors/cgroups.plugin/sys_fs_cgroup.h index dc800ba912c61e..e8cfcf5f60c7d9 100644 --- a/collectors/cgroups.plugin/sys_fs_cgroup.h +++ b/collectors/cgroups.plugin/sys_fs_cgroup.h @@ -5,6 +5,10 @@ #include "daemon/common.h" +#define PLUGIN_CGROUPS_NAME "cgroups.plugin" +#define PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME "systemd" +#define PLUGIN_CGROUPS_MODULE_CGROUPS_NAME "/sys/fs/cgroup" + #define CGROUP_OPTIONS_DISABLED_DUPLICATE 0x00000001 #define CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE 0x00000002 #define CGROUP_OPTIONS_IS_UNIFIED 0x00000004 @@ -39,6 +43,6 @@ typedef struct netdata_ebpf_cgroup_shm { #include "../proc.plugin/plugin_proc.h" -char *cgroup_parse_resolved_name_and_labels(DICTIONARY *labels, char *data); +char *cgroup_parse_resolved_name_and_labels(RRDLABELS *labels, char *data); #endif //NETDATA_SYS_FS_CGROUP_H diff --git a/collectors/cgroups.plugin/tests/test_cgroups_plugin.c b/collectors/cgroups.plugin/tests/test_cgroups_plugin.c index a0f91530905d5d..bb1fb398856a02 100644 --- a/collectors/cgroups.plugin/tests/test_cgroups_plugin.c +++ b/collectors/cgroups.plugin/tests/test_cgroups_plugin.c @@ -20,13 +20,12 @@ struct k8s_test_data { int i; }; -static int read_label_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) +static int 
read_label_callback(const char *name, const char *value, void *data) { struct k8s_test_data *test_data = (struct k8s_test_data *)data; test_data->result_key[test_data->i] = name; test_data->result_value[test_data->i] = value; - test_data->result_ls[test_data->i] = ls; test_data->i++; @@ -37,7 +36,7 @@ static void test_cgroup_parse_resolved_name(void **state) { UNUSED(state); - DICTIONARY *labels = rrdlabels_create(); + RRDLABELS *labels = rrdlabels_create(); struct k8s_test_data test_data[] = { // One label diff --git a/collectors/cgroups.plugin/tests/test_doubles.c b/collectors/cgroups.plugin/tests/test_doubles.c index 498f649f5d62b2..b13d4b19ceddd2 100644 --- a/collectors/cgroups.plugin/tests/test_doubles.c +++ b/collectors/cgroups.plugin/tests/test_doubles.c @@ -2,12 +2,12 @@ #include "test_cgroups_plugin.h" -void rrdset_is_obsolete(RRDSET *st) +void rrdset_is_obsolete___safe_from_collector_thread(RRDSET *st) { UNUSED(st); } -void rrdset_isnot_obsolete(RRDSET *st) +void rrdset_isnot_obsolete___safe_from_collector_thread(RRDSET *st) { UNUSED(st); } diff --git a/collectors/charts.d.plugin/Makefile.am b/collectors/charts.d.plugin/Makefile.am index 03c7f0a949237b..f82992fd4a5006 100644 --- a/collectors/charts.d.plugin/Makefile.am +++ b/collectors/charts.d.plugin/Makefile.am @@ -45,6 +45,5 @@ include ap/Makefile.inc include apcupsd/Makefile.inc include example/Makefile.inc include libreswan/Makefile.inc -include nut/Makefile.inc include opensips/Makefile.inc include sensors/Makefile.inc diff --git a/collectors/charts.d.plugin/ap/README.md b/collectors/charts.d.plugin/ap/README.md deleted file mode 100644 index 339ad13751e4af..00000000000000 --- a/collectors/charts.d.plugin/ap/README.md +++ /dev/null @@ -1,104 +0,0 @@ - - -# Access point collector - -The `ap` collector visualizes data related to access points. - -## Example Netdata charts - -![image](https://cloud.githubusercontent.com/assets/2662304/12377654/9f566e88-bd2d-11e5-855a-e0ba96b8fd98.png) - -## How it works - -It does the following: - -1. Runs `iw dev` searching for interfaces that have `type AP`. - - From the same output it collects the SSIDs each AP supports by looking for lines `ssid NAME`. - - Example: - -```sh -# iw dev -phy#0 - Interface wlan0 - ifindex 3 - wdev 0x1 - addr 7c:dd:90:77:34:2a - ssid TSAOUSIS - type AP - channel 7 (2442 MHz), width: 20 MHz, center1: 2442 MHz -``` - -2. For each interface found, it runs `iw INTERFACE station dump`. - - From the output is collects: - - - rx/tx bytes - - rx/tx packets - - tx retries - - tx failed - - signal strength - - rx/tx bitrate - - expected throughput - - Example: - -```sh -# iw wlan0 station dump -Station 40:b8:37:5a:ed:5e (on wlan0) - inactive time: 910 ms - rx bytes: 15588897 - rx packets: 127772 - tx bytes: 52257763 - tx packets: 95802 - tx retries: 2162 - tx failed: 28 - signal: -43 dBm - signal avg: -43 dBm - tx bitrate: 65.0 MBit/s MCS 7 - rx bitrate: 1.0 MBit/s - expected throughput: 32.125Mbps - authorized: yes - authenticated: yes - preamble: long - WMM/WME: yes - MFP: no - TDLS peer: no -``` - -3. 
For each interface found, it creates 6 charts: - - - Number of Connected clients - - Bandwidth for all clients - - Packets for all clients - - Transmit Issues for all clients - - Average Signal among all clients - - Average Bitrate (including average expected throughput) among all clients - -## Configuration - -If using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), make sure `netdata-plugin-chartsd` is installed. - -Edit the `charts.d/ap.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config charts.d/ap.conf -``` - -You can only set `ap_update_every=NUMBER` to change the data collection frequency. - -## Auto-detection - -The plugin is able to auto-detect if you are running access points on your linux box. - - diff --git a/collectors/charts.d.plugin/ap/README.md b/collectors/charts.d.plugin/ap/README.md new file mode 120000 index 00000000000000..5b6e751301e428 --- /dev/null +++ b/collectors/charts.d.plugin/ap/README.md @@ -0,0 +1 @@ +integrations/access_points.md \ No newline at end of file diff --git a/collectors/charts.d.plugin/ap/integrations/access_points.md b/collectors/charts.d.plugin/ap/integrations/access_points.md new file mode 100644 index 00000000000000..a0de2c4df2938d --- /dev/null +++ b/collectors/charts.d.plugin/ap/integrations/access_points.md @@ -0,0 +1,174 @@ + + +# Access Points + + + + + +Plugin: charts.d.plugin +Module: ap + + + +## Overview + +The ap collector visualizes data related to wireless access points. + +It uses the `iw` command line utility to detect access points. For each interface that is of `type AP`, it then runs `iw INTERFACE station dump` and collects statistics. + +This collector is only supported on the following platforms: + +- Linux + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +The plugin is able to auto-detect if you are running access points on your linux box. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per wireless device + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ap.clients | clients | clients | +| ap.net | received, sent | kilobits/s | +| ap.packets | received, sent | packets/s | +| ap.issues | retries, failures | issues/s | +| ap.signal | average signal | dBm | +| ap.bitrate | receive, transmit, expected | Mbps | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install charts.d plugin + +If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. + + +#### `iw` utility. 
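The module shells out to `iw`, so the binary must be reachable by the user that runs `charts.d.plugin`. A quick, illustrative check is shown below; the install hint is only an example, since package names and package managers vary by distribution.

```bash
# Verify that iw is available; the charts.d ap module cannot run without it.
command -v iw >/dev/null 2>&1 || echo "iw not found - install it with your distribution's package manager (e.g. 'apt install iw' or 'dnf install iw')"
```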
+ +Make sure the `iw` utility is installed. + + +### Configuration + +#### File + +The configuration file name for this integration is `charts.d/ap.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config charts.d/ap.conf +``` +#### Options + +The config file is sourced by the charts.d plugin. It's a standard bash file. + +The following collapsed table contains all the options that can be configured for the ap collector. + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| ap_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |
+| ap_priority | Controls the order of charts on the netdata dashboard. | 6900 | no |
+| ap_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |
+
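Before adjusting these options, you can check what the collector would detect on your host by running the same `iw` commands it relies on. The snippet below is only a rough sketch of that flow, assuming `iw` is installed and in `$PATH`; the charts.d module itself parses the output with its own logic, so treat this purely as an illustration of what it looks at.

```bash
#!/usr/bin/env bash
# Sketch of the detection flow: find interfaces of `type AP`, then dump
# per-station counters for each one. Illustrative only.

# Interfaces operating as access points, according to `iw dev`.
aps=$(iw dev | awk '$1 == "Interface" { iface = $2 } $1 == "type" && $2 == "AP" { print iface }')

for iface in $aps; do
    echo "== ${iface} =="
    # Counters the collector aggregates: bytes, packets, retries, signal, bitrate.
    iw "$iface" station dump |
        grep -E 'rx bytes|tx bytes|rx packets|tx packets|tx retries|tx failed|signal:|bitrate|expected throughput'
done
```

If this prints nothing, the interface is not running in AP mode and the collector will have nothing to chart.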
+ +#### Examples + +##### Change the collection frequency + +Specify a custom collection frequence (update_every) for this collector + +```yaml +# the data collection frequency +# if unset, will inherit the netdata update frequency +ap_update_every=10 + +# the charts priority on the dashboard +#ap_priority=6900 + +# the number of retries to do in case of failure +# before disabling the module +#ap_retries=10 + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `ap` collector, run the `charts.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `charts.d.plugin` to debug the collector: + + ```bash + ./charts.d.plugin debug 1 ap + ``` + + diff --git a/collectors/charts.d.plugin/ap/metadata.yaml b/collectors/charts.d.plugin/ap/metadata.yaml index c4e96a14ab2df0..ee941e417c27dc 100644 --- a/collectors/charts.d.plugin/ap/metadata.yaml +++ b/collectors/charts.d.plugin/ap/metadata.yaml @@ -41,6 +41,9 @@ modules: setup: prerequisites: list: + - title: "Install charts.d plugin" + description: | + If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. - title: "`iw` utility." description: "Make sure the `iw` utility is installed." configuration: diff --git a/collectors/charts.d.plugin/apcupsd/README.md b/collectors/charts.d.plugin/apcupsd/README.md deleted file mode 100644 index 00e9697dc81c87..00000000000000 --- a/collectors/charts.d.plugin/apcupsd/README.md +++ /dev/null @@ -1,26 +0,0 @@ - - -# APC UPS collector - -Monitors different APC UPS models and retrieves status information using `apcaccess` tool. - -## Configuration - -If using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), make sure `netdata-plugin-chartsd` is installed. - -Edit the `charts.d/apcupsd.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config charts.d/apcupsd.conf -``` - - diff --git a/collectors/charts.d.plugin/apcupsd/README.md b/collectors/charts.d.plugin/apcupsd/README.md new file mode 120000 index 00000000000000..fc6681fe618065 --- /dev/null +++ b/collectors/charts.d.plugin/apcupsd/README.md @@ -0,0 +1 @@ +integrations/apc_ups.md \ No newline at end of file diff --git a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh index ef9a90596a4470..da9cd19c333da7 100644 --- a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh +++ b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh @@ -50,7 +50,7 @@ apcupsd_check() { local host working=0 failed=0 for host in "${!apcupsd_sources[@]}"; do - apcupsd_get "${apcupsd_sources[${host}]}" > /dev/null + apcupsd_get "${apcupsd_sources[${host}]}" >/dev/null # shellcheck disable=2181 if [ $? 
-ne 0 ]; then error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}." @@ -77,7 +77,7 @@ apcupsd_create() { local host for host in "${!apcupsd_sources[@]}"; do # create the charts - cat << EOF + cat < + +# APC UPS + + + + + +Plugin: charts.d.plugin +Module: apcupsd + + + +## Overview + +Monitor APC UPS performance with Netdata for optimal uninterruptible power supply operations. Enhance your power supply reliability with real-time APC UPS metrics. + +The collector uses the `apcaccess` tool to contact the `apcupsd` daemon and get the APC UPS statistics. + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +By default, with no configuration provided, the collector will try to contact 127.0.0.1:3551 with using the `apcaccess` utility. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per ups + +Metrics related to UPS. Each UPS provides its own set of the following metrics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| apcupsd.charge | charge | percentage | +| apcupsd.battery.voltage | voltage, nominal | Volts | +| apcupsd.input.voltage | voltage, min, max | Volts | +| apcupsd.output.voltage | absolute, nominal | Volts | +| apcupsd.input.frequency | frequency | Hz | +| apcupsd.load | load | percentage | +| apcupsd.load_usage | load | Watts | +| apcupsd.temperature | temp | Celsius | +| apcupsd.time | time | Minutes | +| apcupsd.online | online | boolean | +| apcupsd.selftest | OK, NO, BT, NG | status | +| apcupsd.status | ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, CAL, TRIM, BOOST, SHUTTING_DOWN | status | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ apcupsd_ups_charge ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.charge | average UPS charge over the last minute | +| [ apcupsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.load | average UPS load over the last 10 minutes | +| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.load | number of seconds since the last successful data collection | +| [ apcupsd_selftest_warning ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.selftest | self-test failed due to insufficient battery capacity or due to overload. 
| +| [ apcupsd_status_onbatt ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has switched to battery power because the input power has failed | +| [ apcupsd_status_overload ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS is overloaded and cannot supply enough power to the load | +| [ apcupsd_status_lowbatt ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery is low and needs to be recharged | +| [ apcupsd_status_replacebatt ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery has reached the end of its lifespan and needs to be replaced | +| [ apcupsd_status_nobatt ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has no battery | +| [ apcupsd_status_commlost ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS communication link is lost | + + +## Setup + +### Prerequisites + +#### Install charts.d plugin + +If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. + + +#### Required software + +Make sure the `apcaccess` and `apcupsd` are installed and running. + + +### Configuration + +#### File + +The configuration file name for this integration is `charts.d/apcupsd.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config charts.d/apcupsd.conf +``` +#### Options + +The config file is sourced by the charts.d plugin. It's a standard bash file. + +The following collapsed table contains all the options that can be configured for the apcupsd collector. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| apcupsd_sources | This is an array of apcupsd sources. You can have multiple entries there. Please refer to the example below on how to set it. | 127.0.0.1:3551 | no | +| apcupsd_timeout | How long to wait for apcupsd to respond. | 3 | no | +| apcupsd_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no | +| apcupsd_priority | The charts priority on the dashboard. | 90000 | no | +| apcupsd_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no | + +
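To see the raw values these charts are built from, you can query the same daemon the collector talks to. This is a rough sketch, assuming a local `apcupsd` listening on the default `127.0.0.1:3551` and `apcaccess` in `$PATH`; the exact fields reported depend on the UPS model.

```bash
#!/usr/bin/env bash
# Pull a few representative fields from apcupsd's status output.
# Illustrative only - field names vary by UPS model.

apcaccess status 127.0.0.1:3551 | awk -F': *' '
    $1 ~ /^BCHARGE/  { print "charge (%) :", $2 }
    $1 ~ /^LOADPCT/  { print "load (%)   :", $2 }
    $1 ~ /^TIMELEFT/ { print "time left  :", $2 }
    $1 ~ /^STATUS/   { print "status     :", $2 }
'
```

If this command times out or fails, adjust `apcupsd_sources` and `apcupsd_timeout` accordingly before enabling the collector.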
+ +#### Examples + +##### Multiple apcupsd sources + +Specify a multiple apcupsd sources along with a custom update interval + +```yaml +# add all your APC UPSes in this array - uncomment it too +declare -A apcupsd_sources=( + ["local"]="127.0.0.1:3551", + ["remote"]="1.2.3.4:3551" +) + +# how long to wait for apcupsd to respond +#apcupsd_timeout=3 + +# the data collection frequency +# if unset, will inherit the netdata update frequency +apcupsd_update_every=5 + +# the charts priority on the dashboard +#apcupsd_priority=90000 + +# the number of retries to do in case of failure +# before disabling the module +#apcupsd_retries=10 + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `apcupsd` collector, run the `charts.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `charts.d.plugin` to debug the collector: + + ```bash + ./charts.d.plugin debug 1 apcupsd + ``` + + diff --git a/collectors/charts.d.plugin/apcupsd/metadata.yaml b/collectors/charts.d.plugin/apcupsd/metadata.yaml index d078074b7bb325..c333dc96467232 100644 --- a/collectors/charts.d.plugin/apcupsd/metadata.yaml +++ b/collectors/charts.d.plugin/apcupsd/metadata.yaml @@ -42,6 +42,9 @@ modules: setup: prerequisites: list: + - title: "Install charts.d plugin" + description: | + If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. - title: "Required software" description: "Make sure the `apcaccess` and `apcupsd` are installed and running." configuration: @@ -121,6 +124,34 @@ modules: link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf metric: apcupsd.load info: number of seconds since the last successful data collection + - name: apcupsd_selftest_warning + link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf + metric: apcupsd.selftest + info: self-test failed due to insufficient battery capacity or due to overload. 
+ - name: apcupsd_status_onbatt + link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf + metric: apcupsd.status + info: APC UPS has switched to battery power because the input power has failed + - name: apcupsd_status_overload + link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf + metric: apcupsd.status + info: APC UPS is overloaded and cannot supply enough power to the load + - name: apcupsd_status_lowbatt + link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf + metric: apcupsd.status + info: APC UPS battery is low and needs to be recharged + - name: apcupsd_status_replacebatt + link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf + metric: apcupsd.status + info: APC UPS battery has reached the end of its lifespan and needs to be replaced + - name: apcupsd_status_nobatt + link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf + metric: apcupsd.status + info: APC UPS has no battery + - name: apcupsd_status_commlost + link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf + metric: apcupsd.status + info: APC UPS communication link is lost metrics: folding: title: Metrics @@ -196,3 +227,30 @@ modules: chart_type: line dimensions: - name: online + - name: apcupsd.selftest + description: UPS Self-Test status + unit: status + chart_type: line + dimensions: + - name: OK + - name: NO + - name: BT + - name: NG + - name: apcupsd.status + description: UPS Status + unit: status + chart_type: line + dimensions: + - name: ONLINE + - name: ONBATT + - name: OVERLOAD + - name: LOWBATT + - name: REPLACEBATT + - name: NOBATT + - name: SLAVE + - name: SLAVEDOWN + - name: COMMLOST + - name: CAL + - name: TRIM + - name: BOOST + - name: SHUTTING_DOWN diff --git a/collectors/charts.d.plugin/charts.d.conf b/collectors/charts.d.plugin/charts.d.conf index 2d32f73ea89ee9..4614f259efad48 100644 --- a/collectors/charts.d.plugin/charts.d.conf +++ b/collectors/charts.d.plugin/charts.d.conf @@ -36,7 +36,6 @@ # ap=yes # apcupsd=yes # libreswan=yes -# nut=yes # opensips=yes # ----------------------------------------------------------------------------- diff --git a/collectors/charts.d.plugin/charts.d.plugin.in b/collectors/charts.d.plugin/charts.d.plugin.in index 20996eb93739e5..14694809902c66 100755 --- a/collectors/charts.d.plugin/charts.d.plugin.in +++ b/collectors/charts.d.plugin/charts.d.plugin.in @@ -13,13 +13,116 @@ # each will have a different config file and modules configuration directory. 
# -export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin" +export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin:@sbindir_POST@" PROGRAM_FILE="$0" -PROGRAM_NAME="$(basename $0)" -PROGRAM_NAME="${PROGRAM_NAME/.plugin/}" MODULE_NAME="main" +# ----------------------------------------------------------------------------- +# logging + +PROGRAM_NAME="$(basename "${0}")" +SHORT_PROGRAM_NAME="${PROGRAM_NAME/.plugin/}" + +# these should be the same with syslog() priorities +NDLP_EMERG=0 # system is unusable +NDLP_ALERT=1 # action must be taken immediately +NDLP_CRIT=2 # critical conditions +NDLP_ERR=3 # error conditions +NDLP_WARN=4 # warning conditions +NDLP_NOTICE=5 # normal but significant condition +NDLP_INFO=6 # informational +NDLP_DEBUG=7 # debug-level messages + +# the max (numerically) log level we will log +LOG_LEVEL=$NDLP_INFO + +set_log_min_priority() { + case "${NETDATA_LOG_LEVEL,,}" in + "emerg" | "emergency") + LOG_LEVEL=$NDLP_EMERG + ;; + + "alert") + LOG_LEVEL=$NDLP_ALERT + ;; + + "crit" | "critical") + LOG_LEVEL=$NDLP_CRIT + ;; + + "err" | "error") + LOG_LEVEL=$NDLP_ERR + ;; + + "warn" | "warning") + LOG_LEVEL=$NDLP_WARN + ;; + + "notice") + LOG_LEVEL=$NDLP_NOTICE + ;; + + "info") + LOG_LEVEL=$NDLP_INFO + ;; + + "debug") + LOG_LEVEL=$NDLP_DEBUG + ;; + esac +} + +set_log_min_priority + +log() { + local level="${1}" + shift 1 + + [[ -n "$level" && -n "$LOG_LEVEL" && "$level" -gt "$LOG_LEVEL" ]] && return + + systemd-cat-native --log-as-netdata --newline="--NEWLINE--" <&2 "BASH version 4 or later is required (this is ${BASH_VERSION})." + exit 1 +fi + # ----------------------------------------------------------------------------- # create temp dir @@ -47,36 +150,6 @@ logdate() { date "+%Y-%m-%d %H:%M:%S" } -log() { - local status="${1}" - shift - - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}" - -} - -warning() { - log WARNING "${@}" -} - -error() { - log ERROR "${@}" -} - -info() { - log INFO "${@}" -} - -fatal() { - log FATAL "${@}" - echo "DISABLE" - exit 1 -} - -debug() { - [ $debug -eq 1 ] && log DEBUG "${@}" -} - # ----------------------------------------------------------------------------- # check a few commands @@ -176,12 +249,14 @@ while [ ! -z "$1" ]; do if [ "$1" = "debug" -o "$1" = "all" ]; then debug=1 + LOG_LEVEL=$NDLP_DEBUG shift continue fi if [ -f "$chartsd/$1.chart.sh" ]; then debug=1 + LOG_LEVEL=$NDLP_DEBUG chart_only="$(echo $1.chart.sh | sed "s/\.chart\.sh$//g")" shift continue @@ -189,6 +264,7 @@ while [ ! -z "$1" ]; do if [ -f "$chartsd/$1" ]; then debug=1 + LOG_LEVEL=$NDLP_DEBUG chart_only="$(echo $1 | sed "s/\.chart\.sh$//g")" shift continue @@ -229,7 +305,7 @@ source "$pluginsd/loopsleepms.sh.inc" # ----------------------------------------------------------------------------- # load my configuration -for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf"; do +for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${SHORT_PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${SHORT_PROGRAM_NAME}.conf"; do if [ -f "$myconfig" ]; then source "$myconfig" if [ $? -ne 0 ]; then diff --git a/collectors/charts.d.plugin/libreswan/README.md b/collectors/charts.d.plugin/libreswan/README.md deleted file mode 100644 index b6eeb0180ca880..00000000000000 --- a/collectors/charts.d.plugin/libreswan/README.md +++ /dev/null @@ -1,61 +0,0 @@ - - -# Libreswan IPSec tunnel collector - -Collects bytes-in, bytes-out and uptime for all established libreswan IPSEC tunnels. 
- -The following charts are created, **per tunnel**: - -1. **Uptime** - -- the uptime of the tunnel - -2. **Traffic** - -- bytes in -- bytes out - -## Configuration - -If using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), make sure `netdata-plugin-chartsd` is installed. - -Edit the `charts.d/libreswan.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config charts.d/libreswan.conf -``` - -The plugin executes 2 commands to collect all the information it needs: - -```sh -ipsec whack --status -ipsec whack --trafficstatus -``` - -The first command is used to extract the currently established tunnels, their IDs and their names. -The second command is used to extract the current uptime and traffic. - -Most probably user `netdata` will not be able to query libreswan, so the `ipsec` commands will be denied. -The plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to libreswan statistics. - -To allow user `netdata` execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content: - -``` -netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status -netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus -``` - -Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path). - ---- - - diff --git a/collectors/charts.d.plugin/libreswan/README.md b/collectors/charts.d.plugin/libreswan/README.md new file mode 120000 index 00000000000000..1416d9597ed6ce --- /dev/null +++ b/collectors/charts.d.plugin/libreswan/README.md @@ -0,0 +1 @@ +integrations/libreswan.md \ No newline at end of file diff --git a/collectors/charts.d.plugin/libreswan/integrations/libreswan.md b/collectors/charts.d.plugin/libreswan/integrations/libreswan.md new file mode 100644 index 00000000000000..bd1eec647730ed --- /dev/null +++ b/collectors/charts.d.plugin/libreswan/integrations/libreswan.md @@ -0,0 +1,194 @@ + + +# Libreswan + + + + + +Plugin: charts.d.plugin +Module: libreswan + + + +## Overview + +Monitor Libreswan performance for optimal IPsec VPN operations. Improve your VPN operations with Netdata''s real-time metrics and built-in alerts. + +The collector uses the `ipsec` command to collect the information it needs. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per IPSEC tunnel + +Metrics related to IPSEC tunnels. Each tunnel provides its own set of the following metrics. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| libreswan.net | in, out | kilobits/s | +| libreswan.uptime | uptime | seconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install charts.d plugin + +If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. + + +#### Permissions to execute `ipsec` + +The plugin executes 2 commands to collect all the information it needs: + +```sh +ipsec whack --status +ipsec whack --trafficstatus +``` + +The first command is used to extract the currently established tunnels, their IDs and their names. +The second command is used to extract the current uptime and traffic. + +Most probably user `netdata` will not be able to query libreswan, so the `ipsec` commands will be denied. +The plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to libreswan statistics. + +To allow user `netdata` execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content: + +``` +netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status +netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus +``` + +Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path). + + + +### Configuration + +#### File + +The configuration file name for this integration is `charts.d/libreswan.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config charts.d/libreswan.conf +``` +#### Options + +The config file is sourced by the charts.d plugin. It's a standard bash file. + +The following collapsed table contains all the options that can be configured for the libreswan collector. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| libreswan_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no | +| libreswan_priority | The charts priority on the dashboard | 90000 | no | +| libreswan_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no | +| libreswan_sudo | Whether to run `ipsec` with `sudo` or not. | 1 | no | + +
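To verify that the permissions above are in place and to see the raw counters the charts are built from, you can run the same two `ipsec` commands yourself. This is a rough sketch, assuming the sudoers entries from the prerequisites are configured; the exact output format of `--trafficstatus` varies between libreswan versions, so the parsing shown here is illustrative only.

```bash
#!/usr/bin/env bash
# Established tunnels, their IDs and names (long output - trimmed here):
sudo ipsec whack --status | head -n 40

# Per-tunnel uptime and traffic counters (source of libreswan.net / libreswan.uptime):
sudo ipsec whack --trafficstatus

# Illustrative parsing of the byte counters per connection name:
sudo ipsec whack --trafficstatus |
    sed -n 's/.*"\([^"]*\)".*inBytes=\([0-9]*\), outBytes=\([0-9]*\).*/\1 in=\2 out=\3/p'
```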
+ +#### Examples + +##### Run `ipsec` without sudo + +Run the `ipsec` utility without sudo + +```yaml +# the data collection frequency +# if unset, will inherit the netdata update frequency +#libreswan_update_every=1 + +# the charts priority on the dashboard +#libreswan_priority=90000 + +# the number of retries to do in case of failure +# before disabling the module +#libreswan_retries=10 + +# set to 1, to run ipsec with sudo (the default) +# set to 0, to run ipsec without sudo +libreswan_sudo=0 + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `libreswan` collector, run the `charts.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `charts.d.plugin` to debug the collector: + + ```bash + ./charts.d.plugin debug 1 libreswan + ``` + + diff --git a/collectors/charts.d.plugin/libreswan/metadata.yaml b/collectors/charts.d.plugin/libreswan/metadata.yaml index 484d79edec27fd..77cb254505ec92 100644 --- a/collectors/charts.d.plugin/libreswan/metadata.yaml +++ b/collectors/charts.d.plugin/libreswan/metadata.yaml @@ -40,6 +40,9 @@ modules: setup: prerequisites: list: + - title: "Install charts.d plugin" + description: | + If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. - title: "Permissions to execute `ipsec`" description: | The plugin executes 2 commands to collect all the information it needs: diff --git a/collectors/charts.d.plugin/nut/Makefile.inc b/collectors/charts.d.plugin/nut/Makefile.inc deleted file mode 100644 index 4fb47145dd821a..00000000000000 --- a/collectors/charts.d.plugin/nut/Makefile.inc +++ /dev/null @@ -1,13 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# install these files -dist_charts_DATA += nut/nut.chart.sh -dist_chartsconfig_DATA += nut/nut.conf - -# do not install these files, but include them in the distribution -dist_noinst_DATA += nut/README.md nut/Makefile.inc - diff --git a/collectors/charts.d.plugin/nut/README.md b/collectors/charts.d.plugin/nut/README.md deleted file mode 100644 index 4608ce3e1ac39f..00000000000000 --- a/collectors/charts.d.plugin/nut/README.md +++ /dev/null @@ -1,79 +0,0 @@ - - -# UPS/PDU collector - -Collects UPS data for all power devices configured in the system. - -The following charts will be created: - -1. **UPS Charge** - -- percentage changed - -2. **UPS Battery Voltage** - -- current voltage -- high voltage -- low voltage -- nominal voltage - -3. **UPS Input Voltage** - -- current voltage -- fault voltage -- nominal voltage - -4. **UPS Input Current** - -- nominal current - -5. **UPS Input Frequency** - -- current frequency -- nominal frequency - -6. **UPS Output Voltage** - -- current voltage - -7. **UPS Load** - -- current load - -8. 
**UPS Temperature** - -- current temperature - -## Configuration - -If using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), make sure `netdata-plugin-chartsd` is installed. - -Edit the `charts.d/nut.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config charts.d/nut.conf -``` - -This is the internal default for `charts.d/nut.conf` - -```sh -# a space separated list of UPS names -# if empty, the list returned by 'upsc -l' will be used -nut_ups= - -# how frequently to collect UPS data -nut_update_every=2 -``` - ---- - - diff --git a/collectors/charts.d.plugin/nut/metadata.yaml b/collectors/charts.d.plugin/nut/metadata.yaml deleted file mode 100644 index ea2e6b2eb666d0..00000000000000 --- a/collectors/charts.d.plugin/nut/metadata.yaml +++ /dev/null @@ -1,219 +0,0 @@ -plugin_name: charts.d.plugin -modules: - - meta: - plugin_name: charts.d.plugin - module_name: nut - monitored_instance: - name: Network UPS Tools (NUT) - link: '' - categories: - - data-collection.ups - icon_filename: 'plug-circle-bolt.svg' - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: '' - keywords: - - nut - - network ups tools - - ups - - pdu - most_popular: false - overview: - data_collection: - metrics_description: 'Examine UPS/PDU metrics with Netdata for insights into power device performance. Improve your power device performance with comprehensive dashboards and anomaly detection.' - method_description: 'This collector uses the `nut` (Network UPS Tools) to query statistics for multiple UPS devices.' - supported_platforms: - include: [] - exclude: [] - multi_instance: true - additional_permissions: - description: '' - default_behavior: - auto_detection: - description: '' - limits: - description: '' - performance_impact: - description: '' - setup: - prerequisites: - list: - - title: "Required software" - description: "Make sure the Network UPS Tools (`nut`) is installed and can detect your UPS devices." - configuration: - file: - name: charts.d/nut.conf - options: - description: | - The config file is sourced by the charts.d plugin. It's a standard bash file. - - The following collapsed table contains all the options that can be configured for the nut collector. - folding: - title: "Config options" - enabled: true - list: - - name: nut_ups - description: A space separated list of UPS names. If empty, the list returned by `upsc -l` will be used. - default_value: "" - required: false - - name: nut_names - description: Each line represents an alias for one UPS. If empty, the FQDN will be used. - default_value: "" - required: false - - name: nut_timeout - description: How long to wait for nut to respond. - default_value: 2 - required: false - - name: nut_clients_chart - description: Set this to 1 to enable another chart showing the number of UPS clients connected to `upsd`. - default_value: 1 - required: false - - name: nut_update_every - description: The data collection frequency. If unset, will inherit the netdata update frequency. 
- default_value: 2 - required: false - - name: nut_priority - description: The charts priority on the dashboard - default_value: 90000 - required: false - - name: nut_retries - description: The number of retries to do in case of failure before disabling the collector. - default_value: 10 - required: false - examples: - folding: - enabled: true - title: "Config" - list: - - name: Provide names to UPS devices - description: Map aliases to UPS devices - config: | - # a space separated list of UPS names - # if empty, the list returned by 'upsc -l' will be used - #nut_ups= - - # each line represents an alias for one UPS - # if empty, the FQDN will be used - nut_names["XXXXXX"]="UPS-office" - nut_names["YYYYYY"]="UPS-rack" - - # how much time in seconds, to wait for nut to respond - #nut_timeout=2 - - # set this to 1, to enable another chart showing the number - # of UPS clients connected to upsd - #nut_clients_chart=1 - - # the data collection frequency - # if unset, will inherit the netdata update frequency - #nut_update_every=2 - - # the charts priority on the dashboard - #nut_priority=90000 - - # the number of retries to do in case of failure - # before disabling the module - #nut_retries=10 - troubleshooting: - problems: - list: [] - alerts: - - name: nut_ups_charge - link: https://github.com/netdata/netdata/blob/master/health/health.d/nut.conf - metric: nut.charge - info: average UPS charge over the last minute - os: "*" - - name: nut_10min_ups_load - link: https://github.com/netdata/netdata/blob/master/health/health.d/nut.conf - metric: nut.load - info: average UPS load over the last 10 minutes - os: "*" - - name: nut_last_collected_secs - link: https://github.com/netdata/netdata/blob/master/health/health.d/nut.conf - metric: nut.load - info: number of seconds since the last successful data collection - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [] - scopes: - - name: ups - description: "Metrics related to UPS. Each UPS provides its own set of the following metrics." 
- labels: [] - metrics: - - name: nut.charge - description: UPS Charge - unit: "percentage" - chart_type: area - dimensions: - - name: charge - - name: nut.runtime - description: UPS Runtime - unit: "seconds" - chart_type: line - dimensions: - - name: runtime - - name: nut.battery.voltage - description: UPS Battery Voltage - unit: "Volts" - chart_type: line - dimensions: - - name: voltage - - name: high - - name: low - - name: nominal - - name: nut.input.voltage - description: UPS Input Voltage - unit: "Volts" - chart_type: line - dimensions: - - name: voltage - - name: fault - - name: nominal - - name: nut.input.current - description: UPS Input Current - unit: "Ampere" - chart_type: line - dimensions: - - name: nominal - - name: nut.input.frequency - description: UPS Input Frequency - unit: "Hz" - chart_type: line - dimensions: - - name: frequency - - name: nominal - - name: nut.output.voltage - description: UPS Output Voltage - unit: "Volts" - chart_type: line - dimensions: - - name: voltage - - name: nut.load - description: UPS Load - unit: "percentage" - chart_type: area - dimensions: - - name: load - - name: nut.load_usage - description: UPS Load Usage - unit: "Watts" - chart_type: area - dimensions: - - name: load_usage - - name: nut.temperature - description: UPS Temperature - unit: "temperature" - chart_type: line - dimensions: - - name: temp - - name: nut.clients - description: UPS Connected Clients - unit: "clients" - chart_type: area - dimensions: - - name: clients diff --git a/collectors/charts.d.plugin/nut/nut.chart.sh b/collectors/charts.d.plugin/nut/nut.chart.sh deleted file mode 100644 index 7c32b6ddeda86f..00000000000000 --- a/collectors/charts.d.plugin/nut/nut.chart.sh +++ /dev/null @@ -1,244 +0,0 @@ -# shellcheck shell=bash -# no need for shebang - this file is loaded from charts.d.plugin -# SPDX-License-Identifier: GPL-3.0-or-later - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2016-2017 Costa Tsaousis -# - -# a space separated list of UPS names -# if empty, the list returned by 'upsc -l' will be used -nut_ups= - -# how frequently to collect UPS data -nut_update_every=2 - -# how much time in seconds, to wait for nut to respond -nut_timeout=2 - -# set this to 1, to enable another chart showing the number -# of UPS clients connected to upsd -nut_clients_chart=0 - -# the priority of nut related to other charts -nut_priority=90000 - -declare -A nut_ids=() -declare -A nut_names=() - -nut_get_all() { - run -t $nut_timeout upsc -l -} - -nut_get() { - run -t $nut_timeout upsc "$1" - - if [ "${nut_clients_chart}" -eq "1" ]; then - printf "ups.connected_clients: " - run -t $nut_timeout upsc -c "$1" | wc -l - fi -} - -nut_check() { - - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - local x - - require_cmd upsc || return 1 - - [ -z "$nut_ups" ] && nut_ups="$(nut_get_all)" - - for x in $nut_ups; do - nut_get "$x" > /dev/null - # shellcheck disable=SC2181 - if [ $? -eq 0 ]; then - if [ -n "${nut_names[${x}]}" ]; then - nut_ids[$x]="$(fixid "${nut_names[${x}]}")" - else - nut_ids[$x]="$(fixid "$x")" - fi - continue - fi - error "cannot get information for NUT UPS '$x'." 
- done - - if [ ${#nut_ids[@]} -eq 0 ]; then - # shellcheck disable=SC2154 - error "Cannot find UPSes - please set nut_ups='ups_name' in $confd/nut.conf" - return 1 - fi - - return 0 -} - -nut_create() { - # create the charts - local x - - for x in "${nut_ids[@]}"; do - cat << EOF -CHART nut_$x.charge '' "UPS Charge" "percentage" ups nut.charge area $((nut_priority + 2)) $nut_update_every -DIMENSION battery_charge charge absolute 1 100 - -CHART nut_$x.runtime '' "UPS Runtime" "seconds" ups nut.runtime area $((nut_priority + 3)) $nut_update_every -DIMENSION battery_runtime runtime absolute 1 100 - -CHART nut_$x.battery_voltage '' "UPS Battery Voltage" "Volts" ups nut.battery.voltage line $((nut_priority + 4)) $nut_update_every -DIMENSION battery_voltage voltage absolute 1 100 -DIMENSION battery_voltage_high high absolute 1 100 -DIMENSION battery_voltage_low low absolute 1 100 -DIMENSION battery_voltage_nominal nominal absolute 1 100 - -CHART nut_$x.input_voltage '' "UPS Input Voltage" "Volts" input nut.input.voltage line $((nut_priority + 5)) $nut_update_every -DIMENSION input_voltage voltage absolute 1 100 -DIMENSION input_voltage_fault fault absolute 1 100 -DIMENSION input_voltage_nominal nominal absolute 1 100 - -CHART nut_$x.input_current '' "UPS Input Current" "Ampere" input nut.input.current line $((nut_priority + 6)) $nut_update_every -DIMENSION input_current_nominal nominal absolute 1 100 - -CHART nut_$x.input_frequency '' "UPS Input Frequency" "Hz" input nut.input.frequency line $((nut_priority + 7)) $nut_update_every -DIMENSION input_frequency frequency absolute 1 100 -DIMENSION input_frequency_nominal nominal absolute 1 100 - -CHART nut_$x.output_voltage '' "UPS Output Voltage" "Volts" output nut.output.voltage line $((nut_priority + 8)) $nut_update_every -DIMENSION output_voltage voltage absolute 1 100 - -CHART nut_$x.load '' "UPS Load" "percentage" ups nut.load area $((nut_priority)) $nut_update_every -DIMENSION load load absolute 1 100 - -CHART nut_$x.load_usage '' "UPS Load Usage" "Watts" ups nut.load_usage area $((nut_priority + 1)) $nut_update_every -DIMENSION load_usage load_usage absolute 1 100 - -CHART nut_$x.temp '' "UPS Temperature" "temperature" ups nut.temperature line $((nut_priority + 9)) $nut_update_every -DIMENSION temp temp absolute 1 100 -EOF - - if [ "${nut_clients_chart}" = "1" ]; then - cat << EOF2 -CHART nut_$x.clients '' "UPS Connected Clients" "clients" ups nut.clients area $((nut_priority + 10)) $nut_update_every -DIMENSION clients '' absolute 1 1 -EOF2 - fi - - done - - return 0 -} - -nut_update() { - # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see below). 
- - # do all the work to collect / calculate the values - # for each dimension - # remember: KEEP IT SIMPLE AND SHORT - - local i x - for i in "${!nut_ids[@]}"; do - x="${nut_ids[$i]}" - nut_get "$i" | awk " -BEGIN { - battery_charge = 0; - battery_runtime = 0; - battery_voltage = 0; - battery_voltage_high = 0; - battery_voltage_low = 0; - battery_voltage_nominal = 0; - input_voltage = 0; - input_voltage_fault = 0; - input_voltage_nominal = 0; - input_current_nominal = 0; - input_frequency = 0; - input_frequency_nominal = 0; - output_voltage = 0; - load = 0; - load_usage = 0; - nompower = 0; - temp = 0; - client = 0; - do_clients = ${nut_clients_chart}; -} -/^battery.charge: .*/ { battery_charge = \$2 * 100 }; -/^battery.runtime: .*/ { battery_runtime = \$2 * 100 }; -/^battery.voltage: .*/ { battery_voltage = \$2 * 100 }; -/^battery.voltage.high: .*/ { battery_voltage_high = \$2 * 100 }; -/^battery.voltage.low: .*/ { battery_voltage_low = \$2 * 100 }; -/^battery.voltage.nominal: .*/ { battery_voltage_nominal = \$2 * 100 }; -/^input.voltage: .*/ { input_voltage = \$2 * 100 }; -/^input.voltage.fault: .*/ { input_voltage_fault = \$2 * 100 }; -/^input.voltage.nominal: .*/ { input_voltage_nominal = \$2 * 100 }; -/^input.current.nominal: .*/ { input_current_nominal = \$2 * 100 }; -/^input.frequency: .*/ { input_frequency = \$2 * 100 }; -/^input.frequency.nominal: .*/ { input_frequency_nominal = \$2 * 100 }; -/^output.voltage: .*/ { output_voltage = \$2 * 100 }; -/^ups.load: .*/ { load = \$2 * 100 }; -/^ups.realpower.nominal: .*/ { nompower = \$2 }; -/^ups.temperature: .*/ { temp = \$2 * 100 }; -/^ups.connected_clients: .*/ { clients = \$2 }; -END { - { load_usage = nompower * load / 100 }; - - print \"BEGIN nut_$x.charge $1\"; - print \"SET battery_charge = \" battery_charge; - print \"END\" - - print \"BEGIN nut_$x.runtime $1\"; - print \"SET battery_runtime = \" battery_runtime; - print \"END\" - - print \"BEGIN nut_$x.battery_voltage $1\"; - print \"SET battery_voltage = \" battery_voltage; - print \"SET battery_voltage_high = \" battery_voltage_high; - print \"SET battery_voltage_low = \" battery_voltage_low; - print \"SET battery_voltage_nominal = \" battery_voltage_nominal; - print \"END\" - - print \"BEGIN nut_$x.input_voltage $1\"; - print \"SET input_voltage = \" input_voltage; - print \"SET input_voltage_fault = \" input_voltage_fault; - print \"SET input_voltage_nominal = \" input_voltage_nominal; - print \"END\" - - print \"BEGIN nut_$x.input_current $1\"; - print \"SET input_current_nominal = \" input_current_nominal; - print \"END\" - - print \"BEGIN nut_$x.input_frequency $1\"; - print \"SET input_frequency = \" input_frequency; - print \"SET input_frequency_nominal = \" input_frequency_nominal; - print \"END\" - - print \"BEGIN nut_$x.output_voltage $1\"; - print \"SET output_voltage = \" output_voltage; - print \"END\" - - print \"BEGIN nut_$x.load $1\"; - print \"SET load = \" load; - print \"END\" - - print \"BEGIN nut_$x.load_usage $1\"; - print \"SET load_usage = \" load_usage; - print \"END\" - - print \"BEGIN nut_$x.temp $1\"; - print \"SET temp = \" temp; - print \"END\" - - if(do_clients) { - print \"BEGIN nut_$x.clients $1\"; - print \"SET clients = \" clients; - print \"END\" - } -}" - # shellcheck disable=2181 - [ $? -ne 0 ] && unset "nut_ids[$i]" && error "failed to get values for '$i', disabling it." - done - - [ ${#nut_ids[@]} -eq 0 ] && error "no UPSes left active." 
&& return 1 - return 0 -} diff --git a/collectors/charts.d.plugin/nut/nut.conf b/collectors/charts.d.plugin/nut/nut.conf deleted file mode 100644 index b95ad9048c05f0..00000000000000 --- a/collectors/charts.d.plugin/nut/nut.conf +++ /dev/null @@ -1,33 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis -# GPL v3+ - -# a space separated list of UPS names -# if empty, the list returned by 'upsc -l' will be used -#nut_ups= - -# each line represents an alias for one UPS -# if empty, the FQDN will be used -#nut_names["FQDN1"]="alias" -#nut_names["FQDN2"]="alias" - -# how much time in seconds, to wait for nut to respond -#nut_timeout=2 - -# set this to 1, to enable another chart showing the number -# of UPS clients connected to upsd -#nut_clients_chart=1 - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#nut_update_every=2 - -# the charts priority on the dashboard -#nut_priority=90000 - -# the number of retries to do in case of failure -# before disabling the module -#nut_retries=10 diff --git a/collectors/charts.d.plugin/opensips/README.md b/collectors/charts.d.plugin/opensips/README.md deleted file mode 100644 index 1d7322140515fd..00000000000000 --- a/collectors/charts.d.plugin/opensips/README.md +++ /dev/null @@ -1,24 +0,0 @@ - - -# OpenSIPS collector - -## Configuration - -If using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), make sure `netdata-plugin-chartsd` is installed. - -Edit the `charts.d/opensips.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config charts.d/opensips.conf -``` - - diff --git a/collectors/charts.d.plugin/opensips/README.md b/collectors/charts.d.plugin/opensips/README.md new file mode 120000 index 00000000000000..bb85ba6d019f85 --- /dev/null +++ b/collectors/charts.d.plugin/opensips/README.md @@ -0,0 +1 @@ +integrations/opensips.md \ No newline at end of file diff --git a/collectors/charts.d.plugin/opensips/integrations/opensips.md b/collectors/charts.d.plugin/opensips/integrations/opensips.md new file mode 100644 index 00000000000000..8c88dba0b256db --- /dev/null +++ b/collectors/charts.d.plugin/opensips/integrations/opensips.md @@ -0,0 +1,192 @@ + + +# OpenSIPS + + + + + +Plugin: charts.d.plugin +Module: opensips + + + +## Overview + +Examine OpenSIPS metrics for insights into SIP server operations. Study call rates, error rates, and response times for reliable voice over IP services. + +The collector uses the `opensipsctl` command line utility to gather OpenSIPS metrics. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +The collector will attempt to call `opensipsctl` along with a default number of parameters, even without any configuration. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. 
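Before looking at the metric scopes below, it can help to inspect the raw statistics the collector parses. A minimal check, assuming `opensipsctl` is installed and reachable with the default options (newer OpenSIPS releases ship `opensips-cli` instead, in which case `opensips_cmd` and `opensips_opts` need adjusting):

```bash
# Print the statistics the collector parses (same command as the default
# `opensips_opts`); trim the output, as the full list is long.
opensipsctl fifo get_statistics all | head -n 20
```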
+ + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per OpenSIPS instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| opensips.dialogs_active | active, early | dialogs | +| opensips.users | registered, location, contacts, expires | users | +| opensips.registrar | accepted, rejected | registrations/s | +| opensips.transactions | UAS, UAC | transactions/s | +| opensips.core_rcv | requests, replies | queries/s | +| opensips.core_fwd | requests, replies | queries/s | +| opensips.core_drop | requests, replies | queries/s | +| opensips.core_err | requests, replies | queries/s | +| opensips.core_bad | bad_URIs_rcvd, unsupported_methods, bad_msg_hdr | queries/s | +| opensips.tm_replies | received, relayed, local | replies/s | +| opensips.transactions_status | 2xx, 3xx, 4xx, 5xx, 6xx | transactions/s | +| opensips.transactions_inuse | inuse | transactions | +| opensips.sl_replies | 1xx, 2xx, 3xx, 4xx, 5xx, 6xx, sent, error, ACKed | replies/s | +| opensips.dialogs | processed, expire, failed | dialogs/s | +| opensips.net_waiting | UDP, TCP | kilobytes | +| opensips.uri_checks | positive, negative | checks / sec | +| opensips.traces | requests, replies | traces / sec | +| opensips.shmem | total, used, real_used, max_used, free | kilobytes | +| opensips.shmem_fragment | fragments | fragments | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install charts.d plugin + +If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. + + +#### Required software + +The collector requires the `opensipsctl` to be installed. + + +### Configuration + +#### File + +The configuration file name for this integration is `charts.d/opensips.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config charts.d/opensips.conf +``` +#### Options + +The config file is sourced by the charts.d plugin. It's a standard bash file. + +The following collapsed table contains all the options that can be configured for the opensips collector. + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| opensips_opts | Specify parameters to the `opensipsctl` command. If the default value fails to get global status, set here whatever options are needed to connect to the opensips server. | fifo get_statistics all | no |
+| opensips_cmd | If `opensipsctl` is not in $PATH, specify its full path here. | | no |
+| opensips_timeout | How long to wait for `opensipsctl` to respond. | 2 | no |
+| opensips_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 5 | no |
+| opensips_priority | The charts priority on the dashboard. | 80000 | no |
+| opensips_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |
+
+ +#### Examples + +##### Custom `opensipsctl` command + +Set a custom path to the `opensipsctl` command + +```yaml +#opensips_opts="fifo get_statistics all" +opensips_cmd=/opt/opensips/bin/opensipsctl +#opensips_timeout=2 + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#opensips_update_every=5 + +# the charts priority on the dashboard +#opensips_priority=80000 + +# the number of retries to do in case of failure +# before disabling the module +#opensips_retries=10 + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `opensips` collector, run the `charts.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `charts.d.plugin` to debug the collector: + + ```bash + ./charts.d.plugin debug 1 opensips + ``` + + diff --git a/collectors/charts.d.plugin/opensips/metadata.yaml b/collectors/charts.d.plugin/opensips/metadata.yaml index 27f6632862265b..356de5615c2512 100644 --- a/collectors/charts.d.plugin/opensips/metadata.yaml +++ b/collectors/charts.d.plugin/opensips/metadata.yaml @@ -41,6 +41,9 @@ modules: setup: prerequisites: list: + - title: "Install charts.d plugin" + description: | + If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. - title: "Required software" description: "The collector requires the `opensipsctl` to be installed." configuration: diff --git a/collectors/charts.d.plugin/sensors/README.md b/collectors/charts.d.plugin/sensors/README.md deleted file mode 100644 index 0dbe96225b2806..00000000000000 --- a/collectors/charts.d.plugin/sensors/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# Linux machine sensors collector - -Use this collector when `lm-sensors` doesn't work on your device (e.g. for RPi temperatures). -For all other cases use the [Python collector](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/sensors), which supports multiple -jobs, is more efficient and performs calculations on top of the kernel provided values. - -This plugin will provide charts for all configured system sensors, by reading sensors directly from the kernel. -The values graphed are the raw hardware values of the sensors. - -The plugin will create Netdata charts for: - -1. **Temperature** -2. **Voltage** -3. **Current** -4. **Power** -5. **Fans Speed** -6. **Energy** -7. **Humidity** - -One chart for every sensor chip found and each of the above will be created. - -## Enable the collector - -If using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), make sure `netdata-plugin-chartsd` is installed. - -The `sensors` collector is disabled by default. - -To enable the collector, you need to edit the configuration file of `charts.d/sensors.conf`. You can do so by using the `edit config` script. 
- -> ### Info -> -> To edit configuration files in a safe way, we provide the [`edit config` script](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) located in your [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory) (typically is `/etc/netdata`) that creates the proper file and opens it in an editor automatically. -> It is recommended to use this way for configuring Netdata. -> -> Please also note that after most configuration changes you will need to [restart the Agent](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for the changes to take effect. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config charts.d.conf -``` - -You need to uncomment the regarding `sensors`, and set the value to `force`. - -```shell -# example=force -sensors=force -``` - -## Configuration - -Edit the `charts.d/sensors.conf` configuration file using `edit-config`: - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config charts.d/sensors.conf -``` - -This is the internal default for `charts.d/sensors.conf` - -```sh -# the directory the kernel keeps sensor data -sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices" - -# how deep in the tree to check for sensor data -sensors_sys_depth=10 - -# if set to 1, the script will overwrite internal -# script functions with code generated ones -# leave to 1, is faster -sensors_source_update=1 - -# how frequently to collect sensor data -# the default is to collect it at every iteration of charts.d -sensors_update_every= - -# array of sensors which are excluded -# the default is to include all -sensors_excluded=() -``` - ---- diff --git a/collectors/charts.d.plugin/sensors/README.md b/collectors/charts.d.plugin/sensors/README.md new file mode 120000 index 00000000000000..7e5a416c43abee --- /dev/null +++ b/collectors/charts.d.plugin/sensors/README.md @@ -0,0 +1 @@ +integrations/linux_sensors_sysfs.md \ No newline at end of file diff --git a/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md b/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md new file mode 100644 index 00000000000000..130352f61b52d9 --- /dev/null +++ b/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md @@ -0,0 +1,201 @@ + + +# Linux Sensors (sysfs) + + + + + +Plugin: charts.d.plugin +Module: sensors + + + +## Overview + +Use this collector when `lm-sensors` doesn't work on your device (e.g. for RPi temperatures). +For all other cases use the [Python collector](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/sensors), which supports multiple jobs, is more efficient and performs calculations on top of the kernel provided values." + + +It will provide charts for all configured system sensors, by reading sensors directly from the kernel. +The values graphed are the raw hardware values of the sensors. + + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, the collector will try to read entries under `/sys/devices` + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. 
+ +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per sensor chip + +Metrics related to sensor chips. Each chip provides its own set of the following metrics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| sensors.temp | {filename} | Celsius | +| sensors.volt | {filename} | Volts | +| sensors.curr | {filename} | Ampere | +| sensors.power | {filename} | Watt | +| sensors.fans | {filename} | Rotations / Minute | +| sensors.energy | {filename} | Joule | +| sensors.humidity | {filename} | Percent | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install charts.d plugin + +If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. + + +#### Enable the sensors collector + +The `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file. + +```bash +cd /etc/netdata # Replace this path with your Netdata config directory, if different +sudo ./edit-config charts.d.conf +``` + +Change the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. + + + +### Configuration + +#### File + +The configuration file name for this integration is `charts.d/sensors.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config charts.d/sensors.conf +``` +#### Options + +The config file is sourced by the charts.d plugin. It's a standard bash file. + +The following collapsed table contains all the options that can be configured for the sensors collector. + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| sensors_sys_dir | The directory where the kernel exposes sensor data. | /sys/devices | no |
+| sensors_sys_depth | How deep in the tree to check for sensor data. | 10 | no |
+| sensors_source_update | If set to 1, the script will overwrite internal script functions with code-generated ones. | 1 | no |
+| sensors_update_every | The data collection frequency. If unset, it will inherit the netdata update frequency. | 1 | no |
+| sensors_priority | The charts priority on the dashboard. | 90000 | no |
+| sensors_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |
+
+
+ +#### Examples + +##### Set sensors path depth + +Set a different sensors path depth + +```yaml +# the directory the kernel keeps sensor data +#sensors_sys_dir="/sys/devices" + +# how deep in the tree to check for sensor data +sensors_sys_depth=5 + +# if set to 1, the script will overwrite internal +# script functions with code generated ones +# leave to 1, is faster +#sensors_source_update=1 + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#sensors_update_every= + +# the charts priority on the dashboard +#sensors_priority=90000 + +# the number of retries to do in case of failure +# before disabling the module +#sensors_retries=10 + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `sensors` collector, run the `charts.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `charts.d.plugin` to debug the collector: + + ```bash + ./charts.d.plugin debug 1 sensors + ``` + + diff --git a/collectors/charts.d.plugin/sensors/metadata.yaml b/collectors/charts.d.plugin/sensors/metadata.yaml index 33beaad299ccae..47f6f4042e989c 100644 --- a/collectors/charts.d.plugin/sensors/metadata.yaml +++ b/collectors/charts.d.plugin/sensors/metadata.yaml @@ -44,7 +44,20 @@ modules: description: "" setup: prerequisites: - list: [] + list: + - title: "Install charts.d plugin" + description: | + If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. + - title: "Enable the sensors collector" + description: | + The `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file. + + ```bash + cd /etc/netdata # Replace this path with your Netdata config directory, if different + sudo ./edit-config charts.d.conf + ``` + + Change the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. configuration: file: name: charts.d/sensors.conf diff --git a/collectors/cups.plugin/README.md b/collectors/cups.plugin/README.md deleted file mode 100644 index 8652ec575116c8..00000000000000 --- a/collectors/cups.plugin/README.md +++ /dev/null @@ -1,68 +0,0 @@ - - -# Printers (cups.plugin) - -`cups.plugin` collects Common Unix Printing System (CUPS) metrics. - -## Prerequisites - -This plugin needs a running local CUPS daemon (`cupsd`). This plugin does not need any configuration. Supports cups since version 1.7. - -If you installed Netdata using our native packages, you will have to additionally install `netdata-plugin-cups` to use this plugin for data collection. It is not installed by default due to the large number of dependencies it requires. 
- -## Charts - -`cups.plugin` provides one common section `destinations` and one section per destination. - -> Destinations in CUPS represent individual printers or classes (collections or pools) of printers () - -The section `server` provides these charts: - -1. **destinations by state** - - - idle - - printing - - stopped - -2. **destinations by options** - - - total - - accepting jobs - - shared - -3. **total job number by status** - - - pending - - processing - - held - -4. **total job size by status** - - - pending - - processing - - held - -For each destination the plugin provides these charts: - -1. **job number by status** - - - pending - - held - - processing - -2. **job size by status** - - - pending - - held - - processing - -At the moment only job status pending, processing, and held are reported because we do not have a method to collect stopped, canceled, aborted and completed jobs which scales. - - diff --git a/collectors/cups.plugin/README.md b/collectors/cups.plugin/README.md new file mode 120000 index 00000000000000..e325706392dfdb --- /dev/null +++ b/collectors/cups.plugin/README.md @@ -0,0 +1 @@ +integrations/cups.md \ No newline at end of file diff --git a/collectors/cups.plugin/cups_plugin.c b/collectors/cups.plugin/cups_plugin.c index ce7f05d4d23973..827322066a9e2e 100644 --- a/collectors/cups.plugin/cups_plugin.c +++ b/collectors/cups.plugin/cups_plugin.c @@ -226,20 +226,8 @@ void reset_metrics() { } int main(int argc, char **argv) { - stderror = stderr; clocks_init(); - - // ------------------------------------------------------------------------ - // initialization of netdata plugin - - program_name = "cups.plugin"; - - // disable syslog - error_log_syslog = 0; - - // set errors flood protection to 100 logs per hour - error_log_errors_per_period = 100; - error_log_throttle_period = 3600; + nd_log_initialize_for_external_plugins("cups.plugin"); parse_command_line(argc, argv); diff --git a/collectors/cups.plugin/integrations/cups.md b/collectors/cups.plugin/integrations/cups.md new file mode 100644 index 00000000000000..a8ea5b15f6430d --- /dev/null +++ b/collectors/cups.plugin/integrations/cups.md @@ -0,0 +1,141 @@ + + +# CUPS + + + + + +Plugin: cups.plugin +Module: cups.plugin + + + +## Overview + +Monitor CUPS performance for achieving optimal printing system operations. Monitor job statuses, queue lengths, and error rates to ensure smooth printing tasks. + +The plugin uses CUPS shared library to connect and monitor the server. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs to access the server. Netdata sets permissions during installation time to reach the server through its library. + +### Default Behavior + +#### Auto-Detection + +The plugin detects when CUPS server is running and tries to connect to it. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per CUPS instance + + + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cups.dests_state | idle, printing, stopped | dests | +| cups.dests_option | total, acceptingjobs, shared | dests | +| cups.job_num | pending, held, processing | jobs | +| cups.job_size | pending, held, processing | KB | + +### Per destination + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cups.destination_job_num | pending, held, processing | jobs | +| cups.destination_job_size | pending, held, processing | KB | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Minimum setup + +The CUPS server must be installed and running. If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-cups`. + + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:cups]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 1 | no | +| command options | Additional parameters for the collector | | no | + +
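+
+This integration does not ship configuration examples (see below), but as a minimal sketch the `[plugin:cups]` section in `netdata.conf` could look like the following; the value is illustrative, not a recommendation:
+
+```ini
+[plugin:cups]
+    # query the local CUPS daemon every 2 seconds instead of the default 1
+    update every = 2
+```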
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/cups.plugin/metadata.yaml b/collectors/cups.plugin/metadata.yaml index a416d392e880bf..9ec2f4118fecd0 100644 --- a/collectors/cups.plugin/metadata.yaml +++ b/collectors/cups.plugin/metadata.yaml @@ -37,7 +37,7 @@ modules: prerequisites: list: - title: Minimum setup - description: "The CUPS server must be installed and running." + description: "The CUPS server must be installed and running. If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-cups`." configuration: file: name: "netdata.conf" diff --git a/collectors/debugfs.plugin/debugfs_plugin.c b/collectors/debugfs.plugin/debugfs_plugin.c index c189f908d16a7a..f693de96c1862a 100644 --- a/collectors/debugfs.plugin/debugfs_plugin.c +++ b/collectors/debugfs.plugin/debugfs_plugin.c @@ -159,14 +159,8 @@ static void debugfs_parse_args(int argc, char **argv) int main(int argc, char **argv) { - // debug_flags = D_PROCFILE; - stderror = stderr; - - // set the name for logging - program_name = "debugfs.plugin"; - - // disable syslog for debugfs.plugin - error_log_syslog = 0; + clocks_init(); + nd_log_initialize_for_external_plugins("debugfs.plugin"); netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX"); if (verify_netdata_host_prefix() == -1) @@ -241,6 +235,13 @@ int main(int argc, char **argv) netdata_log_info("all modules are disabled, exiting..."); return 1; } + + fprintf(stdout, "\n"); + fflush(stdout); + if (ferror(stdout) && errno == EPIPE) { + netdata_log_error("error writing to stdout: EPIPE. Exiting..."); + return 1; + } } fprintf(stdout, "EXIT\n"); diff --git a/collectors/debugfs.plugin/integrations/linux_zswap.md b/collectors/debugfs.plugin/integrations/linux_zswap.md new file mode 100644 index 00000000000000..44478454b0d01d --- /dev/null +++ b/collectors/debugfs.plugin/integrations/linux_zswap.md @@ -0,0 +1,138 @@ + + +# Linux ZSwap + + + + + +Plugin: debugfs.plugin +Module: /sys/kernel/debug/zswap + + + +## Overview + +Collects zswap performance metrics on Linux systems. + + +Parse data from `debugfs file. + +This collector is only supported on the following platforms: + +- Linux + +This collector only supports collecting metrics from a single instance of this integration. + +This integration requires read access to files under `/sys/kernel/debug/zswap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root. + + +### Default Behavior + +#### Auto-Detection + +Assuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether or not the system is using zswap. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + +Monitor the performance statistics of zswap. 
+ +### Per Linux ZSwap instance + +Global zswap performance metrics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.zswap_pool_compression_ratio | compression_ratio | ratio | +| system.zswap_pool_compressed_size | compressed_size | bytes | +| system.zswap_pool_raw_size | uncompressed_size | bytes | +| system.zswap_rejections | compress_poor, kmemcache_fail, alloc_fail, reclaim_fail | rejections/s | +| system.zswap_pool_limit_hit | limit | events/s | +| system.zswap_written_back_raw_bytes | written_back | bytes/s | +| system.zswap_same_filled_raw_size | same_filled | bytes | +| system.zswap_duplicate_entry | duplicate | entries/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### filesystem + +The debugfs filesystem must be mounted on your host for plugin to collect data. You can run the command-line (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. It is also recommended to modify your fstab (5) avoiding necessity to mount the filesystem before starting netdata. + + + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:debugfs]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update every | Data collection frequency. | 1 | no |
+| command options | Additional parameters for the collector. | | no |
+
+
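+
+Before relying on the options above, it can help to verify that the kernel actually exposes the zswap statistics this plugin reads. This is a sketch only; it assumes debugfs is mounted at its default location (see the Prerequisites section) and typically requires root:
+
+```bash
+# list the zswap counters under debugfs that the plugin parses
+sudo ls /sys/kernel/debug/zswap/
+```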
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/debugfs.plugin/integrations/power_capping.md b/collectors/debugfs.plugin/integrations/power_capping.md new file mode 100644 index 00000000000000..d4b7eb890dbf05 --- /dev/null +++ b/collectors/debugfs.plugin/integrations/power_capping.md @@ -0,0 +1,132 @@ + + +# Power Capping + + + + + +Plugin: debugfs.plugin +Module: intel_rapl + + + +## Overview + +Collects power capping performance metrics on Linux systems. + + +Parse data from `debugfs file. + +This collector is only supported on the following platforms: + +- Linux + +This collector only supports collecting metrics from a single instance of this integration. + +This integration requires read access to files under `/sys/devices/virtual/powercap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root. + + +### Default Behavior + +#### Auto-Detection + +Assuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether or not the system is using zswap. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + +Monitor the Intel RAPL zones Consumption. + +### Per Power Capping instance + +Global Intel RAPL zones. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cpu.powercap_intel_rapl_zone | Power | Watts | +| cpu.powercap_intel_rapl_subzones | dram, core, uncore | Watts | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### filesystem + +The debugfs filesystem must be mounted on your host for plugin to collect data. You can run the command-line (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. It is also recommended to modify your fstab (5) avoiding necessity to mount the filesystem before starting netdata. + + + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:debugfs]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update every | Data collection frequency. | 1 | no |
+| command options | Additional parameters for the collector. | | no |
+
+
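+
+As a quick sanity check, you can confirm that the kernel exposes RAPL zones under the sysfs tree this plugin reads. This is a sketch; the zone layout varies by CPU and platform, and reading the counters may require root:
+
+```bash
+# list the RAPL zone name files and energy counters the plugin uses
+find /sys/devices/virtual/powercap \( -name name -o -name energy_uj \) 2>/dev/null
+```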
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md b/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md new file mode 100644 index 00000000000000..ef287bc3011b35 --- /dev/null +++ b/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md @@ -0,0 +1,136 @@ + + +# System Memory Fragmentation + + + + + +Plugin: debugfs.plugin +Module: /sys/kernel/debug/extfrag + + + +## Overview + +Collects memory fragmentation statistics from the Linux kernel + +Parse data from `debugfs` file + +This collector is only supported on the following platforms: + +- Linux + +This collector only supports collecting metrics from a single instance of this integration. + +This integration requires read access to files under `/sys/kernel/debug/extfrag`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root. + + +### Default Behavior + +#### Auto-Detection + +Assuming that debugfs is mounted and the required permissions are available, this integration will automatically run by default. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + +Monitor the overall memory fragmentation of the system. + +### Per node + +Memory fragmentation statistics for each NUMA node in the system. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| numa_node | The NUMA node the metrics are associated with. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.fragmentation_index_dma | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index | +| mem.fragmentation_index_dma32 | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index | +| mem.fragmentation_index_normal | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### filesystem + +The debugfs filesystem must be mounted on your host for plugin to collect data. You can run the command-line (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. It is also recommended to modify your fstab (5) avoiding necessity to mount the filesystem before starting netdata. + + + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:debugfs]` section within that file. + +The file format is a modified INI syntax. 
The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update every | Data collection frequency. | 1 | no |
+| command options | Additional parameters for the collector. | | no |
+
+
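+
+There are no packaged configuration examples for this integration (see below), but as a minimal sketch the `[plugin:debugfs]` section that controls this plugin could look like this in `netdata.conf`; the value is illustrative only:
+
+```ini
+[plugin:debugfs]
+    # collect the debugfs-based metrics (memory fragmentation, zswap, power capping) every 5 seconds
+    update every = 5
+```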
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/debugfs.plugin/sys_devices_virtual_powercap.c b/collectors/debugfs.plugin/sys_devices_virtual_powercap.c index 5f22b19e227974..ee261c27f65bcb 100644 --- a/collectors/debugfs.plugin/sys_devices_virtual_powercap.c +++ b/collectors/debugfs.plugin/sys_devices_virtual_powercap.c @@ -151,7 +151,7 @@ int do_sys_devices_virtual_powercap(int update_every, const char *name __maybe_u update_every); fprintf(stdout, - "CLABEL 'zone' '%s' 0\n" + "CLABEL 'zone' '%s' 1\n" "CLABEL_COMMIT\n", zone->name); @@ -171,7 +171,7 @@ int do_sys_devices_virtual_powercap(int update_every, const char *name __maybe_u update_every); fprintf(stdout, - "CLABEL 'zone' '%s' 0\n" + "CLABEL 'zone' '%s' 1\n" "CLABEL_COMMIT\n", zone->name); @@ -186,7 +186,7 @@ int do_sys_devices_virtual_powercap(int update_every, const char *name __maybe_u if(get_measurement(zone->path, &zone->energy_uj)) { fprintf(stdout, "BEGIN '%s'\n" - "SET power = %lld\n" + "SET power = %llu\n" "END\n" , zone->zone_chart_id , zone->energy_uj); @@ -200,7 +200,7 @@ int do_sys_devices_virtual_powercap(int update_every, const char *name __maybe_u for (struct zone_t *subzone = zone->subzones; subzone; subzone = subzone->next) { if(get_measurement(subzone->path, &subzone->energy_uj)) { fprintf(stdout, - "SET '%s' = %lld\n", + "SET '%s' = %llu\n", subzone->name, subzone->energy_uj); } diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md deleted file mode 100644 index 5ca1090fdd5b1e..00000000000000 --- a/collectors/diskspace.plugin/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# Monitor disk (diskspace.plugin) - -This plugin monitors the disk space usage of mounted disks, under Linux. The plugin requires Netdata to have execute/search permissions on the mount point itself, as well as each component of the absolute path to the mount point. - -Two charts are available for every mount: - -- Disk Space Usage -- Disk Files (inodes) Usage - -## configuration - -Simple patterns can be used to exclude mounts from showed statistics based on path or filesystem. By default read-only mounts are not displayed. To display them `yes` should be set for a chart instead of `auto`. - -By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after Netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). - -Netdata will try to detect mounts that are duplicates (i.e. from the same device), or binds, and will not display charts for them, as the device is usually already monitored. - -To configure this plugin, you need to edit the configuration file `netdata.conf`. You can do so by using the `edit config` script. - -> ### Info -> -> To edit configuration files in a safe way, we provide the [`edit config` script](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) located in your [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory) (typically is `/etc/netdata`) that creates the proper file and opens it in an editor automatically. -> It is recommended to use this way for configuring Netdata. 
-> -> Please also note that after most configuration changes you will need to [restart the Agent](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for the changes to take effect. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config netdata.conf -``` - -You can enable the effect of each line by uncommenting it. - -You can set `yes` for a chart instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins. - -```conf -[plugin:proc:diskspace] - # remove charts of unmounted disks = yes - # update every = 1 - # check for new mount points every = 15 - # exclude space metrics on paths = /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* - # exclude space metrics on filesystems = *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs - # space usage for all disks = auto - # inodes usage for all disks = auto -``` - -Charts can be enabled/disabled for every mount separately, just look for the name of the mount after `[plugin:proc:diskspace:`. - -```conf -[plugin:proc:diskspace:/] - # space usage = auto - # inodes usage = auto -``` - -> for disks performance monitoring, see the `proc` plugin, [here](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md#monitoring-disks) diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md new file mode 120000 index 00000000000000..c9f4e1c5e3e4fc --- /dev/null +++ b/collectors/diskspace.plugin/README.md @@ -0,0 +1 @@ +integrations/disk_space.md \ No newline at end of file diff --git a/collectors/diskspace.plugin/integrations/disk_space.md b/collectors/diskspace.plugin/integrations/disk_space.md new file mode 100644 index 00000000000000..1c937ed7fd8fd0 --- /dev/null +++ b/collectors/diskspace.plugin/integrations/disk_space.md @@ -0,0 +1,140 @@ + + +# Disk space + + + + + +Plugin: diskspace.plugin +Module: diskspace.plugin + + + +## Overview + +Monitor Disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +The plugin reads data from `/proc/self/mountinfo` and `/proc/diskstats file`. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per mount point + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| mount_point | Path used to mount a filesystem | +| filesystem | The filesystem used to format a partition. | +| mount_root | Root directory where mount points are present. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| disk.space | avail, used, reserved_for_root | GiB | +| disk.inodes | avail, used, reserved_for_root | inodes | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization | +| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + +You can also specify per mount point `[plugin:proc:diskspace:mountpoint]` + +
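+
+For example, to control the charts of a single mount point you could add a section like the sketch below (the `/home` path is just an illustration), using the same `space usage` and `inodes usage` options listed in the table that follows:
+
+```ini
+[plugin:proc:diskspace:/home]
+    # force both charts for this mount point instead of relying on auto-detection
+    space usage = yes
+    inodes usage = yes
+```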
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update every | Data collection frequency. | 1 | no |
+| remove charts of unmounted disks | Remove the charts when a device is unmounted from the host. | yes | no |
+| check for new mount points every | How frequently to parse the proc files and look for new mount points. | 15 | no |
+| exclude space metrics on paths | Do not show metrics (charts) for the listed paths. This option accepts netdata simple patterns. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |
+| exclude space metrics on filesystems | Do not show metrics (charts) for the listed filesystems. This option accepts netdata simple patterns. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |
+| exclude inode metrics on filesystems | Do not show inode metrics (charts) for the listed filesystems. This option accepts netdata simple patterns. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |
+| space usage for all disks | Define whether the plugin shows space usage metrics. When set to `auto`, the plugin shows the charts if it can access the information and the filesystem or path was not excluded by the options above. | auto | no |
+| inodes usage for all disks | Define whether the plugin shows inode usage metrics. When set to `auto`, the plugin shows the charts if it can access the information and the filesystem or path was not excluded by the options above. | auto | no |
+
+
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/diskspace.plugin/plugin_diskspace.c b/collectors/diskspace.plugin/plugin_diskspace.c index 73236a19e2e051..94257810c36b2d 100644 --- a/collectors/diskspace.plugin/plugin_diskspace.c +++ b/collectors/diskspace.plugin/plugin_diskspace.c @@ -9,6 +9,8 @@ #define DEFAULT_EXCLUDED_FILESYSTEMS_INODES "msdosfs msdos vfat overlayfs aufs* *unionfs" #define CONFIG_SECTION_DISKSPACE "plugin:proc:diskspace" +#define RRDFUNCTIONS_DISKSPACE_HELP "View mount point statistics" + #define MAX_STAT_USEC 10000LU #define SLOW_UPDATE_EVERY 5 @@ -42,7 +44,12 @@ struct mount_point_metadata { int updated; int slow; - DICTIONARY *chart_labels; + bool function_ready; + + STRING *filesystem; + STRING *mountroot; + + RRDLABELS *chart_labels; size_t collected; // the number of times this has been collected @@ -59,7 +66,7 @@ struct mount_point_metadata { static DICTIONARY *dict_mountpoints = NULL; -#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(st) +#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete___safe_from_collector_thread(st); (st) = NULL; } } while(st) int mount_point_cleanup(const char *name, void *entry, int slow) { (void)name; @@ -76,10 +83,17 @@ int mount_point_cleanup(const char *name, void *entry, int slow) { } if(likely(cleanup_mount_points && mp->collected)) { + mp->function_ready = false; mp->collected = 0; mp->updated = 0; mp->shown_error = 0; + string_freez(mp->filesystem); + string_freez(mp->mountroot); + + rrdset_obsolete_and_pointer_null(mp->st_space); + rrdset_obsolete_and_pointer_null(mp->st_inodes); + mp->rd_space_avail = NULL; mp->rd_space_used = NULL; mp->rd_space_reserved = NULL; @@ -87,9 +101,6 @@ int mount_point_cleanup(const char *name, void *entry, int slow) { mp->rd_inodes_avail = NULL; mp->rd_inodes_used = NULL; mp->rd_inodes_reserved = NULL; - - rrdset_obsolete_and_pointer_null(mp->st_space); - rrdset_obsolete_and_pointer_null(mp->st_inodes); } return 0; @@ -214,7 +225,7 @@ static void calculate_values_and_show_charts( m->st_space = rrdset_find_active_bytype_localhost("disk_space", disk); if(unlikely(!m->st_space || m->st_space->update_every != update_every)) { char title[4096 + 1]; - snprintfz(title, 4096, "Disk Space Usage"); + snprintfz(title, sizeof(title) - 1, "Disk Space Usage"); m->st_space = rrdset_create_localhost( "disk_space" , disk @@ -254,7 +265,7 @@ static void calculate_values_and_show_charts( m->st_inodes = rrdset_find_active_bytype_localhost("disk_inodes", disk); if(unlikely(!m->st_inodes) || m->st_inodes->update_every != update_every) { char title[4096 + 1]; - snprintfz(title, 4096, "Disk Files (inodes) Usage"); + snprintfz(title, sizeof(title) - 1, "Disk Files (inodes) Usage"); m->st_inodes = rrdset_create_localhost( "disk_inodes" , disk @@ -286,6 +297,8 @@ static void calculate_values_and_show_charts( rendered++; } + m->function_ready = rendered > 0; + if(likely(rendered)) m->collected++; } @@ -330,22 +343,9 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) { dict_mountpoints = dictionary_create_advanced(DICT_OPTION_NONE, &dictionary_stats_category_collectors, 0); } -#ifdef NETDATA_SKIP_IF_NOT_COLLECT - if(unlikely(simple_pattern_matches(excluded_mountpoints, mi->mount_point))) { - netdata_log_debug(D_COLLECTOR, "DISKSPACE: Skipping mount point '%s' (disk '%s', filesystem '%s', root '%s') because it is excluded by configuration.", - mi->mount_point, - disk, - 
mi->filesystem?mi->filesystem:"", - mi->root?mi->root:""); - return; - } -#endif - struct mount_point_metadata *m = dictionary_get(dict_mountpoints, mi->mount_point); if(unlikely(!m)) { int slow = 0; - char var_name[4096 + 1]; - snprintfz(var_name, 4096, "plugin:proc:diskspace:%s", mi->mount_point); int def_space = config_get_boolean_ondemand(CONFIG_SECTION_DISKSPACE, "space usage for all disks", CONFIG_BOOLEAN_AUTO); int def_inodes = config_get_boolean_ondemand(CONFIG_SECTION_DISKSPACE, "inodes usage for all disks", CONFIG_BOOLEAN_AUTO); @@ -396,8 +396,16 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) { slow = 1; } - do_space = config_get_boolean_ondemand(var_name, "space usage", def_space); - do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", def_inodes); + char var_name[4096 + 1]; + snprintfz(var_name, 4096, "plugin:proc:diskspace:%s", mi->mount_point); + + do_space = def_space; + do_inodes = def_inodes; + + if (config_exists(var_name, "space usage")) + do_space = config_get_boolean_ondemand(var_name, "space usage", def_space); + if (config_exists(var_name, "inodes usage")) + do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", def_inodes); struct mount_point_metadata mp = { .do_space = do_space, @@ -419,6 +427,9 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) { .rd_inodes_reserved = NULL }; + mp.filesystem = string_strdupz(mi->filesystem); + mp.mountroot = string_strdupz(mi->root); + mp.chart_labels = rrdlabels_create(); rrdlabels_add(mp.chart_labels, "mount_point", mi->mount_point, RRDLABEL_SRC_AUTO); rrdlabels_add(mp.chart_labels, "filesystem", mi->filesystem, RRDLABEL_SRC_AUTO); @@ -625,6 +636,228 @@ static void diskspace_main_cleanup(void *ptr) { #error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 3 #endif +int diskspace_function_mount_points(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused, + void *collector_data __maybe_unused, + rrd_function_result_callback_t result_cb, void *result_cb_data, + rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, + rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused, + void *register_canceller_cb_data __maybe_unused) { + + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + + buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost)); + buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_string(wb, "type", "table"); + buffer_json_member_add_time_t(wb, "update_every", 1); + buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_DISKSPACE_HELP); + buffer_json_member_add_array(wb, "data"); + + double max_space_util = 0.0; + double max_space_avail = 0.0; + double max_space_used = 0.0; + double max_space_reserved = 0.0; + + double max_inodes_util = 0.0; + double max_inodes_avail = 0.0; + double max_inodes_used = 0.0; + double max_inodes_reserved = 0.0; + + struct mount_point_metadata *mp; + dfe_start_write(dict_mountpoints, mp) { + if (!mp->function_ready) + continue; + + buffer_json_add_array_item_array(wb); + + buffer_json_add_array_item_string(wb, mp_dfe.name); + buffer_json_add_array_item_string(wb, string2str(mp->filesystem)); + buffer_json_add_array_item_string(wb, string2str(mp->mountroot)); + + double space_avail = rrddim_get_last_stored_value(mp->rd_space_avail, &max_space_avail, 1.0); + double space_used = 
rrddim_get_last_stored_value(mp->rd_space_used, &max_space_used, 1.0); + double space_reserved = rrddim_get_last_stored_value(mp->rd_space_reserved, &max_space_reserved, 1.0); + double inodes_avail = rrddim_get_last_stored_value(mp->rd_inodes_avail, &max_inodes_avail, 1.0); + double inodes_used = rrddim_get_last_stored_value(mp->rd_inodes_used, &max_inodes_used, 1.0); + double inodes_reserved = rrddim_get_last_stored_value(mp->rd_inodes_reserved, &max_inodes_reserved, 1.0); + + double space_util = NAN; + if (!isnan(space_avail) && !isnan(space_used)) { + space_util = space_avail + space_used > 0 ? space_used * 100.0 / (space_avail + space_used) : 0; + max_space_util = MAX(max_space_util, space_util); + } + double inodes_util = NAN; + if (!isnan(inodes_avail) && !isnan(inodes_used)) { + inodes_util = inodes_avail + inodes_used > 0 ? inodes_used * 100.0 / (inodes_avail + inodes_used) : 0; + max_inodes_util = MAX(max_inodes_util, inodes_util); + } + + buffer_json_add_array_item_double(wb, space_util); + buffer_json_add_array_item_double(wb, space_avail); + buffer_json_add_array_item_double(wb, space_used); + buffer_json_add_array_item_double(wb, space_reserved); + + buffer_json_add_array_item_double(wb, inodes_util); + buffer_json_add_array_item_double(wb, inodes_avail); + buffer_json_add_array_item_double(wb, inodes_used); + buffer_json_add_array_item_double(wb, inodes_reserved); + + buffer_json_array_close(wb); + } + dfe_done(mp); + + buffer_json_array_close(wb); // data + buffer_json_member_add_object(wb, "columns"); + { + size_t field_id = 0; + + buffer_rrdf_table_add_field(wb, field_id++, "Mountpoint", "Mountpoint Name", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY | RRDF_FIELD_OPTS_FULL_WIDTH, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Filesystem", "Mountpoint Filesystem", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Root", "Mountpoint Root", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "Used%", "Space Utilization", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "%", max_space_util, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Avail", "Space Avail", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "GiB", max_space_avail, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Used", "Space Used", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "GiB", max_space_used, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + buffer_rrdf_table_add_field(wb, 
field_id++, "Reserved", "Space Reserved for root", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "GiB", max_space_reserved, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "iUsed%", "Inodes Utilization", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "%", max_inodes_util, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_NONE, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "iAvail", "Inodes Avail", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "inodes", max_inodes_avail, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_NONE, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "iUsed", "Inodes Used", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "inodes", max_inodes_used, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_NONE, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "iReserved", "Inodes Reserved for root", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "inodes", max_inodes_reserved, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_NONE, + NULL); + } + + buffer_json_object_close(wb); // columns + buffer_json_member_add_string(wb, "default_sort_column", "Used%"); + + buffer_json_member_add_object(wb, "charts"); + { + buffer_json_member_add_object(wb, "Utilization"); + { + buffer_json_member_add_string(wb, "name", "Utilization"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Used%"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "Usage"); + { + buffer_json_member_add_string(wb, "name", "Usage"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Avail"); + buffer_json_add_array_item_string(wb, "Used"); + buffer_json_add_array_item_string(wb, "Reserved"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "Inodes"); + { + buffer_json_member_add_string(wb, "name", "Inodes"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "iAvail"); + buffer_json_add_array_item_string(wb, "iUsed"); + buffer_json_add_array_item_string(wb, "iReserved"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); // charts + + buffer_json_member_add_array(wb, "default_charts"); + { + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "Utilization"); + buffer_json_add_array_item_string(wb, "Mountpoint"); + buffer_json_array_close(wb); + + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "Usage"); + buffer_json_add_array_item_string(wb, "Mountpoint"); + buffer_json_array_close(wb); + } + buffer_json_array_close(wb); + + buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1); + buffer_json_finalize(wb); 
+ + int response = HTTP_RESP_OK; + if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) { + buffer_flush(wb); + response = HTTP_RESP_CLIENT_CLOSED_REQUEST; + } + + if(result_cb) + result_cb(wb, response, result_cb_data); + + return response; +} + void *diskspace_main(void *ptr) { worker_register("DISKSPACE"); worker_register_job_name(WORKER_JOB_MOUNTINFO, "mountinfo"); @@ -632,6 +865,7 @@ void *diskspace_main(void *ptr) { worker_register_job_name(WORKER_JOB_CLEANUP, "cleanup"); rrd_collector_started(); + rrd_function_add(localhost, NULL, "mount-points", 10, RRDFUNCTIONS_DISKSPACE_HELP, true, diskspace_function_mount_points, NULL); netdata_thread_cleanup_push(diskspace_main_cleanup, ptr); diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md index fb036a5aa3939f..06915ea5218ea2 100644 --- a/collectors/ebpf.plugin/README.md +++ b/collectors/ebpf.plugin/README.md @@ -261,7 +261,7 @@ You can also enable the following eBPF programs: - `swap` : This eBPF program creates charts that show information about swap access. - `mdflush`: This eBPF program creates charts that show information about - `sync`: Monitor calls to syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2). -- `network viewer`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the +- `socket`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the bandwidth consumed by each. multi-device software flushes. - `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions. @@ -302,12 +302,13 @@ are divided in the following sections: #### `[network connections]` -You can configure the information shown on `outbound` and `inbound` charts with the settings in this section. +You can configure the information shown with function `ebpf_socket` using the settings in this section. ```conf [network connections] - maximum dimensions = 500 + enabled = yes resolve hostname ips = no + resolve service names = yes ports = 1-1024 !145 !domain hostnames = !example.com ips = !127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7 @@ -318,24 +319,23 @@ write `ports = 19999`, Netdata will collect only connections for itself. The `ho [simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md). The `ports`, and `ips` settings accept negation (`!`) to deny specific values or asterisk alone to define all values. -In the above example, Netdata will collect metrics for all ports between 1 and 443, with the exception of 53 (domain) -and 145. +In the above example, Netdata will collect metrics for all ports between `1` and `1024`, with the exception of `53` (domain) +and `145`. The following options are available: +- `enabled`: Disable network connections monitoring. This can affect directly some funcion output. +- `resolve hostname ips`: Enable resolving IPs to hostnames. It is disabled by default because it can be too slow. +- `resolve service names`: Convert destination ports into service names, for example, port `53` protocol `UDP` becomes `domain`. + all names are read from /etc/services. - `ports`: Define the destination ports for Netdata to monitor. - `hostnames`: The list of hostnames that can be resolved to an IP address. - `ips`: The IP or range of IPs that you want to monitor. You can use IPv4 or IPv6 addresses, use dashes to define a - range of IPs, or use CIDR values. 
By default, only data for private IP addresses is collected, but this can - be changed with the `ips` setting. + range of IPs, or use CIDR values. -By default, Netdata displays up to 500 dimensions on network connection charts. If there are more possible dimensions, -they will be bundled into the `other` dimension. You can increase the number of shown dimensions by changing -the `maximum dimensions` setting. - -The dimensions for the traffic charts are created using the destination IPs of the sockets by default. This can be -changed setting `resolve hostname ips = yes` and restarting Netdata, after this Netdata will create dimensions using -the `hostnames` every time that is possible to resolve IPs to their hostnames. +By default the traffic table is created using the destination IPs and ports of the sockets. This can be +changed, so that Netdata uses service names (if possible), by specifying `resolve service name = yes` in the configuration +section. #### `[service name]` @@ -990,13 +990,15 @@ shows how the lockdown module impacts `ebpf.plugin` based on the selected option If you or your distribution compiled the kernel with the last combination, your system cannot load shared libraries required to run `ebpf.plugin`. -## Function +## Functions + +### ebpf_thread The eBPF plugin has a [function](https://github.com/netdata/netdata/blob/master/docs/cloud/netdata-functions.md) named `ebpf_thread` that controls its internal threads and helps to reduce the overhead on host. Using the function you can run the plugin with all threads disabled and enable them only when you want to take a look in specific areas. -### List threads +#### List threads To list all threads status you can query directly the endpoint function: @@ -1006,7 +1008,7 @@ It is also possible to query a specific thread adding keyword `thread` and threa `http://localhost:19999/api/v1/function?function=ebpf_thread%20thread:mount` -### Enable thread +#### Enable thread It is possible to enable a specific thread using the keyword `enable`: @@ -1019,14 +1021,14 @@ after the thread name: in this example thread `mount` will run during 600 seconds (10 minutes). -### Disable thread +#### Disable thread It is also possible to stop any thread running using the keyword `disable`. For example, to disable `cachestat` you can request: `http://localhost:19999/api/v1/function?function=ebpf_thread%20disable:cachestat` -### Debugging threads +#### Debugging threads You can verify the impact of threads on the host by running the [ebpf_thread_function.sh](https://github.com/netdata/netdata/blob/master/tests/ebpf/ebpf_thread_function.sh) @@ -1036,3 +1038,34 @@ You can check the results of having threads running on your environment in the N dashboard Threads running. + +### ebpf_socket + +The eBPF plugin has a [function](https://github.com/netdata/netdata/blob/master/docs/cloud/netdata-functions.md) named +`ebpf_socket` that shows the current status of open sockets on host. + +#### Families + +The plugin shows by default sockets for IPV4 and IPV6, but it is possible to select a specific family by passing the +family as an argument: + +`http://localhost:19999/api/v1/function?function=ebpf_socket%20family:IPV4` + +#### Resolve + +The plugin resolves ports to service names by default. You can show the port number by disabling the name resolution: + +`http://localhost:19999/api/v1/function?function=ebpf_socket%20resolve:NO` + +#### CIDR + +The plugin shows connections for all possible destination IPs by default. 
You can limit the range by specifying the CIDR: + +`http://localhost:19999/api/v1/function?function=ebpf_socket%20cidr:192.168.1.0/24` + +#### PORT + +The plugin shows connections for all possible ports by default. You can limit the range by specifying a port or range +of ports: + +`http://localhost:19999/api/v1/function?function=ebpf_socket%20port:1-1024` diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c index 844047305c9a5e..381bf5718cf407 100644 --- a/collectors/ebpf.plugin/ebpf.c +++ b/collectors/ebpf.plugin/ebpf.c @@ -49,176 +49,258 @@ struct netdata_static_thread cgroup_integration_thread = { }; ebpf_module_t ebpf_modules[] = { - { .thread_name = "process", .config_name = "process", .thread_description = NETDATA_EBPF_MODULE_PROCESS_DESC, - .enabled = 0, .start_routine = ebpf_process_thread, + { .info = {.thread_name = "process", + .config_name = "process", + .thread_description = NETDATA_EBPF_MODULE_PROCESS_DESC}, + .functions = {.start_routine = ebpf_process_thread, + .apps_routine = ebpf_process_create_apps_charts, + .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = ebpf_process_create_apps_charts, .maps = NULL, - .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &process_config, + .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &process_config, .config_file = NETDATA_PROCESS_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10 | NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0 }, - { .thread_name = "socket", .config_name = "socket", .thread_description = NETDATA_EBPF_SOCKET_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_socket_thread, + { .info = {.thread_name = "socket", + .config_name = "socket", + .thread_description = NETDATA_EBPF_SOCKET_MODULE_DESC}, + .functions = {.start_routine = ebpf_socket_thread, + .apps_routine = ebpf_socket_create_apps_charts, + .fnct_routine = ebpf_socket_read_open_connections, + .fcnt_name = EBPF_FUNCTION_SOCKET, + .fcnt_desc = EBPF_PLUGIN_SOCKET_FUNCTION_DESCRIPTION, + .fcnt_thread_chart_name = NULL, + .fcnt_thread_lifetime_name = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = ebpf_socket_create_apps_charts, .maps = NULL, + .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &socket_config, .config_file = NETDATA_NETWORK_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = socket_targets, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = "cachestat", .config_name = "cachestat", .thread_description = NETDATA_EBPF_CACHESTAT_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_cachestat_thread, + { .info = {.thread_name = "cachestat", 
.config_name = "cachestat", .thread_description = NETDATA_EBPF_CACHESTAT_MODULE_DESC}, + .functions = {.start_routine = ebpf_cachestat_thread, + .apps_routine = ebpf_cachestat_create_apps_charts, + .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = ebpf_cachestat_create_apps_charts, .maps = cachestat_maps, - .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &cachestat_config, + .maps = cachestat_maps, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &cachestat_config, .config_file = NETDATA_CACHESTAT_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18| NETDATA_V5_4 | NETDATA_V5_14 | NETDATA_V5_15 | NETDATA_V5_16, .load = EBPF_LOAD_LEGACY, .targets = cachestat_targets, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = "sync", .config_name = "sync", .thread_description = NETDATA_EBPF_SYNC_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_sync_thread, + { .info = {.thread_name = "sync", + .config_name = "sync", + .thread_description = NETDATA_EBPF_SYNC_MODULE_DESC}, + .functions = {.start_routine = ebpf_sync_thread, + .apps_routine = NULL, + .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .maps = NULL, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &sync_config, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &sync_config, .config_file = NETDATA_SYNC_CONFIG_FILE, // All syscalls have the same kernels .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = sync_targets, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = "dc", .config_name = "dc", .thread_description = NETDATA_EBPF_DC_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_dcstat_thread, + { .info = {.thread_name = "dc", + .config_name = "dc", + .thread_description = NETDATA_EBPF_DC_MODULE_DESC}, + .functions = {.start_routine = ebpf_dcstat_thread, + .apps_routine = ebpf_dcstat_create_apps_charts, + .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = ebpf_dcstat_create_apps_charts, .maps = dcstat_maps, + .maps = dcstat_maps, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &dcstat_config, .config_file = NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = dc_targets, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { 
.thread_name = "swap", .config_name = "swap", .thread_description = NETDATA_EBPF_SWAP_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_swap_thread, + { .info = {.thread_name = "swap", .config_name = "swap", .thread_description = NETDATA_EBPF_SWAP_MODULE_DESC}, + .functions = {.start_routine = ebpf_swap_thread, + .apps_routine = ebpf_swap_create_apps_charts, + .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = ebpf_swap_create_apps_charts, .maps = NULL, + .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &swap_config, .config_file = NETDATA_DIRECTORY_SWAP_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = swap_targets, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = "vfs", .config_name = "vfs", .thread_description = NETDATA_EBPF_VFS_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_vfs_thread, + { .info = {.thread_name = "vfs", + .config_name = "vfs", + .thread_description = NETDATA_EBPF_VFS_MODULE_DESC}, + .functions = {.start_routine = ebpf_vfs_thread, + .apps_routine = ebpf_vfs_create_apps_charts, + .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = ebpf_vfs_create_apps_charts, .maps = NULL, + .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &vfs_config, .config_file = NETDATA_DIRECTORY_VFS_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = vfs_targets, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = "filesystem", .config_name = "filesystem", .thread_description = NETDATA_EBPF_FS_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_filesystem_thread, + { .info = {.thread_name = "filesystem", .config_name = "filesystem", .thread_description = NETDATA_EBPF_FS_MODULE_DESC}, + .functions = {.start_routine = ebpf_filesystem_thread, + .apps_routine = NULL, + .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fs_config, + .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fs_config, .config_file = NETDATA_FILESYSTEM_CONFIG_FILE, //We are setting kernels as zero, because we load eBPF programs according the kernel running. 
.kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = "disk", .config_name = "disk", .thread_description = NETDATA_EBPF_DISK_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_disk_thread, + { .info = {.thread_name = "disk", + .config_name = "disk", + .thread_description = NETDATA_EBPF_DISK_MODULE_DESC}, + .functions = {.start_routine = ebpf_disk_thread, + .apps_routine = NULL, + .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &disk_config, + .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &disk_config, .config_file = NETDATA_DISK_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = "mount", .config_name = "mount", .thread_description = NETDATA_EBPF_MOUNT_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_mount_thread, + { .info = {.thread_name = "mount", + .config_name = "mount", + .thread_description = NETDATA_EBPF_MOUNT_MODULE_DESC}, + .functions = {.start_routine = ebpf_mount_thread, + .apps_routine = NULL, + .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mount_config, + .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mount_config, .config_file = NETDATA_MOUNT_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = mount_targets, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = "fd", .config_name = "fd", .thread_description = NETDATA_EBPF_FD_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_fd_thread, + { .info = { .thread_name = "fd", + .config_name = "fd", + .thread_description = NETDATA_EBPF_FD_MODULE_DESC}, + .functions = {.start_routine = ebpf_fd_thread, + .apps_routine = ebpf_fd_create_apps_charts, + .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = ebpf_fd_create_apps_charts, .maps = NULL, + .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fd_config, .config_file = NETDATA_FD_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_11 | NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, 
.targets = fd_targets, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = "hardirq", .config_name = "hardirq", .thread_description = NETDATA_EBPF_HARDIRQ_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_hardirq_thread, + { .info = { .thread_name = "hardirq", + .config_name = "hardirq", + .thread_description = NETDATA_EBPF_HARDIRQ_MODULE_DESC}, + .functions = {.start_routine = ebpf_hardirq_thread, + .apps_routine = NULL, + .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &hardirq_config, + .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &hardirq_config, .config_file = NETDATA_HARDIRQ_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = "softirq", .config_name = "softirq", .thread_description = NETDATA_EBPF_SOFTIRQ_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_softirq_thread, + { .info = { .thread_name = "softirq", + .config_name = "softirq", + .thread_description = NETDATA_EBPF_SOFTIRQ_MODULE_DESC}, + .functions = {.start_routine = ebpf_softirq_thread, + .apps_routine = NULL, + .fnct_routine = NULL }, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &softirq_config, + .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &softirq_config, .config_file = NETDATA_SOFTIRQ_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = "oomkill", .config_name = "oomkill", .thread_description = NETDATA_EBPF_OOMKILL_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_oomkill_thread, + { .info = {.thread_name = "oomkill", + .config_name = "oomkill", + .thread_description = NETDATA_EBPF_OOMKILL_MODULE_DESC}, + .functions = {.start_routine = ebpf_oomkill_thread, + .apps_routine = ebpf_oomkill_create_apps_charts, + .fnct_routine = NULL},.enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = ebpf_oomkill_create_apps_charts, .maps = NULL, + .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &oomkill_config, .config_file = NETDATA_OOMKILL_CONFIG_FILE, .kernels = NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | 
NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = "shm", .config_name = "shm", .thread_description = NETDATA_EBPF_SHM_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_shm_thread, + { .info = {.thread_name = "shm", + .config_name = "shm", + .thread_description = NETDATA_EBPF_SHM_MODULE_DESC}, + .functions = {.start_routine = ebpf_shm_thread, + .apps_routine = ebpf_shm_create_apps_charts, + .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = ebpf_shm_create_apps_charts, .maps = NULL, + .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &shm_config, .config_file = NETDATA_DIRECTORY_SHM_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = shm_targets, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = "mdflush", .config_name = "mdflush", .thread_description = NETDATA_EBPF_MD_MODULE_DESC, - .enabled = 0, .start_routine = ebpf_mdflush_thread, + { .info = { .thread_name = "mdflush", + .config_name = "mdflush", + .thread_description = NETDATA_EBPF_MD_MODULE_DESC}, + .functions = {.start_routine = ebpf_mdflush_thread, + .apps_routine = NULL, + .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mdflush_config, + .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mdflush_config, .config_file = NETDATA_DIRECTORY_MDFLUSH_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = mdflush_targets, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = "functions", .config_name = "functions", .thread_description = NETDATA_EBPF_FUNCTIONS_MODULE_DESC, - .enabled = 1, .start_routine = ebpf_function_thread, + { .info = { .thread_name = "functions", + .config_name = "functions", + .thread_description = NETDATA_EBPF_FUNCTIONS_MODULE_DESC}, + .functions = {.start_routine = ebpf_function_thread, + .apps_routine = NULL, + .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = NULL, + .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = NULL, .config_file = NETDATA_DIRECTORY_FUNCTIONS_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | 
NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0}, - { .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_every = EBPF_DEFAULT_UPDATE_EVERY, + { .info = {.thread_name = NULL, .config_name = NULL}, + .functions = {.start_routine = NULL, .apps_routine = NULL, .fnct_routine = NULL}, + .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 0, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_NOT_SET, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, - .pid_map_size = 0, .names = NULL, .cfg = NULL, .config_name = NULL, .kernels = 0, .load = EBPF_LOAD_LEGACY, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .maps = NULL, + .pid_map_size = 0, .names = NULL, .cfg = NULL, .kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES}, }; @@ -559,6 +641,8 @@ ebpf_network_viewer_options_t network_viewer_opt; ebpf_plugin_stats_t plugin_statistics = {.core = 0, .legacy = 0, .running = 0, .threads = 0, .tracepoints = 0, .probes = 0, .retprobes = 0, .trampolines = 0, .memlock_kern = 0, .hash_tables = 0}; +netdata_ebpf_judy_pid_t ebpf_judy_pid = {.pid_table = NULL, .index = {.JudyLArray = NULL}}; +bool ebpf_plugin_exit = false; #ifdef LIBBPF_MAJOR_VERSION struct btf *default_btf = NULL; @@ -578,6 +662,61 @@ void *default_btf = NULL; #endif char *btf_path = NULL; +/***************************************************************** + * + * FUNCTIONS USED TO MANIPULATE JUDY ARRAY + * + *****************************************************************/ + +/** + * Hashtable insert unsafe + * + * Find or create a value associated to the index + * + * @return The lsocket = 0 when new item added to the array otherwise the existing item value is returned in *lsocket + * we return a pointer to a pointer, so that the caller can put anything needed at the value of the index. + * The pointer to pointer we return has to be used before any other operation that may change the index (insert/delete). + * + */ +void **ebpf_judy_insert_unsafe(PPvoid_t arr, Word_t key) +{ + JError_t J_Error; + Pvoid_t *idx = JudyLIns(arr, key, &J_Error); + if (unlikely(idx == PJERR)) { + netdata_log_error("Cannot add PID to JudyL, JU_ERRNO_* == %u, ID == %d", + JU_ERRNO(&J_Error), JU_ERRID(&J_Error)); + } + + return idx; +} + +/** + * Get PID from judy + * + * Get a pointer for the `pid` from judy_array; + * + * @param judy_array a judy array where PID is the primary key + * @param pid pid stored. 
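 *
 * Illustrative usage (not part of this patch; names taken from the ebpf_judy_pid
 * global declared above): the helper is "unsafe" by design, so a caller is
 * expected to serialize access with the index spinlock, roughly like this:
 *
 *   rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock);
 *   netdata_ebpf_judy_pid_stats_t *entry =
 *       ebpf_get_pid_from_judy_unsafe(&ebpf_judy_pid.index.JudyLArray, pid);
 *   // entry->cmdline and entry->socket_stats start zeroed for a new PID
 *   rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);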
+ */ +netdata_ebpf_judy_pid_stats_t *ebpf_get_pid_from_judy_unsafe(PPvoid_t judy_array, uint32_t pid) +{ + netdata_ebpf_judy_pid_stats_t **pid_pptr = + (netdata_ebpf_judy_pid_stats_t **)ebpf_judy_insert_unsafe(judy_array, pid); + netdata_ebpf_judy_pid_stats_t *pid_ptr = *pid_pptr; + if (likely(*pid_pptr == NULL)) { + // a new PID added to the index + *pid_pptr = aral_mallocz(ebpf_judy_pid.pid_table); + + pid_ptr = *pid_pptr; + + pid_ptr->cmdline = NULL; + pid_ptr->socket_stats.JudyLArray = NULL; + rw_spinlock_init(&pid_ptr->socket_stats.rw_spinlock); + } + + return pid_ptr; +} + /***************************************************************** * * FUNCTIONS USED TO ALLOCATE APPS/CGROUP MEMORIES (ARAL) @@ -626,7 +765,7 @@ static inline void ebpf_check_before2go() i = 0; int j; pthread_mutex_lock(&ebpf_exit_cleanup); - for (j = 0; ebpf_modules[j].thread_name != NULL; j++) { + for (j = 0; ebpf_modules[j].info.thread_name != NULL; j++) { if (ebpf_modules[j].enabled < NETDATA_THREAD_EBPF_STOPPING) i++; } @@ -704,14 +843,15 @@ void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link **probe static void ebpf_unload_unique_maps() { int i; - for (i = 0; ebpf_modules[i].thread_name; i++) { + for (i = 0; ebpf_modules[i].info.thread_name; i++) { // These threads are cleaned with other functions if (i != EBPF_MODULE_SOCKET_IDX) continue; if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_STOPPED) { if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_NOT_RUNNING) - netdata_log_error("Cannot unload maps for thread %s, because it is not stopped.", ebpf_modules[i].thread_name); + netdata_log_error("Cannot unload maps for thread %s, because it is not stopped.", + ebpf_modules[i].info.thread_name); continue; } @@ -775,13 +915,12 @@ static void ebpf_unload_sync() } } -int ebpf_exit_plugin = 0; /** * Close the collector gracefully * * @param sig is the signal number used to close the collector */ -static void ebpf_stop_threads(int sig) +void ebpf_stop_threads(int sig) { UNUSED(sig); static int only_one = 0; @@ -794,11 +933,11 @@ static void ebpf_stop_threads(int sig) } only_one = 1; int i; - for (i = 0; ebpf_modules[i].thread_name != NULL; i++) { + for (i = 0; ebpf_modules[i].info.thread_name != NULL; i++) { if (ebpf_modules[i].enabled < NETDATA_THREAD_EBPF_STOPPING) { netdata_thread_cancel(*ebpf_modules[i].thread->thread); #ifdef NETDATA_DEV_MODE - netdata_log_info("Sending cancel for thread %s", ebpf_modules[i].thread_name); + netdata_log_info("Sending cancel for thread %s", ebpf_modules[i].info.thread_name); #endif } } @@ -811,7 +950,7 @@ static void ebpf_stop_threads(int sig) #endif pthread_mutex_unlock(&mutex_cgroup_shm); - ebpf_exit_plugin = 1; + ebpf_plugin_exit = true; ebpf_check_before2go(); @@ -839,8 +978,8 @@ static void ebpf_stop_threads(int sig) * @param root a pointer for the targets. 
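 *
 * Note: this patch moves per-module identification into the nested .info member
 * and the callbacks into .functions, so module iteration now reads those fields,
 * e.g. (illustrative sketch, the real caller is elsewhere in this file):
 *
 *   for (int i = 0; ebpf_modules[i].info.thread_name; i++)
 *       if (ebpf_modules[i].functions.apps_routine)
 *           ebpf_create_apps_for_module(&ebpf_modules[i], root);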
*/ static inline void ebpf_create_apps_for_module(ebpf_module_t *em, struct ebpf_target *root) { - if (em->enabled < NETDATA_THREAD_EBPF_STOPPING && em->apps_charts && em->apps_routine) - em->apps_routine(em, root); + if (em->enabled < NETDATA_THREAD_EBPF_STOPPING && em->apps_charts && em->functions.apps_routine) + em->functions.apps_routine(em, root); } /** @@ -920,25 +1059,6 @@ collected_number get_value_from_structure(char *basis, size_t offset) return ret; } -/** - * Write begin command on standard output - * - * @param family the chart family name - * @param name the chart name - */ -void write_begin_chart(char *family, char *name) -{ - printf("BEGIN %s.%s\n", family, name); -} - -/** - * Write END command on stdout. - */ -inline void write_end_chart() -{ - printf("END\n"); -} - /** * Write set command on standard output * @@ -962,7 +1082,7 @@ void write_chart_dimension(char *dim, long long value) */ void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end) { - write_begin_chart(family, name); + ebpf_write_begin_chart(family, name, ""); uint32_t i = 0; while (move && i < end) { @@ -972,7 +1092,7 @@ void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move i++; } - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -985,7 +1105,7 @@ void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move */ void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, int end) { - write_begin_chart(family, name); + ebpf_write_begin_chart(family, name, ""); int i = 0; while (move && i < end) { @@ -995,7 +1115,7 @@ void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, i++; } - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -1010,11 +1130,11 @@ void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, */ void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long long v1) { - write_begin_chart(family, chart); + ebpf_write_begin_chart(family, chart, ""); write_chart_dimension(dim, v1); - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -1031,19 +1151,20 @@ void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long */ void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, char *dread, long long vread) { - write_begin_chart(family, chart); + ebpf_write_begin_chart(family, chart, ""); write_chart_dimension(dwrite, vwrite); write_chart_dimension(dread, vread); - write_end_chart(); + ebpf_write_end_chart(); } /** * Write chart cmd on standard output * * @param type chart type - * @param id chart id + * @param id chart id (the apps group name). + * @param suffix suffix to differentiate charts * @param title chart title * @param units units label * @param family group name used to attach the chart on dashboard @@ -1053,12 +1174,13 @@ void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, c * @param update_every update interval used by plugin * @param module chart module name, this is the eBPF thread. 
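 *
 * Example (illustrative placeholder names): the new suffix argument is appended
 * to the chart id, so a call such as
 *
 *   ebpf_write_chart_cmd("apps", "myapp", "_ebpf_call", "Calls", "calls",
 *                        "family", "stacked", NULL, 20001, 1, "process");
 *
 * emits "CHART apps.myapp_ebpf_call ...", while passing "" keeps the previous
 * chart naming unchanged.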
*/ -void ebpf_write_chart_cmd(char *type, char *id, char *title, char *units, char *family, +void ebpf_write_chart_cmd(char *type, char *id, char *suffix, char *title, char *units, char *family, char *charttype, char *context, int order, int update_every, char *module) { - printf("CHART %s.%s '' '%s' '%s' '%s' '%s' '%s' %d %d '' 'ebpf.plugin' '%s'\n", + printf("CHART %s.%s%s '' '%s' '%s' '%s' '%s' '%s' %d %d '' 'ebpf.plugin' '%s'\n", type, id, + suffix, title, units, (family)?family:"", @@ -1074,6 +1196,7 @@ void ebpf_write_chart_cmd(char *type, char *id, char *title, char *units, char * * * @param type chart type * @param id chart id + * @param suffix add suffix to obsolete charts. * @param title chart title * @param units units label * @param family group name used to attach the chart on dashboard @@ -1082,12 +1205,13 @@ void ebpf_write_chart_cmd(char *type, char *id, char *title, char *units, char * * @param order chart order * @param update_every value to overwrite the update frequency set by the server. */ -void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *units, char *family, +void ebpf_write_chart_obsolete(char *type, char *id, char *suffix, char *title, char *units, char *family, char *charttype, char *context, int order, int update_every) { - printf("CHART %s.%s '' '%s' '%s' '%s' '%s' '%s' %d %d 'obsolete'\n", + printf("CHART %s.%s%s '' '%s' '%s' '%s' '%s' '%s' %d %d 'obsolete'\n", type, id, + suffix, title, units, (family)?family:"", @@ -1159,40 +1283,13 @@ void ebpf_create_chart(char *type, int update_every, char *module) { - ebpf_write_chart_cmd(type, id, title, units, family, charttype, context, order, update_every, module); + ebpf_write_chart_cmd(type, id, "", title, units, family, charttype, context, order, update_every, module); if (ncd) { ncd(move, end); } } -/** - * Create charts on apps submenu - * - * @param id the chart id - * @param title the value displayed on vertical axis. - * @param units the value displayed on vertical axis. - * @param family Submenu that the chart will be attached on dashboard. - * @param charttype chart type - * @param order the chart order - * @param algorithm the algorithm used by dimension - * @param root structure used to create the dimensions. - * @param update_every update interval used by plugin - * @param module chart module name, this is the eBPF thread. - */ -void ebpf_create_charts_on_apps(char *id, char *title, char *units, char *family, char *charttype, int order, - char *algorithm, struct ebpf_target *root, int update_every, char *module) -{ - struct ebpf_target *w; - ebpf_write_chart_cmd(NETDATA_APPS_FAMILY, id, title, units, family, charttype, NULL, order, - update_every, module); - - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' %s 1 1\n", w->name, algorithm); - } -} - /** * Call the necessary functions to create a name. 
* @@ -1206,14 +1303,14 @@ void ebpf_create_charts_on_apps(char *id, char *title, char *units, char *family */ void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end) { - write_begin_chart(family, name); + ebpf_write_begin_chart(family, name, ""); uint32_t i; for (i = 0; i < end; i++) { write_chart_dimension(dimensions[i], (long long) hist[i]); } - write_end_chart(); + ebpf_write_end_chart(); fflush(stdout); } @@ -1238,6 +1335,7 @@ int ebpf_statistic_create_aral_chart(char *name, ebpf_module_t *em) ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY, em->memory_usage, + "", "Bytes allocated for ARAL.", "bytes", NETDATA_EBPF_FAMILY, @@ -1253,6 +1351,7 @@ int ebpf_statistic_create_aral_chart(char *name, ebpf_module_t *em) ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY, em->memory_allocations, + "", "Calls to allocate memory.", "calls", NETDATA_EBPF_FAMILY, @@ -1282,6 +1381,7 @@ void ebpf_statistic_obsolete_aral_chart(ebpf_module_t *em, int prio) { ebpf_write_chart_obsolete(NETDATA_MONITORING_FAMILY, em->memory_allocations, + "", "Calls to allocate memory.", "calls", NETDATA_EBPF_FAMILY, @@ -1292,6 +1392,7 @@ void ebpf_statistic_obsolete_aral_chart(ebpf_module_t *em, int prio) ebpf_write_chart_obsolete(NETDATA_MONITORING_FAMILY, em->memory_allocations, + "", "Calls to allocate memory.", "calls", NETDATA_EBPF_FAMILY, @@ -1316,13 +1417,13 @@ void ebpf_send_data_aral_chart(ARAL *memory, ebpf_module_t *em) struct aral_statistics *stats = aral_statistics(memory); - write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_usage); + ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_usage, ""); write_chart_dimension(mem, (long long)stats->structures.allocated_bytes); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_allocations); + ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_allocations, ""); write_chart_dimension(aral, (long long)stats->structures.allocations); - write_end_chart(); + ebpf_write_end_chart(); } /***************************************************************** @@ -1368,6 +1469,607 @@ void ebpf_read_global_table_stats(netdata_idx_t *stats, } } +/***************************************************************** + * + * FUNCTIONS USED WITH SOCKET + * + *****************************************************************/ + +/** + * Netmask + * + * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h) + * + * @param prefix create the netmask based in the CIDR value. + * + * @return + */ +static inline in_addr_t ebpf_netmask(int prefix) { + + if (prefix == 0) + return (~((in_addr_t) - 1)); + else + return (in_addr_t)(~((1 << (32 - prefix)) - 1)); + +} + +/** + * Broadcast + * + * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h) + * + * @param addr is the ip address + * @param prefix is the CIDR value. + * + * @return It returns the last address of the range + */ +static inline in_addr_t ebpf_broadcast(in_addr_t addr, int prefix) +{ + return (addr | ~ebpf_netmask(prefix)); +} + +/** + * Network + * + * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h) + * + * @param addr is the ip address + * @param prefix is the CIDR value. + * + * @return It returns the first address of the range. 
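 *
 * Worked example (values in host byte order): for 192.168.1.123 with prefix 24,
 * ebpf_netmask(24) is 0xFFFFFF00, so ebpf_ipv4_network() returns 192.168.1.0
 * and ebpf_broadcast() returns 192.168.1.255.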
+ */ +static inline in_addr_t ebpf_ipv4_network(in_addr_t addr, int prefix) +{ + return (addr & ebpf_netmask(prefix)); +} + +/** + * Calculate ipv6 first address + * + * @param out the address to store the first address. + * @param in the address used to do the math. + * @param prefix number of bits used to calculate the address + */ +static void get_ipv6_first_addr(union netdata_ip_t *out, union netdata_ip_t *in, uint64_t prefix) +{ + uint64_t mask,tmp; + uint64_t ret[2]; + + memcpy(ret, in->addr32, sizeof(union netdata_ip_t)); + + if (prefix == 128) { + memcpy(out->addr32, in->addr32, sizeof(union netdata_ip_t)); + return; + } else if (!prefix) { + ret[0] = ret[1] = 0; + memcpy(out->addr32, ret, sizeof(union netdata_ip_t)); + return; + } else if (prefix <= 64) { + ret[1] = 0ULL; + + tmp = be64toh(ret[0]); + mask = 0xFFFFFFFFFFFFFFFFULL << (64 - prefix); + tmp &= mask; + ret[0] = htobe64(tmp); + } else { + mask = 0xFFFFFFFFFFFFFFFFULL << (128 - prefix); + tmp = be64toh(ret[1]); + tmp &= mask; + ret[1] = htobe64(tmp); + } + + memcpy(out->addr32, ret, sizeof(union netdata_ip_t)); +} + +/** + * Get IPV6 Last Address + * + * @param out the address to store the last address. + * @param in the address used to do the math. + * @param prefix number of bits used to calculate the address + */ +static void get_ipv6_last_addr(union netdata_ip_t *out, union netdata_ip_t *in, uint64_t prefix) +{ + uint64_t mask,tmp; + uint64_t ret[2]; + memcpy(ret, in->addr32, sizeof(union netdata_ip_t)); + + if (prefix == 128) { + memcpy(out->addr32, in->addr32, sizeof(union netdata_ip_t)); + return; + } else if (!prefix) { + ret[0] = ret[1] = 0xFFFFFFFFFFFFFFFF; + memcpy(out->addr32, ret, sizeof(union netdata_ip_t)); + return; + } else if (prefix <= 64) { + ret[1] = 0xFFFFFFFFFFFFFFFFULL; + + tmp = be64toh(ret[0]); + mask = 0xFFFFFFFFFFFFFFFFULL << (64 - prefix); + tmp |= ~mask; + ret[0] = htobe64(tmp); + } else { + mask = 0xFFFFFFFFFFFFFFFFULL << (128 - prefix); + tmp = be64toh(ret[1]); + tmp |= ~mask; + ret[1] = htobe64(tmp); + } + + memcpy(out->addr32, ret, sizeof(union netdata_ip_t)); +} + +/** + * IP to network long + * + * @param dst the vector to store the result + * @param ip the source ip given by our users. + * @param domain the ip domain (IPV4 or IPV6) + * @param source the original string + * + * @return it returns 0 on success and -1 otherwise. + */ +static inline int ebpf_ip2nl(uint8_t *dst, char *ip, int domain, char *source) +{ + if (inet_pton(domain, ip, dst) <= 0) { + netdata_log_error("The address specified (%s) is invalid ", source); + return -1; + } + + return 0; +} + +/** + * Clean port Structure + * + * Clean the allocated list. + * + * @param clean the list that will be cleaned + */ +void ebpf_clean_port_structure(ebpf_network_viewer_port_list_t **clean) +{ + ebpf_network_viewer_port_list_t *move = *clean; + while (move) { + ebpf_network_viewer_port_list_t *next = move->next; + freez(move->value); + freez(move); + + move = next; + } + *clean = NULL; +} + +/** + * Clean IP structure + * + * Clean the allocated list. + * + * @param clean the list that will be cleaned + */ +void ebpf_clean_ip_structure(ebpf_network_viewer_ip_list_t **clean) +{ + ebpf_network_viewer_ip_list_t *move = *clean; + while (move) { + ebpf_network_viewer_ip_list_t *next = move->next; + freez(move->value); + freez(move); + + move = next; + } + *clean = NULL; +} + +/** + * Parse IP List + * + * Parse IP list and link it. 
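 *
 * Accepted forms mirror what the parser below handles: a single address
 * ("10.0.0.1"), a CIDR ("192.168.1.0/24"), an explicit range
 * ("10.0.0.1-10.0.0.10"), their IPv6 equivalents ("fc00::/7"), and "*" for
 * every address; a leading "!" on the whole token is stripped by the caller
 * to route the entry to the excluded list.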
+ * + * @param out a pointer to store the link list + * @param ip the value given as parameter + */ +static void ebpf_parse_ip_list_unsafe(void **out, char *ip) +{ + ebpf_network_viewer_ip_list_t **list = (ebpf_network_viewer_ip_list_t **)out; + + char *ipdup = strdupz(ip); + union netdata_ip_t first = { }; + union netdata_ip_t last = { }; + char *is_ipv6; + if (*ip == '*' && *(ip+1) == '\0') { + memset(first.addr8, 0, sizeof(first.addr8)); + memset(last.addr8, 0xFF, sizeof(last.addr8)); + + is_ipv6 = ip; + + ebpf_clean_ip_structure(list); + goto storethisip; + } + + char *end = ip; + // Move while I cannot find a separator + while (*end && *end != '/' && *end != '-') end++; + + // We will use only the classic IPV6 for while, but we could consider the base 85 in a near future + // https://tools.ietf.org/html/rfc1924 + is_ipv6 = strchr(ip, ':'); + + int select; + if (*end && !is_ipv6) { // IPV4 range + select = (*end == '/') ? 0 : 1; + *end++ = '\0'; + if (*end == '!') { + netdata_log_info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup); + goto cleanipdup; + } + + if (!select) { // CIDR + select = ebpf_ip2nl(first.addr8, ip, AF_INET, ipdup); + if (select) + goto cleanipdup; + + select = (int) str2i(end); + if (select < NETDATA_MINIMUM_IPV4_CIDR || select > NETDATA_MAXIMUM_IPV4_CIDR) { + netdata_log_info("The specified CIDR %s is not valid, the IP %s will be ignored.", end, ip); + goto cleanipdup; + } + + last.addr32[0] = htonl(ebpf_broadcast(ntohl(first.addr32[0]), select)); + // This was added to remove + // https://app.codacy.com/manual/netdata/netdata/pullRequest?prid=5810941&bid=19021977 + UNUSED(last.addr32[0]); + + uint32_t ipv4_test = htonl(ebpf_ipv4_network(ntohl(first.addr32[0]), select)); + if (first.addr32[0] != ipv4_test) { + first.addr32[0] = ipv4_test; + struct in_addr ipv4_convert; + ipv4_convert.s_addr = ipv4_test; + char ipv4_msg[INET_ADDRSTRLEN]; + if(inet_ntop(AF_INET, &ipv4_convert, ipv4_msg, INET_ADDRSTRLEN)) + netdata_log_info("The network value of CIDR %s was updated for %s .", ipdup, ipv4_msg); + } + } else { // Range + select = ebpf_ip2nl(first.addr8, ip, AF_INET, ipdup); + if (select) + goto cleanipdup; + + select = ebpf_ip2nl(last.addr8, end, AF_INET, ipdup); + if (select) + goto cleanipdup; + } + + if (htonl(first.addr32[0]) > htonl(last.addr32[0])) { + netdata_log_info("The specified range %s is invalid, the second address is smallest than the first, it will be ignored.", + ipdup); + goto cleanipdup; + } + } else if (is_ipv6) { // IPV6 + if (!*end) { // Unique + select = ebpf_ip2nl(first.addr8, ip, AF_INET6, ipdup); + if (select) + goto cleanipdup; + + memcpy(last.addr8, first.addr8, sizeof(first.addr8)); + } else if (*end == '-') { + *end++ = 0x00; + if (*end == '!') { + netdata_log_info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup); + goto cleanipdup; + } + + select = ebpf_ip2nl(first.addr8, ip, AF_INET6, ipdup); + if (select) + goto cleanipdup; + + select = ebpf_ip2nl(last.addr8, end, AF_INET6, ipdup); + if (select) + goto cleanipdup; + } else { // CIDR + *end++ = 0x00; + if (*end == '!') { + netdata_log_info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup); + goto cleanipdup; + } + + select = str2i(end); + if (select < 0 || select > 128) { + netdata_log_info("The CIDR %s is not valid, the address %s will be ignored.", end, ip); + goto cleanipdup; + } + + uint64_t prefix = (uint64_t)select; + select = ebpf_ip2nl(first.addr8, ip, 
AF_INET6, ipdup); + if (select) + goto cleanipdup; + + get_ipv6_last_addr(&last, &first, prefix); + + union netdata_ip_t ipv6_test; + get_ipv6_first_addr(&ipv6_test, &first, prefix); + + if (memcmp(first.addr8, ipv6_test.addr8, sizeof(union netdata_ip_t)) != 0) { + memcpy(first.addr8, ipv6_test.addr8, sizeof(union netdata_ip_t)); + + struct in6_addr ipv6_convert; + memcpy(ipv6_convert.s6_addr, ipv6_test.addr8, sizeof(union netdata_ip_t)); + + char ipv6_msg[INET6_ADDRSTRLEN]; + if(inet_ntop(AF_INET6, &ipv6_convert, ipv6_msg, INET6_ADDRSTRLEN)) + netdata_log_info("The network value of CIDR %s was updated for %s .", ipdup, ipv6_msg); + } + } + + if ((be64toh(*(uint64_t *)&first.addr32[2]) > be64toh(*(uint64_t *)&last.addr32[2]) && + !memcmp(first.addr32, last.addr32, 2*sizeof(uint32_t))) || + (be64toh(*(uint64_t *)&first.addr32) > be64toh(*(uint64_t *)&last.addr32)) ) { + netdata_log_info("The specified range %s is invalid, the second address is smallest than the first, it will be ignored.", + ipdup); + goto cleanipdup; + } + } else { // Unique ip + select = ebpf_ip2nl(first.addr8, ip, AF_INET, ipdup); + if (select) + goto cleanipdup; + + memcpy(last.addr8, first.addr8, sizeof(first.addr8)); + } + + ebpf_network_viewer_ip_list_t *store; + + storethisip: + store = callocz(1, sizeof(ebpf_network_viewer_ip_list_t)); + store->value = ipdup; + store->hash = simple_hash(ipdup); + store->ver = (uint8_t)(!is_ipv6)?AF_INET:AF_INET6; + memcpy(store->first.addr8, first.addr8, sizeof(first.addr8)); + memcpy(store->last.addr8, last.addr8, sizeof(last.addr8)); + + ebpf_fill_ip_list_unsafe(list, store, "socket"); + return; + + cleanipdup: + freez(ipdup); +} + +/** + * Parse IP Range + * + * Parse the IP ranges given and create Network Viewer IP Structure + * + * @param ptr is a pointer with the text to parse. + */ +void ebpf_parse_ips_unsafe(char *ptr) +{ + // No value + if (unlikely(!ptr)) + return; + + while (likely(ptr)) { + // Move forward until next valid character + while (isspace(*ptr)) ptr++; + + // No valid value found + if (unlikely(!*ptr)) + return; + + // Find space that ends the list + char *end = strchr(ptr, ' '); + if (end) { + *end++ = '\0'; + } + + int neg = 0; + if (*ptr == '!') { + neg++; + ptr++; + } + + if (isascii(*ptr)) { // Parse port + ebpf_parse_ip_list_unsafe( + (!neg) ? (void **)&network_viewer_opt.included_ips : (void **)&network_viewer_opt.excluded_ips, ptr); + } + + ptr = end; + } +} + +/** + * Fill Port list + * + * @param out a pointer to the link list. + * @param in the structure that will be linked. 
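 *
 * Example of the merge rules implemented below: inserting "80" after "1-1024"
 * is ignored because the single port is already covered, while inserting
 * "1-1024" after "80" overwrites the narrower entry with the wider range.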
+ */ +static inline void fill_port_list(ebpf_network_viewer_port_list_t **out, ebpf_network_viewer_port_list_t *in) +{ + if (likely(*out)) { + ebpf_network_viewer_port_list_t *move = *out, *store = *out; + uint16_t first = ntohs(in->first); + uint16_t last = ntohs(in->last); + while (move) { + uint16_t cmp_first = ntohs(move->first); + uint16_t cmp_last = ntohs(move->last); + if (cmp_first <= first && first <= cmp_last && + cmp_first <= last && last <= cmp_last ) { + netdata_log_info("The range/value (%u, %u) is inside the range/value (%u, %u) already inserted, it will be ignored.", + first, last, cmp_first, cmp_last); + freez(in->value); + freez(in); + return; + } else if (first <= cmp_first && cmp_first <= last && + first <= cmp_last && cmp_last <= last) { + netdata_log_info("The range (%u, %u) is bigger than previous range (%u, %u) already inserted, the previous will be ignored.", + first, last, cmp_first, cmp_last); + freez(move->value); + move->value = in->value; + move->first = in->first; + move->last = in->last; + freez(in); + return; + } + + store = move; + move = move->next; + } + + store->next = in; + } else { + *out = in; + } + +#ifdef NETDATA_INTERNAL_CHECKS + netdata_log_info("Adding values %s( %u, %u) to %s port list used on network viewer", + in->value, in->first, in->last, + (*out == network_viewer_opt.included_port)?"included":"excluded"); +#endif +} + +/** + * Parse Service List + * + * @param out a pointer to store the link list + * @param service the service used to create the structure that will be linked. + */ +static void ebpf_parse_service_list(void **out, char *service) +{ + ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out; + struct servent *serv = getservbyname((const char *)service, "tcp"); + if (!serv) + serv = getservbyname((const char *)service, "udp"); + + if (!serv) { + netdata_log_info("Cannot resolve the service '%s' with protocols TCP and UDP, it will be ignored", service); + return; + } + + ebpf_network_viewer_port_list_t *w = callocz(1, sizeof(ebpf_network_viewer_port_list_t)); + w->value = strdupz(service); + w->hash = simple_hash(service); + + w->first = w->last = (uint16_t)serv->s_port; + + fill_port_list(list, w); +} + +/** + * Parse port list + * + * Parse an allocated port list with the range given + * + * @param out a pointer to store the link list + * @param range the informed range for the user. 
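 *
 * Illustrative inputs: "19999" selects a single port, "1-1024" or "1:1024"
 * selects a range (both separators are accepted below), and "*" resets the
 * list and expands to 1-65535.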
+ */ +static void ebpf_parse_port_list(void **out, char *range) +{ + int first, last; + ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out; + + char *copied = strdupz(range); + if (*range == '*' && *(range+1) == '\0') { + first = 1; + last = 65535; + + ebpf_clean_port_structure(list); + goto fillenvpl; + } + + char *end = range; + //Move while I cannot find a separator + while (*end && *end != ':' && *end != '-') end++; + + //It has a range + if (likely(*end)) { + *end++ = '\0'; + if (*end == '!') { + netdata_log_info("The exclusion cannot be in the second part of the range, the range %s will be ignored.", copied); + freez(copied); + return; + } + last = str2i((const char *)end); + } else { + last = 0; + } + + first = str2i((const char *)range); + if (first < NETDATA_MINIMUM_PORT_VALUE || first > NETDATA_MAXIMUM_PORT_VALUE) { + netdata_log_info("The first port %d of the range \"%s\" is invalid and it will be ignored!", first, copied); + freez(copied); + return; + } + + if (!last) + last = first; + + if (last < NETDATA_MINIMUM_PORT_VALUE || last > NETDATA_MAXIMUM_PORT_VALUE) { + netdata_log_info("The second port %d of the range \"%s\" is invalid and the whole range will be ignored!", last, copied); + freez(copied); + return; + } + + if (first > last) { + netdata_log_info("The specified order %s is wrong, the smallest value is always the first, it will be ignored!", copied); + freez(copied); + return; + } + + ebpf_network_viewer_port_list_t *w; + fillenvpl: + w = callocz(1, sizeof(ebpf_network_viewer_port_list_t)); + w->value = copied; + w->hash = simple_hash(copied); + w->first = (uint16_t)first; + w->last = (uint16_t)last; + w->cmp_first = (uint16_t)first; + w->cmp_last = (uint16_t)last; + + fill_port_list(list, w); +} + +/** + * Parse Port Range + * + * Parse the port ranges given and create Network Viewer Port Structure + * + * @param ptr is a pointer with the text to parse. + */ +void ebpf_parse_ports(char *ptr) +{ + // No value + if (unlikely(!ptr)) + return; + + while (likely(ptr)) { + // Move forward until next valid character + while (isspace(*ptr)) ptr++; + + // No valid value found + if (unlikely(!*ptr)) + return; + + // Find space that ends the list + char *end = strchr(ptr, ' '); + if (end) { + *end++ = '\0'; + } + + int neg = 0; + if (*ptr == '!') { + neg++; + ptr++; + } + + if (isdigit(*ptr)) { // Parse port + ebpf_parse_port_list( + (!neg) ? (void **)&network_viewer_opt.included_port : (void **)&network_viewer_opt.excluded_port, ptr); + } else if (isalpha(*ptr)) { // Parse service + ebpf_parse_service_list( + (!neg) ? (void **)&network_viewer_opt.included_port : (void **)&network_viewer_opt.excluded_port, ptr); + } else if (*ptr == '*') { // All + ebpf_parse_port_list( + (!neg) ? (void **)&network_viewer_opt.included_port : (void **)&network_viewer_opt.excluded_port, ptr); + } + + ptr = end; + } +} + /***************************************************************** * * FUNCTIONS TO DEFINE OPTIONS @@ -1428,13 +2130,7 @@ static inline void ebpf_set_thread_mode(netdata_run_mode_t lmode) */ static inline void ebpf_enable_specific_chart(struct ebpf_module *em, int disable_cgroup) { - em->enabled = CONFIG_BOOLEAN_YES; - - // oomkill stores data inside apps submenu, so it always need to have apps_enabled for plugin to create - // its chart, without this comparison eBPF.plugin will try to store invalid data when apps is disabled. 
- if (!strcmp(em->thread_name, "oomkill")) { - em->apps_charts = NETDATA_EBPF_APPS_FLAG_YES; - } + em->enabled = NETDATA_THREAD_EBPF_RUNNING; if (!disable_cgroup) { em->cgroup_charts = CONFIG_BOOLEAN_YES; @@ -1451,8 +2147,8 @@ static inline void ebpf_enable_specific_chart(struct ebpf_module *em, int disabl static inline void disable_all_global_charts() { int i; - for (i = 0; ebpf_modules[i].thread_name; i++) { - ebpf_modules[i].enabled = 0; + for (i = 0; ebpf_modules[i].info.thread_name; i++) { + ebpf_modules[i].enabled = NETDATA_THREAD_EBPF_NOT_RUNNING; ebpf_modules[i].global_charts = 0; } } @@ -1465,7 +2161,7 @@ static inline void disable_all_global_charts() static inline void ebpf_enable_chart(int idx, int disable_cgroup) { int i; - for (i = 0; ebpf_modules[i].thread_name; i++) { + for (i = 0; ebpf_modules[i].info.thread_name; i++) { if (i == idx) { ebpf_enable_specific_chart(&ebpf_modules[i], disable_cgroup); break; @@ -1481,7 +2177,7 @@ static inline void ebpf_enable_chart(int idx, int disable_cgroup) static inline void ebpf_disable_cgroups() { int i; - for (i = 0; ebpf_modules[i].thread_name; i++) { + for (i = 0; ebpf_modules[i].info.thread_name; i++) { ebpf_modules[i].cgroup_charts = 0; } } @@ -1661,6 +2357,203 @@ uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps) * *****************************************************************/ +/** + * Is ip inside the range + * + * Check if the ip is inside a IP range + * + * @param rfirst the first ip address of the range + * @param rlast the last ip address of the range + * @param cmpfirst the first ip to compare + * @param cmplast the last ip to compare + * @param family the IP family + * + * @return It returns 1 if the IP is inside the range and 0 otherwise + */ +static int ebpf_is_ip_inside_range(union netdata_ip_t *rfirst, union netdata_ip_t *rlast, + union netdata_ip_t *cmpfirst, union netdata_ip_t *cmplast, int family) +{ + if (family == AF_INET) { + if ((rfirst->addr32[0] <= cmpfirst->addr32[0]) && (rlast->addr32[0] >= cmplast->addr32[0])) + return 1; + } else { + if (memcmp(rfirst->addr8, cmpfirst->addr8, sizeof(union netdata_ip_t)) <= 0 && + memcmp(rlast->addr8, cmplast->addr8, sizeof(union netdata_ip_t)) >= 0) { + return 1; + } + + } + return 0; +} + +/** + * Fill IP list + * + * @param out a pointer to the link list. + * @param in the structure that will be linked. + * @param table the modified table. 
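 *
 * Note: for AF_INET entries the first/last addresses are converted to host
 * byte order on insertion (see below), so ebpf_is_ip_inside_range() can rely
 * on plain integer ordering when deciding whether a new range is already
 * covered by an existing one.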
+ */ +void ebpf_fill_ip_list_unsafe(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, + char *table __maybe_unused) +{ + if (in->ver == AF_INET) { // It is simpler to compare using host order + in->first.addr32[0] = ntohl(in->first.addr32[0]); + in->last.addr32[0] = ntohl(in->last.addr32[0]); + } + if (likely(*out)) { + ebpf_network_viewer_ip_list_t *move = *out, *store = *out; + while (move) { + if (in->ver == move->ver && + ebpf_is_ip_inside_range(&move->first, &move->last, &in->first, &in->last, in->ver)) { +#ifdef NETDATA_DEV_MODE + netdata_log_info("The range/value (%s) is inside the range/value (%s) already inserted, it will be ignored.", + in->value, move->value); +#endif + freez(in->value); + freez(in); + return; + } + store = move; + move = move->next; + } + + store->next = in; + } else { + *out = in; + } + +#ifdef NETDATA_DEV_MODE + char first[256], last[512]; + if (in->ver == AF_INET) { + netdata_log_info("Adding values %s: (%u - %u) to %s IP list \"%s\" used on network viewer", + in->value, in->first.addr32[0], in->last.addr32[0], + (*out == network_viewer_opt.included_ips)?"included":"excluded", + table); + } else { + if (inet_ntop(AF_INET6, in->first.addr8, first, INET6_ADDRSTRLEN) && + inet_ntop(AF_INET6, in->last.addr8, last, INET6_ADDRSTRLEN)) + netdata_log_info("Adding values %s - %s to %s IP list \"%s\" used on network viewer", + first, last, + (*out == network_viewer_opt.included_ips)?"included":"excluded", + table); + } +#endif +} + +/** + * Link hostname + * + * @param out is the output link list + * @param in the hostname to add to list. + */ +static void ebpf_link_hostname(ebpf_network_viewer_hostname_list_t **out, ebpf_network_viewer_hostname_list_t *in) +{ + if (likely(*out)) { + ebpf_network_viewer_hostname_list_t *move = *out; + for (; move->next ; move = move->next ) { + if (move->hash == in->hash && !strcmp(move->value, in->value)) { + netdata_log_info("The hostname %s was already inserted, it will be ignored.", in->value); + freez(in->value); + simple_pattern_free(in->value_pattern); + freez(in); + return; + } + } + + move->next = in; + } else { + *out = in; + } +#ifdef NETDATA_INTERNAL_CHECKS + netdata_log_info("Adding value %s to %s hostname list used on network viewer", + in->value, + (*out == network_viewer_opt.included_hostnames)?"included":"excluded"); +#endif +} + +/** + * Link Hostnames + * + * Parse the list of hostnames to create the link list. + * This is not associated with the IP, because simple patterns like *example* cannot be resolved to IP. + * + * @param out is the output link list + * @param parse is a pointer with the text to parser. + */ +static void ebpf_link_hostnames(char *parse) +{ + // No value + if (unlikely(!parse)) + return; + + while (likely(parse)) { + // Find the first valid value + while (isspace(*parse)) parse++; + + // No valid value found + if (unlikely(!*parse)) + return; + + // Find space that ends the list + char *end = strchr(parse, ' '); + if (end) { + *end++ = '\0'; + } + + int neg = 0; + if (*parse == '!') { + neg++; + parse++; + } + + ebpf_network_viewer_hostname_list_t *hostname = callocz(1 , sizeof(ebpf_network_viewer_hostname_list_t)); + hostname->value = strdupz(parse); + hostname->hash = simple_hash(parse); + hostname->value_pattern = simple_pattern_create(parse, NULL, SIMPLE_PATTERN_EXACT, true); + + ebpf_link_hostname((!neg) ? 
&network_viewer_opt.included_hostnames : + &network_viewer_opt.excluded_hostnames, + hostname); + + parse = end; + } +} + +/** + * Parse network viewer section + * + * @param cfg the configuration structure + */ +void parse_network_viewer_section(struct config *cfg) +{ + network_viewer_opt.hostname_resolution_enabled = appconfig_get_boolean(cfg, + EBPF_NETWORK_VIEWER_SECTION, + EBPF_CONFIG_RESOLVE_HOSTNAME, + CONFIG_BOOLEAN_NO); + + network_viewer_opt.service_resolution_enabled = appconfig_get_boolean(cfg, + EBPF_NETWORK_VIEWER_SECTION, + EBPF_CONFIG_RESOLVE_SERVICE, + CONFIG_BOOLEAN_YES); + + char *value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_PORTS, NULL); + ebpf_parse_ports(value); + + if (network_viewer_opt.hostname_resolution_enabled) { + value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_HOSTNAMES, NULL); + ebpf_link_hostnames(value); + } else { + netdata_log_info("Name resolution is disabled, collector will not parse \"hostnames\" list."); + } + + value = appconfig_get(cfg, + EBPF_NETWORK_VIEWER_SECTION, + "ips", + NULL); + //"ips", "!127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7 !::1/128"); + ebpf_parse_ips_unsafe(value); +} + /** * Read Local Ports * @@ -1705,7 +2598,7 @@ static void read_local_ports(char *filename, uint8_t proto) * * Read the local address from the interfaces. */ -static void read_local_addresses() +void ebpf_read_local_addresses_unsafe() { struct ifaddrs *ifaddr, *ifa; if (getifaddrs(&ifaddr) == -1) { @@ -1754,9 +2647,8 @@ static void read_local_addresses() } } - ebpf_fill_ip_list((family == AF_INET)?&network_viewer_opt.ipv4_local_ip:&network_viewer_opt.ipv6_local_ip, - w, - "selector"); + ebpf_fill_ip_list_unsafe( + (family == AF_INET) ? &network_viewer_opt.ipv4_local_ip : &network_viewer_opt.ipv6_local_ip, w, "selector"); } freeifaddrs(ifaddr); @@ -1773,6 +2665,7 @@ void ebpf_start_pthread_variables() pthread_mutex_init(&ebpf_exit_cleanup, NULL); pthread_mutex_init(&collect_data_mutex, NULL); pthread_mutex_init(&mutex_cgroup_shm, NULL); + rw_spinlock_init(&ebpf_judy_pid.index.rw_spinlock); } /** @@ -1780,6 +2673,8 @@ void ebpf_start_pthread_variables() */ static void ebpf_allocate_common_vectors() { + ebpf_judy_pid.pid_table = ebpf_allocate_pid_aral(NETDATA_EBPF_PID_SOCKET_ARAL_TABLE_NAME, + sizeof(netdata_ebpf_judy_pid_stats_t)); ebpf_all_pids = callocz((size_t)pid_max, sizeof(struct ebpf_pid_stat *)); ebpf_aral_init(); } @@ -1825,7 +2720,7 @@ static void ebpf_update_interval(int update_every) int i; int value = (int) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_UPDATE_EVERY, update_every); - for (i = 0; ebpf_modules[i].thread_name; i++) { + for (i = 0; ebpf_modules[i].info.thread_name; i++) { ebpf_modules[i].update_every = value; } } @@ -1840,7 +2735,7 @@ static void ebpf_update_table_size() int i; uint32_t value = (uint32_t) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_PID_SIZE, ND_EBPF_DEFAULT_PID_SIZE); - for (i = 0; ebpf_modules[i].thread_name; i++) { + for (i = 0; ebpf_modules[i].info.thread_name; i++) { ebpf_modules[i].pid_map_size = value; } } @@ -1855,7 +2750,7 @@ static void ebpf_update_lifetime() int i; uint32_t value = (uint32_t) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_LIFETIME, EBPF_DEFAULT_LIFETIME); - for (i = 0; ebpf_modules[i].thread_name; i++) { + for (i = 0; ebpf_modules[i].info.thread_name; i++) { ebpf_modules[i].lifetime = value; } } @@ -1868,7 +2763,7 @@ static void ebpf_update_lifetime() static inline 
void ebpf_set_load_mode(netdata_ebpf_load_mode_t load, netdata_ebpf_load_mode_t origin) { int i; - for (i = 0; ebpf_modules[i].thread_name; i++) { + for (i = 0; ebpf_modules[i].info.thread_name; i++) { ebpf_modules[i].load &= ~NETDATA_EBPF_LOAD_METHODS; ebpf_modules[i].load |= load | origin ; } @@ -1897,7 +2792,7 @@ static void ebpf_update_map_per_core() int i; int value = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_MAPS_PER_CORE, CONFIG_BOOLEAN_YES); - for (i = 0; ebpf_modules[i].thread_name; i++) { + for (i = 0; ebpf_modules[i].info.thread_name; i++) { ebpf_modules[i].maps_per_core = value; } } @@ -1961,7 +2856,7 @@ static void read_collector_values(int *disable_cgroups, // Read ebpf programs section enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, - ebpf_modules[EBPF_MODULE_PROCESS_IDX].config_name, CONFIG_BOOLEAN_YES); + ebpf_modules[EBPF_MODULE_PROCESS_IDX].info.config_name, CONFIG_BOOLEAN_YES); if (enabled) { ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, *disable_cgroups); } @@ -1971,7 +2866,7 @@ static void read_collector_values(int *disable_cgroups, CONFIG_BOOLEAN_NO); if (!enabled) enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, - ebpf_modules[EBPF_MODULE_SOCKET_IDX].config_name, + ebpf_modules[EBPF_MODULE_SOCKET_IDX].info.config_name, CONFIG_BOOLEAN_NO); if (enabled) { ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_cgroups); @@ -1979,10 +2874,11 @@ static void read_collector_values(int *disable_cgroups, // This is kept to keep compatibility enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network connection monitoring", - CONFIG_BOOLEAN_NO); + CONFIG_BOOLEAN_YES); if (!enabled) enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network connections", - CONFIG_BOOLEAN_NO); + CONFIG_BOOLEAN_YES); + network_viewer_opt.enabled = enabled; if (enabled) { if (!ebpf_modules[EBPF_MODULE_SOCKET_IDX].enabled) @@ -1991,7 +2887,7 @@ static void read_collector_values(int *disable_cgroups, // Read network viewer section if network viewer is enabled // This is kept here to keep backward compatibility parse_network_viewer_section(&collector_config); - parse_service_name_section(&collector_config); + ebpf_parse_service_name_section(&collector_config); } enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "cachestat", @@ -2238,7 +3134,7 @@ static void ebpf_parse_args(int argc, char **argv) }; memset(&network_viewer_opt, 0, sizeof(network_viewer_opt)); - network_viewer_opt.max_dim = NETDATA_NV_CAP_VALUE; + rw_spinlock_init(&network_viewer_opt.rw_spinlock); if (argc > 1) { int n = (int)str2l(argv[1]); @@ -2250,6 +3146,7 @@ static void ebpf_parse_args(int argc, char **argv) if (!freq) freq = EBPF_DEFAULT_UPDATE_EVERY; + //rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock); if (ebpf_load_collector_config(ebpf_user_config_dir, &disable_cgroups, freq)) { netdata_log_info( "Does not have a configuration file inside `%s/ebpf.d.conf. 
It will try to load stock file.", @@ -2260,6 +3157,7 @@ static void ebpf_parse_args(int argc, char **argv) } ebpf_load_thread_config(); + //rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock); while (1) { int c = getopt_long_only(argc, argv, "", long_options, &option_index); @@ -2457,8 +3355,7 @@ static void ebpf_parse_args(int argc, char **argv) } if (disable_cgroups) { - if (disable_cgroups) - ebpf_disable_cgroups(); + ebpf_disable_cgroups(); } if (select_threads) { @@ -2507,16 +3404,16 @@ static char *hash_table_core[NETDATA_EBPF_LOAD_STAT_END] = {"per_core", "unique" static inline void ebpf_send_hash_table_pid_data(char *chart, uint32_t idx) { int i; - write_begin_chart(NETDATA_MONITORING_FAMILY, chart); + ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, chart, ""); for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) { ebpf_module_t *wem = &ebpf_modules[i]; - if (wem->apps_routine) - write_chart_dimension((char *)wem->thread_name, + if (wem->functions.apps_routine) + write_chart_dimension((char *)wem->info.thread_name, (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? wem->hash_table_stats[idx]: 0); } - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -2528,13 +3425,13 @@ static inline void ebpf_send_hash_table_pid_data(char *chart, uint32_t idx) static inline void ebpf_send_global_hash_table_data() { int i; - write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS); + ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS, ""); for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) { ebpf_module_t *wem = &ebpf_modules[i]; - write_chart_dimension((char *)wem->thread_name, + write_chart_dimension((char *)wem->info.thread_name, (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? NETDATA_CONTROLLER_END: 0); } - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -2547,48 +3444,71 @@ void ebpf_send_statistic_data() if (!publish_internal_metrics) return; - write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_THREADS); + ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_THREADS, ""); int i; for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) { ebpf_module_t *wem = &ebpf_modules[i]; - write_chart_dimension((char *)wem->thread_name, (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? 1 : 0); + if (wem->functions.fnct_routine) + continue; + + write_chart_dimension((char *)wem->info.thread_name, (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? 1 : 0); } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_LIFE_TIME); + ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_LIFE_TIME, ""); for (i = 0; i < EBPF_MODULE_FUNCTION_IDX ; i++) { ebpf_module_t *wem = &ebpf_modules[i]; // Threads like VFS is slow to load and this can create an invalid number, this is the motive // we are also testing wem->lifetime value. - write_chart_dimension((char *)wem->thread_name, + if (wem->functions.fnct_routine) + continue; + + write_chart_dimension((char *)wem->info.thread_name, (wem->lifetime && wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? 
(long long) (wem->lifetime - wem->running_time): 0) ; } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_LOAD_METHOD); + ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_LOAD_METHOD, ""); write_chart_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_LEGACY], (long long)plugin_statistics.legacy); write_chart_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_CORE], (long long)plugin_statistics.core); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_KERNEL_MEMORY); + ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_KERNEL_MEMORY, ""); write_chart_dimension(memlock_stat, (long long)plugin_statistics.memlock_kern); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_LOADED); + ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_LOADED, ""); write_chart_dimension(hash_table_stat, (long long)plugin_statistics.hash_tables); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_PER_CORE); + ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_PER_CORE, ""); write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_PER_CORE], (long long)plugin_statistics.hash_percpu); write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_UNIQUE], (long long)plugin_statistics.hash_unique); - write_end_chart(); + ebpf_write_end_chart(); ebpf_send_global_hash_table_data(); ebpf_send_hash_table_pid_data(NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS, NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_ADD); ebpf_send_hash_table_pid_data(NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS, NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_DEL); + + for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) { + ebpf_module_t *wem = &ebpf_modules[i]; + if (!wem->functions.fnct_routine) + continue; + + ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, (char *)wem->functions.fcnt_thread_chart_name, ""); + write_chart_dimension((char *)wem->info.thread_name, (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? 1 : 0); + ebpf_write_end_chart(); + + ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, (char *)wem->functions.fcnt_thread_lifetime_name, ""); + write_chart_dimension((char *)wem->info.thread_name, + (wem->lifetime && wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? + (long long) (wem->lifetime - wem->running_time): + 0) ; + ebpf_write_end_chart(); + } } /** @@ -2607,57 +3527,52 @@ static void update_internal_metric_variable() } /** - * Create chart for Statistic Thread + * Create Thread Chart * - * Write to standard output current values for threads. + * Write to standard output current values for threads charts. * + * @param name is the chart name + * @param title chart title. + * @param units chart units + * @param order is the chart order * @param update_every time used to update charts + * @param module a module to create a specific chart. */ -static inline void ebpf_create_statistic_thread_chart(int update_every) +static void ebpf_create_thread_chart(char *name, + char *title, + char *units, + int order, + int update_every, + ebpf_module_t *module) { + // common call for specific and all charts. 
ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY, - NETDATA_EBPF_THREADS, - "Threads running.", - "boolean", + name, + "", + title, + units, NETDATA_EBPF_FAMILY, NETDATA_EBPF_CHART_TYPE_LINE, NULL, - NETDATA_EBPF_ORDER_STAT_THREADS, + order, update_every, - NETDATA_EBPF_MODULE_NAME_PROCESS); + "main"); - int i; - for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) { - ebpf_write_global_dimension((char *)ebpf_modules[i].thread_name, - (char *)ebpf_modules[i].thread_name, + if (module) { + ebpf_write_global_dimension((char *)module->info.thread_name, + (char *)module->info.thread_name, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + return; } -} - -/** - * Create lifetime Thread Chart - * - * Write to standard output current values for threads lifetime. - * - * @param update_every time used to update charts - */ -static inline void ebpf_create_lifetime_thread_chart(int update_every) -{ - ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY, - NETDATA_EBPF_LIFE_TIME, - "Threads running.", - "seconds", - NETDATA_EBPF_FAMILY, - NETDATA_EBPF_CHART_TYPE_LINE, - NULL, - NETDATA_EBPF_ORDER_STAT_LIFE_TIME, - update_every, - NETDATA_EBPF_MODULE_NAME_PROCESS); int i; for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) { - ebpf_write_global_dimension((char *)ebpf_modules[i].thread_name, - (char *)ebpf_modules[i].thread_name, + ebpf_module_t *em = &ebpf_modules[i]; + if (em->functions.fnct_routine) + continue; + + ebpf_write_global_dimension((char *)em->info.thread_name, + (char *)em->info.thread_name, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); } } @@ -2673,6 +3588,7 @@ static inline void ebpf_create_statistic_load_chart(int update_every) { ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_LOAD_METHOD, + "", "Load info.", "methods", NETDATA_EBPF_FAMILY, @@ -2702,6 +3618,7 @@ static inline void ebpf_create_statistic_kernel_memory(int update_every) { ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_KERNEL_MEMORY, + "", "Memory allocated for hash tables.", "bytes", NETDATA_EBPF_FAMILY, @@ -2727,6 +3644,7 @@ static inline void ebpf_create_statistic_hash_tables(int update_every) { ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_LOADED, + "", "Number of hash tables loaded.", "hash tables", NETDATA_EBPF_FAMILY, @@ -2752,6 +3670,7 @@ static inline void ebpf_create_statistic_hash_per_core(int update_every) { ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_PER_CORE, + "", "How threads are loading hash/array tables.", "threads", NETDATA_EBPF_FAMILY, @@ -2781,6 +3700,7 @@ static void ebpf_create_statistic_hash_global_elements(int update_every) { ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS, + "", "Controllers inside global table", "rows", NETDATA_EBPF_FAMILY, @@ -2792,8 +3712,8 @@ static void ebpf_create_statistic_hash_global_elements(int update_every) int i; for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) { - ebpf_write_global_dimension((char *)ebpf_modules[i].thread_name, - (char *)ebpf_modules[i].thread_name, + ebpf_write_global_dimension((char *)ebpf_modules[i].info.thread_name, + (char *)ebpf_modules[i].info.thread_name, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); } } @@ -2812,6 +3732,7 @@ static void ebpf_create_statistic_hash_pid_table(int update_every, char *id, cha { ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY, id, + "", title, "rows", NETDATA_EBPF_FAMILY, @@ -2824,9 +3745,9 @@ static void ebpf_create_statistic_hash_pid_table(int update_every, char *id, cha int i; for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; 
i++) { ebpf_module_t *wem = &ebpf_modules[i]; - if (wem->apps_routine) - ebpf_write_global_dimension((char *)wem->thread_name, - (char *)wem->thread_name, + if (wem->functions.apps_routine) + ebpf_write_global_dimension((char *)wem->info.thread_name, + (char *)wem->info.thread_name, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); } } @@ -2850,15 +3771,63 @@ static void ebpf_create_statistic_charts(int update_every) create_charts = 0; - ebpf_create_statistic_thread_chart(update_every); + ebpf_create_thread_chart(NETDATA_EBPF_THREADS, + "Threads running.", + "boolean", + NETDATA_EBPF_ORDER_STAT_THREADS, + update_every, + NULL); + /* #ifdef NETDATA_DEV_MODE EBPF_PLUGIN_FUNCTIONS(EBPF_FUNCTION_THREAD, EBPF_PLUGIN_THREAD_FUNCTION_DESCRIPTION); #endif - - ebpf_create_lifetime_thread_chart(update_every); + */ + + ebpf_create_thread_chart(NETDATA_EBPF_LIFE_TIME, + "Time remaining for thread.", + "seconds", + NETDATA_EBPF_ORDER_STAT_LIFE_TIME, + update_every, + NULL); + /* #ifdef NETDATA_DEV_MODE EBPF_PLUGIN_FUNCTIONS(EBPF_FUNCTION_THREAD, EBPF_PLUGIN_THREAD_FUNCTION_DESCRIPTION); #endif + */ + + int i,j; + char name[256]; + for (i = 0, j = NETDATA_EBPF_ORDER_FUNCTION_PER_THREAD; i < EBPF_MODULE_FUNCTION_IDX; i++) { + ebpf_module_t *em = &ebpf_modules[i]; + if (!em->functions.fnct_routine) + continue; + + em->functions.order_thread_chart = j; + snprintfz(name, sizeof(name) - 1, "%s_%s", NETDATA_EBPF_THREADS, em->info.thread_name); + em->functions.fcnt_thread_chart_name = strdupz(name); + ebpf_create_thread_chart(name, + "Threads running.", + "boolean", + j++, + update_every, + em); +#ifdef NETDATA_DEV_MODE + EBPF_PLUGIN_FUNCTIONS(em->functions.fcnt_name, em->functions.fcnt_desc); +#endif + + em->functions.order_thread_lifetime = j; + snprintfz(name, sizeof(name) - 1, "%s_%s", NETDATA_EBPF_LIFE_TIME, em->info.thread_name); + em->functions.fcnt_thread_lifetime_name = strdupz(name); + ebpf_create_thread_chart(name, + "Time remaining for thread.", + "seconds", + j++, + update_every, + em); +#ifdef NETDATA_DEV_MODE + EBPF_PLUGIN_FUNCTIONS(em->functions.fcnt_name, em->functions.fcnt_desc); +#endif + } ebpf_create_statistic_load_chart(update_every); @@ -3013,7 +3982,7 @@ static void ebpf_kill_previous_process(char *filename, pid_t pid) */ void ebpf_pid_file(char *filename, size_t length) { - snprintfz(filename, length, "%s%s/ebpf.d/ebpf.pid", netdata_configured_host_prefix, ebpf_plugin_dir); + snprintfz(filename, length, "%s/var/run/ebpf.pid", netdata_configured_host_prefix); } /** @@ -3040,8 +4009,8 @@ static void ebpf_manage_pid(pid_t pid) static void ebpf_set_static_routine() { int i; - for (i = 0; ebpf_modules[i].thread_name; i++) { - ebpf_threads[i].start_routine = ebpf_modules[i].start_routine; + for (i = 0; ebpf_modules[i].info.thread_name; i++) { + ebpf_threads[i].start_routine = ebpf_modules[i].functions.start_routine; } } @@ -3055,8 +4024,9 @@ static void ebpf_manage_pid(pid_t pid) */ int main(int argc, char **argv) { - stderror = stderr; clocks_init(); + nd_log_initialize_for_external_plugins("ebpf.plugin"); + main_thread_id = gettid(); set_global_variables(); @@ -3066,16 +4036,6 @@ int main(int argc, char **argv) if (ebpf_check_conditions()) return 2; - // set name - program_name = "ebpf.plugin"; - - // disable syslog - error_log_syslog = 0; - - // set errors flood protection to 100 logs per hour - error_log_errors_per_period = 100; - error_log_throttle_period = 3600; - if (ebpf_adjust_memory_limit()) return 3; @@ -3095,7 +4055,7 @@ int main(int argc, char **argv) 
libbpf_set_strict_mode(LIBBPF_STRICT_ALL); #endif - read_local_addresses(); + ebpf_read_local_addresses_unsafe(); read_local_ports("/proc/net/tcp", IPPROTO_TCP); read_local_ports("/proc/net/tcp6", IPPROTO_TCP); read_local_ports("/proc/net/udp", IPPROTO_UDP); @@ -3116,13 +4076,13 @@ int main(int argc, char **argv) ebpf_module_t *em = &ebpf_modules[i]; em->thread = st; em->thread_id = i; - if (em->enabled) { + if (em->enabled != NETDATA_THREAD_EBPF_NOT_RUNNING) { st->thread = mallocz(sizeof(netdata_thread_t)); em->enabled = NETDATA_THREAD_EBPF_RUNNING; em->lifetime = EBPF_NON_FUNCTION_LIFE_TIME; netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, em); } else { - em->enabled = NETDATA_THREAD_EBPF_NOT_RUNNING; + em->lifetime = EBPF_DEFAULT_LIFETIME; } } @@ -3133,7 +4093,7 @@ int main(int argc, char **argv) int update_apps_list = update_apps_every - 1; int process_maps_per_core = ebpf_modules[EBPF_MODULE_PROCESS_IDX].maps_per_core; //Plugin will be killed when it receives a signal - for ( ; !ebpf_exit_plugin ; global_iterations_counter++) { + for ( ; !ebpf_plugin_exit; global_iterations_counter++) { (void)heartbeat_next(&hb, step); if (global_iterations_counter % EBPF_DEFAULT_UPDATE_EVERY == 0) { diff --git a/collectors/ebpf.plugin/ebpf.d/network.conf b/collectors/ebpf.plugin/ebpf.d/network.conf index 00cbf2e8ba07e8..99c32edc13bee8 100644 --- a/collectors/ebpf.plugin/ebpf.d/network.conf +++ b/collectors/ebpf.plugin/ebpf.d/network.conf @@ -26,6 +26,11 @@ # # The `maps per core` defines if hash tables will be per core or not. This option is ignored on kernels older than 4.6. # +# The `collect pid` option defines the PID stored inside hash tables and accepts the following options: +# `real parent`: Only stores real parent inside PID +# `parent` : Only stores parent PID. +# `all` : Stores all PIDs used by software. This is the most expensive option. +# # The `lifetime` defines the time length a thread will run when it is enabled by a function. # # Uncomment lines to define specific options for thread. 
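# Example (illustrative only, not part of the stock file shipped by the collector): a user who does not need the most expensive per-task accounting described above could select the per-parent mode instead, e.g. # #    collect pid = real parent #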
@@ -35,12 +40,12 @@ # cgroups = no # update every = 10 bandwidth table size = 16384 - ipv4 connection table size = 16384 - ipv6 connection table size = 16384 + socket monitoring table size = 16384 udp connection table size = 4096 ebpf type format = auto - ebpf co-re tracing = trampoline + ebpf co-re tracing = probe maps per core = no + collect pid = all lifetime = 300 # @@ -49,11 +54,12 @@ # This is a feature with status WIP(Work in Progress) # [network connections] - maximum dimensions = 50 + enabled = yes resolve hostnames = no - resolve service names = no + resolve service names = yes ports = * - ips = !127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7 !::1/128 +# ips = !127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7 !::1/128 + ips = * hostnames = * [service name] diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h index 78e3a9252b73ad..ad7c5a94cddfdc 100644 --- a/collectors/ebpf.plugin/ebpf.h +++ b/collectors/ebpf.plugin/ebpf.h @@ -31,6 +31,7 @@ #include "daemon/main.h" #include "ebpf_apps.h" +#include "ebpf_functions.h" #include "ebpf_cgroup.h" #define NETDATA_EBPF_OLD_CONFIG_FILE "ebpf.conf" @@ -98,6 +99,26 @@ typedef struct netdata_error_report { int err; } netdata_error_report_t; +typedef struct netdata_ebpf_judy_pid { + ARAL *pid_table; + + // Index for PIDs + struct { // support for multiple indexing engines + Pvoid_t JudyLArray; // the hash table + RW_SPINLOCK rw_spinlock; // protect the index + } index; +} netdata_ebpf_judy_pid_t; + +typedef struct netdata_ebpf_judy_pid_stats { + char *cmdline; + + // Index for Socket timestamp + struct { // support for multiple indexing engines + Pvoid_t JudyLArray; // the hash table + RW_SPINLOCK rw_spinlock; // protect the index + } socket_stats; +} netdata_ebpf_judy_pid_stats_t; + extern ebpf_module_t ebpf_modules[]; enum ebpf_main_index { EBPF_MODULE_PROCESS_IDX, @@ -217,6 +238,7 @@ void ebpf_global_labels(netdata_syscall_stat_t *is, void ebpf_write_chart_cmd(char *type, char *id, + char *suffix, char *title, char *units, char *family, @@ -244,8 +266,6 @@ void ebpf_create_chart(char *type, int update_every, char *module); -void write_begin_chart(char *family, char *name); - void write_chart_dimension(char *dim, long long value); void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end); @@ -255,18 +275,47 @@ void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, char *dread, long long vread); -void ebpf_create_charts_on_apps(char *name, - char *title, - char *units, - char *family, - char *charttype, - int order, - char *algorithm, - struct ebpf_target *root, - int update_every, - char *module); - -void write_end_chart(); +/** + * Create Chart labels + * + * @param name the label name. + * @param value the label value. + * @param source the label source.
+ */ +static inline void ebpf_create_chart_labels(char *name, char *value, int source) +{ + fprintf(stdout, "CLABEL '%s' '%s' %d\n", name, value, source); +} + +/** + * Commit label + * + * Write commit label to stdout + */ +static inline void ebpf_commit_label() +{ + fprintf(stdout, "CLABEL_COMMIT\n"); +} + +/** + * Write begin command on standard output + * + * @param family the chart family name + * @param name the chart name + * @param metric the chart suffix (used with apps and cgroups) + */ +static inline void ebpf_write_begin_chart(char *family, char *name, char *metric) +{ + printf("BEGIN %s.%s%s\n", family, name, metric); +} + +/** + * Write END command on stdout. + */ +static inline void ebpf_write_end_chart() +{ + printf("END\n"); +} int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp); int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp); @@ -276,6 +325,9 @@ void ebpf_pid_file(char *filename, size_t length); #define EBPF_PROGRAMS_SECTION "ebpf programs" +#define EBPF_COMMON_DIMENSION_PERCENTAGE "%" +#define EBPF_PROGRAMS_SECTION "ebpf programs" + #define EBPF_COMMON_DIMENSION_PERCENTAGE "%" #define EBPF_COMMON_DIMENSION_CALL "calls/s" #define EBPF_COMMON_DIMENSION_CONNECTIONS "connections/s" @@ -313,7 +365,7 @@ void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *root); void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long long v1); collected_number get_value_from_structure(char *basis, size_t offset); void ebpf_update_pid_table(ebpf_local_maps_t *pid, ebpf_module_t *em); -void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *units, char *family, +void ebpf_write_chart_obsolete(char *type, char *id, char *suffix, char *title, char *units, char *family, char *charttype, char *context, int order, int update_every); void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end); void ebpf_update_disabled_plugin_stats(ebpf_module_t *em); @@ -322,10 +374,19 @@ void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link **probe void ebpf_read_global_table_stats(netdata_idx_t *stats, netdata_idx_t *values, int map_fd, int maps_per_core, uint32_t begin, uint32_t end); +void **ebpf_judy_insert_unsafe(PPvoid_t arr, Word_t key); +netdata_ebpf_judy_pid_stats_t *ebpf_get_pid_from_judy_unsafe(PPvoid_t judy_array, uint32_t pid); + +void parse_network_viewer_section(struct config *cfg); +void ebpf_clean_ip_structure(ebpf_network_viewer_ip_list_t **clean); +void ebpf_clean_port_structure(ebpf_network_viewer_port_list_t **clean); +void ebpf_read_local_addresses_unsafe(); extern ebpf_filesystem_partitions_t localfs[]; extern ebpf_sync_syscalls_t local_syscalls[]; -extern int ebpf_exit_plugin; +extern bool ebpf_plugin_exit; +void ebpf_stop_threads(int sig); +extern netdata_ebpf_judy_pid_t ebpf_judy_pid; #define EBPF_MAX_SYNCHRONIZATION_TIME 300 diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c index c7c0cbbbb00d95..10c452267a8429 100644 --- a/collectors/ebpf.plugin/ebpf_apps.c +++ b/collectors/ebpf.plugin/ebpf_apps.c @@ -132,16 +132,6 @@ ebpf_socket_publish_apps_t *ebpf_socket_stat_get(void) return target; } -/** - * eBPF socket release - * - * @param stat Release a target after usage. 
- */ -void ebpf_socket_release(ebpf_socket_publish_apps_t *stat) -{ - aral_freez(ebpf_aral_socket_pid, stat); -} - /***************************************************************** * * CACHESTAT ARAL FUNCTIONS @@ -375,58 +365,6 @@ int ebpf_read_hash_table(void *ep, int fd, uint32_t pid) return -1; } -/** - * Read socket statistic - * - * Read information from kernel ring to user ring. - * - * @param ep the table with all process stats values. - * @param fd the file descriptor mapped from kernel - * @param ef a pointer for the functions mapped from dynamic library - * @param pids the list of pids associated to a target. - * - * @return - */ -size_t read_bandwidth_statistic_using_pid_on_target(ebpf_bandwidth_t **ep, int fd, struct ebpf_pid_on_target *pids) -{ - size_t count = 0; - while (pids) { - uint32_t current_pid = pids->pid; - if (!ebpf_read_hash_table(ep[current_pid], fd, current_pid)) - count++; - - pids = pids->next; - } - - return count; -} - -/** - * Read bandwidth statistic using hash table - * - * @param out the output tensor that will receive the information. - * @param fd the file descriptor that has the data - * @param bpf_map_lookup_elem a pointer for the function to read the data - * @param bpf_map_get_next_key a pointer fo the function to read the index. - */ -size_t read_bandwidth_statistic_using_hash_table(ebpf_bandwidth_t **out, int fd) -{ - size_t count = 0; - uint32_t key = 0; - uint32_t next_key = 0; - - while (bpf_map_get_next_key(fd, &key, &next_key) == 0) { - ebpf_bandwidth_t *eps = out[next_key]; - if (!eps) { - eps = callocz(1, sizeof(ebpf_process_stat_t)); - out[next_key] = eps; - } - ebpf_read_hash_table(eps, fd, next_key); - } - - return count; -} - /***************************************************************** * * FUNCTIONS CALLED FROM COLLECTORS @@ -564,6 +502,13 @@ struct ebpf_target *get_apps_groups_target(struct ebpf_target **agrt, const char // copy the id strncpyz(w->name, nid, EBPF_MAX_NAME); + strncpyz(w->clean_name, w->name, EBPF_MAX_NAME); + netdata_fix_chart_name(w->clean_name); + for (char *d = w->clean_name; *d; d++) { + if (*d == '.') + *d = '_'; + } + strncpyz(w->compare, nid, EBPF_MAX_COMPARE_NAME); size_t len = strlen(w->compare); if (w->compare[len - 1] == '*') { @@ -887,6 +832,7 @@ static inline int read_proc_pid_cmdline(struct ebpf_pid_stat *p) { static char cmdline[MAX_CMDLINE + 1]; + int ret = 0; if (unlikely(!p->cmdline_filename)) { char filename[FILENAME_MAX + 1]; snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid); @@ -909,20 +855,23 @@ static inline int read_proc_pid_cmdline(struct ebpf_pid_stat *p) cmdline[i] = ' '; } - if (p->cmdline) - freez(p->cmdline); - p->cmdline = strdupz(cmdline); - debug_log("Read file '%s' contents: %s", p->cmdline_filename, p->cmdline); - return 1; + ret = 1; cleanup: // copy the command to the command line if (p->cmdline) freez(p->cmdline); p->cmdline = strdupz(p->comm); - return 0; + + rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock); + netdata_ebpf_judy_pid_stats_t *pid_ptr = ebpf_get_pid_from_judy_unsafe(&ebpf_judy_pid.index.JudyLArray, p->pid); + if (pid_ptr) + pid_ptr->cmdline = p->cmdline; + rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock); + + return ret; } /** @@ -1238,6 +1187,24 @@ static inline void del_pid_entry(pid_t pid) freez(p->status_filename); freez(p->io_filename); freez(p->cmdline_filename); + + rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock); + netdata_ebpf_judy_pid_stats_t *pid_ptr = 
ebpf_get_pid_from_judy_unsafe(&ebpf_judy_pid.index.JudyLArray, p->pid); + if (pid_ptr) { + if (pid_ptr->socket_stats.JudyLArray) { + Word_t local_socket = 0; + Pvoid_t *socket_value; + bool first_socket = true; + while ((socket_value = JudyLFirstThenNext(pid_ptr->socket_stats.JudyLArray, &local_socket, &first_socket))) { + netdata_socket_plus_t *socket_clean = *socket_value; + aral_freez(aral_socket_table, socket_clean); + } + JudyLFreeArray(&pid_ptr->socket_stats.JudyLArray, PJE0); + } + JudyLDel(&ebpf_judy_pid.index.JudyLArray, p->pid, PJE0); + } + rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock); + freez(p->cmdline); ebpf_pid_stat_release(p); @@ -1279,12 +1246,6 @@ int get_pid_comm(pid_t pid, size_t n, char *dest) */ void cleanup_variables_from_other_threads(uint32_t pid) { - // Clean socket structures - if (socket_bandwidth_curr) { - ebpf_socket_release(socket_bandwidth_curr[pid]); - socket_bandwidth_curr[pid] = NULL; - } - // Clean cachestat structure if (cachestat_pid) { ebpf_cachestat_release(cachestat_pid[pid]); diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/collectors/ebpf.plugin/ebpf_apps.h index fc894a55fe44d4..2580915078c3ee 100644 --- a/collectors/ebpf.plugin/ebpf_apps.h +++ b/collectors/ebpf.plugin/ebpf_apps.h @@ -10,11 +10,13 @@ #include "libnetdata/ebpf/ebpf.h" #define NETDATA_APPS_FAMILY "apps" +#define NETDATA_APP_FAMILY "app" #define NETDATA_APPS_FILE_GROUP "file_access" +#define NETDATA_APPS_FILE_FDS "fds" #define NETDATA_APPS_FILE_CGROUP_GROUP "file_access (eBPF)" #define NETDATA_APPS_PROCESS_GROUP "process (eBPF)" #define NETDATA_APPS_NET_GROUP "net" -#define NETDATA_APPS_IPC_SHM_GROUP "ipc shm (eBPF)" +#define NETDATA_APPS_IPC_SHM_GROUP "ipc shm" #include "ebpf_process.h" #include "ebpf_dcstat.h" @@ -47,8 +49,10 @@ struct ebpf_target { char id[EBPF_MAX_NAME + 1]; uint32_t idhash; + uint32_t charts_created; char name[EBPF_MAX_NAME + 1]; + char clean_name[EBPF_MAX_NAME + 1]; // sanitized name used in chart id (need to replace at least dots) // Changes made to simplify integration between apps and eBPF. netdata_publish_cachestat_t cachestat; @@ -150,24 +154,6 @@ typedef struct ebpf_process_stat { uint8_t removeme; } ebpf_process_stat_t; -typedef struct ebpf_bandwidth { - uint32_t pid; - - uint64_t first; // First timestamp - uint64_t ct; // Last timestamp - uint64_t bytes_sent; // Bytes sent - uint64_t bytes_received; // Bytes received - uint64_t call_tcp_sent; // Number of times tcp_sendmsg was called - uint64_t call_tcp_received; // Number of times tcp_cleanup_rbuf was called - uint64_t retransmit; // Number of times tcp_retransmit was called - uint64_t call_udp_sent; // Number of times udp_sendmsg was called - uint64_t call_udp_received; // Number of times udp_recvmsg was called - uint64_t close; // Number of times tcp_close was called - uint64_t drop; // THIS IS NOT USED FOR WHILE, we are in groom section - uint32_t tcp_v4_connection; // Number of times tcp_v4_connection was called. - uint32_t tcp_v6_connection; // Number of times tcp_v6_connection was called. -} ebpf_bandwidth_t; - /** * Internal function used to write debug messages. 
* @@ -208,12 +194,6 @@ int ebpf_read_hash_table(void *ep, int fd, uint32_t pid); int get_pid_comm(pid_t pid, size_t n, char *dest); -size_t read_processes_statistic_using_pid_on_target(ebpf_process_stat_t **ep, - int fd, - struct ebpf_pid_on_target *pids); - -size_t read_bandwidth_statistic_using_pid_on_target(ebpf_bandwidth_t **ep, int fd, struct ebpf_pid_on_target *pids); - void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core); void ebpf_process_apps_accumulator(ebpf_process_stat_t *out, int maps_per_core); @@ -242,7 +222,6 @@ extern ebpf_process_stat_t *process_stat_vector; extern ARAL *ebpf_aral_socket_pid; void ebpf_socket_aral_init(); ebpf_socket_publish_apps_t *ebpf_socket_stat_get(void); -void ebpf_socket_release(ebpf_socket_publish_apps_t *stat); extern ARAL *ebpf_aral_cachestat_pid; void ebpf_cachestat_aral_init(); diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c index affecdea2d4a44..d9f8f7b06b6e38 100644 --- a/collectors/ebpf.plugin/ebpf_cachestat.c +++ b/collectors/ebpf.plugin/ebpf_cachestat.c @@ -353,6 +353,7 @@ static void ebpf_obsolete_services(ebpf_module_t *em) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_RATIO_CHART, + "", "Hit ratio", EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_SUBMENU, @@ -363,6 +364,7 @@ static void ebpf_obsolete_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_DIRTY_CHART, + "", "Number of dirty pages", EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_SUBMENU, @@ -373,6 +375,7 @@ static void ebpf_obsolete_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_CHART, + "", "Number of accessed files", EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_SUBMENU, @@ -383,6 +386,7 @@ static void ebpf_obsolete_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_MISSES_CHART, + "", "Files out of page cache", EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_SUBMENU, @@ -425,6 +429,7 @@ static void ebpf_obsolete_cachestat_global(ebpf_module_t *em) { ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_RATIO_CHART, + "", "Hit ratio", EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_SUBMENU, @@ -435,6 +440,7 @@ static void ebpf_obsolete_cachestat_global(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_DIRTY_CHART, + "", "Number of dirty pages", EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_SUBMENU, @@ -445,6 +451,7 @@ static void ebpf_obsolete_cachestat_global(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_CHART, + "", "Number of accessed files", EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_SUBMENU, @@ -455,6 +462,7 @@ static void ebpf_obsolete_cachestat_global(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_MISSES_CHART, + "", "Files out of page cache", EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_SUBMENU, @@ -473,44 +481,57 @@ static void ebpf_obsolete_cachestat_global(ebpf_module_t *em) */ void ebpf_obsolete_cachestat_apps_charts(struct ebpf_module *em) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_CACHESTAT_HIT_RATIO_CHART, - "Hit ratio", - EBPF_COMMON_DIMENSION_PERCENTAGE, - NETDATA_CACHESTAT_SUBMENU, - NETDATA_EBPF_CHART_TYPE_LINE, - NULL, - 20090, - em->update_every); + struct ebpf_target *w; + int update_every = em->update_every; + for 
(w = apps_groups_root_target; w; w = w->next) { + if (unlikely(!(w->charts_created & (1<update_every); + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_cachestat_hit_ratio", + "Hit ratio", + EBPF_COMMON_DIMENSION_PERCENTAGE, + NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, + "app.ebpf_cachestat_hit_ratio", + 20260, + update_every); - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, NETDATA_CACHESTAT_HIT_CHART, - "Number of accessed files", - EBPF_CACHESTAT_DIMENSION_HITS, - NETDATA_CACHESTAT_SUBMENU, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20092, - em->update_every); + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_cachestat_dirty_pages", + "Number of dirty pages", + EBPF_CACHESTAT_DIMENSION_PAGE, + NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_cachestat_dirty_pages", + 20261, + update_every); - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_CACHESTAT_MISSES_CHART, - "Files out of page cache", - EBPF_CACHESTAT_DIMENSION_MISSES, - NETDATA_CACHESTAT_SUBMENU, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20093, - em->update_every); + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_cachestat_access", + "Number of accessed files", + EBPF_CACHESTAT_DIMENSION_HITS, + NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_cachestat_access", + 20262, + update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_cachestat_misses", + "Files out of page cache", + EBPF_CACHESTAT_DIMENSION_MISSES, + NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_cachestat_misses", + 20263, + update_every); + w->charts_created &= ~(1<update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); - - ebpf_create_charts_on_apps(NETDATA_CACHESTAT_DIRTY_CHART, - "Number of dirty pages", - EBPF_CACHESTAT_DIMENSION_PAGE, - NETDATA_CACHESTAT_SUBMENU, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20091, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); - - ebpf_create_charts_on_apps(NETDATA_CACHESTAT_HIT_CHART, - "Number of accessed files", - EBPF_CACHESTAT_DIMENSION_HITS, - NETDATA_CACHESTAT_SUBMENU, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20092, - ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); - - ebpf_create_charts_on_apps(NETDATA_CACHESTAT_MISSES_CHART, - "Files out of page cache", - EBPF_CACHESTAT_DIMENSION_MISSES, - NETDATA_CACHESTAT_SUBMENU, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20093, - ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); + struct ebpf_target *w; + int update_every = em->update_every; + for (w = root; w; w = w->next) { + if (unlikely(!w->exposed)) + continue; + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_cachestat_hit_ratio", + "Hit ratio", + EBPF_COMMON_DIMENSION_PERCENTAGE, + NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, + "app.ebpf_cachestat_hit_ratio", + 20260, + update_every, + NETDATA_EBPF_MODULE_NAME_CACHESTAT); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION ratio '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_cachestat_dirty_pages", + "Number of dirty pages", + EBPF_CACHESTAT_DIMENSION_PAGE, + NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, + 
"app.ebpf_cachestat_dirty_pages", + 20261, + update_every, + NETDATA_EBPF_MODULE_NAME_CACHESTAT); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION pages '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_cachestat_access", + "Number of accessed files", + EBPF_CACHESTAT_DIMENSION_HITS, + NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_cachestat_access", + 20262, + update_every, + NETDATA_EBPF_MODULE_NAME_CACHESTAT); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION hits '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_cachestat_misses", + "Files out of page cache", + EBPF_CACHESTAT_DIMENSION_MISSES, + NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_cachestat_misses", + 20263, + update_every, + NETDATA_EBPF_MODULE_NAME_CACHESTAT); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION misses '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + w->charts_created |= 1<apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } @@ -939,53 +993,42 @@ void ebpf_cache_send_apps_data(struct ebpf_target *root) struct ebpf_target *w; collected_number value; - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_CACHESTAT_HIT_RATIO_CHART); for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - ebpf_cachestat_sum_pids(&w->cachestat, w->root_pid); - netdata_cachestat_pid_t *current = &w->cachestat.current; - netdata_cachestat_pid_t *prev = &w->cachestat.prev; - - uint64_t mpa = current->mark_page_accessed - prev->mark_page_accessed; - uint64_t mbd = current->mark_buffer_dirty - prev->mark_buffer_dirty; - w->cachestat.dirty = mbd; - uint64_t apcl = current->add_to_page_cache_lru - prev->add_to_page_cache_lru; - uint64_t apd = current->account_page_dirtied - prev->account_page_dirtied; - - cachestat_update_publish(&w->cachestat, mpa, mbd, apcl, apd); - value = (collected_number) w->cachestat.ratio; - // Here we are using different approach to have a chart more smooth - write_chart_dimension(w->name, value); - } - } - write_end_chart(); + if (unlikely(!(w->charts_created & (1<next) { - if (unlikely(w->exposed && w->processes)) { - value = (collected_number) w->cachestat.dirty; - write_chart_dimension(w->name, value); - } - } - write_end_chart(); + ebpf_cachestat_sum_pids(&w->cachestat, w->root_pid); + netdata_cachestat_pid_t *current = &w->cachestat.current; + netdata_cachestat_pid_t *prev = &w->cachestat.prev; - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_CACHESTAT_HIT_CHART); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = (collected_number) w->cachestat.hit; - write_chart_dimension(w->name, value); - } - } - write_end_chart(); + uint64_t mpa = current->mark_page_accessed - prev->mark_page_accessed; + uint64_t mbd = current->mark_buffer_dirty - prev->mark_buffer_dirty; + w->cachestat.dirty = mbd; + uint64_t apcl = current->add_to_page_cache_lru - prev->add_to_page_cache_lru; + uint64_t apd = current->account_page_dirtied - prev->account_page_dirtied; - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_CACHESTAT_MISSES_CHART); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = (collected_number) w->cachestat.miss; - 
write_chart_dimension(w->name, value); - } + cachestat_update_publish(&w->cachestat, mpa, mbd, apcl, apd); + + value = (collected_number) w->cachestat.ratio; + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_cachestat_hit_ratio"); + write_chart_dimension("ratio", value); + ebpf_write_end_chart(); + + value = (collected_number) w->cachestat.dirty; + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_cachestat_dirty_pages"); + write_chart_dimension("pages", value); + ebpf_write_end_chart(); + + value = (collected_number) w->cachestat.hit; + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_cachestat_access"); + write_chart_dimension("hits", value); + ebpf_write_end_chart(); + + value = (collected_number) w->cachestat.miss; + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_cachestat_misses"); + write_chart_dimension("misses", value); + ebpf_write_end_chart(); } - write_end_chart(); } /** @@ -1087,37 +1130,37 @@ static void ebpf_send_systemd_cachestat_charts() { ebpf_cgroup_target_t *ect; - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_RATIO_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_RATIO_CHART, ""); for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_cachestat.ratio); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_DIRTY_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_DIRTY_CHART, ""); for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_cachestat.dirty); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_CHART, ""); for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_cachestat.hit); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_MISSES_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_MISSES_CHART, ""); for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_cachestat.miss); } } - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -1127,21 +1170,21 @@ static void ebpf_send_systemd_cachestat_charts() */ static void ebpf_send_specific_cachestat_data(char *type, netdata_publish_cachestat_t *npc) { - write_begin_chart(type, NETDATA_CACHESTAT_HIT_RATIO_CHART); + ebpf_write_begin_chart(type, NETDATA_CACHESTAT_HIT_RATIO_CHART, ""); write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_RATIO].name, (long long)npc->ratio); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_CACHESTAT_DIRTY_CHART); + ebpf_write_begin_chart(type, NETDATA_CACHESTAT_DIRTY_CHART, ""); write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_DIRTY].name, (long long)npc->dirty); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_CACHESTAT_HIT_CHART); + ebpf_write_begin_chart(type, NETDATA_CACHESTAT_HIT_CHART, ""); 
write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_HIT].name, (long long)npc->hit); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_CACHESTAT_MISSES_CHART); + ebpf_write_begin_chart(type, NETDATA_CACHESTAT_MISSES_CHART, ""); write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_MISS].name, (long long)npc->miss); - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -1201,24 +1244,28 @@ static void ebpf_create_specific_cachestat_charts(char *type, int update_every) static void ebpf_obsolete_specific_cachestat_charts(char *type, int update_every) { ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_HIT_RATIO_CHART, + "", "Hit ratio", EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_HIT_RATIO_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5200, update_every); ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_DIRTY_CHART, + "", "Number of dirty pages", EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_MODIFIED_CACHE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5201, update_every); ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_HIT_CHART, + "", "Number of accessed files", EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_HIT_FILES_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5202, update_every); ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_MISSES_CHART, + "", "Files out of page cache", EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_MISS_FILES_CONTEXT, @@ -1288,10 +1335,10 @@ static void cachestat_collector(ebpf_module_t *em) uint32_t lifetime = em->lifetime; netdata_idx_t *stats = em->hash_table_stats; memset(stats, 0, sizeof(em->hash_table_stats)); - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; @@ -1479,7 +1526,7 @@ static int ebpf_cachestat_load_bpf(ebpf_module_t *em) #endif if (ret) - netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name); return ret; } diff --git a/collectors/ebpf.plugin/ebpf_cgroup.c b/collectors/ebpf.plugin/ebpf_cgroup.c index fd4e783db1754f..1aadfbaf83c350 100644 --- a/collectors/ebpf.plugin/ebpf_cgroup.c +++ b/collectors/ebpf.plugin/ebpf_cgroup.c @@ -331,7 +331,7 @@ void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *fam char *algorithm, char *context, char *module, int update_every) { ebpf_cgroup_target_t *w; - ebpf_write_chart_cmd(NETDATA_SERVICE_FAMILY, id, title, units, family, charttype, context, + ebpf_write_chart_cmd(NETDATA_SERVICE_FAMILY, id, "", title, units, family, charttype, context, order, update_every, module); for (w = ebpf_cgroup_pids; w; w = w->next) { @@ -373,7 +373,7 @@ void *ebpf_cgroup_integration(void *ptr) heartbeat_t hb; heartbeat_init(&hb); //Plugin will be killed when it receives a signal - while (!ebpf_exit_plugin) { + while (!ebpf_plugin_exit) { (void)heartbeat_next(&hb, step); // We are using a small heartbeat time to wake up thread, diff --git a/collectors/ebpf.plugin/ebpf_cgroup.h b/collectors/ebpf.plugin/ebpf_cgroup.h index 
6620ea10a3b3b7..ba8346934fccb2 100644 --- a/collectors/ebpf.plugin/ebpf_cgroup.h +++ b/collectors/ebpf.plugin/ebpf_cgroup.h @@ -21,7 +21,7 @@ struct pid_on_target2 { ebpf_process_stat_t ps; netdata_dcstat_pid_t dc; netdata_publish_shm_t shm; - ebpf_bandwidth_t socket; + netdata_socket_t socket; netdata_cachestat_pid_t cachestat; struct pid_on_target2 *next; diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c index feb935b93add81..4ff6c82ab434b0 100644 --- a/collectors/ebpf.plugin/ebpf_dcstat.c +++ b/collectors/ebpf.plugin/ebpf_dcstat.c @@ -302,6 +302,7 @@ static void ebpf_obsolete_dc_services(ebpf_module_t *em) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_DC_HIT_CHART, + "", "Percentage of files inside directory cache", EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_DIRECTORY_CACHE_SUBMENU, @@ -312,6 +313,7 @@ static void ebpf_obsolete_dc_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_DC_REFERENCE_CHART, + "", "Count file access", EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU, @@ -322,6 +324,7 @@ static void ebpf_obsolete_dc_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_DC_REQUEST_NOT_CACHE_CHART, + "", "Files not present inside directory cache", EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU, @@ -332,6 +335,7 @@ static void ebpf_obsolete_dc_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_DC_REQUEST_NOT_FOUND_CHART, + "", "Files not found", EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU, @@ -372,45 +376,58 @@ static inline void ebpf_obsolete_dc_cgroup_charts(ebpf_module_t *em) { */ void ebpf_obsolete_dc_apps_charts(struct ebpf_module *em) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_DC_HIT_CHART, - "Percentage of files inside directory cache", - EBPF_COMMON_DIMENSION_PERCENTAGE, - NETDATA_DIRECTORY_CACHE_SUBMENU, - NETDATA_EBPF_CHART_TYPE_LINE, - NULL, - 20100, - em->update_every); + struct ebpf_target *w; + int update_every = em->update_every; + for (w = apps_groups_root_target; w; w = w->next) { + if (unlikely(!(w->charts_created & (1<update_every); + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_dc_hit", + "Percentage of files inside directory cache.", + EBPF_COMMON_DIMENSION_PERCENTAGE, + NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, + "app.ebpf_dc_hit", + 20265, + update_every); - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_DC_REQUEST_NOT_CACHE_CHART, - "Files not present inside directory cache", - EBPF_COMMON_DIMENSION_FILES, - NETDATA_DIRECTORY_CACHE_SUBMENU, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20102, - em->update_every); + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_dc_reference", + "Count file access.", + EBPF_COMMON_DIMENSION_FILES, + NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_dc_reference", + 20266, + update_every); - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_DC_REQUEST_NOT_FOUND_CHART, - "Files not found", - EBPF_COMMON_DIMENSION_FILES, - NETDATA_DIRECTORY_CACHE_SUBMENU, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20103, - em->update_every); + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_not_cache", + "Files not present inside directory cache.", + EBPF_COMMON_DIMENSION_FILES, + NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_dc_not_cache", + 20267, + 
update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_not_found", + "Files not found.", + EBPF_COMMON_DIMENSION_FILES, + NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_dc_not_found", + 20268, + update_every); + + w->charts_created &= ~(1<update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); - - ebpf_create_charts_on_apps(NETDATA_DC_REFERENCE_CHART, - "Count file access", - EBPF_COMMON_DIMENSION_FILES, - NETDATA_DIRECTORY_CACHE_SUBMENU, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20101, - ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); - - ebpf_create_charts_on_apps(NETDATA_DC_REQUEST_NOT_CACHE_CHART, - "Files not present inside directory cache", - EBPF_COMMON_DIMENSION_FILES, - NETDATA_DIRECTORY_CACHE_SUBMENU, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20102, - ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); - - ebpf_create_charts_on_apps(NETDATA_DC_REQUEST_NOT_FOUND_CHART, - "Files not found", - EBPF_COMMON_DIMENSION_FILES, - NETDATA_DIRECTORY_CACHE_SUBMENU, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20103, - ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); + struct ebpf_target *w; + int update_every = em->update_every; + for (w = root; w; w = w->next) { + if (unlikely(!w->exposed)) + continue; + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_dc_hit", + "Percentage of files inside directory cache.", + EBPF_COMMON_DIMENSION_PERCENTAGE, + NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, + "app.ebpf_dc_hit", + 20265, + update_every, + NETDATA_EBPF_MODULE_NAME_DCSTAT); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION ratio '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_dc_reference", + "Count file access.", + EBPF_COMMON_DIMENSION_FILES, + NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_dc_reference", + 20266, + update_every, + NETDATA_EBPF_MODULE_NAME_DCSTAT); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION files '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_not_cache", + "Files not present inside directory cache.", + EBPF_COMMON_DIMENSION_FILES, + NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_dc_not_cache", + 20267, + update_every, + NETDATA_EBPF_MODULE_NAME_DCSTAT); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION files '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_not_found", + "Files not found.", + EBPF_COMMON_DIMENSION_FILES, + NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_dc_not_found", + 20268, + update_every, + NETDATA_EBPF_MODULE_NAME_DCSTAT); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION files '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + + w->charts_created |= 1<apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } @@ -746,64 +798,53 @@ void ebpf_dcache_send_apps_data(struct ebpf_target *root) struct ebpf_target *w; collected_number value; - 
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_DC_HIT_CHART); for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - ebpf_dcstat_sum_pids(&w->dcstat, w->root_pid); + if (unlikely(!(w->charts_created & (1<dcstat.curr.cache_access; - uint64_t not_found = w->dcstat.curr.not_found; + ebpf_dcstat_sum_pids(&w->dcstat, w->root_pid); - dcstat_update_publish(&w->dcstat, cache, not_found); - value = (collected_number) w->dcstat.ratio; - write_chart_dimension(w->name, value); - } - } - write_end_chart(); + uint64_t cache = w->dcstat.curr.cache_access; + uint64_t not_found = w->dcstat.curr.not_found; - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_DC_REFERENCE_CHART); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - if (w->dcstat.curr.cache_access < w->dcstat.prev.cache_access) { - w->dcstat.prev.cache_access = 0; - } + dcstat_update_publish(&w->dcstat, cache, not_found); + + value = (collected_number) w->dcstat.ratio; + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_dc_hit"); + write_chart_dimension("ratio", value); + ebpf_write_end_chart(); - w->dcstat.cache_access = (long long)w->dcstat.curr.cache_access - (long long)w->dcstat.prev.cache_access; - value = (collected_number) w->dcstat.cache_access; - write_chart_dimension(w->name, value); - w->dcstat.prev.cache_access = w->dcstat.curr.cache_access; + if (w->dcstat.curr.cache_access < w->dcstat.prev.cache_access) { + w->dcstat.prev.cache_access = 0; } - } - write_end_chart(); + w->dcstat.cache_access = (long long)w->dcstat.curr.cache_access - (long long)w->dcstat.prev.cache_access; - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_DC_REQUEST_NOT_CACHE_CHART); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - if (w->dcstat.curr.file_system < w->dcstat.prev.file_system) { - w->dcstat.prev.file_system = 0; - } + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_dc_reference"); + value = (collected_number) w->dcstat.cache_access; + write_chart_dimension("files", value); + ebpf_write_end_chart(); + w->dcstat.prev.cache_access = w->dcstat.curr.cache_access; - value = (collected_number) (!w->dcstat.cache_access) ? 0 : - (long long )w->dcstat.curr.file_system - (long long)w->dcstat.prev.file_system; - write_chart_dimension(w->name, value); - w->dcstat.prev.file_system = w->dcstat.curr.file_system; + if (w->dcstat.curr.file_system < w->dcstat.prev.file_system) { + w->dcstat.prev.file_system = 0; } - } - write_end_chart(); - - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_DC_REQUEST_NOT_FOUND_CHART); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - if (w->dcstat.curr.not_found < w->dcstat.prev.not_found) { - w->dcstat.prev.not_found = 0; - } - value = (collected_number) (!w->dcstat.cache_access) ? 0 : - (long long)w->dcstat.curr.not_found - (long long)w->dcstat.prev.not_found; - write_chart_dimension(w->name, value); - w->dcstat.prev.not_found = w->dcstat.curr.not_found; + value = (collected_number) (!w->dcstat.cache_access) ? 0 : + (long long )w->dcstat.curr.file_system - (long long)w->dcstat.prev.file_system; + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_not_cache"); + write_chart_dimension("files", value); + ebpf_write_end_chart(); + w->dcstat.prev.file_system = w->dcstat.curr.file_system; + + if (w->dcstat.curr.not_found < w->dcstat.prev.not_found) { + w->dcstat.prev.not_found = 0; } + value = (collected_number) (!w->dcstat.cache_access) ? 
0 : + (long long)w->dcstat.curr.not_found - (long long)w->dcstat.prev.not_found; + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_not_found"); + write_chart_dimension("files", value); + ebpf_write_end_chart(); + w->dcstat.prev.not_found = w->dcstat.curr.not_found; } - write_end_chart(); } /** @@ -898,24 +939,28 @@ static void ebpf_create_specific_dc_charts(char *type, int update_every) static void ebpf_obsolete_specific_dc_charts(char *type, int update_every) { ebpf_write_chart_obsolete(type, NETDATA_DC_HIT_CHART, + "", "Percentage of files inside directory cache", EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_DIRECTORY_CACHE_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_HIT_RATIO_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5700, update_every); ebpf_write_chart_obsolete(type, NETDATA_DC_REFERENCE_CHART, + "", "Count file access", EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_REFERENCE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5701, update_every); ebpf_write_chart_obsolete(type, NETDATA_DC_REQUEST_NOT_CACHE_CHART, + "", "Files not present inside directory cache", EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_NOT_CACHE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5702, update_every); ebpf_write_chart_obsolete(type, NETDATA_DC_REQUEST_NOT_FOUND_CHART, + "", "Files not found", EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_NOT_FOUND_CONTEXT, @@ -1029,23 +1074,23 @@ static void ebpf_send_systemd_dc_charts() { collected_number value; ebpf_cgroup_target_t *ect; - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_HIT_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_HIT_CHART, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long) ect->publish_dc.ratio); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REFERENCE_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REFERENCE_CHART, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long) ect->publish_dc.cache_access); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REQUEST_NOT_CACHE_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REQUEST_NOT_CACHE_CHART, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { value = (collected_number) (!ect->publish_dc.cache_access) ? 0 : @@ -1055,9 +1100,9 @@ static void ebpf_send_systemd_dc_charts() write_chart_dimension(ect->name, (long long) value); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REQUEST_NOT_FOUND_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REQUEST_NOT_FOUND_CHART, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { value = (collected_number) (!ect->publish_dc.cache_access) ? 
0 : @@ -1068,7 +1113,7 @@ static void ebpf_send_systemd_dc_charts() write_chart_dimension(ect->name, (long long) value); } } - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -1080,31 +1125,31 @@ static void ebpf_send_systemd_dc_charts() static void ebpf_send_specific_dc_data(char *type, netdata_publish_dcstat_t *pdc) { collected_number value; - write_begin_chart(type, NETDATA_DC_HIT_CHART); + ebpf_write_begin_chart(type, NETDATA_DC_HIT_CHART, ""); write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_RATIO].name, (long long) pdc->ratio); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_DC_REFERENCE_CHART); + ebpf_write_begin_chart(type, NETDATA_DC_REFERENCE_CHART, ""); write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_REFERENCE].name, (long long) pdc->cache_access); - write_end_chart(); + ebpf_write_end_chart(); value = (collected_number) (!pdc->cache_access) ? 0 : (long long )pdc->curr.file_system - (long long)pdc->prev.file_system; pdc->prev.file_system = pdc->curr.file_system; - write_begin_chart(type, NETDATA_DC_REQUEST_NOT_CACHE_CHART); + ebpf_write_begin_chart(type, NETDATA_DC_REQUEST_NOT_CACHE_CHART, ""); write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_SLOW].name, (long long) value); - write_end_chart(); + ebpf_write_end_chart(); value = (collected_number) (!pdc->cache_access) ? 0 : (long long)pdc->curr.not_found - (long long)pdc->prev.not_found; pdc->prev.not_found = pdc->curr.not_found; - write_begin_chart(type, NETDATA_DC_REQUEST_NOT_FOUND_CHART); + ebpf_write_begin_chart(type, NETDATA_DC_REQUEST_NOT_FOUND_CHART, ""); write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_MISS].name, (long long) value); - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -1169,10 +1214,10 @@ static void dcstat_collector(ebpf_module_t *em) uint32_t lifetime = em->lifetime; netdata_idx_t *stats = em->hash_table_stats; memset(stats, 0, sizeof(em->hash_table_stats)); - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; @@ -1311,7 +1356,7 @@ static int ebpf_dcstat_load_bpf(ebpf_module_t *em) #endif if (ret) - netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name); return ret; } diff --git a/collectors/ebpf.plugin/ebpf_dcstat.h b/collectors/ebpf.plugin/ebpf_dcstat.h index 845b65908c2700..4d6aff12e8fa63 100644 --- a/collectors/ebpf.plugin/ebpf_dcstat.h +++ b/collectors/ebpf.plugin/ebpf_dcstat.h @@ -13,7 +13,7 @@ #define NETDATA_DC_REQUEST_NOT_CACHE_CHART "dc_not_cache" #define NETDATA_DC_REQUEST_NOT_FOUND_CHART "dc_not_found" -#define NETDATA_DIRECTORY_CACHE_SUBMENU "directory cache (eBPF)" +#define NETDATA_DIRECTORY_CACHE_SUBMENU "directory cache" // configuration file #define NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE "dcstat.conf" diff --git a/collectors/ebpf.plugin/ebpf_disk.c b/collectors/ebpf.plugin/ebpf_disk.c index 87945627095bad..466c2e3bb47b2f 100644 --- a/collectors/ebpf.plugin/ebpf_disk.c +++ b/collectors/ebpf.plugin/ebpf_disk.c @@ -485,6 +485,7 @@ static void ebpf_obsolete_disk_global(ebpf_module_t *em) if (flags & NETDATA_DISK_CHART_CREATED) { ebpf_write_chart_obsolete(ned->histogram.name, ned->family, + "", "Disk latency", 
EBPF_COMMON_DIMENSION_CALL, ned->family, @@ -655,7 +656,7 @@ static void read_hard_disk_tables(int table, int maps_per_core) */ static void ebpf_obsolete_hd_charts(netdata_ebpf_disks_t *w, int update_every) { - ebpf_write_chart_obsolete(w->histogram.name, w->family, w->histogram.title, EBPF_COMMON_DIMENSION_CALL, + ebpf_write_chart_obsolete(w->histogram.name, w->family, "", w->histogram.title, EBPF_COMMON_DIMENSION_CALL, w->family, NETDATA_EBPF_CHART_TYPE_STACKED, "disk.latency_io", w->histogram.order, update_every); @@ -778,10 +779,10 @@ static void disk_collector(ebpf_module_t *em) int maps_per_core = em->maps_per_core; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; @@ -873,7 +874,7 @@ static int ebpf_disk_load_bpf(ebpf_module_t *em) #endif if (ret) - netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name); return ret; } diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c index f039647a1df8aa..3c8f30d3eaac75 100644 --- a/collectors/ebpf.plugin/ebpf_fd.c +++ b/collectors/ebpf.plugin/ebpf_fd.c @@ -386,45 +386,49 @@ static void ebpf_obsolete_fd_services(ebpf_module_t *em) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN, + "", "Number of open files", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, NETDATA_CGROUP_FD_OPEN_CONTEXT, - 20061, + 20270, em->update_every); if (em->mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, + "", "Fails to open files", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, NETDATA_CGROUP_FD_OPEN_ERR_CONTEXT, - 20062, + 20271, em->update_every); } ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED, + "", "Files closed", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, NETDATA_CGROUP_FD_CLOSE_CONTEXT, - 20063, + 20272, em->update_every); if (em->mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, + "", "Fails to close files", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT, - 20064, + 20273, em->update_every); } } @@ -460,48 +464,60 @@ static inline void ebpf_obsolete_fd_cgroup_charts(ebpf_module_t *em) { */ void ebpf_obsolete_fd_apps_charts(struct ebpf_module *em) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_FILE_OPEN, - "Number of open files", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_FILE_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20061, - em->update_every); + struct ebpf_target *w; + int update_every = em->update_every; + for (w = apps_groups_root_target; w; w = w->next) { + if (unlikely(!(w->charts_created & (1<mode < MODE_ENTRY) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, - "Fails to open files", + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_file_open", + "Number of open files", EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_FILE_GROUP, + 
NETDATA_APPS_FILE_FDS, NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20062, - em->update_every); - } - - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_FILE_CLOSED, - "Files closed", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_FILE_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20063, - em->update_every); + "app.ebpf_file_open", + 20220, + update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_file_open_error", + "Fails to open files.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_FILE_FDS, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_file_open_error", + 20221, + update_every); + } - if (em->mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, - "Fails to close files", + w->clean_name, + "_ebpf_file_closed", + "Files closed.", EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_FILE_GROUP, + NETDATA_APPS_FILE_FDS, NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20064, - em->update_every); + "app.ebpf_file_closed", + 20222, + update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, + w->clean_name, + "_ebpf_file_close_error", + "Fails to close files.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_FILE_FDS, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_fd_close_error", + 20223, + update_every); + } + w->charts_created &= ~(1<mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, NETDATA_FILE_OPEN_ERR_COUNT, + "", "Open fails", EBPF_COMMON_DIMENSION_CALL, NETDATA_FILE_GROUP, @@ -802,45 +820,30 @@ void ebpf_fd_send_apps_data(ebpf_module_t *em, struct ebpf_target *root) { struct ebpf_target *w; for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - ebpf_fd_sum_pids(&w->fd, w->root_pid); - } - } + if (unlikely(!(w->charts_created & (1<next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->fd.open_call); - } - } - write_end_chart(); + ebpf_fd_sum_pids(&w->fd, w->root_pid); - if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->fd.open_err); - } - } - write_end_chart(); - } + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_file_open"); + write_chart_dimension("calls", w->fd.open_call); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->fd.close_call); + if (em->mode < MODE_ENTRY) { + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_file_open_error"); + write_chart_dimension("calls", w->fd.open_err); + ebpf_write_end_chart(); } - } - write_end_chart(); - if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->fd.close_err); - } + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_file_closed"); + write_chart_dimension("calls", w->fd.close_call); + ebpf_write_end_chart(); + + if (em->mode < MODE_ENTRY) { + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_file_close_error"); + write_chart_dimension("calls", w->fd.close_err); + ebpf_write_end_chart(); } - write_end_chart(); } } @@ 
-933,25 +936,25 @@ static void ebpf_create_specific_fd_charts(char *type, ebpf_module_t *em) */ static void ebpf_obsolete_specific_fd_charts(char *type, ebpf_module_t *em) { - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_OPEN, "Number of open files", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_OPEN, "", "Number of open files", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_OPEN_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5400, em->update_every); if (em->mode < MODE_ENTRY) { - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "Fails to open files", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "", "Fails to open files", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_OPEN_ERR_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5401, em->update_every); } - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_CLOSED, "Files closed", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_CLOSED, "", "Files closed", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_CLOSE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5402, em->update_every); if (em->mode < MODE_ENTRY) { - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "Fails to close files", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "", "Fails to close files", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5403, em->update_every); @@ -968,24 +971,24 @@ static void ebpf_obsolete_specific_fd_charts(char *type, ebpf_module_t *em) */ static void ebpf_send_specific_fd_data(char *type, netdata_fd_stat_t *values, ebpf_module_t *em) { - write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN, ""); write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].name, (long long)values->open_call); - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, ""); write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].name, (long long)values->open_err); - write_end_chart(); + ebpf_write_end_chart(); } - write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSED); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSED, ""); write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].name, (long long)values->close_call); - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, ""); write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].name, (long long)values->close_err); - write_end_chart(); + ebpf_write_end_chart(); } } @@ -1037,40 +1040,40 @@ static void ebpf_create_systemd_fd_charts(ebpf_module_t *em) static void ebpf_send_systemd_fd_charts(ebpf_module_t *em) { ebpf_cgroup_target_t *ect; - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && 
unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_fd.open_call); } } - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_fd.open_err); } } - write_end_chart(); + ebpf_write_end_chart(); } - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_fd.close_call); } } - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_fd.close_err); } } - write_end_chart(); + ebpf_write_end_chart(); } } @@ -1136,10 +1139,10 @@ static void fd_collector(ebpf_module_t *em) uint32_t lifetime = em->lifetime; netdata_idx_t *stats = em->hash_table_stats; memset(stats, 0, sizeof(em->hash_table_stats)); - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; @@ -1197,44 +1200,77 @@ static void fd_collector(ebpf_module_t *em) void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr) { struct ebpf_target *root = ptr; - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN, - "Number of open files", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_FILE_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20061, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_FD); + struct ebpf_target *w; + int update_every = em->update_every; + for (w = root; w; w = w->next) { + if (unlikely(!w->exposed)) + continue; - if (em->mode < MODE_ENTRY) { - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, - "Fails to open files", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_FILE_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20062, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_FD); - } + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_file_open", + "Number of open files", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_FILE_FDS, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_file_open", + 20220, + update_every, + NETDATA_EBPF_MODULE_NAME_FD); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_file_open_error", + "Fails to open files.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_FILE_FDS, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_file_open_error", + 
20221, + update_every, + NETDATA_EBPF_MODULE_NAME_FD); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + } - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSED, - "Files closed", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_FILE_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20063, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_FD); + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_file_closed", + "Files closed.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_FILE_FDS, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_file_closed", + 20222, + update_every, + NETDATA_EBPF_MODULE_NAME_FD); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_file_close_error", + "Fails to close files.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_FILE_FDS, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_file_close_error", + 20223, + update_every, + NETDATA_EBPF_MODULE_NAME_FD); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + } - if (em->mode < MODE_ENTRY) { - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, - "Fails to close files", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_FILE_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20064, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_FD); + w->charts_created |= 1<apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; @@ -1337,7 +1373,7 @@ static int ebpf_fd_load_bpf(ebpf_module_t *em) #endif if (ret) - netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name); return ret; } diff --git a/collectors/ebpf.plugin/ebpf_filesystem.c b/collectors/ebpf.plugin/ebpf_filesystem.c index 2bff738caedd76..b78e6553243946 100644 --- a/collectors/ebpf.plugin/ebpf_filesystem.c +++ b/collectors/ebpf.plugin/ebpf_filesystem.c @@ -351,20 +351,22 @@ static void ebpf_obsolete_fs_charts(int update_every) flags &= ~NETDATA_FILESYSTEM_FLAG_CHART_CREATED; ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hread.name, + "", efp->hread.title, EBPF_COMMON_DIMENSION_CALL, efp->family_name, NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hread.order, update_every); ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name, + "", efp->hwrite.title, EBPF_COMMON_DIMENSION_CALL, efp->family_name, NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hwrite.order, update_every); - ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name, efp->hopen.title, + ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name, "", efp->hopen.title, EBPF_COMMON_DIMENSION_CALL, efp->family_name, NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hopen.order, update_every); - ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hadditional.name, efp->hadditional.title, + ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hadditional.name,"", efp->hadditional.title, EBPF_COMMON_DIMENSION_CALL, efp->family_name, NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hadditional.order, update_every); @@ -390,9 
+392,9 @@ static void ebpf_create_fs_charts(int update_every) ebpf_filesystem_partitions_t *efp = &localfs[i]; uint32_t flags = efp->flags; if (flags & NETDATA_FILESYSTEM_FLAG_HAS_PARTITION && !(flags & test)) { - snprintfz(title, 255, "%s latency for each read request.", efp->filesystem); - snprintfz(family, 63, "%s_latency", efp->family); - snprintfz(chart_name, 63, "%s_read_latency", efp->filesystem); + snprintfz(title, sizeof(title) - 1, "%s latency for each read request.", efp->filesystem); + snprintfz(family, sizeof(family) - 1, "%s_latency", efp->family); + snprintfz(chart_name, sizeof(chart_name) - 1, "%s_read_latency", efp->filesystem); efp->hread.name = strdupz(chart_name); efp->hread.title = strdupz(title); efp->hread.ctx = NULL; @@ -408,8 +410,8 @@ static void ebpf_create_fs_charts(int update_every) update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM); order++; - snprintfz(title, 255, "%s latency for each write request.", efp->filesystem); - snprintfz(chart_name, 63, "%s_write_latency", efp->filesystem); + snprintfz(title, sizeof(title) - 1, "%s latency for each write request.", efp->filesystem); + snprintfz(chart_name, sizeof(chart_name) - 1, "%s_write_latency", efp->filesystem); efp->hwrite.name = strdupz(chart_name); efp->hwrite.title = strdupz(title); efp->hwrite.ctx = NULL; @@ -423,8 +425,8 @@ static void ebpf_create_fs_charts(int update_every) update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM); order++; - snprintfz(title, 255, "%s latency for each open request.", efp->filesystem); - snprintfz(chart_name, 63, "%s_open_latency", efp->filesystem); + snprintfz(title, sizeof(title) - 1, "%s latency for each open request.", efp->filesystem); + snprintfz(chart_name, sizeof(chart_name) - 1, "%s_open_latency", efp->filesystem); efp->hopen.name = strdupz(chart_name); efp->hopen.title = strdupz(title); efp->hopen.ctx = NULL; @@ -439,9 +441,9 @@ static void ebpf_create_fs_charts(int update_every) order++; char *type = (efp->flags & NETDATA_FILESYSTEM_ATTR_CHARTS) ? 
"attribute" : "sync"; - snprintfz(title, 255, "%s latency for each %s request.", efp->filesystem, type); - snprintfz(chart_name, 63, "%s_%s_latency", efp->filesystem, type); - snprintfz(ctx, 63, "filesystem.%s_latency", type); + snprintfz(title, sizeof(title) - 1, "%s latency for each %s request.", efp->filesystem, type); + snprintfz(chart_name, sizeof(chart_name) - 1, "%s_%s_latency", efp->filesystem, type); + snprintfz(ctx, sizeof(ctx) - 1, "filesystem.%s_latency", type); efp->hadditional.name = strdupz(chart_name); efp->hadditional.title = strdupz(title); efp->hadditional.ctx = strdupz(ctx); @@ -470,12 +472,12 @@ int ebpf_filesystem_initialize_ebpf_data(ebpf_module_t *em) { pthread_mutex_lock(&lock); int i; - const char *saved_name = em->thread_name; + const char *saved_name = em->info.thread_name; uint64_t kernels = em->kernels; for (i = 0; localfs[i].filesystem; i++) { ebpf_filesystem_partitions_t *efp = &localfs[i]; if (!efp->probe_links && efp->flags & NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM) { - em->thread_name = efp->filesystem; + em->info.thread_name = efp->filesystem; em->kernels = efp->kernels; em->maps = efp->fs_maps; #ifdef LIBBPF_MAJOR_VERSION @@ -484,7 +486,7 @@ int ebpf_filesystem_initialize_ebpf_data(ebpf_module_t *em) if (em->load & EBPF_LOAD_LEGACY) { efp->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &efp->objects); if (!efp->probe_links) { - em->thread_name = saved_name; + em->info.thread_name = saved_name; em->kernels = kernels; em->maps = NULL; pthread_mutex_unlock(&lock); @@ -495,7 +497,7 @@ int ebpf_filesystem_initialize_ebpf_data(ebpf_module_t *em) else { efp->fs_obj = filesystem_bpf__open(); if (!efp->fs_obj) { - em->thread_name = saved_name; + em->info.thread_name = saved_name; em->kernels = kernels; return -1; } else { @@ -515,7 +517,7 @@ int ebpf_filesystem_initialize_ebpf_data(ebpf_module_t *em) } efp->flags &= ~NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM; } - em->thread_name = saved_name; + em->info.thread_name = saved_name; pthread_mutex_unlock(&lock); em->kernels = kernels; em->maps = NULL; @@ -671,6 +673,7 @@ static void ebpf_obsolete_filesystem_global(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hread.name, + "", efp->hread.title, EBPF_COMMON_DIMENSION_CALL, efp->family_name, @@ -681,6 +684,7 @@ static void ebpf_obsolete_filesystem_global(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name, + "", efp->hwrite.title, EBPF_COMMON_DIMENSION_CALL, efp->family_name, @@ -691,6 +695,7 @@ static void ebpf_obsolete_filesystem_global(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name, + "", efp->hopen.title, EBPF_COMMON_DIMENSION_CALL, efp->family_name, @@ -701,6 +706,7 @@ static void ebpf_obsolete_filesystem_global(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hadditional.name, + "", efp->hadditional.title, EBPF_COMMON_DIMENSION_CALL, efp->family_name, @@ -909,10 +915,10 @@ static void filesystem_collector(ebpf_module_t *em) int counter = update_every - 1; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; diff --git a/collectors/ebpf.plugin/ebpf_functions.c b/collectors/ebpf.plugin/ebpf_functions.c index 
7a43692bc051d7..6a481ad64f2839 100644 --- a/collectors/ebpf.plugin/ebpf_functions.c +++ b/collectors/ebpf.plugin/ebpf_functions.c @@ -3,6 +3,40 @@ #include "ebpf.h" #include "ebpf_functions.h" +/***************************************************************** + * EBPF FUNCTION COMMON + *****************************************************************/ + +/** + * Function Start thread + * + * Start a specific thread after user request. + * + * @param em The structure with thread information + * @param period + * @return + */ +static int ebpf_function_start_thread(ebpf_module_t *em, int period) +{ + struct netdata_static_thread *st = em->thread; + // another request for thread that already ran, cleanup and restart + if (st->thread) + freez(st->thread); + + if (period <= 0) + period = EBPF_DEFAULT_LIFETIME; + + st->thread = mallocz(sizeof(netdata_thread_t)); + em->enabled = NETDATA_THREAD_EBPF_FUNCTION_RUNNING; + em->lifetime = period; + +#ifdef NETDATA_INTERNAL_CHECKS + netdata_log_info("Starting thread %s with lifetime = %d", em->info.thread_name, period); +#endif + + return netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, em); +} + /***************************************************************** * EBPF SELECT MODULE *****************************************************************/ @@ -13,17 +47,17 @@ * @param thread_name name of the thread we are looking for. * * @return it returns a pointer for the module that has thread_name on success or NULL otherwise. - */ ebpf_module_t *ebpf_functions_select_module(const char *thread_name) { int i; for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) { - if (strcmp(ebpf_modules[i].thread_name, thread_name) == 0) { + if (strcmp(ebpf_modules[i].info.thread_name, thread_name) == 0) { return &ebpf_modules[i]; } } return NULL; } + */ /***************************************************************** * EBPF HELP FUNCTIONS @@ -35,11 +69,9 @@ ebpf_module_t *ebpf_functions_select_module(const char *thread_name) { * Shows help with all options accepted by thread function. * * @param transaction the transaction id that Netdata sent for this function execution -*/ static void ebpf_function_thread_manipulation_help(const char *transaction) { - pthread_mutex_lock(&lock); - pluginsd_function_result_begin_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600); - fprintf(stdout, "%s", + BUFFER *wb = buffer_create(0, NULL); + buffer_sprintf(wb, "%s", "ebpf.plugin / thread\n" "\n" "Function `thread` allows user to control eBPF threads.\n" @@ -57,13 +89,13 @@ static void ebpf_function_thread_manipulation_help(const char *transaction) { " Disable a sp.\n" "\n" "Filters can be combined. 
Each filter can be given only one time.\n" - "Process thread is not controlled by functions until we finish the creation of functions per thread..\n" ); - pluginsd_function_result_end_to_stdout(); - fflush(stdout); - pthread_mutex_unlock(&lock); -} + pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb); + + buffer_free(wb); +} +*/ /***************************************************************** * EBPF ERROR FUNCTIONS @@ -79,12 +111,7 @@ static void ebpf_function_thread_manipulation_help(const char *transaction) { * @param msg the error message */ static void ebpf_function_error(const char *transaction, int code, const char *msg) { - char buffer[PLUGINSD_LINE_MAX + 1]; - json_escape_string(buffer, msg, PLUGINSD_LINE_MAX); - - pluginsd_function_result_begin_to_stdout(transaction, code, "application/json", now_realtime_sec()); - fprintf(stdout, "{\"status\":%d,\"error_message\":\"%s\"}", code, buffer); - pluginsd_function_result_end_to_stdout(); + pluginsd_function_json_error_to_stdout(transaction, code, msg); } /***************************************************************** @@ -92,7 +119,7 @@ static void ebpf_function_error(const char *transaction, int code, const char *m *****************************************************************/ /** - * Function enable + * Function: thread * * Enable a specific thread. * @@ -102,7 +129,6 @@ static void ebpf_function_error(const char *transaction, int code, const char *m * @param line_max Number of arguments given * @param timeout The function timeout * @param em The structure with thread information - */ static void ebpf_function_thread_manipulation(const char *transaction, char *function __maybe_unused, char *line_buffer __maybe_unused, @@ -134,34 +160,22 @@ static void ebpf_function_thread_manipulation(const char *transaction, lem = ebpf_functions_select_module(thread_name); if (!lem) { - snprintfz(message, 511, "%s%s", EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND, name); + snprintfz(message, sizeof(message) - 1, "%s%s", EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND, name); ebpf_function_error(transaction, HTTP_RESP_NOT_FOUND, message); return; } pthread_mutex_lock(&ebpf_exit_cleanup); if (lem->enabled > NETDATA_THREAD_EBPF_FUNCTION_RUNNING) { - struct netdata_static_thread *st = lem->thread; // Load configuration again ebpf_update_module(lem, default_btf, running_on_kernel, isrh); - // another request for thread that already ran, cleanup and restart - if (st->thread) - freez(st->thread); - - if (period <= 0) - period = EBPF_DEFAULT_LIFETIME; - - st->thread = mallocz(sizeof(netdata_thread_t)); - lem->enabled = NETDATA_THREAD_EBPF_FUNCTION_RUNNING; - lem->lifetime = period; - -#ifdef NETDATA_INTERNAL_CHECKS - netdata_log_info("Starting thread %s with lifetime = %d", thread_name, period); -#endif - - netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, - st->start_routine, lem); + if (ebpf_function_start_thread(lem, period)) { + ebpf_function_error(transaction, + HTTP_RESP_INTERNAL_SERVER_ERROR, + "Cannot start thread."); + return; + } } else { lem->running_time = 0; if (period > 0) // user is modifying period to run @@ -175,7 +189,7 @@ static void ebpf_function_thread_manipulation(const char *transaction, const char *name = &keyword[sizeof(EBPF_THREADS_DISABLE_CATEGORY) - 1]; lem = ebpf_functions_select_module(name); if (!lem) { - snprintfz(message, 511, "%s%s", EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND, name); + snprintfz(message, sizeof(message) - 
1, "%s%s", EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND, name); ebpf_function_error(transaction, HTTP_RESP_NOT_FOUND, message); return; } @@ -191,7 +205,7 @@ static void ebpf_function_thread_manipulation(const char *transaction, const char *name = &keyword[sizeof(EBPF_THREADS_SELECT_THREAD) - 1]; lem = ebpf_functions_select_module(name); if (!lem) { - snprintfz(message, 511, "%s%s", EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND, name); + snprintfz(message, sizeof(message) - 1, "%s%s", EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND, name); ebpf_function_error(transaction, HTTP_RESP_NOT_FOUND, message); return; } @@ -226,10 +240,10 @@ static void ebpf_function_thread_manipulation(const char *transaction, // THE ORDER SHOULD BE THE SAME WITH THE FIELDS! // thread name - buffer_json_add_array_item_string(wb, wem->thread_name); + buffer_json_add_array_item_string(wb, wem->info.thread_name); // description - buffer_json_add_array_item_string(wb, wem->thread_description); + buffer_json_add_array_item_string(wb, wem->info.thread_description); // Either it is not running or received a disabled signal and it is stopping. if (wem->enabled > NETDATA_THREAD_EBPF_FUNCTION_RUNNING || (!wem->lifetime && (int)wem->running_time == wem->update_every)) { @@ -267,7 +281,7 @@ static void ebpf_function_thread_manipulation(const char *transaction, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL); + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY | RRDF_FIELD_OPTS_UNIQUE_KEY, NULL); buffer_rrdf_table_add_field(wb, fields_id++, "Description", "Thread Desc", RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, @@ -349,19 +363,697 @@ static void ebpf_function_thread_manipulation(const char *transaction, buffer_json_finalize(wb); // Lock necessary to avoid race condition - pthread_mutex_lock(&lock); + pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", expires, wb); + + buffer_free(wb); +} + */ + +/***************************************************************** + * EBPF SOCKET FUNCTION + *****************************************************************/ + +/** + * Thread Help + * + * Shows help with all options accepted by thread function. + * + * @param transaction the transaction id that Netdata sent for this function execution +*/ +static void ebpf_function_socket_help(const char *transaction) { + pluginsd_function_result_begin_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600); + fprintf(stdout, "%s", + "ebpf.plugin / socket\n" + "\n" + "Function `socket` display information for all open sockets during ebpf.plugin runtime.\n" + "During thread runtime the plugin is always collecting data, but when an option is modified, the plugin\n" + "resets completely the previous table and can show a clean data for the first request before to bring the\n" + "modified request.\n" + "\n" + "The following filters are supported:\n" + "\n" + " family:FAMILY\n" + " Shows information for the FAMILY specified. Option accepts IPV4, IPV6 and all, that is the default.\n" + "\n" + " period:PERIOD\n" + " Enable socket to run a specific PERIOD in seconds. 
When PERIOD is not\n" + " specified plugin will use the default 300 seconds\n" + "\n" + " resolve:BOOL\n" + " Resolve service name, default value is YES.\n" + "\n" + " range:CIDR\n" + " Show sockets that have only a specific destination. Default all addresses.\n" + "\n" + " port:range\n" + " Show sockets that have only a specific destination.\n" + "\n" + " reset\n" + " Send a reset to collector. When a collector receives this command, it uses everything defined in configuration file.\n" + "\n" + " interfaces\n" + " When the collector receives this command, it read all available interfaces on host.\n" + "\n" + "Filters can be combined. Each filter can be given only one time. Default all ports\n" + ); + pluginsd_function_result_end_to_stdout(); + fflush(stdout); +} + +/** + * Fill Fake socket + * + * Fill socket with an invalid request. + * + * @param fake_values is the structure where we are storing the value. + */ +static inline void ebpf_socket_fill_fake_socket(netdata_socket_plus_t *fake_values) +{ + snprintfz(fake_values->socket_string.src_ip, INET6_ADDRSTRLEN, "%s", "127.0.0.1"); + snprintfz(fake_values->socket_string.dst_ip, INET6_ADDRSTRLEN, "%s", "127.0.0.1"); + fake_values->pid = getpid(); + //fake_values->socket_string.src_port = 0; + fake_values->socket_string.dst_port[0] = 0; + snprintfz(fake_values->socket_string.dst_ip, NI_MAXSERV, "%s", "none"); + fake_values->data.family = AF_INET; + fake_values->data.protocol = AF_UNSPEC; +} + +/** + * Fill function buffer + * + * Fill buffer with data to be shown on cloud. + * + * @param wb buffer where we store data. + * @param values data read from hash table + * @param name the process name + */ +static void ebpf_fill_function_buffer(BUFFER *wb, netdata_socket_plus_t *values, char *name) +{ + buffer_json_add_array_item_array(wb); + + // IMPORTANT! + // THE ORDER SHOULD BE THE SAME WITH THE FIELDS! + + // PID + buffer_json_add_array_item_uint64(wb, (uint64_t)values->pid); + + // NAME + buffer_json_add_array_item_string(wb, (name) ? name : "not identified"); + + // Origin + buffer_json_add_array_item_string(wb, (values->data.external_origin) ? 
"incoming" : "outgoing"); + + // Source IP + buffer_json_add_array_item_string(wb, values->socket_string.src_ip); + + // SRC Port + //buffer_json_add_array_item_uint64(wb, (uint64_t) values->socket_string.src_port); + + // Destination IP + buffer_json_add_array_item_string(wb, values->socket_string.dst_ip); + + // DST Port + buffer_json_add_array_item_string(wb, values->socket_string.dst_port); + + uint64_t connections; + if (values->data.protocol == IPPROTO_TCP) { + // Protocol + buffer_json_add_array_item_string(wb, "TCP"); + + // Bytes received + buffer_json_add_array_item_uint64(wb, (uint64_t) values->data.tcp.tcp_bytes_received); + + // Bytes sent + buffer_json_add_array_item_uint64(wb, (uint64_t) values->data.tcp.tcp_bytes_sent); + + // Connections + connections = values->data.tcp.ipv4_connect + values->data.tcp.ipv6_connect; + } else if (values->data.protocol == IPPROTO_UDP) { + // Protocol + buffer_json_add_array_item_string(wb, "UDP"); + + // Bytes received + buffer_json_add_array_item_uint64(wb, (uint64_t) values->data.udp.udp_bytes_received); + + // Bytes sent + buffer_json_add_array_item_uint64(wb, (uint64_t) values->data.udp.udp_bytes_sent); + + // Connections + connections = values->data.udp.call_udp_sent + values->data.udp.call_udp_received; + } else { + // Protocol + buffer_json_add_array_item_string(wb, "UNSPEC"); + + // Bytes received + buffer_json_add_array_item_uint64(wb, 0); + + // Bytes sent + buffer_json_add_array_item_uint64(wb, 0); + + connections = 1; + } + + // Connections + if (values->flags & NETDATA_SOCKET_FLAGS_ALREADY_OPEN) { + connections++; + } else if (!connections) { + // If no connections, this means that we lost when connection was opened + values->flags |= NETDATA_SOCKET_FLAGS_ALREADY_OPEN; + connections++; + } + buffer_json_add_array_item_uint64(wb, connections); + + buffer_json_array_close(wb); +} + +/** + * Clean Judy array unsafe + * + * Clean all Judy Array allocated to show table when a function is called. + * Before to call this function it is necessary to lock `ebpf_judy_pid.index.rw_spinlock`. + **/ +static void ebpf_socket_clean_judy_array_unsafe() +{ + if (!ebpf_judy_pid.index.JudyLArray) + return; + + Pvoid_t *pid_value, *socket_value; + Word_t local_pid = 0, local_socket = 0; + bool first_pid = true, first_socket = true; + while ((pid_value = JudyLFirstThenNext(ebpf_judy_pid.index.JudyLArray, &local_pid, &first_pid))) { + netdata_ebpf_judy_pid_stats_t *pid_ptr = (netdata_ebpf_judy_pid_stats_t *)*pid_value; + rw_spinlock_write_lock(&pid_ptr->socket_stats.rw_spinlock); + if (pid_ptr->socket_stats.JudyLArray) { + while ((socket_value = JudyLFirstThenNext(pid_ptr->socket_stats.JudyLArray, &local_socket, &first_socket))) { + netdata_socket_plus_t *socket_clean = *socket_value; + aral_freez(aral_socket_table, socket_clean); + } + JudyLFreeArray(&pid_ptr->socket_stats.JudyLArray, PJE0); + pid_ptr->socket_stats.JudyLArray = NULL; + } + rw_spinlock_write_unlock(&pid_ptr->socket_stats.rw_spinlock); + } +} + +/** + * Fill function buffer unsafe + * + * Fill the function buffer with socket information. Before to call this function it is necessary to lock + * ebpf_judy_pid.index.rw_spinlock + * + * @param buf buffer used to store data to be shown by function. + * + * @return it returns 0 on success and -1 otherwise. 
+ */ +static void ebpf_socket_fill_function_buffer_unsafe(BUFFER *buf) +{ + int counter = 0; + + Pvoid_t *pid_value, *socket_value; + Word_t local_pid = 0; + bool first_pid = true; + while ((pid_value = JudyLFirstThenNext(ebpf_judy_pid.index.JudyLArray, &local_pid, &first_pid))) { + netdata_ebpf_judy_pid_stats_t *pid_ptr = (netdata_ebpf_judy_pid_stats_t *)*pid_value; + bool first_socket = true; + Word_t local_timestamp = 0; + rw_spinlock_read_lock(&pid_ptr->socket_stats.rw_spinlock); + if (pid_ptr->socket_stats.JudyLArray) { + while ((socket_value = JudyLFirstThenNext(pid_ptr->socket_stats.JudyLArray, &local_timestamp, &first_socket))) { + netdata_socket_plus_t *values = (netdata_socket_plus_t *)*socket_value; + ebpf_fill_function_buffer(buf, values, pid_ptr->cmdline); + } + counter++; + } + rw_spinlock_read_unlock(&pid_ptr->socket_stats.rw_spinlock); + } + + if (!counter) { + netdata_socket_plus_t fake_values = { }; + ebpf_socket_fill_fake_socket(&fake_values); + ebpf_fill_function_buffer(buf, &fake_values, NULL); + } +} + +/** + * Socket read hash + * + * This is the thread callback. + * This thread is necessary, because we cannot freeze the whole plugin to read the data on very busy socket. + * + * @param buf the buffer to store data; + * @param em the module main structure. + * + * @return It always returns NULL. + */ +void ebpf_socket_read_open_connections(BUFFER *buf, struct ebpf_module *em) +{ + // thread was not initialized or Array was reset + rw_spinlock_read_lock(&ebpf_judy_pid.index.rw_spinlock); + if (!em->maps || (em->maps[NETDATA_SOCKET_OPEN_SOCKET].map_fd == ND_EBPF_MAP_FD_NOT_INITIALIZED) || + !ebpf_judy_pid.index.JudyLArray){ + netdata_socket_plus_t fake_values = { }; + + ebpf_socket_fill_fake_socket(&fake_values); + + ebpf_fill_function_buffer(buf, &fake_values, NULL); + rw_spinlock_read_unlock(&ebpf_judy_pid.index.rw_spinlock); + return; + } + + rw_spinlock_read_lock(&network_viewer_opt.rw_spinlock); + ebpf_socket_fill_function_buffer_unsafe(buf); + rw_spinlock_read_unlock(&network_viewer_opt.rw_spinlock); + rw_spinlock_read_unlock(&ebpf_judy_pid.index.rw_spinlock); +} + +/** + * Function: Socket + * + * Show information for sockets stored in hash tables. + * + * @param transaction the transaction id that Netdata sent for this function execution + * @param function function name and arguments given to thread. + * @param timeout The function timeout + * @param cancelled Variable used to store function status. 
+ */ +static void ebpf_function_socket_manipulation(const char *transaction, + char *function __maybe_unused, + int timeout __maybe_unused, + bool *cancelled __maybe_unused) +{ + UNUSED(timeout); + ebpf_module_t *em = &ebpf_modules[EBPF_MODULE_SOCKET_IDX]; + + char *words[PLUGINSD_MAX_WORDS] = {NULL}; + size_t num_words = quoted_strings_splitter_pluginsd(function, words, PLUGINSD_MAX_WORDS); + const char *name; + int period = -1; + rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock); + network_viewer_opt.enabled = CONFIG_BOOLEAN_YES; + uint32_t previous; + + for (int i = 1; i < PLUGINSD_MAX_WORDS; i++) { + const char *keyword = get_word(words, num_words, i); + if (!keyword) + break; + + if (strncmp(keyword, EBPF_FUNCTION_SOCKET_FAMILY, sizeof(EBPF_FUNCTION_SOCKET_FAMILY) - 1) == 0) { + name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_FAMILY) - 1]; + previous = network_viewer_opt.family; + uint32_t family = AF_UNSPEC; + if (!strcmp(name, "IPV4")) + family = AF_INET; + else if (!strcmp(name, "IPV6")) + family = AF_INET6; + + if (family != previous) { + rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock); + network_viewer_opt.family = family; + rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock); + ebpf_socket_clean_judy_array_unsafe(); + } + } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_PERIOD, sizeof(EBPF_FUNCTION_SOCKET_PERIOD) - 1) == 0) { + name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_PERIOD) - 1]; + pthread_mutex_lock(&ebpf_exit_cleanup); + period = str2i(name); + if (period > 0) { + em->lifetime = period; + } else + em->lifetime = EBPF_NON_FUNCTION_LIFE_TIME; + +#ifdef NETDATA_DEV_MODE + collector_info("Lifetime modified for %u", em->lifetime); +#endif + pthread_mutex_unlock(&ebpf_exit_cleanup); + } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_RESOLVE, sizeof(EBPF_FUNCTION_SOCKET_RESOLVE) - 1) == 0) { + previous = network_viewer_opt.service_resolution_enabled; + uint32_t resolution; + name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_RESOLVE) - 1]; + resolution = (!strcasecmp(name, "YES")) ? 
CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_NO; + + if (previous != resolution) { + rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock); + network_viewer_opt.service_resolution_enabled = resolution; + rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock); + + ebpf_socket_clean_judy_array_unsafe(); + } + } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_RANGE, sizeof(EBPF_FUNCTION_SOCKET_RANGE) - 1) == 0) { + name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_RANGE) - 1]; + rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock); + ebpf_clean_ip_structure(&network_viewer_opt.included_ips); + ebpf_clean_ip_structure(&network_viewer_opt.excluded_ips); + ebpf_parse_ips_unsafe((char *)name); + rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock); + + ebpf_socket_clean_judy_array_unsafe(); + } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_PORT, sizeof(EBPF_FUNCTION_SOCKET_PORT) - 1) == 0) { + name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_PORT) - 1]; + rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock); + ebpf_clean_port_structure(&network_viewer_opt.included_port); + ebpf_clean_port_structure(&network_viewer_opt.excluded_port); + ebpf_parse_ports((char *)name); + rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock); + + ebpf_socket_clean_judy_array_unsafe(); + } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_RESET, sizeof(EBPF_FUNCTION_SOCKET_RESET) - 1) == 0) { + rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock); + ebpf_clean_port_structure(&network_viewer_opt.included_port); + ebpf_clean_port_structure(&network_viewer_opt.excluded_port); + + ebpf_clean_ip_structure(&network_viewer_opt.included_ips); + ebpf_clean_ip_structure(&network_viewer_opt.excluded_ips); + ebpf_clean_ip_structure(&network_viewer_opt.ipv4_local_ip); + ebpf_clean_ip_structure(&network_viewer_opt.ipv6_local_ip); + + parse_network_viewer_section(&socket_config); + ebpf_read_local_addresses_unsafe(); + network_viewer_opt.enabled = CONFIG_BOOLEAN_YES; + rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock); + } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_INTERFACES, sizeof(EBPF_FUNCTION_SOCKET_INTERFACES) - 1) == 0) { + rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock); + ebpf_read_local_addresses_unsafe(); + rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock); + } else if (strncmp(keyword, "help", 4) == 0) { + ebpf_function_socket_help(transaction); + rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock); + return; + } + } + rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock); + + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->enabled > NETDATA_THREAD_EBPF_FUNCTION_RUNNING) { + // Cleanup when we already had a thread running + rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock); + ebpf_socket_clean_judy_array_unsafe(); + rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock); + + if (ebpf_function_start_thread(em, period)) { + ebpf_function_error(transaction, + HTTP_RESP_INTERNAL_SERVER_ERROR, + "Cannot start thread."); + pthread_mutex_unlock(&ebpf_exit_cleanup); + return; + } + } else { + if (period < 0 && em->lifetime < EBPF_NON_FUNCTION_LIFE_TIME) { + em->lifetime = EBPF_NON_FUNCTION_LIFE_TIME; + } + } + pthread_mutex_unlock(&ebpf_exit_cleanup); + + time_t expires = now_realtime_sec() + em->update_every; + + BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL); + buffer_json_initialize(wb, "\"", "\"", 0, true, false); + buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_string(wb, "type", "table"); + buffer_json_member_add_time_t(wb, 
"update_every", em->update_every); + buffer_json_member_add_string(wb, "help", EBPF_PLUGIN_SOCKET_FUNCTION_DESCRIPTION); + + // Collect data + buffer_json_member_add_array(wb, "data"); + ebpf_socket_read_open_connections(wb, em); + buffer_json_array_close(wb); // data + + buffer_json_member_add_object(wb, "columns"); + { + int fields_id = 0; + + // IMPORTANT! + // THE ORDER SHOULD BE THE SAME WITH THE VALUES! + buffer_rrdf_table_add_field(wb, fields_id++, "PID", "Process ID", RRDF_FIELD_TYPE_INTEGER, + RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN, + RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, + NULL); + + buffer_rrdf_table_add_field(wb, fields_id++, "Process Name", "Process Name", RRDF_FIELD_TYPE_STRING, + RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, + RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL); + + buffer_rrdf_table_add_field(wb, fields_id++, "Origin", "The connection origin.", RRDF_FIELD_TYPE_STRING, + RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, + RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL); + + buffer_rrdf_table_add_field(wb, fields_id++, "Request from", "Request from IP", RRDF_FIELD_TYPE_STRING, + RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, + RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL); + + /* + buffer_rrdf_table_add_field(wb, fields_id++, "SRC PORT", "Source Port", RRDF_FIELD_TYPE_INTEGER, + RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN, + RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, + NULL); + */ + + buffer_rrdf_table_add_field(wb, fields_id++, "Destination IP", "Destination IP", RRDF_FIELD_TYPE_STRING, + RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, + RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL); + + buffer_rrdf_table_add_field(wb, fields_id++, "Destination Port", "Destination Port", RRDF_FIELD_TYPE_STRING, + RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, + RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL); + + buffer_rrdf_table_add_field(wb, fields_id++, "Protocol", "Communication protocol", RRDF_FIELD_TYPE_STRING, + RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, + RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL); + + buffer_rrdf_table_add_field(wb, fields_id++, "Incoming Bandwidth", "Bytes received.", RRDF_FIELD_TYPE_INTEGER, + RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN, + RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, + NULL); + + buffer_rrdf_table_add_field(wb, fields_id++, "Outgoing Bandwidth", "Bytes sent.", RRDF_FIELD_TYPE_INTEGER, + RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 
0, NULL, NAN, + RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, + NULL); + + buffer_rrdf_table_add_field(wb, fields_id, "Connections", "Number of calls to tcp_vX_connections and udp_sendmsg, where X is the protocol version.", RRDF_FIELD_TYPE_INTEGER, + RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN, + RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, + NULL); + } + buffer_json_object_close(wb); // columns + + buffer_json_member_add_object(wb, "charts"); + { + // OutBound Connections + buffer_json_member_add_object(wb, "IPInboundConn"); + { + buffer_json_member_add_string(wb, "name", "TCP Inbound Connection"); + buffer_json_member_add_string(wb, "type", "line"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "connected_tcp"); + buffer_json_add_array_item_string(wb, "connected_udp"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + // OutBound Connections + buffer_json_member_add_object(wb, "IPTCPOutboundConn"); + { + buffer_json_member_add_string(wb, "name", "TCP Outbound Connection"); + buffer_json_member_add_string(wb, "type", "line"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "connected_V4"); + buffer_json_add_array_item_string(wb, "connected_V6"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + // TCP Functions + buffer_json_member_add_object(wb, "TCPFunctions"); + { + buffer_json_member_add_string(wb, "name", "TCPFunctions"); + buffer_json_member_add_string(wb, "type", "line"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "received"); + buffer_json_add_array_item_string(wb, "sent"); + buffer_json_add_array_item_string(wb, "close"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + // TCP Bandwidth + buffer_json_member_add_object(wb, "TCPBandwidth"); + { + buffer_json_member_add_string(wb, "name", "TCPBandwidth"); + buffer_json_member_add_string(wb, "type", "line"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "received"); + buffer_json_add_array_item_string(wb, "sent"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + // UDP Functions + buffer_json_member_add_object(wb, "UDPFunctions"); + { + buffer_json_member_add_string(wb, "name", "UDPFunctions"); + buffer_json_member_add_string(wb, "type", "line"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "received"); + buffer_json_add_array_item_string(wb, "sent"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + // UDP Bandwidth + buffer_json_member_add_object(wb, "UDPBandwidth"); + { + buffer_json_member_add_string(wb, "name", "UDPBandwidth"); + buffer_json_member_add_string(wb, "type", "line"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "received"); + buffer_json_add_array_item_string(wb, "sent"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + } + buffer_json_object_close(wb); // charts + + buffer_json_member_add_string(wb, "default_sort_column", "PID"); + + // Do we use only on fields that can be groupped? 
+ buffer_json_member_add_object(wb, "group_by"); + { + // group by PID + buffer_json_member_add_object(wb, "PID"); + { + buffer_json_member_add_string(wb, "name", "Process ID"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "PID"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + // group by Process Name + buffer_json_member_add_object(wb, "Process Name"); + { + buffer_json_member_add_string(wb, "name", "Process Name"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Process Name"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + // group by Origin + buffer_json_member_add_object(wb, "Origin"); + { + buffer_json_member_add_string(wb, "name", "Origin"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Origin"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + // group by Request From IP + buffer_json_member_add_object(wb, "Request from"); + { + buffer_json_member_add_string(wb, "name", "Request from IP"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Request from"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + // group by Destination IP + buffer_json_member_add_object(wb, "Destination IP"); + { + buffer_json_member_add_string(wb, "name", "Destination IP"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Destination IP"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + // group by Destination Port + buffer_json_member_add_object(wb, "Destination Port"); + { + buffer_json_member_add_string(wb, "name", "Destination Port"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Destination Port"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + // group by Protocol + buffer_json_member_add_object(wb, "Protocol"); + { + buffer_json_member_add_string(wb, "name", "Protocol"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Protocol"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); // group_by + + buffer_json_member_add_time_t(wb, "expires", expires); + buffer_json_finalize(wb); + + // Lock necessary to avoid race condition pluginsd_function_result_begin_to_stdout(transaction, HTTP_RESP_OK, "application/json", expires); fwrite(buffer_tostring(wb), buffer_strlen(wb), 1, stdout); pluginsd_function_result_end_to_stdout(); fflush(stdout); - pthread_mutex_unlock(&lock); buffer_free(wb); } - /***************************************************************** * EBPF FUNCTION THREAD *****************************************************************/ @@ -375,45 +1067,27 @@ static void ebpf_function_thread_manipulation(const char *transaction, */ void *ebpf_function_thread(void *ptr) { - ebpf_module_t *em = (ebpf_module_t *)ptr; - char buffer[PLUGINSD_LINE_MAX + 1]; - - char *s = NULL; - while(!ebpf_exit_plugin && (s = fgets(buffer, PLUGINSD_LINE_MAX, stdin))) { - char *words[PLUGINSD_MAX_WORDS] = { NULL }; - size_t num_words = quoted_strings_splitter_pluginsd(buffer, words, PLUGINSD_MAX_WORDS); - - const char *keyword = get_word(words, num_words, 0); - - if(keyword && strcmp(keyword, PLUGINSD_KEYWORD_FUNCTION) == 0) { - char *transaction = get_word(words, num_words, 1); - char *timeout_s =
get_word(words, num_words, 2); - char *function = get_word(words, num_words, 3); - - if(!transaction || !*transaction || !timeout_s || !*timeout_s || !function || !*function) { - netdata_log_error("Received incomplete %s (transaction = '%s', timeout = '%s', function = '%s'). Ignoring it.", - keyword, - transaction?transaction:"(unset)", - timeout_s?timeout_s:"(unset)", - function?function:"(unset)"); - } - else { - int timeout = str2i(timeout_s); - if (!strncmp(function, EBPF_FUNCTION_THREAD, sizeof(EBPF_FUNCTION_THREAD) - 1)) - ebpf_function_thread_manipulation(transaction, - function, - buffer, - PLUGINSD_LINE_MAX + 1, - timeout, - em); - else - ebpf_function_error(transaction, - HTTP_RESP_NOT_FOUND, - "No function with this name found in ebpf.plugin."); - } + (void)ptr; + + struct functions_evloop_globals *wg = functions_evloop_init(1, + "EBPF", + &lock, + &ebpf_plugin_exit); + + functions_evloop_add_function(wg, + "ebpf_socket", + ebpf_function_socket_manipulation, + PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT); + + heartbeat_t hb; + heartbeat_init(&hb); + while(!ebpf_plugin_exit) { + (void)heartbeat_next(&hb, USEC_PER_SEC); + + if (ebpf_plugin_exit) { + break; } - else - netdata_log_error("Received unknown command: %s", keyword ? keyword : "(unset)"); } + return NULL; } diff --git a/collectors/ebpf.plugin/ebpf_functions.h b/collectors/ebpf.plugin/ebpf_functions.h index b20dab63421e80..795703b428748d 100644 --- a/collectors/ebpf.plugin/ebpf_functions.h +++ b/collectors/ebpf.plugin/ebpf_functions.h @@ -3,20 +3,25 @@ #ifndef NETDATA_EBPF_FUNCTIONS_H #define NETDATA_EBPF_FUNCTIONS_H 1 +#ifdef NETDATA_DEV_MODE +// Common +static inline void EBPF_PLUGIN_FUNCTIONS(const char *NAME, const char *DESC) { + fprintf(stdout, "%s \"%s\" 10 \"%s\"\n", PLUGINSD_KEYWORD_FUNCTION, NAME, DESC); +} +#endif + // configuration file & description #define NETDATA_DIRECTORY_FUNCTIONS_CONFIG_FILE "functions.conf" #define NETDATA_EBPF_FUNCTIONS_MODULE_DESC "Show information about current function status." // function list #define EBPF_FUNCTION_THREAD "ebpf_thread" +#define EBPF_FUNCTION_SOCKET "ebpf_socket" +// thread constants #define EBPF_PLUGIN_THREAD_FUNCTION_DESCRIPTION "Detailed information about eBPF threads." #define EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND "ebpf.plugin does not have thread named " -#define EBPF_PLUGIN_FUNCTIONS(NAME, DESC) do { \ - fprintf(stdout, PLUGINSD_KEYWORD_FUNCTION " \"" NAME "\" 10 \"%s\"\n", DESC); \ -} while(0) - #define EBPF_THREADS_SELECT_THREAD "thread:" #define EBPF_THREADS_ENABLE_CATEGORY "enable:" #define EBPF_THREADS_DISABLE_CATEGORY "disable:" @@ -24,6 +29,16 @@ #define EBPF_THREAD_STATUS_RUNNING "running" #define EBPF_THREAD_STATUS_STOPPED "stopped" +// socket constants +#define EBPF_PLUGIN_SOCKET_FUNCTION_DESCRIPTION "Detailed information about open sockets." 
+#define EBPF_FUNCTION_SOCKET_FAMILY "family:" +#define EBPF_FUNCTION_SOCKET_PERIOD "period:" +#define EBPF_FUNCTION_SOCKET_RESOLVE "resolve:" +#define EBPF_FUNCTION_SOCKET_RANGE "range:" +#define EBPF_FUNCTION_SOCKET_PORT "port:" +#define EBPF_FUNCTION_SOCKET_RESET "reset" +#define EBPF_FUNCTION_SOCKET_INTERFACES "interfaces" + void *ebpf_function_thread(void *ptr); #endif diff --git a/collectors/ebpf.plugin/ebpf_hardirq.c b/collectors/ebpf.plugin/ebpf_hardirq.c index 9092c7ac3032fd..465ee6434c59b5 100644 --- a/collectors/ebpf.plugin/ebpf_hardirq.c +++ b/collectors/ebpf.plugin/ebpf_hardirq.c @@ -226,6 +226,7 @@ static void ebpf_obsolete_hardirq_global(ebpf_module_t *em) { ebpf_write_chart_obsolete(NETDATA_EBPF_SYSTEM_GROUP, "hardirq_latency", + "", "Hardware IRQ latency", EBPF_COMMON_DIMENSION_MILLISECONDS, "interrupts", @@ -580,10 +581,10 @@ static void hardirq_collector(ebpf_module_t *em) //This will be cancelled by its parent uint32_t running_time = 0; uint32_t lifetime = em->lifetime; - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; @@ -593,10 +594,10 @@ static void hardirq_collector(ebpf_module_t *em) pthread_mutex_lock(&lock); // write dims now for all hitherto discovered IRQs. - write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "hardirq_latency"); + ebpf_write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "hardirq_latency", ""); avl_traverse_lock(&hardirq_pub, hardirq_write_dims, NULL); hardirq_write_static_dims(); - write_end_chart(); + ebpf_write_end_chart(); pthread_mutex_unlock(&lock); diff --git a/collectors/ebpf.plugin/ebpf_mdflush.c b/collectors/ebpf.plugin/ebpf_mdflush.c index 3548d673bae1a3..fe33ff6a478f97 100644 --- a/collectors/ebpf.plugin/ebpf_mdflush.c +++ b/collectors/ebpf.plugin/ebpf_mdflush.c @@ -140,6 +140,7 @@ static void ebpf_obsolete_mdflush_global(ebpf_module_t *em) { ebpf_write_chart_obsolete("mdstat", "mdstat_flush", + "", "MD flushes", "flushes", "flush (eBPF)", @@ -345,19 +346,19 @@ static void mdflush_collector(ebpf_module_t *em) int maps_per_core = em->maps_per_core; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; mdflush_read_count_map(maps_per_core); pthread_mutex_lock(&lock); // write dims now for all hitherto discovered devices. 
- write_begin_chart("mdstat", "mdstat_flush"); + ebpf_write_begin_chart("mdstat", "mdstat_flush", ""); avl_traverse_lock(&mdflush_pub, mdflush_write_dims, NULL); - write_end_chart(); + ebpf_write_end_chart(); pthread_mutex_unlock(&lock); diff --git a/collectors/ebpf.plugin/ebpf_mount.c b/collectors/ebpf.plugin/ebpf_mount.c index 57ea5b2f453b80..05c76540a554b2 100644 --- a/collectors/ebpf.plugin/ebpf_mount.c +++ b/collectors/ebpf.plugin/ebpf_mount.c @@ -233,6 +233,7 @@ static void ebpf_obsolete_mount_global(ebpf_module_t *em) { ebpf_write_chart_obsolete(NETDATA_EBPF_MOUNT_GLOBAL_FAMILY, NETDATA_EBPF_MOUNT_CALLS, + "", "Calls to mount and umount syscalls", EBPF_COMMON_DIMENSION_CALL, NETDATA_EBPF_MOUNT_FAMILY, @@ -243,6 +244,7 @@ static void ebpf_obsolete_mount_global(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_EBPF_MOUNT_GLOBAL_FAMILY, NETDATA_EBPF_MOUNT_ERRORS, + "", "Errors to mount and umount file systems", EBPF_COMMON_DIMENSION_CALL, NETDATA_EBPF_MOUNT_FAMILY, @@ -367,9 +369,9 @@ static void mount_collector(ebpf_module_t *em) int maps_per_core = em->maps_per_core; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; @@ -466,7 +468,7 @@ static int ebpf_mount_load_bpf(ebpf_module_t *em) #endif if (ret) - netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name); return ret; } diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c index 84830160a09142..2c34650c358560 100644 --- a/collectors/ebpf.plugin/ebpf_oomkill.c +++ b/collectors/ebpf.plugin/ebpf_oomkill.c @@ -57,6 +57,7 @@ static void ebpf_obsolete_oomkill_services(ebpf_module_t *em) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_OOMKILL_CHART, + "", "OOM kills. 
This chart is provided by eBPF plugin.", EBPF_COMMON_DIMENSION_KILLS, NETDATA_EBPF_MEMORY_GROUP, @@ -98,15 +99,25 @@ static inline void ebpf_obsolete_oomkill_cgroup_charts(ebpf_module_t *em) */ static void ebpf_obsolete_oomkill_apps(ebpf_module_t *em) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_OOMKILL_CHART, - "OOM kills", - EBPF_COMMON_DIMENSION_KILLS, - "mem", - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20020, - em->update_every); + struct ebpf_target *w; + int update_every = em->update_every; + for (w = apps_groups_root_target; w; w = w->next) { + if (unlikely(!(w->charts_created & (1<clean_name, + "_app_oomkill", + "OOM kills.", + EBPF_COMMON_DIMENSION_KILLS, + NETDATA_EBPF_MEMORY_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "ebpf.app_oomkill", + 20020, + update_every); + + w->charts_created &= ~(1<next) { - if (likely(w->exposed && w->processes)) { - bool was_oomkilled = false; + if (unlikely(!(w->charts_created & (1<root_pid; while (pids) { uint32_t j; @@ -165,10 +179,11 @@ static void oomkill_write_data(int32_t *keys, uint32_t total) } pids = pids->next; } - - write_dim:; - write_chart_dimension(w->name, was_oomkilled); } +write_dim: + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_oomkill"); + write_chart_dimension(EBPF_COMMON_DIMENSION_KILLS, was_oomkilled); + ebpf_write_end_chart(); } // for any remaining keys for which we couldn't find a group, this could be @@ -231,14 +246,14 @@ static void ebpf_create_systemd_oomkill_charts(int update_every) static void ebpf_send_systemd_oomkill_charts() { ebpf_cgroup_target_t *ect; - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_OOMKILL_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_OOMKILL_CHART, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long) ect->oomkill); ect->oomkill = 0; } } - write_end_chart(); + ebpf_write_end_chart(); } /* @@ -251,9 +266,9 @@ static void ebpf_send_systemd_oomkill_charts() */ static void ebpf_send_specific_oomkill_data(char *type, int value) { - write_begin_chart(type, NETDATA_OOMKILL_CHART); + ebpf_write_begin_chart(type, NETDATA_OOMKILL_CHART, ""); write_chart_dimension(oomkill_publish_aggregated.name, (long long)value); - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -266,7 +281,7 @@ static void ebpf_send_specific_oomkill_data(char *type, int value) */ static void ebpf_obsolete_specific_oomkill_charts(char *type, int update_every) { - ebpf_write_chart_obsolete(type, NETDATA_OOMKILL_CHART, "OOM kills. This chart is provided by eBPF plugin.", + ebpf_write_chart_obsolete(type, NETDATA_OOMKILL_CHART, "", "OOM kills. 
This chart is provided by eBPF plugin.", EBPF_COMMON_DIMENSION_KILLS, NETDATA_EBPF_MEMORY_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_OOMKILLS_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5600, update_every); @@ -420,9 +435,9 @@ static void oomkill_collector(ebpf_module_t *em) uint32_t running_time = 0; uint32_t lifetime = em->lifetime; netdata_idx_t *stats = em->hash_table_stats; - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; @@ -430,7 +445,6 @@ static void oomkill_collector(ebpf_module_t *em) uint32_t count = oomkill_read_data(keys); if (!count) { running_time = ebpf_update_oomkill_period(running_time, em); - continue; } stats[NETDATA_CONTROLLER_PID_TABLE_ADD] += (uint64_t) count; @@ -438,16 +452,14 @@ static void oomkill_collector(ebpf_module_t *em) pthread_mutex_lock(&collect_data_mutex); pthread_mutex_lock(&lock); - if (cgroups) { + if (cgroups && count) { ebpf_update_oomkill_cgroup(keys, count); // write everything from the ebpf map. ebpf_oomkill_send_cgroup_data(update_every); } if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) { - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_OOMKILL_CHART); oomkill_write_data(keys, count); - write_end_chart(); } pthread_mutex_unlock(&lock); pthread_mutex_unlock(&collect_data_mutex); @@ -466,14 +478,29 @@ static void oomkill_collector(ebpf_module_t *em) void ebpf_oomkill_create_apps_charts(struct ebpf_module *em, void *ptr) { struct ebpf_target *root = ptr; - ebpf_create_charts_on_apps(NETDATA_OOMKILL_CHART, - "OOM kills", - EBPF_COMMON_DIMENSION_KILLS, - "mem", - NETDATA_EBPF_CHART_TYPE_STACKED, - 20020, - ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_OOMKILL); + struct ebpf_target *w; + int update_every = em->update_every; + for (w = root; w; w = w->next) { + if (unlikely(!w->exposed)) + continue; + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_oomkill", + "OOM kills.", + EBPF_COMMON_DIMENSION_KILLS, + NETDATA_EBPF_MEMORY_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_oomkill", + 20072, + update_every, + NETDATA_EBPF_MODULE_NAME_OOMKILL); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION kills '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + + w->charts_created |= 1<apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c index 3537efc553450d..e3e2b884ebe26a 100644 --- a/collectors/ebpf.plugin/ebpf_process.c +++ b/collectors/ebpf.plugin/ebpf_process.c @@ -116,12 +116,12 @@ static void ebpf_update_global_publish(netdata_publish_syscall_t *publish, netda */ static void write_status_chart(char *family, netdata_publish_vfs_common_t *pvc) { - write_begin_chart(family, NETDATA_PROCESS_STATUS_NAME); + ebpf_write_begin_chart(family, NETDATA_PROCESS_STATUS_NAME, ""); write_chart_dimension(status[0], (long long)pvc->running); write_chart_dimension(status[1], (long long)pvc->zombie); - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -201,56 +201,43 @@ void ebpf_process_remove_pids() void ebpf_process_send_apps_data(struct ebpf_target *root, ebpf_module_t *em) { struct ebpf_target *w; - collected_number value; + // This algorithm is improved in 
https://github.com/netdata/netdata/pull/16030 + collected_number values[5]; - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS); for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, create_process)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); - - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, create_thread)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); + if (unlikely(!(w->charts_created & (1<next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, + values[0] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, create_process)); + values[1] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, create_thread)); + values[2] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, exit_call)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); - - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, + values[3] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, release_call)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); + values[4] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, + task_err)); - if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_ERROR); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, - task_err)); - write_chart_dimension(w->name, value); - } + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_process_start"); + write_chart_dimension("calls", values[0]); + ebpf_write_end_chart(); + + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_thread_start"); + write_chart_dimension("calls", values[1]); + ebpf_write_end_chart(); + + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_task_exit"); + write_chart_dimension("calls", values[2]); + ebpf_write_end_chart(); + + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_task_released"); + write_chart_dimension("calls", values[3]); + ebpf_write_end_chart(); + + if (em->mode < MODE_ENTRY) { + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_task_error"); + write_chart_dimension("calls", values[4]); + ebpf_write_end_chart(); } - write_end_chart(); } ebpf_process_remove_pids(); @@ -433,52 +420,89 @@ static void ebpf_create_global_charts(ebpf_module_t *em) void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr) { struct ebpf_target *root = ptr; - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_PROCESS, - "Process started", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_PROCESS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20065, - ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); - - 
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_THREAD, - "Threads started", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_PROCESS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20066, - ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_EXIT, - "Tasks starts exit process.", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_PROCESS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20067, - ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_CLOSE, - "Tasks closed", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_PROCESS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20068, - ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); + struct ebpf_target *w; + int update_every = em->update_every; + for (w = root; w; w = w->next) { + if (unlikely(!w->exposed)) + continue; - if (em->mode < MODE_ENTRY) { - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_ERROR, - "Errors to create process or threads.", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_PROCESS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20069, - ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root, - em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_process_start", + "Process started.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_process_start", + 20161, + update_every, + NETDATA_EBPF_MODULE_NAME_PROCESS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_thread_start", + "Threads started.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_thread_start", + 20162, + update_every, + NETDATA_EBPF_MODULE_NAME_PROCESS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_task_exit", + "Tasks starts exit process.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_task_exit", + 20163, + update_every, + NETDATA_EBPF_MODULE_NAME_PROCESS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_task_released", + "Tasks released.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_task_released", + 20164, + update_every, + NETDATA_EBPF_MODULE_NAME_PROCESS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_task_error", + "Errors to create process or threads.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_task_error", + 20165, + update_every, + NETDATA_EBPF_MODULE_NAME_PROCESS); + 
ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + } + w->charts_created |= 1<apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; @@ -503,6 +527,7 @@ static void ebpf_obsolete_process_services(ebpf_module_t *em) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS, + "", "Process started", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP, @@ -513,6 +538,7 @@ static void ebpf_obsolete_process_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD, + "", "Threads started", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP, @@ -523,6 +549,7 @@ static void ebpf_obsolete_process_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE, + "", "Tasks starts exit process.", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP, @@ -533,6 +560,7 @@ static void ebpf_obsolete_process_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_EXIT, + "", "Tasks closed", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP, @@ -544,6 +572,7 @@ static void ebpf_obsolete_process_services(ebpf_module_t *em) if (em->mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_ERROR, + "", "Errors to create process or threads.", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP, @@ -585,56 +614,70 @@ static inline void ebpf_obsolete_process_cgroup_charts(ebpf_module_t *em) { */ void ebpf_obsolete_process_apps_charts(struct ebpf_module *em) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_TASK_PROCESS, - "Process started", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_PROCESS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20065, - em->update_every); - - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_TASK_THREAD, - "Threads started", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_PROCESS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20066, - em->update_every); - - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_TASK_EXIT, - "Tasks starts exit process.", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_PROCESS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20067, - em->update_every); - - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_TASK_CLOSE, - "Tasks closed", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_PROCESS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20068, - em->update_every); + struct ebpf_target *w; + int update_every = em->update_every; + for (w = apps_groups_root_target; w; w = w->next) { + if (unlikely(!(w->charts_created & (1<mode < MODE_ENTRY) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_TASK_ERROR, - "Errors to create process or threads.", + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_process_start", + "Process started.", EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20069, - em->update_every); + "app.ebpf_process_start", + 20161, + update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_thread_start", + "Threads started.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_thread_start", + 20162, + update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, 
+ w->clean_name, + "_ebpf_task_exit", + "Tasks starts exit process.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_task_exit", + 20163, + update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_task_released", + "Tasks released.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_task_released", + 20164, + update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_task_error", + "Errors to create process or threads.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_task_error", + 20165, + update_every); + } + + w->charts_created &= ~(1<mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_EBPF_SYSTEM_GROUP, NETDATA_PROCESS_ERROR_NAME, + "", "Fails to create process", EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, @@ -819,31 +866,31 @@ static void ebpf_process_sum_cgroup_pids(ebpf_process_stat_t *ps, struct pid_on_ */ static void ebpf_send_specific_process_data(char *type, ebpf_process_stat_t *values, ebpf_module_t *em) { - write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_PROCESS); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_PROCESS, ""); write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK].name, (long long) values->create_process); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_THREAD); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_THREAD, ""); write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_CLONE].name, (long long) values->create_thread); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_EXIT); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_EXIT, ""); write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT].name, (long long) values->release_call); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_CLOSE); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_CLOSE, ""); write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].name, (long long) values->release_call); - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_ERROR); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_ERROR, ""); write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT].name, (long long) values->task_err); - write_end_chart(); + ebpf_write_end_chart(); } } @@ -909,28 +956,28 @@ static void ebpf_create_specific_process_charts(char *type, ebpf_module_t *em) */ static void ebpf_obsolete_specific_process_charts(char *type, ebpf_module_t *em) { - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_PROCESS, "Process started", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_PROCESS, "", "Process started", EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_PROCESS_CREATE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5000, em->update_every); - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_THREAD, "Threads started", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_THREAD, "", "Threads started", EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, 
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_THREAD_CREATE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5001, em->update_every); - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_EXIT,"Tasks starts exit process.", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_EXIT, "","Tasks starts exit process.", EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_PROCESS_EXIT_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5002, em->update_every); - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_CLOSE,"Tasks closed", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_CLOSE, "","Tasks closed", EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_PROCESS_CLOSE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5003, em->update_every); if (em->mode < MODE_ENTRY) { - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_ERROR,"Errors to create process or threads.", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_ERROR, "","Errors to create process or threads.", EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_PROCESS_ERROR_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5004, em->update_every); @@ -989,46 +1036,46 @@ static void ebpf_create_systemd_process_charts(ebpf_module_t *em) static void ebpf_send_systemd_process_charts(ebpf_module_t *em) { ebpf_cgroup_target_t *ect; - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_ps.create_process); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_ps.create_thread); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_EXIT); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_EXIT, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_ps.exit_call); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_ps.release_call); } } - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_ERROR); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_ERROR, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_ps.task_err); } } - write_end_chart(); + ebpf_write_end_chart(); } } @@ -1118,10 +1165,10 @@ static void 
process_collector(ebpf_module_t *em) uint32_t lifetime = em->lifetime; netdata_idx_t *stats = em->hash_table_stats; memset(stats, 0, sizeof(em->hash_table_stats)); - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { usec_t dt = heartbeat_next(&hb, USEC_PER_SEC); (void)dt; - if (ebpf_exit_plugin) + if (ebpf_plugin_exit) break; if (++counter == update_every) { diff --git a/collectors/ebpf.plugin/ebpf_process.h b/collectors/ebpf.plugin/ebpf_process.h index d49e384525e016..310b321d6356a9 100644 --- a/collectors/ebpf.plugin/ebpf_process.h +++ b/collectors/ebpf.plugin/ebpf_process.h @@ -52,7 +52,8 @@ enum netdata_ebpf_stats_order { NETDATA_EBPF_ORDER_STAT_HASH_GLOBAL_TABLE_TOTAL, NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_ADDED, NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_REMOVED, - NETATA_EBPF_ORDER_STAT_ARAL_BEGIN + NETATA_EBPF_ORDER_STAT_ARAL_BEGIN, + NETDATA_EBPF_ORDER_FUNCTION_PER_THREAD, }; enum netdata_ebpf_load_mode_stats{ diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c index baeb7204e2fa2f..f14eb67d01df37 100644 --- a/collectors/ebpf.plugin/ebpf_shm.c +++ b/collectors/ebpf.plugin/ebpf_shm.c @@ -305,7 +305,8 @@ static void ebpf_obsolete_shm_services(ebpf_module_t *em) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SHMGET_CHART, - "Calls to syscall shmget(2).", + "", + "Calls to syscall shmget(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, @@ -315,7 +316,8 @@ static void ebpf_obsolete_shm_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SHMAT_CHART, - "Calls to syscall shmat(2).", + "", + "Calls to syscall shmat(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, @@ -325,7 +327,8 @@ static void ebpf_obsolete_shm_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SHMDT_CHART, - "Calls to syscall shmdt(2).", + "", + "Calls to syscall shmdt(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, @@ -335,7 +338,8 @@ static void ebpf_obsolete_shm_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SHMCTL_CHART, - "Calls to syscall shmctl(2).", + "", + "Calls to syscall shmctl(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, @@ -375,45 +379,58 @@ static inline void ebpf_obsolete_shm_cgroup_charts(ebpf_module_t *em) { */ void ebpf_obsolete_shm_apps_charts(struct ebpf_module *em) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SHMGET_CHART, - "Calls to syscall shmget(2).", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_IPC_SHM_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20191, - em->update_every); + struct ebpf_target *w; + int update_every = em->update_every; + for (w = apps_groups_root_target; w; w = w->next) { + if (unlikely(!(w->charts_created & (1<shmat(2).", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_IPC_SHM_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20192, - em->update_every); + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_shmget_call", + "Calls to syscall shmget(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_shmget_call", + 20191, + update_every); - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SHMDT_CHART, - "Calls to syscall shmdt(2).", - EBPF_COMMON_DIMENSION_CALL, - 
NETDATA_APPS_IPC_SHM_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20193, - em->update_every); + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_shmat_call", + "Calls to syscall shmat(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_shmat_call", + 20192, + update_every); - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SHMCTL_CHART, - "Calls to syscall shmctl(2).", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_IPC_SHM_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20194, - em->update_every); + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_shmdt_call", + "Calls to syscall shmdt(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_shmdt_call", + 20193, + update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_shmctl_call", + "Calls to syscall shmctl(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_shmctl_call", + 20194, + update_every); + + w->charts_created &= ~(1<next) { - if (unlikely(w->exposed && w->processes)) { - ebpf_shm_sum_pids(&w->shm, w->root_pid); - } - } + if (unlikely(!(w->charts_created & (1<next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, (long long) w->shm.get); - } - } - write_end_chart(); + ebpf_shm_sum_pids(&w->shm, w->root_pid); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMAT_CHART); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, (long long) w->shm.at); - } - } - write_end_chart(); + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_shmget_call"); + write_chart_dimension("calls", (long long) w->shm.get); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMDT_CHART); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, (long long) w->shm.dt); - } - } - write_end_chart(); + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_shmat_call"); + write_chart_dimension("calls", (long long) w->shm.at); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMCTL_CHART); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, (long long) w->shm.ctl); - } + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_shmdt_call"); + write_chart_dimension("calls", (long long) w->shm.dt); + ebpf_write_end_chart(); + + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_shmctl_call"); + write_chart_dimension("calls", (long long) w->shm.ctl); + ebpf_write_end_chart(); } - write_end_chart(); } /** @@ -768,7 +771,7 @@ static void ebpf_shm_sum_cgroup_pids(netdata_publish_shm_t *shm, struct pid_on_t static void ebpf_create_specific_shm_charts(char *type, int update_every) { ebpf_create_chart(type, NETDATA_SHMGET_CHART, - "Calls to syscall shmget(2).", + "Calls to syscall shmget(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_CGROUP_SHM_GET_CONTEXT, @@ -781,7 +784,7 @@ static void ebpf_create_specific_shm_charts(char *type, int update_every) NETDATA_EBPF_MODULE_NAME_SHM); ebpf_create_chart(type, NETDATA_SHMAT_CHART, - "Calls to syscall shmat(2).", + "Calls to syscall shmat(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_CGROUP_SHM_AT_CONTEXT, 
@@ -794,7 +797,7 @@ static void ebpf_create_specific_shm_charts(char *type, int update_every) NETDATA_EBPF_MODULE_NAME_SHM); ebpf_create_chart(type, NETDATA_SHMDT_CHART, - "Calls to syscall shmdt(2).", + "Calls to syscall shmdt(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_CGROUP_SHM_DT_CONTEXT, @@ -807,7 +810,7 @@ static void ebpf_create_specific_shm_charts(char *type, int update_every) NETDATA_EBPF_MODULE_NAME_SHM); ebpf_create_chart(type, NETDATA_SHMCTL_CHART, - "Calls to syscall shmctl(2).", + "Calls to syscall shmctl(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_CGROUP_SHM_CTL_CONTEXT, @@ -831,28 +834,32 @@ static void ebpf_create_specific_shm_charts(char *type, int update_every) static void ebpf_obsolete_specific_shm_charts(char *type, int update_every) { ebpf_write_chart_obsolete(type, NETDATA_SHMGET_CHART, - "Calls to syscall shmget(2).", + "", + "Calls to syscall shmget(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_GET_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5800, update_every); ebpf_write_chart_obsolete(type, NETDATA_SHMAT_CHART, - "Calls to syscall shmat(2).", + "", + "Calls to syscall shmat(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_AT_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5801, update_every); ebpf_write_chart_obsolete(type, NETDATA_SHMDT_CHART, - "Calls to syscall shmdt(2).", + "", + "Calls to syscall shmdt(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_DT_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5802, update_every); ebpf_write_chart_obsolete(type, NETDATA_SHMCTL_CHART, - "Calls to syscall shmctl(2).", + "", + "Calls to syscall shmctl(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_CTL_CONTEXT, @@ -869,7 +876,7 @@ static void ebpf_obsolete_specific_shm_charts(char *type, int update_every) static void ebpf_create_systemd_shm_charts(int update_every) { ebpf_create_charts_on_systemd(NETDATA_SHMGET_CHART, - "Calls to syscall shmget(2).", + "Calls to syscall shmget(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, @@ -878,7 +885,7 @@ static void ebpf_create_systemd_shm_charts(int update_every) NETDATA_SYSTEMD_SHM_GET_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every); ebpf_create_charts_on_systemd(NETDATA_SHMAT_CHART, - "Calls to syscall shmat(2).", + "Calls to syscall shmat(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, @@ -887,7 +894,7 @@ static void ebpf_create_systemd_shm_charts(int update_every) NETDATA_SYSTEMD_SHM_AT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every); ebpf_create_charts_on_systemd(NETDATA_SHMDT_CHART, - "Calls to syscall shmdt(2).", + "Calls to syscall shmdt(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, @@ -896,7 +903,7 @@ static void ebpf_create_systemd_shm_charts(int update_every) NETDATA_SYSTEMD_SHM_DT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every); ebpf_create_charts_on_systemd(NETDATA_SHMCTL_CHART, - "Calls to syscall shmctl(2).", + "Calls to syscall shmctl(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_IPC_SHM_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, @@ -913,37 +920,37 @@ static void ebpf_create_systemd_shm_charts(int update_every) static void ebpf_send_systemd_shm_charts() { 
ebpf_cgroup_target_t *ect; - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMGET_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMGET_CHART, ""); for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_shm.get); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMAT_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMAT_CHART, ""); for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_shm.at); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMDT_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMDT_CHART, ""); for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_shm.dt); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMCTL_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMCTL_CHART, ""); for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_shm.ctl); } } - write_end_chart(); + ebpf_write_end_chart(); } /* @@ -956,21 +963,21 @@ static void ebpf_send_systemd_shm_charts() */ static void ebpf_send_specific_shm_data(char *type, netdata_publish_shm_t *values) { - write_begin_chart(type, NETDATA_SHMGET_CHART); + ebpf_write_begin_chart(type, NETDATA_SHMGET_CHART, ""); write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMGET_CALL].name, (long long)values->get); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_SHMAT_CHART); + ebpf_write_begin_chart(type, NETDATA_SHMAT_CHART, ""); write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMAT_CALL].name, (long long)values->at); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_SHMDT_CHART); + ebpf_write_begin_chart(type, NETDATA_SHMDT_CHART, ""); write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMDT_CALL].name, (long long)values->dt); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_SHMCTL_CHART); + ebpf_write_begin_chart(type, NETDATA_SHMCTL_CHART, ""); write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMCTL_CALL].name, (long long)values->ctl); - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -1035,9 +1042,9 @@ static void shm_collector(ebpf_module_t *em) uint32_t lifetime = em->lifetime; netdata_idx_t *stats = em->hash_table_stats; memset(stats, 0, sizeof(em->hash_table_stats)); - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; @@ -1097,41 +1104,74 @@ static void shm_collector(ebpf_module_t *em) void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr) { struct ebpf_target *root = ptr; - ebpf_create_charts_on_apps(NETDATA_SHMGET_CHART, - "Calls to syscall shmget(2).", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_IPC_SHM_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20191, - 
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SHM); - - ebpf_create_charts_on_apps(NETDATA_SHMAT_CHART, - "Calls to syscall shmat(2).", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_IPC_SHM_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20192, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SHM); - - ebpf_create_charts_on_apps(NETDATA_SHMDT_CHART, - "Calls to syscall shmdt(2).", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_IPC_SHM_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20193, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SHM); - - ebpf_create_charts_on_apps(NETDATA_SHMCTL_CHART, - "Calls to syscall shmctl(2).", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_IPC_SHM_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20194, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SHM); + struct ebpf_target *w; + int update_every = em->update_every; + for (w = root; w; w = w->next) { + if (unlikely(!w->exposed)) + continue; + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_shmget_call", + "Calls to syscall shmget(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_shmget_call", + 20191, + update_every, + NETDATA_EBPF_MODULE_NAME_SHM); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_shmat_call", + "Calls to syscall shmat(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_shmat_call", + 20192, + update_every, + NETDATA_EBPF_MODULE_NAME_SHM); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_shmdt_call", + "Calls to syscall shmdt(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_shmdt_call", + 20193, + update_every, + NETDATA_EBPF_MODULE_NAME_SHM); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_shmctl_call", + "Calls to syscall shmctl(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_shmctl_call", + 20194, + update_every, + NETDATA_EBPF_MODULE_NAME_SHM); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + w->charts_created |= 1<apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } @@ -1222,7 +1262,7 @@ static int ebpf_shm_load_bpf(ebpf_module_t *em) if (ret) - netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name); return ret; } diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c index e4798b30c8a49c..bbb5dca1b486d1 100644 --- a/collectors/ebpf.plugin/ebpf_socket.c +++ b/collectors/ebpf.plugin/ebpf_socket.c @@ -5,9 
+5,6 @@ #include "ebpf.h" #include "ebpf_socket.h" -// ---------------------------------------------------------------------------- -// ARAL vectors used to speed up processing - /***************************************************************** * * GLOBAL VARIABLES @@ -23,16 +20,7 @@ static char *socket_id_names[NETDATA_MAX_SOCKET_VECTOR] = { "tcp_cleanup_rbuf", "tcp_connect_v4", "tcp_connect_v6", "inet_csk_accept_tcp", "inet_csk_accept_udp" }; -static ebpf_local_maps_t socket_maps[] = {{.name = "tbl_bandwidth", - .internal_input = NETDATA_COMPILED_CONNECTIONS_ALLOWED, - .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED, - .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID, - .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED, -#ifdef LIBBPF_MAJOR_VERSION - .map_type = BPF_MAP_TYPE_PERCPU_HASH -#endif - }, - {.name = "tbl_global_sock", +static ebpf_local_maps_t socket_maps[] = {{.name = "tbl_global_sock", .internal_input = NETDATA_SOCKET_COUNTER, .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED, @@ -48,16 +36,7 @@ static ebpf_local_maps_t socket_maps[] = {{.name = "tbl_bandwidth", .map_type = BPF_MAP_TYPE_PERCPU_HASH #endif }, - {.name = "tbl_conn_ipv4", - .internal_input = NETDATA_COMPILED_CONNECTIONS_ALLOWED, - .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED, - .type = NETDATA_EBPF_MAP_STATIC, - .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED, -#ifdef LIBBPF_MAJOR_VERSION - .map_type = BPF_MAP_TYPE_PERCPU_HASH -#endif - }, - {.name = "tbl_conn_ipv6", + {.name = "tbl_nd_socket", .internal_input = NETDATA_COMPILED_CONNECTIONS_ALLOWED, .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED, .type = NETDATA_EBPF_MAP_STATIC, @@ -93,14 +72,10 @@ static netdata_idx_t *socket_hash_values = NULL; static netdata_syscall_stat_t socket_aggregated_data[NETDATA_MAX_SOCKET_VECTOR]; static netdata_publish_syscall_t socket_publish_aggregated[NETDATA_MAX_SOCKET_VECTOR]; -static ebpf_bandwidth_t *bandwidth_vector = NULL; - -pthread_mutex_t nv_mutex; -netdata_vector_plot_t inbound_vectors = { .plot = NULL, .next = 0, .last = 0 }; -netdata_vector_plot_t outbound_vectors = { .plot = NULL, .next = 0, .last = 0 }; netdata_socket_t *socket_values; ebpf_network_viewer_port_list_t *listen_ports = NULL; +ebpf_addresses_t tcp_v6_connect_address = {.function = "tcp_v6_connect", .hash = 0, .addr = 0, .type = 0}; struct config socket_config = { .first_section = NULL, .last_section = NULL, @@ -108,28 +83,30 @@ struct config socket_config = { .first_section = NULL, .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, .rwlock = AVL_LOCK_INITIALIZER } }; -netdata_ebpf_targets_t socket_targets[] = { {.name = "inet_csk_accept", .mode = EBPF_LOAD_TRAMPOLINE}, - {.name = "tcp_retransmit_skb", .mode = EBPF_LOAD_TRAMPOLINE}, - {.name = "tcp_cleanup_rbuf", .mode = EBPF_LOAD_TRAMPOLINE}, - {.name = "tcp_close", .mode = EBPF_LOAD_TRAMPOLINE}, - {.name = "udp_recvmsg", .mode = EBPF_LOAD_TRAMPOLINE}, - {.name = "tcp_sendmsg", .mode = EBPF_LOAD_TRAMPOLINE}, - {.name = "udp_sendmsg", .mode = EBPF_LOAD_TRAMPOLINE}, - {.name = "tcp_v4_connect", .mode = EBPF_LOAD_TRAMPOLINE}, - {.name = "tcp_v6_connect", .mode = EBPF_LOAD_TRAMPOLINE}, +netdata_ebpf_targets_t socket_targets[] = { {.name = "inet_csk_accept", .mode = EBPF_LOAD_PROBE}, + {.name = "tcp_retransmit_skb", .mode = EBPF_LOAD_PROBE}, + {.name = "tcp_cleanup_rbuf", .mode = EBPF_LOAD_PROBE}, + {.name = "tcp_close", .mode = EBPF_LOAD_PROBE}, + {.name = "udp_recvmsg", .mode = EBPF_LOAD_PROBE}, + {.name = "tcp_sendmsg", .mode = 
EBPF_LOAD_PROBE}, + {.name = "udp_sendmsg", .mode = EBPF_LOAD_PROBE}, + {.name = "tcp_v4_connect", .mode = EBPF_LOAD_PROBE}, + {.name = "tcp_v6_connect", .mode = EBPF_LOAD_PROBE}, {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}}; -struct netdata_static_thread socket_threads = { - .name = "EBPF SOCKET READ", - .config_section = NULL, - .config_name = NULL, - .env_name = NULL, - .enabled = 1, - .thread = NULL, - .init_routine = NULL, - .start_routine = NULL +struct netdata_static_thread ebpf_read_socket = { + .name = "EBPF_READ_SOCKET", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL }; +ARAL *aral_socket_table = NULL; + #ifdef NETDATA_DEV_MODE int socket_disable_priority; #endif @@ -145,7 +122,9 @@ int socket_disable_priority; static void ebpf_socket_disable_probes(struct socket_bpf *obj) { bpf_program__set_autoload(obj->progs.netdata_inet_csk_accept_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_kprobe, false); bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_kprobe, false); bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_kretprobe, false); bpf_program__set_autoload(obj->progs.netdata_tcp_retransmit_skb_kprobe, false); bpf_program__set_autoload(obj->progs.netdata_tcp_cleanup_rbuf_kprobe, false); @@ -156,7 +135,6 @@ static void ebpf_socket_disable_probes(struct socket_bpf *obj) bpf_program__set_autoload(obj->progs.netdata_tcp_sendmsg_kprobe, false); bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_kretprobe, false); bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_kprobe, false); - bpf_program__set_autoload(obj->progs.netdata_socket_release_task_kprobe, false); } /** @@ -168,8 +146,10 @@ static void ebpf_socket_disable_probes(struct socket_bpf *obj) */ static void ebpf_socket_disable_trampoline(struct socket_bpf *obj) { - bpf_program__set_autoload(obj->progs.netdata_inet_csk_accept_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_inet_csk_accept_fexit, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_fentry, false); bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_fexit, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_fentry, false); bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_fexit, false); bpf_program__set_autoload(obj->progs.netdata_tcp_retransmit_skb_fentry, false); bpf_program__set_autoload(obj->progs.netdata_tcp_cleanup_rbuf_fentry, false); @@ -180,7 +160,6 @@ static void ebpf_socket_disable_trampoline(struct socket_bpf *obj) bpf_program__set_autoload(obj->progs.netdata_tcp_sendmsg_fexit, false); bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_fentry, false); bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_fexit, false); - bpf_program__set_autoload(obj->progs.netdata_socket_release_task_fentry, false); } /** @@ -190,14 +169,22 @@ static void ebpf_socket_disable_trampoline(struct socket_bpf *obj) */ static void ebpf_set_trampoline_target(struct socket_bpf *obj) { - bpf_program__set_attach_target(obj->progs.netdata_inet_csk_accept_fentry, 0, + bpf_program__set_attach_target(obj->progs.netdata_inet_csk_accept_fexit, 0, socket_targets[NETDATA_FCNT_INET_CSK_ACCEPT].name); + bpf_program__set_attach_target(obj->progs.netdata_tcp_v4_connect_fentry, 0, + socket_targets[NETDATA_FCNT_TCP_V4_CONNECT].name); + 
bpf_program__set_attach_target(obj->progs.netdata_tcp_v4_connect_fexit, 0, socket_targets[NETDATA_FCNT_TCP_V4_CONNECT].name); - bpf_program__set_attach_target(obj->progs.netdata_tcp_v6_connect_fexit, 0, + if (tcp_v6_connect_address.type == 'T') { + bpf_program__set_attach_target( + obj->progs.netdata_tcp_v6_connect_fentry, 0, socket_targets[NETDATA_FCNT_TCP_V6_CONNECT].name); + + bpf_program__set_attach_target(obj->progs.netdata_tcp_v6_connect_fexit, 0, socket_targets[NETDATA_FCNT_TCP_V6_CONNECT].name); + } bpf_program__set_attach_target(obj->progs.netdata_tcp_retransmit_skb_fentry, 0, socket_targets[NETDATA_FCNT_TCP_RETRANSMIT].name); @@ -205,7 +192,8 @@ static void ebpf_set_trampoline_target(struct socket_bpf *obj) bpf_program__set_attach_target(obj->progs.netdata_tcp_cleanup_rbuf_fentry, 0, socket_targets[NETDATA_FCNT_CLEANUP_RBUF].name); - bpf_program__set_attach_target(obj->progs.netdata_tcp_close_fentry, 0, socket_targets[NETDATA_FCNT_TCP_CLOSE].name); + bpf_program__set_attach_target(obj->progs.netdata_tcp_close_fentry, 0, + socket_targets[NETDATA_FCNT_TCP_CLOSE].name); bpf_program__set_attach_target(obj->progs.netdata_udp_recvmsg_fentry, 0, socket_targets[NETDATA_FCNT_UDP_RECEVMSG].name); @@ -224,8 +212,6 @@ static void ebpf_set_trampoline_target(struct socket_bpf *obj) bpf_program__set_attach_target(obj->progs.netdata_udp_sendmsg_fexit, 0, socket_targets[NETDATA_FCNT_UDP_SENDMSG].name); - - bpf_program__set_attach_target(obj->progs.netdata_socket_release_task_fentry, 0, EBPF_COMMON_FNCT_CLEAN_UP); } @@ -241,9 +227,13 @@ static inline void ebpf_socket_disable_specific_trampoline(struct socket_bpf *ob { if (sel == MODE_RETURN) { bpf_program__set_autoload(obj->progs.netdata_tcp_sendmsg_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_fentry, false); bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_fentry, false); } else { bpf_program__set_autoload(obj->progs.netdata_tcp_sendmsg_fexit, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_fexit, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_fexit, false); bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_fexit, false); } } @@ -260,9 +250,13 @@ static inline void ebpf_socket_disable_specific_probe(struct socket_bpf *obj, ne { if (sel == MODE_RETURN) { bpf_program__set_autoload(obj->progs.netdata_tcp_sendmsg_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_kprobe, false); bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_kprobe, false); } else { bpf_program__set_autoload(obj->progs.netdata_tcp_sendmsg_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_kretprobe, false); bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_kretprobe, false); } } @@ -275,26 +269,12 @@ static inline void ebpf_socket_disable_specific_probe(struct socket_bpf *obj, ne * @param obj is the main structure for bpf objects. * @param sel option selected by user. 
*/ -static int ebpf_socket_attach_probes(struct socket_bpf *obj, netdata_run_mode_t sel) +static long ebpf_socket_attach_probes(struct socket_bpf *obj, netdata_run_mode_t sel) { obj->links.netdata_inet_csk_accept_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_inet_csk_accept_kretprobe, true, socket_targets[NETDATA_FCNT_INET_CSK_ACCEPT].name); - int ret = libbpf_get_error(obj->links.netdata_inet_csk_accept_kretprobe); - if (ret) - return -1; - - obj->links.netdata_tcp_v4_connect_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_tcp_v4_connect_kretprobe, - true, - socket_targets[NETDATA_FCNT_TCP_V4_CONNECT].name); - ret = libbpf_get_error(obj->links.netdata_tcp_v4_connect_kretprobe); - if (ret) - return -1; - - obj->links.netdata_tcp_v6_connect_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_tcp_v6_connect_kretprobe, - true, - socket_targets[NETDATA_FCNT_TCP_V6_CONNECT].name); - ret = libbpf_get_error(obj->links.netdata_tcp_v6_connect_kretprobe); + long ret = libbpf_get_error(obj->links.netdata_inet_csk_accept_kretprobe); if (ret) return -1; @@ -347,6 +327,21 @@ static int ebpf_socket_attach_probes(struct socket_bpf *obj, netdata_run_mode_t ret = libbpf_get_error(obj->links.netdata_udp_sendmsg_kretprobe); if (ret) return -1; + + obj->links.netdata_tcp_v4_connect_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_tcp_v4_connect_kretprobe, + true, + socket_targets[NETDATA_FCNT_TCP_V4_CONNECT].name); + ret = libbpf_get_error(obj->links.netdata_tcp_v4_connect_kretprobe); + if (ret) + return -1; + + if (tcp_v6_connect_address.type == 'T') { + obj->links.netdata_tcp_v6_connect_kretprobe = bpf_program__attach_kprobe( + obj->progs.netdata_tcp_v6_connect_kretprobe, true, socket_targets[NETDATA_FCNT_TCP_V6_CONNECT].name); + ret = libbpf_get_error(obj->links.netdata_tcp_v6_connect_kretprobe); + if (ret) + return -1; + } } else { obj->links.netdata_tcp_sendmsg_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_tcp_sendmsg_kprobe, false, @@ -361,13 +356,23 @@ static int ebpf_socket_attach_probes(struct socket_bpf *obj, netdata_run_mode_t ret = libbpf_get_error(obj->links.netdata_udp_sendmsg_kprobe); if (ret) return -1; - } - obj->links.netdata_socket_release_task_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_socket_release_task_kprobe, - false, EBPF_COMMON_FNCT_CLEAN_UP); - ret = libbpf_get_error(obj->links.netdata_socket_release_task_kprobe); - if (ret) - return -1; + obj->links.netdata_tcp_v4_connect_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_tcp_v4_connect_kprobe, + false, + socket_targets[NETDATA_FCNT_TCP_V4_CONNECT].name); + ret = libbpf_get_error(obj->links.netdata_tcp_v4_connect_kprobe); + if (ret) + return -1; + + if (tcp_v6_connect_address.type == 'T') { + obj->links.netdata_tcp_v6_connect_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_tcp_v6_connect_kprobe, + false, + socket_targets[NETDATA_FCNT_TCP_V6_CONNECT].name); + ret = libbpf_get_error(obj->links.netdata_tcp_v6_connect_kprobe); + if (ret) + return -1; + } + } return 0; } @@ -381,11 +386,9 @@ static int ebpf_socket_attach_probes(struct socket_bpf *obj, netdata_run_mode_t */ static void ebpf_socket_set_hash_tables(struct socket_bpf *obj) { - socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH].map_fd = bpf_map__fd(obj->maps.tbl_bandwidth); socket_maps[NETDATA_SOCKET_GLOBAL].map_fd = bpf_map__fd(obj->maps.tbl_global_sock); socket_maps[NETDATA_SOCKET_LPORTS].map_fd = bpf_map__fd(obj->maps.tbl_lports); - socket_maps[NETDATA_SOCKET_TABLE_IPV4].map_fd = 
bpf_map__fd(obj->maps.tbl_conn_ipv4); - socket_maps[NETDATA_SOCKET_TABLE_IPV6].map_fd = bpf_map__fd(obj->maps.tbl_conn_ipv6); + socket_maps[NETDATA_SOCKET_OPEN_SOCKET].map_fd = bpf_map__fd(obj->maps.tbl_nd_socket); socket_maps[NETDATA_SOCKET_TABLE_UDP].map_fd = bpf_map__fd(obj->maps.tbl_nv_udp); socket_maps[NETDATA_SOCKET_TABLE_CTRL].map_fd = bpf_map__fd(obj->maps.socket_ctrl); } @@ -400,28 +403,30 @@ static void ebpf_socket_set_hash_tables(struct socket_bpf *obj) */ static void ebpf_socket_adjust_map(struct socket_bpf *obj, ebpf_module_t *em) { - ebpf_update_map_size(obj->maps.tbl_bandwidth, &socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH], - em, bpf_map__name(obj->maps.tbl_bandwidth)); - - ebpf_update_map_size(obj->maps.tbl_conn_ipv4, &socket_maps[NETDATA_SOCKET_TABLE_IPV4], - em, bpf_map__name(obj->maps.tbl_conn_ipv4)); - - ebpf_update_map_size(obj->maps.tbl_conn_ipv6, &socket_maps[NETDATA_SOCKET_TABLE_IPV6], - em, bpf_map__name(obj->maps.tbl_conn_ipv6)); + ebpf_update_map_size(obj->maps.tbl_nd_socket, &socket_maps[NETDATA_SOCKET_OPEN_SOCKET], + em, bpf_map__name(obj->maps.tbl_nd_socket)); ebpf_update_map_size(obj->maps.tbl_nv_udp, &socket_maps[NETDATA_SOCKET_TABLE_UDP], em, bpf_map__name(obj->maps.tbl_nv_udp)); - - ebpf_update_map_type(obj->maps.tbl_bandwidth, &socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH]); - ebpf_update_map_type(obj->maps.tbl_conn_ipv4, &socket_maps[NETDATA_SOCKET_TABLE_IPV4]); - ebpf_update_map_type(obj->maps.tbl_conn_ipv6, &socket_maps[NETDATA_SOCKET_TABLE_IPV6]); + ebpf_update_map_type(obj->maps.tbl_nd_socket, &socket_maps[NETDATA_SOCKET_OPEN_SOCKET]); ebpf_update_map_type(obj->maps.tbl_nv_udp, &socket_maps[NETDATA_SOCKET_TABLE_UDP]); ebpf_update_map_type(obj->maps.socket_ctrl, &socket_maps[NETDATA_SOCKET_TABLE_CTRL]); ebpf_update_map_type(obj->maps.tbl_global_sock, &socket_maps[NETDATA_SOCKET_GLOBAL]); ebpf_update_map_type(obj->maps.tbl_lports, &socket_maps[NETDATA_SOCKET_LPORTS]); } +/** + * Disable TCP V6 connect + */ +static void ebpf_disable_tcp_v6_connect(struct socket_bpf *obj) +{ + bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_fexit, false); + bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_fentry, false); +} + /** * Load and attach * @@ -450,6 +455,10 @@ static inline int ebpf_socket_load_and_attach(struct socket_bpf *obj, ebpf_modul ebpf_socket_adjust_map(obj, em); + if (tcp_v6_connect_address.type != 'T') { + ebpf_disable_tcp_v6_connect(obj); + } + int ret = socket_bpf__load(obj); if (ret) { fprintf(stderr, "failed to load BPF object: %d\n", ret); @@ -459,7 +468,7 @@ static inline int ebpf_socket_load_and_attach(struct socket_bpf *obj, ebpf_modul if (test == EBPF_LOAD_TRAMPOLINE) { ret = socket_bpf__attach(obj); } else { - ret = ebpf_socket_attach_probes(obj, em->mode); + ret = (int)ebpf_socket_attach_probes(obj, em->mode); } if (!ret) { @@ -479,182 +488,393 @@ static inline int ebpf_socket_load_and_attach(struct socket_bpf *obj, ebpf_modul *****************************************************************/ /** - * Clean internal socket plot + * Socket Free * - * Clean all structures allocated with strdupz. + * Cleanup variables after child threads to stop * - * @param ptr the pointer with addresses to clean. + * @param ptr thread data. 
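/*
 * Editor's note (sketch, not part of the patch): ebpf_update_map_size() and
 * ebpf_update_map_type() used in ebpf_socket_adjust_map() above presumably
 * wrap the libbpf setters shown here.  A map's size and type can only be
 * changed while the skeleton is still unloaded, which is why the adjustment
 * happens before socket_bpf__load().
 */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/libbpf.h>

static int adjust_map_before_load(struct bpf_map *map, unsigned int user_entries, bool per_cpu)
{
    /* Honor the user-configured table size (maximum tracked sockets). */
    if (user_entries && bpf_map__set_max_entries(map, user_entries))
        return -1;

    /* Fall back to a plain hash when a per-CPU map is not wanted. */
    enum bpf_map_type type = per_cpu ? BPF_MAP_TYPE_PERCPU_HASH : BPF_MAP_TYPE_HASH;
    if (bpf_map__set_type(map, type))
        return -1;

    return 0;
}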
*/ -static inline void clean_internal_socket_plot(netdata_socket_plot_t *ptr) +static void ebpf_socket_free(ebpf_module_t *em ) { - freez(ptr->dimension_recv); - freez(ptr->dimension_sent); - freez(ptr->resolved_name); - freez(ptr->dimension_retransmit); + pthread_mutex_lock(&ebpf_exit_cleanup); + em->enabled = NETDATA_THREAD_EBPF_STOPPED; + ebpf_update_stats(&plugin_statistics, em); + ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE); + pthread_mutex_unlock(&ebpf_exit_cleanup); } /** - * Clean socket plot + * Obsolete Systemd Socket Charts * - * Clean the allocated data for inbound and outbound vectors. -static void clean_allocated_socket_plot() -{ - if (!network_viewer_opt.enabled) - return; - - uint32_t i; - uint32_t end = inbound_vectors.last; - netdata_socket_plot_t *plot = inbound_vectors.plot; - for (i = 0; i < end; i++) { - clean_internal_socket_plot(&plot[i]); - } - - clean_internal_socket_plot(&plot[inbound_vectors.last]); - - end = outbound_vectors.last; - plot = outbound_vectors.plot; - for (i = 0; i < end; i++) { - clean_internal_socket_plot(&plot[i]); - } - clean_internal_socket_plot(&plot[outbound_vectors.last]); -} - */ - -/** - * Clean network ports allocated during initialization. + * Obsolete charts when systemd is enabled * - * @param ptr a pointer to the link list. -static void clean_network_ports(ebpf_network_viewer_port_list_t *ptr) + * @param update_every value to overwrite the update frequency set by the server. + **/ +static void ebpf_obsolete_systemd_socket_charts(int update_every) { - if (unlikely(!ptr)) - return; - - while (ptr) { - ebpf_network_viewer_port_list_t *next = ptr->next; - freez(ptr->value); - freez(ptr); - ptr = next; + int order = 20080; + ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, + NETDATA_NET_APPS_CONNECTION_TCP_V4, + "", + "Calls to tcp_v4_connection", + EBPF_COMMON_DIMENSION_CONNECTIONS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + NETDATA_SERVICES_SOCKET_TCP_V4_CONN_CONTEXT, + order++, + update_every); + + if (tcp_v6_connect_address.type == 'T') { + ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, + NETDATA_NET_APPS_CONNECTION_TCP_V6, + "", + "Calls to tcp_v6_connection", + EBPF_COMMON_DIMENSION_CONNECTIONS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + NETDATA_SERVICES_SOCKET_TCP_V6_CONN_CONTEXT, + order++, + update_every); } + + ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, + NETDATA_NET_APPS_BANDWIDTH_RECV, + "", + "Bytes received", + EBPF_COMMON_DIMENSION_BITS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT, + order++, + update_every); + + ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, + NETDATA_NET_APPS_BANDWIDTH_SENT, + "", + "Bytes sent", + EBPF_COMMON_DIMENSION_BITS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT, + order++, + update_every); + + ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, + NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, + "", + "Calls to tcp_cleanup_rbuf.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT, + order++, + update_every); + + ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, + NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, + "", + "Calls to tcp_sendmsg.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT, + order++, + update_every); + 
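/*
 * Editor's note (sketch, not part of the patch): each ebpf_write_chart_obsolete()
 * call above ultimately prints a CHART line of Netdata's external-plugin text
 * protocol carrying the "obsolete" option, which tells the agent to retire the
 * chart.  The exact formatting lives in the shared ebpf.plugin helpers; this
 * is only an illustration of the emitted line, with placeholder arguments.
 */
#include <stdio.h>

static void emit_obsolete_chart(const char *type, const char *id, const char *title,
                                const char *units, const char *family, const char *context,
                                int priority, int update_every)
{
    printf("CHART %s.%s '' '%s' '%s' '%s' '%s' stacked %d %d obsolete\n",
           type, id, title, units, family, context, priority, update_every);
    fflush(stdout);
}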
+ ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, + NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, + "", + "Calls to tcp_retransmit", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT, + order++, + update_every); + + ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, + NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, + "", + "Calls to udp_sendmsg", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT, + order++, + update_every); + + ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, + NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, + "", + "Calls to udp_recvmsg", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT, + order++, + update_every); } - */ +static void ebpf_obsolete_specific_socket_charts(char *type, int update_every); /** - * Clean service names + * Obsolete cgroup chart * - * Clean the allocated link list that stores names. + * Send obsolete for all charts created before to close. * - * @param names the link list. -static void clean_service_names(ebpf_network_viewer_dim_name_t *names) -{ - if (unlikely(!names)) - return; - - while (names) { - ebpf_network_viewer_dim_name_t *next = names->next; - freez(names->name); - freez(names); - names = next; - } -} + * @param em a pointer to `struct ebpf_module` */ +static inline void ebpf_obsolete_socket_cgroup_charts(ebpf_module_t *em) { + pthread_mutex_lock(&mutex_cgroup_shm); -/** - * Clean hostnames - * - * @param hostnames the hostnames to clean -static void clean_hostnames(ebpf_network_viewer_hostname_list_t *hostnames) -{ - if (unlikely(!hostnames)) - return; + ebpf_obsolete_systemd_socket_charts(em->update_every); + + ebpf_cgroup_target_t *ect; + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (ect->systemd) + continue; - while (hostnames) { - ebpf_network_viewer_hostname_list_t *next = hostnames->next; - freez(hostnames->value); - simple_pattern_free(hostnames->value_pattern); - freez(hostnames); - hostnames = next; + ebpf_obsolete_specific_socket_charts(ect->name, em->update_every); } + pthread_mutex_unlock(&mutex_cgroup_shm); } - */ /** - * Clean port Structure + * Create apps charts * - * Clean the allocated list. + * Call ebpf_create_chart to create the charts on apps submenu. * - * @param clean the list that will be cleaned + * @param em a pointer to the structure with the default values. */ -void clean_port_structure(ebpf_network_viewer_port_list_t **clean) +void ebpf_socket_obsolete_apps_charts(struct ebpf_module *em) { - ebpf_network_viewer_port_list_t *move = *clean; - while (move) { - ebpf_network_viewer_port_list_t *next = move->next; - freez(move->value); - freez(move); + int order = 20130; + struct ebpf_target *w; + int update_every = em->update_every; + for (w = apps_groups_root_target; w; w = w->next) { + if (unlikely(!(w->charts_created & (1<clean_name, + "_ebpf_call_tcp_v4_connection", + "Calls to tcp_v4_connection.", + EBPF_COMMON_DIMENSION_CONNECTIONS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_tcp_v4_connection", + order++, + update_every); -/** - * Clean IP structure - * - * Clean the allocated list. 
- * - * @param clean the list that will be cleaned - */ -static void clean_ip_structure(ebpf_network_viewer_ip_list_t **clean) -{ - ebpf_network_viewer_ip_list_t *move = *clean; - while (move) { - ebpf_network_viewer_ip_list_t *next = move->next; - freez(move->value); - freez(move); + if (tcp_v6_connect_address.type == 'T') { + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_tcp_v6_connection", + "Calls to tcp_v6_connection.", + EBPF_COMMON_DIMENSION_CONNECTIONS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_tcp_v6_connection", + order++, + update_every); + } + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_sock_bytes_sent", + "Bytes sent.", + EBPF_COMMON_DIMENSION_BITS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_sock_bytes_sent", + order++, + update_every); - move = next; + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_sock_bytes_received", + "Bytes received.", + EBPF_COMMON_DIMENSION_BITS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_sock_bytes_received", + order++, + update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_tcp_sendmsg", + "Calls to tcp_sendmsg.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_tcp_sendmsg", + order++, + update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_tcp_cleanup_rbuf", + "Calls to tcp_cleanup_rbuf.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_tcp_cleanup_rbuf", + order++, + update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_tcp_retransmit", + "Calls to tcp_retransmit.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_tcp_retransmit", + order++, + update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_udp_sendmsg", + "Calls to udp_sendmsg.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_udp_sendmsg", + order++, + update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_udp_recvmsg", + "Calls to udp_recvmsg.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_udp_recvmsg", + order++, + update_every); + + w->charts_created &= ~(1<update_every); + + ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY, + NETDATA_TCP_OUTBOUND_CONNECTIONS, + "", + "TCP outbound connections.", + EBPF_COMMON_DIMENSION_CONNECTIONS, + NETDATA_SOCKET_KERNEL_FUNCTIONS, + NETDATA_EBPF_CHART_TYPE_LINE, + NULL, + order++, + em->update_every); + + + ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY, + NETDATA_TCP_FUNCTION_COUNT, + "", + "Calls to internal functions", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_SOCKET_KERNEL_FUNCTIONS, + NETDATA_EBPF_CHART_TYPE_LINE, + NULL, + order++, + em->update_every); + + ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY, + NETDATA_TCP_FUNCTION_BITS, + "", + "TCP bandwidth", + EBPF_COMMON_DIMENSION_BITS, + NETDATA_SOCKET_KERNEL_FUNCTIONS, + NETDATA_EBPF_CHART_TYPE_LINE, + NULL, + order++, + em->update_every); - clean_network_ports(network_viewer_opt.included_port); - clean_network_ports(network_viewer_opt.excluded_port); - clean_service_names(network_viewer_opt.names); - 
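/*
 * Editor's note (sketch, not part of the patch): the per-app charts above are
 * created once per target and recorded in the target's charts_created bitmask;
 * the obsolete path clears the same per-module bit so the charts can be
 * recreated if the target reappears.  EBPF_MODULE_SOCKET_IDX is used here only
 * as a placeholder for the bit position defined by the plugin headers.
 */
#include <stdint.h>
#include <stdbool.h>

#define EBPF_MODULE_SOCKET_IDX 5    /* placeholder value, illustration only */

static inline bool socket_charts_exist(uint32_t charts_created)
{
    return charts_created & (1U << EBPF_MODULE_SOCKET_IDX);
}

static inline uint32_t socket_charts_mark_created(uint32_t charts_created)
{
    return charts_created | (1U << EBPF_MODULE_SOCKET_IDX);
}

static inline uint32_t socket_charts_mark_obsolete(uint32_t charts_created)
{
    return charts_created & ~(1U << EBPF_MODULE_SOCKET_IDX);
}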
clean_hostnames(network_viewer_opt.included_hostnames); - clean_hostnames(network_viewer_opt.excluded_hostnames); - */ + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY, + NETDATA_TCP_FUNCTION_ERROR, + "", + "TCP errors", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_SOCKET_KERNEL_FUNCTIONS, + NETDATA_EBPF_CHART_TYPE_LINE, + NULL, + order++, + em->update_every); + } + + ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY, + NETDATA_TCP_RETRANSMIT, + "", + "Packages retransmitted", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_SOCKET_KERNEL_FUNCTIONS, + NETDATA_EBPF_CHART_TYPE_LINE, + NULL, + order++, + em->update_every); + + ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY, + NETDATA_UDP_FUNCTION_COUNT, + "", + "UDP calls", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_SOCKET_KERNEL_FUNCTIONS, + NETDATA_EBPF_CHART_TYPE_LINE, + NULL, + order++, + em->update_every); + + ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY, + NETDATA_UDP_FUNCTION_BITS, + "", + "UDP bandwidth", + EBPF_COMMON_DIMENSION_BITS, + NETDATA_SOCKET_KERNEL_FUNCTIONS, + NETDATA_EBPF_CHART_TYPE_LINE, + NULL, + order++, + em->update_every); - pthread_mutex_destroy(&nv_mutex); + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY, + NETDATA_UDP_FUNCTION_ERROR, + "", + "UDP errors", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_SOCKET_KERNEL_FUNCTIONS, + NETDATA_EBPF_CHART_TYPE_LINE, + NULL, + order++, + em->update_every); + } - pthread_mutex_lock(&ebpf_exit_cleanup); - em->enabled = NETDATA_THREAD_EBPF_STOPPED; - ebpf_update_stats(&plugin_statistics, em); - ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE); - pthread_mutex_unlock(&ebpf_exit_cleanup); + fflush(stdout); } - /** * Socket exit * @@ -665,23 +885,33 @@ static void ebpf_socket_free(ebpf_module_t *em ) static void ebpf_socket_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - pthread_mutex_lock(&nv_mutex); - if (socket_threads.thread) - netdata_thread_cancel(*socket_threads.thread); - pthread_mutex_unlock(&nv_mutex); - ebpf_socket_free(em); -} -/** - * Socket cleanup - * - * Clean up allocated addresses. - * - * @param ptr thread data. 
- */ -void ebpf_socket_cleanup(void *ptr) -{ - UNUSED(ptr); + if (ebpf_read_socket.thread) + netdata_thread_cancel(*ebpf_read_socket.thread); + + if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) { + pthread_mutex_lock(&lock); + + if (em->cgroup_charts) { + ebpf_obsolete_socket_cgroup_charts(em); + fflush(stdout); + } + + if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) { + ebpf_socket_obsolete_apps_charts(em); + fflush(stdout); + } + + ebpf_socket_obsolete_global_charts(em); + +#ifdef NETDATA_DEV_MODE + if (ebpf_aral_socket_pid) + ebpf_statistic_obsolete_aral_chart(em, socket_disable_priority); +#endif + pthread_mutex_unlock(&lock); + } + + ebpf_socket_free(em); } /***************************************************************** @@ -737,212 +967,44 @@ static void ebpf_update_global_publish( } /** - * Update Network Viewer plot data - * - * @param plot the structure where the data will be stored - * @param sock the last update from the socket - */ -static inline void update_nv_plot_data(netdata_plot_values_t *plot, netdata_socket_t *sock) -{ - if (sock->ct != plot->last_time) { - plot->last_time = sock->ct; - plot->plot_recv_packets = sock->recv_packets; - plot->plot_sent_packets = sock->sent_packets; - plot->plot_recv_bytes = sock->recv_bytes; - plot->plot_sent_bytes = sock->sent_bytes; - plot->plot_retransmit = sock->retransmit; - } - - sock->recv_packets = 0; - sock->sent_packets = 0; - sock->recv_bytes = 0; - sock->sent_bytes = 0; - sock->retransmit = 0; -} - -/** - * Calculate Network Viewer Plot + * Send Global Inbound connection * - * Do math with collected values before to plot data. + * Send number of connections read per protocol. */ -static inline void calculate_nv_plot() +static void ebpf_socket_send_global_inbound_conn() { - pthread_mutex_lock(&nv_mutex); - uint32_t i; - uint32_t end = inbound_vectors.next; - for (i = 0; i < end; i++) { - update_nv_plot_data(&inbound_vectors.plot[i].plot, &inbound_vectors.plot[i].sock); - } - inbound_vectors.max_plot = end; - - // The 'Other' dimension is always calculated for the chart to have at least one dimension - update_nv_plot_data(&inbound_vectors.plot[inbound_vectors.last].plot, - &inbound_vectors.plot[inbound_vectors.last].sock); - - end = outbound_vectors.next; - for (i = 0; i < end; i++) { - update_nv_plot_data(&outbound_vectors.plot[i].plot, &outbound_vectors.plot[i].sock); - } - outbound_vectors.max_plot = end; - - /* - // The 'Other' dimension is always calculated for the chart to have at least one dimension - update_nv_plot_data(&outbound_vectors.plot[outbound_vectors.last].plot, - &outbound_vectors.plot[outbound_vectors.last].sock); - */ - pthread_mutex_unlock(&nv_mutex); -} + uint64_t udp_conn = 0; + uint64_t tcp_conn = 0; + ebpf_network_viewer_port_list_t *move = listen_ports; + while (move) { + if (move->protocol == IPPROTO_TCP) + tcp_conn += move->connections; + else + udp_conn += move->connections; -/** - * Network viewer send bytes - * - * @param ptr the structure with values to plot - * @param chart the chart name. 
- */ -static inline void ebpf_socket_nv_send_bytes(netdata_vector_plot_t *ptr, char *chart) -{ - uint32_t i; - uint32_t end = ptr->last_plot; - netdata_socket_plot_t *w = ptr->plot; - collected_number value; - - write_begin_chart(NETDATA_EBPF_FAMILY, chart); - for (i = 0; i < end; i++) { - value = ((collected_number) w[i].plot.plot_sent_bytes); - write_chart_dimension(w[i].dimension_sent, value); - value = (collected_number) w[i].plot.plot_recv_bytes; - write_chart_dimension(w[i].dimension_recv, value); - } - - i = ptr->last; - value = ((collected_number) w[i].plot.plot_sent_bytes); - write_chart_dimension(w[i].dimension_sent, value); - value = (collected_number) w[i].plot.plot_recv_bytes; - write_chart_dimension(w[i].dimension_recv, value); - write_end_chart(); -} + move = move->next; + } -/** - * Network Viewer Send packets - * - * @param ptr the structure with values to plot - * @param chart the chart name. - */ -static inline void ebpf_socket_nv_send_packets(netdata_vector_plot_t *ptr, char *chart) -{ - uint32_t i; - uint32_t end = ptr->last_plot; - netdata_socket_plot_t *w = ptr->plot; - collected_number value; - - write_begin_chart(NETDATA_EBPF_FAMILY, chart); - for (i = 0; i < end; i++) { - value = ((collected_number)w[i].plot.plot_sent_packets); - write_chart_dimension(w[i].dimension_sent, value); - value = (collected_number) w[i].plot.plot_recv_packets; - write_chart_dimension(w[i].dimension_recv, value); - } - - i = ptr->last; - value = ((collected_number)w[i].plot.plot_sent_packets); - write_chart_dimension(w[i].dimension_sent, value); - value = (collected_number)w[i].plot.plot_recv_packets; - write_chart_dimension(w[i].dimension_recv, value); - write_end_chart(); + ebpf_write_begin_chart(NETDATA_EBPF_IP_FAMILY, NETDATA_INBOUND_CONNECTIONS, ""); + write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_INCOMING_CONNECTION_TCP].name, (long long) tcp_conn); + write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_INCOMING_CONNECTION_UDP].name, (long long) udp_conn); + ebpf_write_end_chart(); } /** - * Network Viewer Send Retransmit + * Send data to Netdata calling auxiliary functions. * - * @param ptr the structure with values to plot - * @param chart the chart name. 
+ * @param em the structure with thread information */ -static inline void ebpf_socket_nv_send_retransmit(netdata_vector_plot_t *ptr, char *chart) +static void ebpf_socket_send_data(ebpf_module_t *em) { - uint32_t i; - uint32_t end = ptr->last_plot; - netdata_socket_plot_t *w = ptr->plot; - collected_number value; - - write_begin_chart(NETDATA_EBPF_FAMILY, chart); - for (i = 0; i < end; i++) { - value = (collected_number) w[i].plot.plot_retransmit; - write_chart_dimension(w[i].dimension_retransmit, value); - } + netdata_publish_vfs_common_t common_tcp; + netdata_publish_vfs_common_t common_udp; + ebpf_update_global_publish(socket_publish_aggregated, &common_tcp, &common_udp, socket_aggregated_data); - i = ptr->last; - value = (collected_number)w[i].plot.plot_retransmit; - write_chart_dimension(w[i].dimension_retransmit, value); - write_end_chart(); -} - -/** - * Send network viewer data - * - * @param ptr the pointer to plot data - */ -static void ebpf_socket_send_nv_data(netdata_vector_plot_t *ptr) -{ - if (!ptr->flags) - return; - - if (ptr == (netdata_vector_plot_t *)&outbound_vectors) { - ebpf_socket_nv_send_bytes(ptr, NETDATA_NV_OUTBOUND_BYTES); - fflush(stdout); - - ebpf_socket_nv_send_packets(ptr, NETDATA_NV_OUTBOUND_PACKETS); - fflush(stdout); - - ebpf_socket_nv_send_retransmit(ptr, NETDATA_NV_OUTBOUND_RETRANSMIT); - fflush(stdout); - } else { - ebpf_socket_nv_send_bytes(ptr, NETDATA_NV_INBOUND_BYTES); - fflush(stdout); - - ebpf_socket_nv_send_packets(ptr, NETDATA_NV_INBOUND_PACKETS); - fflush(stdout); - } -} - -/** - * Send Global Inbound connection - * - * Send number of connections read per protocol. - */ -static void ebpf_socket_send_global_inbound_conn() -{ - uint64_t udp_conn = 0; - uint64_t tcp_conn = 0; - ebpf_network_viewer_port_list_t *move = listen_ports; - while (move) { - if (move->protocol == IPPROTO_TCP) - tcp_conn += move->connections; - else - udp_conn += move->connections; - - move = move->next; - } - - write_begin_chart(NETDATA_EBPF_IP_FAMILY, NETDATA_INBOUND_CONNECTIONS); - write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_INCOMING_CONNECTION_TCP].name, (long long) tcp_conn); - write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_INCOMING_CONNECTION_UDP].name, (long long) udp_conn); - write_end_chart(); -} - -/** - * Send data to Netdata calling auxiliary functions. - * - * @param em the structure with thread information - */ -static void ebpf_socket_send_data(ebpf_module_t *em) -{ - netdata_publish_vfs_common_t common_tcp; - netdata_publish_vfs_common_t common_udp; - ebpf_update_global_publish(socket_publish_aggregated, &common_tcp, &common_udp, socket_aggregated_data); - - ebpf_socket_send_global_inbound_conn(); - write_count_chart(NETDATA_TCP_OUTBOUND_CONNECTIONS, NETDATA_EBPF_IP_FAMILY, - &socket_publish_aggregated[NETDATA_IDX_TCP_CONNECTION_V4], 2); + ebpf_socket_send_global_inbound_conn(); + write_count_chart(NETDATA_TCP_OUTBOUND_CONNECTIONS, NETDATA_EBPF_IP_FAMILY, + &socket_publish_aggregated[NETDATA_IDX_TCP_CONNECTION_V4], 2); // We read bytes from function arguments, but bandwidth is given in bits, // so we need to multiply by 8 to convert for the final value. 
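/*
 * Editor's note (sketch, not part of the patch): the ebpf_write_begin_chart(),
 * write_chart_dimension() and ebpf_write_end_chart() helpers used above wrap
 * the BEGIN/SET/END data lines of Netdata's external-plugin protocol, and the
 * "multiply by 8" note is the bytes -> kilobits scaling applied to the
 * bandwidth dimensions.  Dimension ids below are placeholders.
 */
#include <stdio.h>
#include <stdint.h>

static void emit_bandwidth_sample(const char *chart_type, const char *chart_id,
                                  uint64_t bytes_sent, uint64_t bytes_received)
{
    /* Collected values are bytes; the charts are plotted in kilobits. */
    long long sent_kbits = (long long)((bytes_sent * 8ULL) / 1000ULL);
    long long recv_kbits = (long long)((bytes_received * 8ULL) / 1000ULL);

    printf("BEGIN %s.%s\n", chart_type, chart_id);
    printf("SET sent = %lld\n", sent_kbits);
    printf("SET received = %lld\n", recv_kbits);
    printf("END\n");
    fflush(stdout);
}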
@@ -967,31 +1029,6 @@ static void ebpf_socket_send_data(ebpf_module_t *em) } } -/** - * Sum values for pid - * - * @param root the structure with all available PIDs - * - * @param offset the address that we are reading - * - * @return it returns the sum of all PIDs - */ -long long ebpf_socket_sum_values_for_pids(struct ebpf_pid_on_target *root, size_t offset) -{ - long long ret = 0; - while (root) { - int32_t pid = root->pid; - ebpf_socket_publish_apps_t *w = socket_bandwidth_curr[pid]; - if (w) { - ret += get_value_from_structure((char *)w, offset); - } - - root = root->next; - } - - return ret; -} - /** * Send data to Netdata calling auxiliary functions. * @@ -1003,100 +1040,74 @@ void ebpf_socket_send_apps_data(ebpf_module_t *em, struct ebpf_target *root) UNUSED(em); struct ebpf_target *w; - collected_number value; + // This algorithm is improved in https://github.com/netdata/netdata/pull/16030 + collected_number values[9]; - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_CONNECTION_TCP_V4); for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t, - call_tcp_v4_connection)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); + if (unlikely(!(w->charts_created & (1<next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t, - call_tcp_v6_connection)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); + struct ebpf_pid_on_target *move = w->root_pid; + // Simplify algorithm, but others will appear only in https://github.com/netdata/netdata/pull/16030 + memset(values, 0, sizeof(values)); + while (move) { + int32_t pid = move->pid; + ebpf_socket_publish_apps_t *ws = socket_bandwidth_curr[pid]; + if (ws) { + values[0] += (collected_number) ws->call_tcp_v4_connection; + values[1] += (collected_number) ws->call_tcp_v6_connection; + values[2] += (collected_number) ws->bytes_sent; + values[3] += (collected_number) ws->bytes_received; + values[4] += (collected_number) ws->call_tcp_sent; + values[5] += (collected_number) ws->call_tcp_received; + values[6] += (collected_number) ws->retransmit; + values[7] += (collected_number) ws->call_udp_sent; + values[8] += (collected_number) ws->call_udp_received; + } - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_SENT); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t, - bytes_sent)); - // We multiply by 0.008, because we read bytes, but we display bits - write_chart_dimension(w->name, ((value)*8)/1000); + move = move->next; } - } - write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_RECV); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t, - bytes_received)); - // We multiply by 0.008, because we read bytes, but we display bits - write_chart_dimension(w->name, ((value)*8)/1000); - } - } - write_end_chart(); + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_tcp_v4_connection"); + write_chart_dimension("connections", values[0]); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && 
w->processes)) { - value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t, - call_tcp_sent)); - write_chart_dimension(w->name, value); + if (tcp_v6_connect_address.type == 'T') { + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_call_tcp_v6_connection"); + write_chart_dimension("calls", values[1]); + ebpf_write_end_chart(); } - } - write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t, - call_tcp_received)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_sock_bytes_sent"); + // We multiply by 0.008, because we read bytes, but we display bits + write_chart_dimension("bandwidth", ((values[2])*8)/1000); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t, - retransmit)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_sock_bytes_received"); + // We multiply by 0.008, because we read bytes, but we display bits + write_chart_dimension("bandwidth", ((values[3])*8)/1000); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t, - call_udp_sent)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_tcp_sendmsg"); + write_chart_dimension("calls", values[4]); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t, - call_udp_received)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_tcp_cleanup_rbuf"); + write_chart_dimension("calls", values[5]); + ebpf_write_end_chart(); + + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_tcp_retransmit"); + write_chart_dimension("calls", values[6]); + ebpf_write_end_chart(); + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_udp_sendmsg"); + write_chart_dimension("calls", values[7]); + ebpf_write_end_chart(); + + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_udp_recvmsg"); + write_chart_dimension("calls", values[8]); + ebpf_write_end_chart(); + } } /***************************************************************** @@ -1112,7 +1123,7 @@ void ebpf_socket_send_apps_data(ebpf_module_t *em, struct ebpf_target *root) * * @param em a pointer to the structure with the default values. 
*/ -static void ebpf_create_global_charts(ebpf_module_t *em) +static void ebpf_socket_create_global_charts(ebpf_module_t *em) { int order = 21070; ebpf_create_chart(NETDATA_EBPF_IP_FAMILY, @@ -1238,217 +1249,153 @@ static void ebpf_create_global_charts(ebpf_module_t *em) void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr) { struct ebpf_target *root = ptr; - int order = 20080; - ebpf_create_charts_on_apps(NETDATA_NET_APPS_CONNECTION_TCP_V4, - "Calls to tcp_v4_connection", EBPF_COMMON_DIMENSION_CONNECTIONS, - NETDATA_APPS_NET_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - order++, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); - - ebpf_create_charts_on_apps(NETDATA_NET_APPS_CONNECTION_TCP_V6, - "Calls to tcp_v6_connection", EBPF_COMMON_DIMENSION_CONNECTIONS, - NETDATA_APPS_NET_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - order++, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); - - ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_SENT, - "Bytes sent", EBPF_COMMON_DIMENSION_BITS, - NETDATA_APPS_NET_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - order++, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); - - ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_RECV, - "bytes received", EBPF_COMMON_DIMENSION_BITS, - NETDATA_APPS_NET_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - order++, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); - - ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, - "Calls for tcp_sendmsg", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_NET_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - order++, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); - - ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, - "Calls for tcp_cleanup_rbuf", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_NET_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - order++, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); - - ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, - "Calls for tcp_retransmit", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_NET_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - order++, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); - - ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, - "Calls for udp_sendmsg", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_NET_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - order++, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); - - ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, - "Calls for udp_recvmsg", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_NET_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - order++, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); - - em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; -} - -/** - * Create network viewer chart - * - * Create common charts. 
- * - * @param id chart id - * @param title chart title - * @param units units label - * @param family group name used to attach the chart on dashboard - * @param order chart order - * @param update_every value to overwrite the update frequency set by the server. - * @param ptr plot structure with values. - */ -static void ebpf_socket_create_nv_chart(char *id, char *title, char *units, - char *family, int order, int update_every, netdata_vector_plot_t *ptr) -{ - ebpf_write_chart_cmd(NETDATA_EBPF_FAMILY, - id, - title, - units, - family, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - order, - update_every, - NETDATA_EBPF_MODULE_NAME_SOCKET); - - uint32_t i; - uint32_t end = ptr->last_plot; - netdata_socket_plot_t *w = ptr->plot; - for (i = 0; i < end; i++) { - fprintf(stdout, "DIMENSION %s '' incremental -1 1\n", w[i].dimension_sent); - fprintf(stdout, "DIMENSION %s '' incremental 1 1\n", w[i].dimension_recv); - } - - end = ptr->last; - fprintf(stdout, "DIMENSION %s '' incremental -1 1\n", w[end].dimension_sent); - fprintf(stdout, "DIMENSION %s '' incremental 1 1\n", w[end].dimension_recv); -} - -/** - * Create network viewer retransmit - * - * Create a specific chart. - * - * @param id the chart id - * @param title the chart title - * @param units the units label - * @param family the group name used to attach the chart on dashboard - * @param order the chart order - * @param update_every value to overwrite the update frequency set by the server. - * @param ptr the plot structure with values. - */ -static void ebpf_socket_create_nv_retransmit(char *id, char *title, char *units, - char *family, int order, int update_every, netdata_vector_plot_t *ptr) -{ - ebpf_write_chart_cmd(NETDATA_EBPF_FAMILY, - id, - title, - units, - family, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - order, - update_every, - NETDATA_EBPF_MODULE_NAME_SOCKET); - - uint32_t i; - uint32_t end = ptr->last_plot; - netdata_socket_plot_t *w = ptr->plot; - for (i = 0; i < end; i++) { - fprintf(stdout, "DIMENSION %s '' incremental 1 1\n", w[i].dimension_retransmit); - } - - end = ptr->last; - fprintf(stdout, "DIMENSION %s '' incremental 1 1\n", w[end].dimension_retransmit); -} - -/** - * Create Network Viewer charts - * - * Recreate the charts when new sockets are created. - * - * @param ptr a pointer for inbound or outbound vectors. - * @param update_every value to overwrite the update frequency set by the server. 
- */ -static void ebpf_socket_create_nv_charts(netdata_vector_plot_t *ptr, int update_every) -{ - // We do not have new sockets, so we do not need move forward - if (ptr->max_plot == ptr->last_plot) - return; - - ptr->last_plot = ptr->max_plot; - - if (ptr == (netdata_vector_plot_t *)&outbound_vectors) { - ebpf_socket_create_nv_chart(NETDATA_NV_OUTBOUND_BYTES, - "Outbound connections (bytes).", EBPF_COMMON_DIMENSION_BYTES, - NETDATA_NETWORK_CONNECTIONS_GROUP, - 21080, - update_every, ptr); - - ebpf_socket_create_nv_chart(NETDATA_NV_OUTBOUND_PACKETS, - "Outbound connections (packets)", - EBPF_COMMON_DIMENSION_PACKETS, - NETDATA_NETWORK_CONNECTIONS_GROUP, - 21082, - update_every, ptr); - - ebpf_socket_create_nv_retransmit(NETDATA_NV_OUTBOUND_RETRANSMIT, - "Retransmitted packets", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_NETWORK_CONNECTIONS_GROUP, - 21083, - update_every, ptr); - } else { - ebpf_socket_create_nv_chart(NETDATA_NV_INBOUND_BYTES, - "Inbound connections (bytes)", EBPF_COMMON_DIMENSION_BYTES, - NETDATA_NETWORK_CONNECTIONS_GROUP, - 21084, - update_every, ptr); + struct ebpf_target *w; + int order = 20130; + int update_every = em->update_every; + for (w = root; w; w = w->next) { + if (unlikely(!w->exposed)) + continue; - ebpf_socket_create_nv_chart(NETDATA_NV_INBOUND_PACKETS, - "Inbound connections (packets)", - EBPF_COMMON_DIMENSION_PACKETS, - NETDATA_NETWORK_CONNECTIONS_GROUP, - 21085, - update_every, ptr); + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_tcp_v4_connection", + "Calls to tcp_v4_connection.", + EBPF_COMMON_DIMENSION_CONNECTIONS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_tcp_v4_connection", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_SOCKET); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION connections '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + if (tcp_v6_connect_address.type == 'T') { + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_tcp_v6_connection", + "Calls to tcp_v6_connection.", + EBPF_COMMON_DIMENSION_CONNECTIONS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_tcp_v6_connection", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_SOCKET); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION connections '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + } + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_sock_bytes_sent", + "Bytes sent.", + EBPF_COMMON_DIMENSION_BITS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_sock_bytes_sent", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_SOCKET); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION bandwidth '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_sock_bytes_received", + "Bytes received.", + EBPF_COMMON_DIMENSION_BITS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_sock_bytes_received", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_SOCKET); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION bandwidth '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_tcp_sendmsg", + "Calls to 
tcp_sendmsg.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_tcp_sendmsg", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_SOCKET); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_tcp_cleanup_rbuf", + "Calls to tcp_cleanup_rbuf.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_tcp_cleanup_rbuf", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_SOCKET); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_tcp_retransmit", + "Calls to tcp_retransmit.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_tcp_retransmit", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_SOCKET); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_udp_sendmsg", + "Calls to udp_sendmsg.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_udp_sendmsg", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_SOCKET); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_udp_recvmsg", + "Calls to udp_recvmsg.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_udp_recvmsg", + order, + update_every, + NETDATA_EBPF_MODULE_NAME_SOCKET); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + w->charts_created |= 1<flags |= NETWORK_VIEWER_CHARTS_CREATED; + em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } /***************************************************************** @@ -1517,7 +1464,7 @@ static int ebpf_is_specific_ip_inside_range(union netdata_ip_t *cmp, int family) * * @return It returns 1 when cmp is inside and 0 otherwise. */ -static int is_port_inside_range(uint16_t cmp) +static int ebpf_is_port_inside_range(uint16_t cmp) { // We do not have restrictions for ports. if (!network_viewer_opt.excluded_port && !network_viewer_opt.included_port) @@ -1525,7 +1472,6 @@ static int is_port_inside_range(uint16_t cmp) // Test if port is excluded ebpf_network_viewer_port_list_t *move = network_viewer_opt.excluded_port; - cmp = htons(cmp); while (move) { if (move->cmp_first <= cmp && cmp <= move->cmp_last) return 0; @@ -1583,493 +1529,322 @@ int hostname_matches_pattern(char *cmp) * Compare destination addresses and destination ports to define next steps * * @param key the socket read from kernel ring - * @param family the family used to compare IPs (AF_INET and AF_INET6) + * @param data the socket data used also used to refuse some sockets. * * @return It returns 1 if this socket is inside the ranges and 0 otherwise. 
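/*
 * Editor's note (sketch, not part of the patch): ebpf_create_chart_labels("app_group", ...)
 * and ebpf_commit_label() above attach a per-app label to each chart before its
 * DIMENSION line is printed.  In the external-plugin protocol this presumably
 * corresponds to CLABEL/CLABEL_COMMIT lines, roughly as below; the DIMENSION
 * format is the one printed verbatim in the code above.
 */
#include <stdio.h>

static void emit_app_chart_label_and_dimension(const char *app_group, const char *dimension,
                                               const char *algorithm)
{
    /* The trailing 1 stands for the label source, mirroring the call above. */
    printf("CLABEL 'app_group' '%s' 1\n", app_group);
    printf("CLABEL_COMMIT\n");
    printf("DIMENSION %s '' %s 1 1\n", dimension, algorithm);
}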
*/ -int is_socket_allowed(netdata_socket_idx_t *key, int family) -{ - if (!is_port_inside_range(key->dport)) - return 0; - - return ebpf_is_specific_ip_inside_range(&key->daddr, family); -} - -/** - * Compare sockets - * - * Compare destination address and destination port. - * We do not compare source port, because it is random. - * We also do not compare source address, because inbound and outbound connections are stored in separated AVL trees. - * - * @param a pointer to netdata_socket_plot - * @param b pointer to netdata_socket_plot - * - * @return It returns 0 case the values are equal, 1 case a is bigger than b and -1 case a is smaller than b. - */ -static int ebpf_compare_sockets(void *a, void *b) +int ebpf_is_socket_allowed(netdata_socket_idx_t *key, netdata_socket_t *data) { - struct netdata_socket_plot *val1 = a; - struct netdata_socket_plot *val2 = b; - int cmp = 0; - - // We do not need to compare val2 family, because data inside hash table is always from the same family - if (val1->family == AF_INET) { //IPV4 - if (network_viewer_opt.included_port || network_viewer_opt.excluded_port) - cmp = memcmp(&val1->index.dport, &val2->index.dport, sizeof(uint16_t)); + int ret = 0; + // If family is not AF_UNSPEC and it is different of specified + if (network_viewer_opt.family && network_viewer_opt.family != data->family) + goto endsocketallowed; - if (!cmp) { - cmp = memcmp(&val1->index.daddr.addr32[0], &val2->index.daddr.addr32[0], sizeof(uint32_t)); - } - } else { - if (network_viewer_opt.included_port || network_viewer_opt.excluded_port) - cmp = memcmp(&val1->index.dport, &val2->index.dport, sizeof(uint16_t)); + if (!ebpf_is_port_inside_range(key->dport)) + goto endsocketallowed; - if (!cmp) { - cmp = memcmp(&val1->index.daddr.addr32, &val2->index.daddr.addr32, 4*sizeof(uint32_t)); - } - } + ret = ebpf_is_specific_ip_inside_range(&key->daddr, data->family); - return cmp; +endsocketallowed: + return ret; } /** - * Build dimension name - * - * Fill dimension name vector with values given - * - * @param dimname the output vector - * @param hostname the hostname for the socket. - * @param service_name the service used to connect. - * @param proto the protocol used in this connection - * @param family is this IPV4(AF_INET) or IPV6(AF_INET6) + * Hash accumulator * - * @return it returns the size of the data copied on success and -1 otherwise. + * @param values the values used to calculate the data. + * @param family the connection family + * @param end the values size. */ -static inline int ebpf_build_outbound_dimension_name(char *dimname, char *hostname, char *service_name, - char *proto, int family) +static void ebpf_hash_socket_accumulator(netdata_socket_t *values, int end) { - if (network_viewer_opt.included_port || network_viewer_opt.excluded_port) - return snprintf(dimname, CONFIG_MAX_NAME - 7, (family == AF_INET)?"%s:%s:%s_":"%s:%s:[%s]_", - service_name, proto, hostname); - - return snprintf(dimname, CONFIG_MAX_NAME - 7, (family == AF_INET)?"%s:%s_":"%s:[%s]_", - proto, hostname); -} + int i; + uint8_t protocol = values[0].protocol; + uint64_t ct = values[0].current_timestamp; + uint64_t ft = values[0].first_timestamp; + uint16_t family = AF_UNSPEC; + uint32_t external_origin = values[0].external_origin; + for (i = 1; i < end; i++) { + netdata_socket_t *w = &values[i]; -/** - * Fill inbound dimension name - * - * Mount the dimension name with the input given - * - * @param dimname the output vector - * @param service_name the service used to connect. 
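/*
 * Editor's note (sketch, not part of the patch): ebpf_hash_socket_accumulator()
 * above folds several copies of one table entry into values[0]; for a
 * BPF_MAP_TYPE_PERCPU_HASH map (as configured for the socket tables in this
 * patch) a user-space lookup must supply one value slot per possible CPU.  A
 * simplified read of a per-CPU uint64 counter, standing in for the real
 * netdata_socket_t value, looks roughly like this.
 */
#include <stdint.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static uint64_t read_percpu_counter(int map_fd, const void *key)
{
    int cpus = libbpf_num_possible_cpus();
    if (cpus <= 0)
        return 0;

    uint64_t *slots = calloc((size_t)cpus, sizeof(uint64_t));
    if (!slots)
        return 0;

    uint64_t total = 0;
    if (!bpf_map_lookup_elem(map_fd, key, slots)) {
        for (int i = 0; i < cpus; i++)
            total += slots[i];
    }

    free(slots);
    return total;
}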
- * @param proto the protocol used in this connection - * - * @return it returns the size of the data copied on success and -1 otherwise. - */ -static inline int build_inbound_dimension_name(char *dimname, char *service_name, char *proto) -{ - return snprintf(dimname, CONFIG_MAX_NAME - 7, "%s:%s_", service_name, - proto); -} + values[0].tcp.call_tcp_sent += w->tcp.call_tcp_sent; + values[0].tcp.call_tcp_received += w->tcp.call_tcp_received; + values[0].tcp.tcp_bytes_received += w->tcp.tcp_bytes_received; + values[0].tcp.tcp_bytes_sent += w->tcp.tcp_bytes_sent; + values[0].tcp.close += w->tcp.close; + values[0].tcp.retransmit += w->tcp.retransmit; + values[0].tcp.ipv4_connect += w->tcp.ipv4_connect; + values[0].tcp.ipv6_connect += w->tcp.ipv6_connect; -/** - * Fill Resolved Name - * - * Fill the resolved name structure with the value given. - * The hostname is the largest value possible, if it is necessary to cut some value, it must be cut. - * - * @param ptr the output vector - * @param hostname the hostname resolved or IP. - * @param length the length for the hostname. - * @param service_name the service name associated to the connection - * @param is_outbound the is this an outbound connection - */ -static inline void fill_resolved_name(netdata_socket_plot_t *ptr, char *hostname, size_t length, - char *service_name, int is_outbound) -{ - if (length < NETDATA_MAX_NETWORK_COMBINED_LENGTH) - ptr->resolved_name = strdupz(hostname); - else { - length = NETDATA_MAX_NETWORK_COMBINED_LENGTH; - ptr->resolved_name = mallocz( NETDATA_MAX_NETWORK_COMBINED_LENGTH + 1); - memcpy(ptr->resolved_name, hostname, length); - ptr->resolved_name[length] = '\0'; - } - - char dimname[CONFIG_MAX_NAME]; - int size; - char *protocol; - if (ptr->sock.protocol == IPPROTO_UDP) { - protocol = "UDP"; - } else if (ptr->sock.protocol == IPPROTO_TCP) { - protocol = "TCP"; - } else { - protocol = "ALL"; - } + if (!protocol) + protocol = w->protocol; - if (is_outbound) - size = ebpf_build_outbound_dimension_name(dimname, hostname, service_name, protocol, ptr->family); - else - size = build_inbound_dimension_name(dimname,service_name, protocol); + if (family == AF_UNSPEC) + family = w->family; - if (size > 0) { - strcpy(&dimname[size], "sent"); - dimname[size + 4] = '\0'; - ptr->dimension_sent = strdupz(dimname); + if (w->current_timestamp > ct) + ct = w->current_timestamp; - strcpy(&dimname[size], "recv"); - ptr->dimension_recv = strdupz(dimname); + if (!ft) + ft = w->first_timestamp; - dimname[size - 1] = '\0'; - ptr->dimension_retransmit = strdupz(dimname); + if (w->external_origin) + external_origin = NETDATA_EBPF_SRC_IP_ORIGIN_EXTERNAL; } + + values[0].protocol = (!protocol)?IPPROTO_TCP:protocol; + values[0].current_timestamp = ct; + values[0].first_timestamp = ft; + values[0].external_origin = external_origin; } /** - * Mount dimension names + * Translate socket * - * Fill the vector names after to resolve the addresses + * Convert socket address to string * - * @param ptr a pointer to the structure where the values are stored. - * @param is_outbound is a outbound ptr value? - * - * @return It returns 1 if the name is valid and 0 otherwise. 
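/*
 * Editor's note (sketch, not part of the patch): the rewritten
 * ebpf_socket_translate() that follows prefers getnameinfo() with
 * NI_NUMERICHOST | NI_NUMERICSERV (its in-code comment notes that NI_NAMEREQD
 * reverse lookups are too slow for this path) and falls back to inet_ntop()
 * when that call fails.  A minimal IPv4-only version of the same idea:
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netdb.h>

/* Render an IPv4 address/port pair as text without doing DNS lookups. */
static void render_ipv4_endpoint(uint32_t addr_be, uint16_t port_be,
                                 char ip[INET6_ADDRSTRLEN], char port[NI_MAXSERV])
{
    struct sockaddr_in sa;
    memset(&sa, 0, sizeof(sa));
    sa.sin_family = AF_INET;
    sa.sin_addr.s_addr = addr_be;   /* already in network byte order */
    sa.sin_port = port_be;

    if (getnameinfo((struct sockaddr *)&sa, sizeof(sa), ip, INET6_ADDRSTRLEN,
                    port, NI_MAXSERV, NI_NUMERICHOST | NI_NUMERICSERV)) {
        /* Fall back to inet_ntop()/snprintf() when getnameinfo() fails. */
        if (!inet_ntop(AF_INET, &sa.sin_addr, ip, INET6_ADDRSTRLEN))
            strcpy(ip, "unknown");
        snprintf(port, NI_MAXSERV, "%u", ntohs(port_be));
    }
}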
+ * @param dst structure where we will store + * @param key the socket address */ -int fill_names(netdata_socket_plot_t *ptr, int is_outbound) +static void ebpf_socket_translate(netdata_socket_plus_t *dst, netdata_socket_idx_t *key) { - char hostname[NI_MAXHOST], service_name[NI_MAXSERV]; - if (ptr->resolved) - return 1; - + uint32_t resolve = network_viewer_opt.service_resolution_enabled; + char service[NI_MAXSERV]; int ret; - static int resolve_name = -1; - static int resolve_service = -1; - if (resolve_name == -1) - resolve_name = network_viewer_opt.hostname_resolution_enabled; - - if (resolve_service == -1) - resolve_service = network_viewer_opt.service_resolution_enabled; - - netdata_socket_idx_t *idx = &ptr->index; - - char *errname = { "Not resolved" }; - // Resolve Name - if (ptr->family == AF_INET) { //IPV4 - struct sockaddr_in myaddr; - memset(&myaddr, 0 , sizeof(myaddr)); - - myaddr.sin_family = ptr->family; - if (is_outbound) { - myaddr.sin_port = idx->dport; - myaddr.sin_addr.s_addr = idx->daddr.addr32[0]; - } else { - myaddr.sin_port = idx->sport; - myaddr.sin_addr.s_addr = idx->saddr.addr32[0]; - } - - ret = (!resolve_name)?-1:getnameinfo((struct sockaddr *)&myaddr, sizeof(myaddr), hostname, - sizeof(hostname), service_name, sizeof(service_name), NI_NAMEREQD); - - if (!ret && !resolve_service) { - snprintf(service_name, sizeof(service_name), "%u", ntohs(myaddr.sin_port)); + if (dst->data.family == AF_INET) { + struct sockaddr_in ipv4_addr = { }; + ipv4_addr.sin_port = 0; + ipv4_addr.sin_addr.s_addr = key->saddr.addr32[0]; + ipv4_addr.sin_family = AF_INET; + if (resolve) { + // NI_NAMEREQD : It is too slow + ret = getnameinfo((struct sockaddr *) &ipv4_addr, sizeof(ipv4_addr), dst->socket_string.src_ip, + INET6_ADDRSTRLEN, service, NI_MAXSERV, NI_NUMERICHOST | NI_NUMERICSERV); + if (ret) { + collector_error("Cannot resolve name: %s", gai_strerror(ret)); + resolve = 0; + } else { + ipv4_addr.sin_addr.s_addr = key->daddr.addr32[0]; + + ipv4_addr.sin_port = key->dport; + ret = getnameinfo((struct sockaddr *) &ipv4_addr, sizeof(ipv4_addr), dst->socket_string.dst_ip, + INET6_ADDRSTRLEN, dst->socket_string.dst_port, NI_MAXSERV, + NI_NUMERICHOST); + if (ret) { + collector_error("Cannot resolve name: %s", gai_strerror(ret)); + resolve = 0; + } + } } - if (ret) { - // I cannot resolve the name, I will use the IP - if (!inet_ntop(AF_INET, &myaddr.sin_addr.s_addr, hostname, NI_MAXHOST)) { - strncpy(hostname, errname, 13); - } + // When resolution fail, we should use addresses + if (!resolve) { + ipv4_addr.sin_addr.s_addr = key->saddr.addr32[0]; - snprintf(service_name, sizeof(service_name), "%u", ntohs(myaddr.sin_port)); - ret = 1; - } - } else { // IPV6 - struct sockaddr_in6 myaddr6; - memset(&myaddr6, 0 , sizeof(myaddr6)); - - myaddr6.sin6_family = AF_INET6; - if (is_outbound) { - myaddr6.sin6_port = idx->dport; - memcpy(myaddr6.sin6_addr.s6_addr, idx->daddr.addr8, sizeof(union netdata_ip_t)); - } else { - myaddr6.sin6_port = idx->sport; - memcpy(myaddr6.sin6_addr.s6_addr, idx->saddr.addr8, sizeof(union netdata_ip_t)); - } + if(!inet_ntop(AF_INET, &ipv4_addr.sin_addr, dst->socket_string.src_ip, INET6_ADDRSTRLEN)) + netdata_log_info("Cannot convert IP %u .", ipv4_addr.sin_addr.s_addr); - ret = (!resolve_name)?-1:getnameinfo((struct sockaddr *)&myaddr6, sizeof(myaddr6), hostname, - sizeof(hostname), service_name, sizeof(service_name), NI_NAMEREQD); + ipv4_addr.sin_addr.s_addr = key->daddr.addr32[0]; - if (!ret && !resolve_service) { - snprintf(service_name, sizeof(service_name), "%u", 
ntohs(myaddr6.sin6_port)); + if(!inet_ntop(AF_INET, &ipv4_addr.sin_addr, dst->socket_string.dst_ip, INET6_ADDRSTRLEN)) + netdata_log_info("Cannot convert IP %u .", ipv4_addr.sin_addr.s_addr); + snprintfz(dst->socket_string.dst_port, NI_MAXSERV, "%u", ntohs(key->dport)); } - - if (ret) { - // I cannot resolve the name, I will use the IP - if (!inet_ntop(AF_INET6, myaddr6.sin6_addr.s6_addr, hostname, NI_MAXHOST)) { - strncpy(hostname, errname, 13); + } else { + struct sockaddr_in6 ipv6_addr = { }; + memcpy(&ipv6_addr.sin6_addr, key->saddr.addr8, sizeof(key->saddr.addr8)); + ipv6_addr.sin6_family = AF_INET6; + if (resolve) { + ret = getnameinfo((struct sockaddr *) &ipv6_addr, sizeof(ipv6_addr), dst->socket_string.src_ip, + INET6_ADDRSTRLEN, service, NI_MAXSERV, NI_NUMERICHOST | NI_NUMERICSERV); + if (ret) { + collector_error("Cannot resolve name: %s", gai_strerror(ret)); + resolve = 0; + } else { + memcpy(&ipv6_addr.sin6_addr, key->daddr.addr8, sizeof(key->daddr.addr8)); + ret = getnameinfo((struct sockaddr *) &ipv6_addr, sizeof(ipv6_addr), dst->socket_string.dst_ip, + INET6_ADDRSTRLEN, dst->socket_string.dst_port, NI_MAXSERV, + NI_NUMERICHOST); + if (ret) { + collector_error("Cannot resolve name: %s", gai_strerror(ret)); + resolve = 0; + } } + } - snprintf(service_name, sizeof(service_name), "%u", ntohs(myaddr6.sin6_port)); + if (!resolve) { + memcpy(&ipv6_addr.sin6_addr, key->saddr.addr8, sizeof(key->saddr.addr8)); + if(!inet_ntop(AF_INET6, &ipv6_addr.sin6_addr, dst->socket_string.src_ip, INET6_ADDRSTRLEN)) + netdata_log_info("Cannot convert IPv6 Address."); - ret = 1; + memcpy(&ipv6_addr.sin6_addr, key->daddr.addr8, sizeof(key->daddr.addr8)); + if(!inet_ntop(AF_INET6, &ipv6_addr.sin6_addr, dst->socket_string.dst_ip, INET6_ADDRSTRLEN)) + netdata_log_info("Cannot convert IPv6 Address."); + snprintfz(dst->socket_string.dst_port, NI_MAXSERV, "%u", ntohs(key->dport)); } } + dst->pid = key->pid; - fill_resolved_name(ptr, hostname, - strlen(hostname) + strlen(service_name)+ NETDATA_DOTS_PROTOCOL_COMBINED_LENGTH, - service_name, is_outbound); - - if (resolve_name && !ret) - ret = hostname_matches_pattern(hostname); - - ptr->resolved++; - - return ret; + if (!strcmp(dst->socket_string.dst_port, "0")) + snprintfz(dst->socket_string.dst_port, NI_MAXSERV, "%u", ntohs(key->dport)); +#ifdef NETDATA_DEV_MODE + collector_info("New socket: { ORIGIN IP: %s, ORIGIN : %u, DST IP:%s, DST PORT: %s, PID: %u, PROTO: %d, FAMILY: %d}", + dst->socket_string.src_ip, + dst->data.external_origin, + dst->socket_string.dst_ip, + dst->socket_string.dst_port, + dst->pid, + dst->data.protocol, + dst->data.family + ); +#endif } /** - * Fill last Network Viewer Dimension + * Update array vectors * - * Fill the unique dimension that is always plotted. + * Read data from hash table and update vectors. * - * @param ptr the pointer for the last dimension - * @param is_outbound is this an inbound structure? 
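/*
 * Illustrative sketch, not from the patch above: ebpf_socket_translate() tries
 * getnameinfo() first and falls back to inet_ntop() when resolution fails. The
 * same fallback for one IPv4 address; demo_translate_ipv4() is hypothetical.
 */
#include <arpa/inet.h>
#include <netdb.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void demo_translate_ipv4(uint32_t addr_be, char *out, socklen_t out_len)
{
    struct sockaddr_in sa;
    memset(&sa, 0, sizeof(sa));
    sa.sin_family = AF_INET;
    sa.sin_addr.s_addr = addr_be;                     /* already network order */

    if (getnameinfo((struct sockaddr *)&sa, sizeof(sa), out, out_len,
                    NULL, 0, NI_NUMERICHOST | NI_NUMERICSERV)) {
        /* resolution failed: keep the numeric address instead */
        if (!inet_ntop(AF_INET, &sa.sin_addr, out, out_len))
            snprintf(out, out_len, "unresolved");
    }
}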
+ * @param em the structure with configuration */ -static void fill_last_nv_dimension(netdata_socket_plot_t *ptr, int is_outbound) +static void ebpf_update_array_vectors(ebpf_module_t *em) { - char hostname[NI_MAXHOST], service_name[NI_MAXSERV]; - char *other = { "other" }; - // We are also copying the NULL bytes to avoid warnings in new compilers - strncpy(hostname, other, 6); - strncpy(service_name, other, 6); - - ptr->family = AF_INET; - ptr->sock.protocol = 255; - ptr->flags = (!is_outbound)?NETDATA_INBOUND_DIRECTION:NETDATA_OUTBOUND_DIRECTION; + netdata_thread_disable_cancelability(); + netdata_socket_idx_t key = {}; + netdata_socket_idx_t next_key = {}; - fill_resolved_name(ptr, hostname, 10 + NETDATA_DOTS_PROTOCOL_COMBINED_LENGTH, service_name, is_outbound); + int maps_per_core = em->maps_per_core; + int fd = em->maps[NETDATA_SOCKET_OPEN_SOCKET].map_fd; -#ifdef NETDATA_INTERNAL_CHECKS - netdata_log_info("Last %s dimension added: ID = %u, IP = OTHER, NAME = %s, DIM1 = %s, DIM2 = %s, DIM3 = %s", - (is_outbound)?"outbound":"inbound", network_viewer_opt.max_dim - 1, ptr->resolved_name, - ptr->dimension_recv, ptr->dimension_sent, ptr->dimension_retransmit); -#endif -} + netdata_socket_t *values = socket_values; + size_t length = sizeof(netdata_socket_t); + int test, end; + if (maps_per_core) { + length *= ebpf_nprocs; + end = ebpf_nprocs; + } else + end = 1; -/** - * Update Socket Data - * - * Update the socket information with last collected data - * - * @param sock - * @param lvalues - */ -static inline void update_socket_data(netdata_socket_t *sock, netdata_socket_t *lvalues) -{ - sock->recv_packets = lvalues->recv_packets; - sock->sent_packets = lvalues->sent_packets; - sock->recv_bytes = lvalues->recv_bytes; - sock->sent_bytes = lvalues->sent_bytes; - sock->retransmit = lvalues->retransmit; - sock->ct = lvalues->ct; -} + // We need to reset the values when we are working on kernel 4.15 or newer, because kernel does not create + // values for specific processor unless it is used to store data. As result of this behavior one the next socket + // can have values from the previous one. + memset(values, 0, length); + time_t update_time = time(NULL); + while (bpf_map_get_next_key(fd, &key, &next_key) == 0) { + test = bpf_map_lookup_elem(fd, &key, values); + if (test < 0) { + goto end_socket_loop; + } -/** - * Store socket inside avl - * - * Store the socket values inside the avl tree. - * - * @param out the structure with information used to plot charts. - * @param lvalues Values read from socket ring. - * @param lindex the index information, the real socket. 
- * @param family the family associated to the socket - * @param flags the connection flags - */ -static void store_socket_inside_avl(netdata_vector_plot_t *out, netdata_socket_t *lvalues, - netdata_socket_idx_t *lindex, int family, uint32_t flags) -{ - netdata_socket_plot_t test, *ret ; + if (key.pid > (uint32_t)pid_max) { + goto end_socket_loop; + } - memcpy(&test.index, lindex, sizeof(netdata_socket_idx_t)); - test.flags = flags; + ebpf_hash_socket_accumulator(values, end); + ebpf_socket_fill_publish_apps(key.pid, values); - ret = (netdata_socket_plot_t *) avl_search_lock(&out->tree, (avl_t *)&test); - if (ret) { - if (lvalues->ct != ret->plot.last_time) { - update_socket_data(&ret->sock, lvalues); + // We update UDP to show info with charts, but we do not show them with functions + /* + if (key.dport == NETDATA_EBPF_UDP_PORT && values[0].protocol == IPPROTO_UDP) { + bpf_map_delete_elem(fd, &key); + goto end_socket_loop; } - } else { - uint32_t curr = out->next; - uint32_t last = out->last; + */ - netdata_socket_plot_t *w = &out->plot[curr]; + // Discard non-bind sockets + if (!key.daddr.addr64[0] && !key.daddr.addr64[1] && !key.saddr.addr64[0] && !key.saddr.addr64[1]) { + bpf_map_delete_elem(fd, &key); + goto end_socket_loop; + } - int resolved; - if (curr == last) { - if (lvalues->ct != w->plot.last_time) { - update_socket_data(&w->sock, lvalues); - } - return; - } else { - memcpy(&w->sock, lvalues, sizeof(netdata_socket_t)); - memcpy(&w->index, lindex, sizeof(netdata_socket_idx_t)); - w->family = family; + // When socket is not allowed, we do not append it to table, but we are still keeping it to accumulate data. + if (!ebpf_is_socket_allowed(&key, values)) { + goto end_socket_loop; + } - resolved = fill_names(w, out != (netdata_vector_plot_t *)&inbound_vectors); + // Get PID structure + rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock); + PPvoid_t judy_array = &ebpf_judy_pid.index.JudyLArray; + netdata_ebpf_judy_pid_stats_t *pid_ptr = ebpf_get_pid_from_judy_unsafe(judy_array, key.pid); + if (!pid_ptr) { + goto end_socket_loop; } - if (!resolved) { - freez(w->resolved_name); - freez(w->dimension_sent); - freez(w->dimension_recv); - freez(w->dimension_retransmit); + // Get Socket structure + rw_spinlock_write_lock(&pid_ptr->socket_stats.rw_spinlock); + netdata_socket_plus_t **socket_pptr = (netdata_socket_plus_t **)ebpf_judy_insert_unsafe( + &pid_ptr->socket_stats.JudyLArray, values[0].first_timestamp); + netdata_socket_plus_t *socket_ptr = *socket_pptr; + bool translate = false; + if (likely(*socket_pptr == NULL)) { + *socket_pptr = aral_mallocz(aral_socket_table); - memset(w, 0, sizeof(netdata_socket_plot_t)); + socket_ptr = *socket_pptr; - return; + translate = true; + } + uint64_t prev_period = socket_ptr->data.current_timestamp; + memcpy(&socket_ptr->data, &values[0], sizeof(netdata_socket_t)); + if (translate) + ebpf_socket_translate(socket_ptr, &key); + else { // Check socket was updated + if (prev_period) { + if (values[0].current_timestamp > prev_period) // Socket updated + socket_ptr->last_update = update_time; + else if ((update_time - socket_ptr->last_update) > em->update_every) { + // Socket was not updated since last read + JudyLDel(&pid_ptr->socket_stats.JudyLArray, values[0].first_timestamp, PJE0); + aral_freez(aral_socket_table, socket_ptr); + } + } else // First time + socket_ptr->last_update = update_time; } - w->flags = flags; - netdata_socket_plot_t *check ; - check = (netdata_socket_plot_t *) avl_insert_lock(&out->tree, (avl_t *)w); - if (check != w) - 
netdata_log_error("Internal error, cannot insert the AVL tree."); + rw_spinlock_write_unlock(&pid_ptr->socket_stats.rw_spinlock); + rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock); -#ifdef NETDATA_INTERNAL_CHECKS - char iptext[INET6_ADDRSTRLEN]; - if (inet_ntop(family, &w->index.daddr.addr8, iptext, sizeof(iptext))) - netdata_log_info("New %s dimension added: ID = %u, IP = %s, NAME = %s, DIM1 = %s, DIM2 = %s, DIM3 = %s", - (out == &inbound_vectors)?"inbound":"outbound", curr, iptext, w->resolved_name, - w->dimension_recv, w->dimension_sent, w->dimension_retransmit); -#endif - curr++; - if (curr > last) - curr = last; - out->next = curr; +end_socket_loop: + memset(values, 0, length); + memcpy(&key, &next_key, sizeof(key)); } + netdata_thread_enable_cancelability(); } /** - * Compare Vector to store + * Socket thread * - * Compare input values with local address to select table to store. + * Thread used to generate socket charts. * - * @param direction store inbound and outbound direction. - * @param cmp index read from hash table. - * @param proto the protocol read. + * @param ptr a pointer to `struct ebpf_module` * - * @return It returns the structure with address to compare. + * @return It always return NULL */ -netdata_vector_plot_t * select_vector_to_store(uint32_t *direction, netdata_socket_idx_t *cmp, uint8_t proto) +void *ebpf_read_socket_thread(void *ptr) { - if (!listen_ports) { - *direction = NETDATA_OUTBOUND_DIRECTION; - return &outbound_vectors; - } - - ebpf_network_viewer_port_list_t *move_ports = listen_ports; - while (move_ports) { - if (move_ports->protocol == proto && move_ports->first == cmp->sport) { - *direction = NETDATA_INBOUND_DIRECTION; - return &inbound_vectors; - } - - move_ports = move_ports->next; - } - - *direction = NETDATA_OUTBOUND_DIRECTION; - return &outbound_vectors; -} - -/** - * Hash accumulator - * - * @param values the values used to calculate the data. - * @param key the key to store data. - * @param family the connection family - * @param end the values size. - */ -static void hash_accumulator(netdata_socket_t *values, netdata_socket_idx_t *key, int family, int end) -{ - if (!network_viewer_opt.enabled || !is_socket_allowed(key, family)) - return; - - uint64_t bsent = 0, brecv = 0, psent = 0, precv = 0; - uint16_t retransmit = 0; - int i; - uint8_t protocol = values[0].protocol; - uint64_t ct = values[0].ct; - for (i = 1; i < end; i++) { - netdata_socket_t *w = &values[i]; - - precv += w->recv_packets; - psent += w->sent_packets; - brecv += w->recv_bytes; - bsent += w->sent_bytes; - retransmit += w->retransmit; - - if (!protocol) - protocol = w->protocol; - - if (w->ct != ct) - ct = w->ct; - } - - values[0].recv_packets += precv; - values[0].sent_packets += psent; - values[0].recv_bytes += brecv; - values[0].sent_bytes += bsent; - values[0].retransmit += retransmit; - values[0].protocol = (!protocol)?IPPROTO_TCP:protocol; - values[0].ct = ct; + heartbeat_t hb; + heartbeat_init(&hb); - uint32_t dir; - netdata_vector_plot_t *table = select_vector_to_store(&dir, key, protocol); - store_socket_inside_avl(table, &values[0], key, family, dir); -} + ebpf_module_t *em = (ebpf_module_t *)ptr; -/** - * Read socket hash table - * - * Read data from hash tables created on kernel ring. - * - * @param fd the hash table with data. - * @param family the family associated to the hash table - * @param maps_per_core do I need to read all cores? - * - * @return it returns 0 on success and -1 otherwise. 
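/*
 * Illustrative sketch, not from the patch above: ebpf_update_array_vectors()
 * walks the open-socket hash table with bpf_map_get_next_key() and
 * bpf_map_lookup_elem(), deleting entries it no longer needs. The bare libbpf
 * iteration pattern, with placeholder key/value types, looks like this:
 */
#include <bpf/bpf.h>
#include <stdint.h>

static void demo_walk_map(int map_fd)
{
    uint64_t key, next_key, value;
    void *prev = NULL;                        /* NULL asks for the first key */

    while (bpf_map_get_next_key(map_fd, prev, &next_key) == 0) {
        if (bpf_map_lookup_elem(map_fd, &next_key, &value) == 0) {
            /* consume 'value'; stale entries could be dropped here with
             * bpf_map_delete_elem(map_fd, &next_key); */
        }
        key = next_key;
        prev = &key;
    }
}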
- */ -static void ebpf_read_socket_hash_table(int fd, int family, int maps_per_core) -{ - netdata_socket_idx_t key = {}; - netdata_socket_idx_t next_key = {}; + ebpf_update_array_vectors(em); - netdata_socket_t *values = socket_values; - size_t length = sizeof(netdata_socket_t); - int test, end; - if (maps_per_core) { - length *= ebpf_nprocs; - end = ebpf_nprocs; - } else - end = 1; + int update_every = em->update_every; + int counter = update_every - 1; - while (bpf_map_get_next_key(fd, &key, &next_key) == 0) { - // We need to reset the values when we are working on kernel 4.15 or newer, because kernel does not create - // values for specific processor unless it is used to store data. As result of this behavior one the next socket - // can have values from the previous one. - memset(values, 0, length); - test = bpf_map_lookup_elem(fd, &key, values); - if (test < 0) { - key = next_key; + uint32_t running_time = 0; + uint32_t lifetime = em->lifetime; + usec_t period = update_every * USEC_PER_SEC; + while (!ebpf_plugin_exit && running_time < lifetime) { + (void)heartbeat_next(&hb, period); + if (ebpf_plugin_exit || ++counter != update_every) continue; - } - hash_accumulator(values, &key, family, end); + ebpf_update_array_vectors(em); - key = next_key; + counter = 0; } + + return NULL; } /** @@ -2164,44 +1939,6 @@ static void read_listen_table() } } -/** - * Socket read hash - * - * This is the thread callback. - * This thread is necessary, because we cannot freeze the whole plugin to read the data on very busy socket. - * - * @param ptr It is a NULL value for this thread. - * - * @return It always returns NULL. - */ -void *ebpf_socket_read_hash(void *ptr) -{ - netdata_thread_cleanup_push(ebpf_socket_cleanup, ptr); - ebpf_module_t *em = (ebpf_module_t *)ptr; - - heartbeat_t hb; - heartbeat_init(&hb); - int fd_ipv4 = socket_maps[NETDATA_SOCKET_TABLE_IPV4].map_fd; - int fd_ipv6 = socket_maps[NETDATA_SOCKET_TABLE_IPV6].map_fd; - int maps_per_core = em->maps_per_core; - // This thread is cancelled from another thread - uint32_t running_time; - uint32_t lifetime = em->lifetime; - for (running_time = 0;!ebpf_exit_plugin && running_time < lifetime; running_time++) { - (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin) - break; - - pthread_mutex_lock(&nv_mutex); - ebpf_read_socket_hash_table(fd_ipv4, AF_INET, maps_per_core); - ebpf_read_socket_hash_table(fd_ipv6, AF_INET6, maps_per_core); - pthread_mutex_unlock(&nv_mutex); - } - - netdata_thread_cleanup_pop(1); - return NULL; -} - /** * Read the hash table and store data to allocated vectors. * @@ -2251,9 +1988,9 @@ static void ebpf_socket_read_hash_global_tables(netdata_idx_t *stats, int maps_p * Fill publish apps when necessary. * * @param current_pid the PID that I am updating - * @param eb the structure with data read from memory. + * @param ns the structure with data read from memory. 
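/*
 * Illustrative sketch, not from the patch above: ebpf_read_socket_thread()
 * wakes on Netdata's heartbeat every update_every seconds and re-reads the
 * maps. The same cadence expressed with plain clock_nanosleep(); the names
 * here are hypothetical.
 */
#include <stdbool.h>
#include <time.h>

static void demo_poll_loop(volatile bool *exit_flag, int update_every,
                           void (*collect)(void))
{
    struct timespec period = { .tv_sec = update_every, .tv_nsec = 0 };

    while (!*exit_flag) {
        collect();                                      /* read the BPF maps */
        clock_nanosleep(CLOCK_MONOTONIC, 0, &period, NULL);
    }
}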
*/ -void ebpf_socket_fill_publish_apps(uint32_t current_pid, ebpf_bandwidth_t *eb) +void ebpf_socket_fill_publish_apps(uint32_t current_pid, netdata_socket_t *ns) { ebpf_socket_publish_apps_t *curr = socket_bandwidth_curr[current_pid]; if (!curr) { @@ -2261,98 +1998,33 @@ void ebpf_socket_fill_publish_apps(uint32_t current_pid, ebpf_bandwidth_t *eb) socket_bandwidth_curr[current_pid] = curr; } - curr->bytes_sent = eb->bytes_sent; - curr->bytes_received = eb->bytes_received; - curr->call_tcp_sent = eb->call_tcp_sent; - curr->call_tcp_received = eb->call_tcp_received; - curr->retransmit = eb->retransmit; - curr->call_udp_sent = eb->call_udp_sent; - curr->call_udp_received = eb->call_udp_received; - curr->call_close = eb->close; - curr->call_tcp_v4_connection = eb->tcp_v4_connection; - curr->call_tcp_v6_connection = eb->tcp_v6_connection; -} + curr->bytes_sent += ns->tcp.tcp_bytes_sent; + curr->bytes_received += ns->tcp.tcp_bytes_received; + curr->call_tcp_sent += ns->tcp.call_tcp_sent; + curr->call_tcp_received += ns->tcp.call_tcp_received; + curr->retransmit += ns->tcp.retransmit; + curr->call_close += ns->tcp.close; + curr->call_tcp_v4_connection += ns->tcp.ipv4_connect; + curr->call_tcp_v6_connection += ns->tcp.ipv6_connect; -/** - * Bandwidth accumulator. - * - * @param out the vector with the values to sum - */ -void ebpf_socket_bandwidth_accumulator(ebpf_bandwidth_t *out, int maps_per_core) -{ - int i, end = (maps_per_core) ? ebpf_nprocs : 1; - ebpf_bandwidth_t *total = &out[0]; - for (i = 1; i < end; i++) { - ebpf_bandwidth_t *move = &out[i]; - total->bytes_sent += move->bytes_sent; - total->bytes_received += move->bytes_received; - total->call_tcp_sent += move->call_tcp_sent; - total->call_tcp_received += move->call_tcp_received; - total->retransmit += move->retransmit; - total->call_udp_sent += move->call_udp_sent; - total->call_udp_received += move->call_udp_received; - total->close += move->close; - total->tcp_v4_connection += move->tcp_v4_connection; - total->tcp_v6_connection += move->tcp_v6_connection; - } -} - -/** - * Update the apps data reading information from the hash table - * - * @param maps_per_core do I need to read all cores? - */ -static void ebpf_socket_update_apps_data(int maps_per_core) -{ - int fd = socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH].map_fd; - ebpf_bandwidth_t *eb = bandwidth_vector; - uint32_t key; - struct ebpf_pid_stat *pids = ebpf_root_of_pids; - size_t length = sizeof(ebpf_bandwidth_t); - if (maps_per_core) - length *= ebpf_nprocs; - while (pids) { - key = pids->pid; - - if (bpf_map_lookup_elem(fd, &key, eb)) { - pids = pids->next; - continue; - } - - ebpf_socket_bandwidth_accumulator(eb, maps_per_core); - - ebpf_socket_fill_publish_apps(key, eb); - - memset(eb, 0, length); - - pids = pids->next; - } + curr->call_udp_sent += ns->udp.call_udp_sent; + curr->call_udp_received += ns->udp.call_udp_received; } /** * Update cgroup * * Update cgroup data based in PIDs. - * - * @param maps_per_core do I need to read all cores? 
*/ -static void ebpf_update_socket_cgroup(int maps_per_core) +static void ebpf_update_socket_cgroup() { ebpf_cgroup_target_t *ect ; - ebpf_bandwidth_t *eb = bandwidth_vector; - int fd = socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH].map_fd; - - size_t length = sizeof(ebpf_bandwidth_t); - if (maps_per_core) - length *= ebpf_nprocs; - pthread_mutex_lock(&mutex_cgroup_shm); for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { struct pid_on_target2 *pids; for (pids = ect->pids; pids; pids = pids->next) { int pid = pids->pid; - ebpf_bandwidth_t *out = &pids->socket; ebpf_socket_publish_apps_t *publish = &ect->publish_socket; if (likely(socket_bandwidth_curr) && socket_bandwidth_curr[pid]) { ebpf_socket_publish_apps_t *in = socket_bandwidth_curr[pid]; @@ -2367,25 +2039,6 @@ static void ebpf_update_socket_cgroup(int maps_per_core) publish->call_close = in->call_close; publish->call_tcp_v4_connection = in->call_tcp_v4_connection; publish->call_tcp_v6_connection = in->call_tcp_v6_connection; - } else { - if (!bpf_map_lookup_elem(fd, &pid, eb)) { - ebpf_socket_bandwidth_accumulator(eb, maps_per_core); - - memcpy(out, eb, sizeof(ebpf_bandwidth_t)); - - publish->bytes_sent = out->bytes_sent; - publish->bytes_received = out->bytes_received; - publish->call_tcp_sent = out->call_tcp_sent; - publish->call_tcp_received = out->call_tcp_received; - publish->retransmit = out->retransmit; - publish->call_udp_sent = out->call_udp_sent; - publish->call_udp_received = out->call_udp_received; - publish->call_close = out->close; - publish->call_tcp_v4_connection = out->tcp_v4_connection; - publish->call_tcp_v6_connection = out->tcp_v6_connection; - - memset(eb, 0, length); - } } } } @@ -2406,18 +2059,18 @@ static void ebpf_socket_sum_cgroup_pids(ebpf_socket_publish_apps_t *socket, stru memset(&accumulator, 0, sizeof(accumulator)); while (pids) { - ebpf_bandwidth_t *w = &pids->socket; - - accumulator.bytes_received += w->bytes_received; - accumulator.bytes_sent += w->bytes_sent; - accumulator.call_tcp_received += w->call_tcp_received; - accumulator.call_tcp_sent += w->call_tcp_sent; - accumulator.retransmit += w->retransmit; - accumulator.call_udp_received += w->call_udp_received; - accumulator.call_udp_sent += w->call_udp_sent; - accumulator.call_close += w->close; - accumulator.call_tcp_v4_connection += w->tcp_v4_connection; - accumulator.call_tcp_v6_connection += w->tcp_v6_connection; + netdata_socket_t *w = &pids->socket; + + accumulator.bytes_received += w->tcp.tcp_bytes_received; + accumulator.bytes_sent += w->tcp.tcp_bytes_sent; + accumulator.call_tcp_received += w->tcp.call_tcp_received; + accumulator.call_tcp_sent += w->tcp.call_tcp_sent; + accumulator.retransmit += w->tcp.retransmit; + accumulator.call_close += w->tcp.close; + accumulator.call_tcp_v4_connection += w->tcp.ipv4_connect; + accumulator.call_tcp_v6_connection += w->tcp.ipv6_connect; + accumulator.call_udp_received += w->udp.call_udp_received; + accumulator.call_udp_sent += w->udp.call_udp_sent; pids = pids->next; } @@ -2457,15 +2110,21 @@ static void ebpf_create_specific_socket_charts(char *type, int update_every) &socket_publish_aggregated[NETDATA_IDX_TCP_CONNECTION_V4], 1, update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); - ebpf_create_chart(type, NETDATA_NET_APPS_CONNECTION_TCP_V6, - "Calls to tcp_v6_connection", - EBPF_COMMON_DIMENSION_CONNECTIONS, NETDATA_CGROUP_NET_GROUP, - NETDATA_CGROUP_TCP_V6_CONN_CONTEXT, - NETDATA_EBPF_CHART_TYPE_LINE, - NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, - ebpf_create_global_dimension, - 
&socket_publish_aggregated[NETDATA_IDX_TCP_CONNECTION_V6], 1, - update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); + if (tcp_v6_connect_address.type == 'T') { + ebpf_create_chart(type, + NETDATA_NET_APPS_CONNECTION_TCP_V6, + "Calls to tcp_v6_connection", + EBPF_COMMON_DIMENSION_CONNECTIONS, + NETDATA_CGROUP_NET_GROUP, + NETDATA_CGROUP_TCP_V6_CONN_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, + ebpf_create_global_dimension, + &socket_publish_aggregated[NETDATA_IDX_TCP_CONNECTION_V6], + 1, + update_every, + NETDATA_EBPF_MODULE_NAME_SOCKET); + } ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_RECV, "Bytes received", @@ -2549,47 +2208,55 @@ static void ebpf_create_specific_socket_charts(char *type, int update_every) static void ebpf_obsolete_specific_socket_charts(char *type, int update_every) { int order_basis = 5300; - ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_CONNECTION_TCP_V4, "Calls to tcp_v4_connection", + ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_CONNECTION_TCP_V4, "", "Calls to tcp_v4_connection", EBPF_COMMON_DIMENSION_CONNECTIONS, NETDATA_APPS_NET_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_V4_CONN_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every); - ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_CONNECTION_TCP_V6,"Calls to tcp_v6_connection", - EBPF_COMMON_DIMENSION_CONNECTIONS, NETDATA_APPS_NET_GROUP, - NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_V6_CONN_CONTEXT, - NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every); + if (tcp_v6_connect_address.type == 'T') { + ebpf_write_chart_obsolete(type, + NETDATA_NET_APPS_CONNECTION_TCP_V6, + "", + "Calls to tcp_v6_connection", + EBPF_COMMON_DIMENSION_CONNECTIONS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_SERVICES_SOCKET_TCP_V6_CONN_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, + update_every); + } - ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_RECV, "Bytes received", + ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_RECV, "", "Bytes received", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every); - ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_SENT,"Bytes sent", + ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_SENT, "","Bytes sent", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every); - ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, "Calls to tcp_cleanup_rbuf.", + ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, "", "Calls to tcp_cleanup_rbuf.", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every); - ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, "Calls to tcp_sendmsg.", + ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, "", "Calls to tcp_sendmsg.", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every); - ebpf_write_chart_obsolete(type, 
NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, "Calls to tcp_retransmit.", + ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, "", "Calls to tcp_retransmit.", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every); - ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, "Calls to udp_sendmsg", + ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, "", "Calls to udp_sendmsg", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every); - ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, "Calls to udp_recvmsg", + ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, "", "Calls to udp_recvmsg", EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every); @@ -2605,50 +2272,52 @@ static void ebpf_obsolete_specific_socket_charts(char *type, int update_every) */ static void ebpf_send_specific_socket_data(char *type, ebpf_socket_publish_apps_t *values) { - write_begin_chart(type, NETDATA_NET_APPS_CONNECTION_TCP_V4); + ebpf_write_begin_chart(type, NETDATA_NET_APPS_CONNECTION_TCP_V4, ""); write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_CONNECTION_V4].name, (long long) values->call_tcp_v4_connection); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_NET_APPS_CONNECTION_TCP_V6); - write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_CONNECTION_V6].name, - (long long) values->call_tcp_v6_connection); - write_end_chart(); + if (tcp_v6_connect_address.type == 'T') { + ebpf_write_begin_chart(type, NETDATA_NET_APPS_CONNECTION_TCP_V6, ""); + write_chart_dimension( + socket_publish_aggregated[NETDATA_IDX_TCP_CONNECTION_V6].name, (long long)values->call_tcp_v6_connection); + ebpf_write_end_chart(); + } - write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_SENT); + ebpf_write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_SENT, ""); write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_SENDMSG].name, (long long) values->bytes_sent); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_RECV); + ebpf_write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_RECV, ""); write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_CLEANUP_RBUF].name, (long long) values->bytes_received); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS); + ebpf_write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, ""); write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_SENDMSG].name, (long long) values->call_tcp_sent); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS); + ebpf_write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, ""); write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_CLEANUP_RBUF].name, (long long) values->call_tcp_received); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT); + ebpf_write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, ""); 
write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT].name, (long long) values->retransmit); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS); + ebpf_write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, ""); write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_UDP_SENDMSG].name, (long long) values->call_udp_sent); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS); + ebpf_write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, ""); write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF].name, (long long) values->call_udp_received); - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -2670,14 +2339,18 @@ static void ebpf_create_systemd_socket_charts(int update_every) NETDATA_SERVICES_SOCKET_TCP_V4_CONN_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET, update_every); - ebpf_create_charts_on_systemd(NETDATA_NET_APPS_CONNECTION_TCP_V6, - "Calls to tcp_v6_connection", EBPF_COMMON_DIMENSION_CONNECTIONS, - NETDATA_APPS_NET_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - order++, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - NETDATA_SERVICES_SOCKET_TCP_V6_CONN_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET, - update_every); + if (tcp_v6_connect_address.type == 'T') { + ebpf_create_charts_on_systemd(NETDATA_NET_APPS_CONNECTION_TCP_V6, + "Calls to tcp_v6_connection", + EBPF_COMMON_DIMENSION_CONNECTIONS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + order++, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + NETDATA_SERVICES_SOCKET_TCP_V6_CONN_CONTEXT, + NETDATA_EBPF_MODULE_NAME_SOCKET, + update_every); + } ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_RECV, "Bytes received", EBPF_COMMON_DIMENSION_BITS, @@ -2756,77 +2429,79 @@ static void ebpf_create_systemd_socket_charts(int update_every) static void ebpf_send_systemd_socket_charts() { ebpf_cgroup_target_t *ect; - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_CONNECTION_TCP_V4); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_CONNECTION_TCP_V4, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_v4_connection); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_CONNECTION_TCP_V6); - for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { - if (unlikely(ect->systemd) && unlikely(ect->updated)) { - write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_v6_connection); + if (tcp_v6_connect_address.type == 'T') { + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_CONNECTION_TCP_V6, ""); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_v6_connection); + } } + ebpf_write_end_chart(); } - write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_SENT); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_SENT, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_socket.bytes_sent); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, 
NETDATA_NET_APPS_BANDWIDTH_RECV); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_RECV, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_socket.bytes_received); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_sent); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_received); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_socket.retransmit); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_socket.call_udp_sent); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_socket.call_udp_received); } } - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -2902,15 +2577,6 @@ static void socket_collector(ebpf_module_t *em) { heartbeat_t hb; heartbeat_init(&hb); - uint32_t network_connection = network_viewer_opt.enabled; - - if (network_connection) { - socket_threads.thread = mallocz(sizeof(netdata_thread_t)); - socket_threads.start_routine = ebpf_socket_read_hash; - - netdata_thread_create(socket_threads.thread, socket_threads.name, - NETDATA_THREAD_OPTION_DEFAULT, ebpf_socket_read_hash, em); - } int cgroups = em->cgroup_charts; if (cgroups) @@ -2924,9 +2590,9 @@ static void socket_collector(ebpf_module_t *em) uint32_t lifetime = em->lifetime; netdata_idx_t *stats = em->hash_table_stats; memset(stats, 0, sizeof(em->hash_table_stats)); - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; @@ -2937,14 +2603,8 @@ static void socket_collector(ebpf_module_t *em) 
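/*
 * Illustrative sketch, not from the patch above: the ebpf_write_begin_chart(),
 * write_chart_dimension() and ebpf_write_end_chart() helpers used here emit
 * Netdata's plugins.d text protocol on stdout. One collection cycle for a
 * hypothetical chart would look roughly like this:
 */
#include <stdio.h>

static void demo_emit_cycle(long long bytes_sent, long long bytes_received)
{
    printf("BEGIN cgroup_demo.net_bytes\n");
    printf("SET sent = %lld\n", bytes_sent);
    printf("SET received = %lld\n", bytes_received);
    printf("END\n");
    fflush(stdout);             /* the collector also flushes between cycles */
}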
} pthread_mutex_lock(&collect_data_mutex); - if (socket_apps_enabled) - ebpf_socket_update_apps_data(maps_per_core); - if (cgroups) - ebpf_update_socket_cgroup(maps_per_core); - - if (network_connection) - calculate_nv_plot(); + ebpf_update_socket_cgroup(); pthread_mutex_lock(&lock); if (socket_global_enabled) @@ -2963,20 +2623,6 @@ static void socket_collector(ebpf_module_t *em) fflush(stdout); - if (network_connection) { - // We are calling fflush many times, because when we have a lot of dimensions - // we began to have not expected outputs and Netdata closed the plugin. - pthread_mutex_lock(&nv_mutex); - ebpf_socket_create_nv_charts(&inbound_vectors, update_every); - fflush(stdout); - ebpf_socket_send_nv_data(&inbound_vectors); - - ebpf_socket_create_nv_charts(&outbound_vectors, update_every); - fflush(stdout); - ebpf_socket_send_nv_data(&outbound_vectors); - pthread_mutex_unlock(&nv_mutex); - - } pthread_mutex_unlock(&lock); pthread_mutex_unlock(&collect_data_mutex); @@ -2998,42 +2644,26 @@ static void socket_collector(ebpf_module_t *em) *****************************************************************/ /** - * Allocate vectors used with this thread. + * Initialize vectors used with this thread. + * * We are not testing the return, because callocz does this and shutdown the software * case it was not possible to allocate. - * - * @param apps is apps enabled? */ -static void ebpf_socket_allocate_global_vectors(int apps) +static void ebpf_socket_initialize_global_vectors() { memset(socket_aggregated_data, 0 ,NETDATA_MAX_SOCKET_VECTOR * sizeof(netdata_syscall_stat_t)); memset(socket_publish_aggregated, 0 ,NETDATA_MAX_SOCKET_VECTOR * sizeof(netdata_publish_syscall_t)); socket_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t)); - if (apps) { - ebpf_socket_aral_init(); - socket_bandwidth_curr = callocz((size_t)pid_max, sizeof(ebpf_socket_publish_apps_t *)); - bandwidth_vector = callocz((size_t)ebpf_nprocs, sizeof(ebpf_bandwidth_t)); - } + ebpf_socket_aral_init(); + socket_bandwidth_curr = callocz((size_t)pid_max, sizeof(ebpf_socket_publish_apps_t *)); + + aral_socket_table = ebpf_allocate_pid_aral(NETDATA_EBPF_SOCKET_ARAL_TABLE_NAME, + sizeof(netdata_socket_plus_t)); socket_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_socket_t)); - if (network_viewer_opt.enabled) { - inbound_vectors.plot = callocz(network_viewer_opt.max_dim, sizeof(netdata_socket_plot_t)); - outbound_vectors.plot = callocz(network_viewer_opt.max_dim, sizeof(netdata_socket_plot_t)); - } -} -/** - * Initialize Inbound and Outbound - * - * Initialize the common outbound and inbound sockets. - */ -static void initialize_inbound_outbound() -{ - inbound_vectors.last = network_viewer_opt.max_dim - 1; - outbound_vectors.last = inbound_vectors.last; - fill_last_nv_dimension(&inbound_vectors.plot[inbound_vectors.last], 0); - fill_last_nv_dimension(&outbound_vectors.plot[outbound_vectors.last], 1); + ebpf_load_addresses(&tcp_v6_connect_address, -1); } /***************************************************************** @@ -3042,793 +2672,6 @@ static void initialize_inbound_outbound() * *****************************************************************/ -/** - * Fill Port list - * - * @param out a pointer to the link list. - * @param in the structure that will be linked. 
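/*
 * Illustrative sketch, not from the patch above: the tcp_v6_connect_address
 * checks for type 'T' appear to gate the IPv6 charts on whether the kernel
 * exports the symbol, in the style of a /proc/kallsyms lookup. A bare-bones
 * probe of that file; demo_kallsyms_type() is a hypothetical helper.
 */
#include <stdio.h>
#include <string.h>

static char demo_kallsyms_type(const char *symbol)
{
    char line[512], name[256], type = '\0';
    unsigned long long addr;
    FILE *fp = fopen("/proc/kallsyms", "r");
    if (!fp)
        return type;

    while (fgets(line, sizeof(line), fp)) {
        if (sscanf(line, "%llx %c %255s", &addr, &type, name) == 3 &&
            !strcmp(name, symbol))
            break;
        type = '\0';
    }
    fclose(fp);
    return type;         /* e.g. 'T' or 't' for text symbols, '\0' if absent */
}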
- */ -static inline void fill_port_list(ebpf_network_viewer_port_list_t **out, ebpf_network_viewer_port_list_t *in) -{ - if (likely(*out)) { - ebpf_network_viewer_port_list_t *move = *out, *store = *out; - uint16_t first = ntohs(in->first); - uint16_t last = ntohs(in->last); - while (move) { - uint16_t cmp_first = ntohs(move->first); - uint16_t cmp_last = ntohs(move->last); - if (cmp_first <= first && first <= cmp_last && - cmp_first <= last && last <= cmp_last ) { - netdata_log_info("The range/value (%u, %u) is inside the range/value (%u, %u) already inserted, it will be ignored.", - first, last, cmp_first, cmp_last); - freez(in->value); - freez(in); - return; - } else if (first <= cmp_first && cmp_first <= last && - first <= cmp_last && cmp_last <= last) { - netdata_log_info("The range (%u, %u) is bigger than previous range (%u, %u) already inserted, the previous will be ignored.", - first, last, cmp_first, cmp_last); - freez(move->value); - move->value = in->value; - move->first = in->first; - move->last = in->last; - freez(in); - return; - } - - store = move; - move = move->next; - } - - store->next = in; - } else { - *out = in; - } - -#ifdef NETDATA_INTERNAL_CHECKS - netdata_log_info("Adding values %s( %u, %u) to %s port list used on network viewer", - in->value, ntohs(in->first), ntohs(in->last), - (*out == network_viewer_opt.included_port)?"included":"excluded"); -#endif -} - -/** - * Parse Service List - * - * @param out a pointer to store the link list - * @param service the service used to create the structure that will be linked. - */ -static void parse_service_list(void **out, char *service) -{ - ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out; - struct servent *serv = getservbyname((const char *)service, "tcp"); - if (!serv) - serv = getservbyname((const char *)service, "udp"); - - if (!serv) { - netdata_log_info("Cannot resolv the service '%s' with protocols TCP and UDP, it will be ignored", service); - return; - } - - ebpf_network_viewer_port_list_t *w = callocz(1, sizeof(ebpf_network_viewer_port_list_t)); - w->value = strdupz(service); - w->hash = simple_hash(service); - - w->first = w->last = (uint16_t)serv->s_port; - - fill_port_list(list, w); -} - -/** - * Netmask - * - * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h) - * - * @param prefix create the netmask based in the CIDR value. - * - * @return - */ -static inline in_addr_t netmask(int prefix) { - - if (prefix == 0) - return (~((in_addr_t) - 1)); - else - return (in_addr_t)(~((1 << (32 - prefix)) - 1)); - -} - -/** - * Broadcast - * - * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h) - * - * @param addr is the ip address - * @param prefix is the CIDR value. - * - * @return It returns the last address of the range - */ -static inline in_addr_t broadcast(in_addr_t addr, int prefix) -{ - return (addr | ~netmask(prefix)); -} - -/** - * Network - * - * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h) - * - * @param addr is the ip address - * @param prefix is the CIDR value. - * - * @return It returns the first address of the range. - */ -static inline in_addr_t ipv4_network(in_addr_t addr, int prefix) -{ - return (addr & netmask(prefix)); -} - -/** - * IP to network long - * - * @param dst the vector to store the result - * @param ip the source ip given by our users. 
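/*
 * Illustrative sketch, not from the patch above: the netmask()/broadcast()/
 * ipv4_network() helpers removed here implement standard CIDR arithmetic on
 * host-order IPv4 addresses. For example, 192.168.1.0/24 covers 256 addresses:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_netmask(int prefix)
{
    return prefix == 0 ? 0 : ~((1u << (32 - prefix)) - 1);
}

int main(void)
{
    uint32_t addr = (192u << 24) | (168u << 16) | (1u << 8);    /* 192.168.1.0 */
    int prefix = 24;
    uint32_t first = addr & demo_netmask(prefix);               /* network     */
    uint32_t last  = addr | ~demo_netmask(prefix);              /* broadcast   */
    printf("%u addresses from 0x%08x to 0x%08x\n", last - first + 1, first, last);
    return 0;
}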
- * @param domain the ip domain (IPV4 or IPV6) - * @param source the original string - * - * @return it returns 0 on success and -1 otherwise. - */ -static inline int ip2nl(uint8_t *dst, char *ip, int domain, char *source) -{ - if (inet_pton(domain, ip, dst) <= 0) { - netdata_log_error("The address specified (%s) is invalid ", source); - return -1; - } - - return 0; -} - -/** - * Get IPV6 Last Address - * - * @param out the address to store the last address. - * @param in the address used to do the math. - * @param prefix number of bits used to calculate the address - */ -static void get_ipv6_last_addr(union netdata_ip_t *out, union netdata_ip_t *in, uint64_t prefix) -{ - uint64_t mask,tmp; - uint64_t ret[2]; - memcpy(ret, in->addr32, sizeof(union netdata_ip_t)); - - if (prefix == 128) { - memcpy(out->addr32, in->addr32, sizeof(union netdata_ip_t)); - return; - } else if (!prefix) { - ret[0] = ret[1] = 0xFFFFFFFFFFFFFFFF; - memcpy(out->addr32, ret, sizeof(union netdata_ip_t)); - return; - } else if (prefix <= 64) { - ret[1] = 0xFFFFFFFFFFFFFFFFULL; - - tmp = be64toh(ret[0]); - if (prefix > 0) { - mask = 0xFFFFFFFFFFFFFFFFULL << (64 - prefix); - tmp |= ~mask; - } - ret[0] = htobe64(tmp); - } else { - mask = 0xFFFFFFFFFFFFFFFFULL << (128 - prefix); - tmp = be64toh(ret[1]); - tmp |= ~mask; - ret[1] = htobe64(tmp); - } - - memcpy(out->addr32, ret, sizeof(union netdata_ip_t)); -} - -/** - * Calculate ipv6 first address - * - * @param out the address to store the first address. - * @param in the address used to do the math. - * @param prefix number of bits used to calculate the address - */ -static void get_ipv6_first_addr(union netdata_ip_t *out, union netdata_ip_t *in, uint64_t prefix) -{ - uint64_t mask,tmp; - uint64_t ret[2]; - - memcpy(ret, in->addr32, sizeof(union netdata_ip_t)); - - if (prefix == 128) { - memcpy(out->addr32, in->addr32, sizeof(union netdata_ip_t)); - return; - } else if (!prefix) { - ret[0] = ret[1] = 0; - memcpy(out->addr32, ret, sizeof(union netdata_ip_t)); - return; - } else if (prefix <= 64) { - ret[1] = 0ULL; - - tmp = be64toh(ret[0]); - if (prefix > 0) { - mask = 0xFFFFFFFFFFFFFFFFULL << (64 - prefix); - tmp &= mask; - } - ret[0] = htobe64(tmp); - } else { - mask = 0xFFFFFFFFFFFFFFFFULL << (128 - prefix); - tmp = be64toh(ret[1]); - tmp &= mask; - ret[1] = htobe64(tmp); - } - - memcpy(out->addr32, ret, sizeof(union netdata_ip_t)); -} - -/** - * Is ip inside the range - * - * Check if the ip is inside a IP range - * - * @param rfirst the first ip address of the range - * @param rlast the last ip address of the range - * @param cmpfirst the first ip to compare - * @param cmplast the last ip to compare - * @param family the IP family - * - * @return It returns 1 if the IP is inside the range and 0 otherwise - */ -static int ebpf_is_ip_inside_range(union netdata_ip_t *rfirst, union netdata_ip_t *rlast, - union netdata_ip_t *cmpfirst, union netdata_ip_t *cmplast, int family) -{ - if (family == AF_INET) { - if ((rfirst->addr32[0] <= cmpfirst->addr32[0]) && (rlast->addr32[0] >= cmplast->addr32[0])) - return 1; - } else { - if (memcmp(rfirst->addr8, cmpfirst->addr8, sizeof(union netdata_ip_t)) <= 0 && - memcmp(rlast->addr8, cmplast->addr8, sizeof(union netdata_ip_t)) >= 0) { - return 1; - } - - } - return 0; -} - -/** - * Fill IP list - * - * @param out a pointer to the link list. - * @param in the structure that will be linked. - * @param table the modified table. 
- */ -void ebpf_fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table) -{ -#ifndef NETDATA_INTERNAL_CHECKS - UNUSED(table); -#endif - if (in->ver == AF_INET) { // It is simpler to compare using host order - in->first.addr32[0] = ntohl(in->first.addr32[0]); - in->last.addr32[0] = ntohl(in->last.addr32[0]); - } - if (likely(*out)) { - ebpf_network_viewer_ip_list_t *move = *out, *store = *out; - while (move) { - if (in->ver == move->ver && - ebpf_is_ip_inside_range(&move->first, &move->last, &in->first, &in->last, in->ver)) { - netdata_log_info("The range/value (%s) is inside the range/value (%s) already inserted, it will be ignored.", - in->value, move->value); - freez(in->value); - freez(in); - return; - } - store = move; - move = move->next; - } - - store->next = in; - } else { - *out = in; - } - -#ifdef NETDATA_INTERNAL_CHECKS - char first[256], last[512]; - if (in->ver == AF_INET) { - netdata_log_info("Adding values %s: (%u - %u) to %s IP list \"%s\" used on network viewer", - in->value, in->first.addr32[0], in->last.addr32[0], - (*out == network_viewer_opt.included_ips)?"included":"excluded", - table); - } else { - if (inet_ntop(AF_INET6, in->first.addr8, first, INET6_ADDRSTRLEN) && - inet_ntop(AF_INET6, in->last.addr8, last, INET6_ADDRSTRLEN)) - netdata_log_info("Adding values %s - %s to %s IP list \"%s\" used on network viewer", - first, last, - (*out == network_viewer_opt.included_ips)?"included":"excluded", - table); - } -#endif -} - -/** - * Parse IP List - * - * Parse IP list and link it. - * - * @param out a pointer to store the link list - * @param ip the value given as parameter - */ -static void ebpf_parse_ip_list(void **out, char *ip) -{ - ebpf_network_viewer_ip_list_t **list = (ebpf_network_viewer_ip_list_t **)out; - - char *ipdup = strdupz(ip); - union netdata_ip_t first = { }; - union netdata_ip_t last = { }; - char *is_ipv6; - if (*ip == '*' && *(ip+1) == '\0') { - memset(first.addr8, 0, sizeof(first.addr8)); - memset(last.addr8, 0xFF, sizeof(last.addr8)); - - is_ipv6 = ip; - - clean_ip_structure(list); - goto storethisip; - } - - char *end = ip; - // Move while I cannot find a separator - while (*end && *end != '/' && *end != '-') end++; - - // We will use only the classic IPV6 for while, but we could consider the base 85 in a near future - // https://tools.ietf.org/html/rfc1924 - is_ipv6 = strchr(ip, ':'); - - int select; - if (*end && !is_ipv6) { // IPV4 range - select = (*end == '/') ? 
0 : 1; - *end++ = '\0'; - if (*end == '!') { - netdata_log_info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup); - goto cleanipdup; - } - - if (!select) { // CIDR - select = ip2nl(first.addr8, ip, AF_INET, ipdup); - if (select) - goto cleanipdup; - - select = (int) str2i(end); - if (select < NETDATA_MINIMUM_IPV4_CIDR || select > NETDATA_MAXIMUM_IPV4_CIDR) { - netdata_log_info("The specified CIDR %s is not valid, the IP %s will be ignored.", end, ip); - goto cleanipdup; - } - - last.addr32[0] = htonl(broadcast(ntohl(first.addr32[0]), select)); - // This was added to remove - // https://app.codacy.com/manual/netdata/netdata/pullRequest?prid=5810941&bid=19021977 - UNUSED(last.addr32[0]); - - uint32_t ipv4_test = htonl(ipv4_network(ntohl(first.addr32[0]), select)); - if (first.addr32[0] != ipv4_test) { - first.addr32[0] = ipv4_test; - struct in_addr ipv4_convert; - ipv4_convert.s_addr = ipv4_test; - char ipv4_msg[INET_ADDRSTRLEN]; - if(inet_ntop(AF_INET, &ipv4_convert, ipv4_msg, INET_ADDRSTRLEN)) - netdata_log_info("The network value of CIDR %s was updated for %s .", ipdup, ipv4_msg); - } - } else { // Range - select = ip2nl(first.addr8, ip, AF_INET, ipdup); - if (select) - goto cleanipdup; - - select = ip2nl(last.addr8, end, AF_INET, ipdup); - if (select) - goto cleanipdup; - } - - if (htonl(first.addr32[0]) > htonl(last.addr32[0])) { - netdata_log_info("The specified range %s is invalid, the second address is smallest than the first, it will be ignored.", - ipdup); - goto cleanipdup; - } - } else if (is_ipv6) { // IPV6 - if (!*end) { // Unique - select = ip2nl(first.addr8, ip, AF_INET6, ipdup); - if (select) - goto cleanipdup; - - memcpy(last.addr8, first.addr8, sizeof(first.addr8)); - } else if (*end == '-') { - *end++ = 0x00; - if (*end == '!') { - netdata_log_info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup); - goto cleanipdup; - } - - select = ip2nl(first.addr8, ip, AF_INET6, ipdup); - if (select) - goto cleanipdup; - - select = ip2nl(last.addr8, end, AF_INET6, ipdup); - if (select) - goto cleanipdup; - } else { // CIDR - *end++ = 0x00; - if (*end == '!') { - netdata_log_info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup); - goto cleanipdup; - } - - select = str2i(end); - if (select < 0 || select > 128) { - netdata_log_info("The CIDR %s is not valid, the address %s will be ignored.", end, ip); - goto cleanipdup; - } - - uint64_t prefix = (uint64_t)select; - select = ip2nl(first.addr8, ip, AF_INET6, ipdup); - if (select) - goto cleanipdup; - - get_ipv6_last_addr(&last, &first, prefix); - - union netdata_ip_t ipv6_test; - get_ipv6_first_addr(&ipv6_test, &first, prefix); - - if (memcmp(first.addr8, ipv6_test.addr8, sizeof(union netdata_ip_t)) != 0) { - memcpy(first.addr8, ipv6_test.addr8, sizeof(union netdata_ip_t)); - - struct in6_addr ipv6_convert; - memcpy(ipv6_convert.s6_addr, ipv6_test.addr8, sizeof(union netdata_ip_t)); - - char ipv6_msg[INET6_ADDRSTRLEN]; - if(inet_ntop(AF_INET6, &ipv6_convert, ipv6_msg, INET6_ADDRSTRLEN)) - netdata_log_info("The network value of CIDR %s was updated for %s .", ipdup, ipv6_msg); - } - } - - if ((be64toh(*(uint64_t *)&first.addr32[2]) > be64toh(*(uint64_t *)&last.addr32[2]) && - !memcmp(first.addr32, last.addr32, 2*sizeof(uint32_t))) || - (be64toh(*(uint64_t *)&first.addr32) > be64toh(*(uint64_t *)&last.addr32)) ) { - netdata_log_info("The specified range %s is invalid, the second address is smallest than the first, it will 
be ignored.", - ipdup); - goto cleanipdup; - } - } else { // Unique ip - select = ip2nl(first.addr8, ip, AF_INET, ipdup); - if (select) - goto cleanipdup; - - memcpy(last.addr8, first.addr8, sizeof(first.addr8)); - } - - ebpf_network_viewer_ip_list_t *store; - -storethisip: - store = callocz(1, sizeof(ebpf_network_viewer_ip_list_t)); - store->value = ipdup; - store->hash = simple_hash(ipdup); - store->ver = (uint8_t)(!is_ipv6)?AF_INET:AF_INET6; - memcpy(store->first.addr8, first.addr8, sizeof(first.addr8)); - memcpy(store->last.addr8, last.addr8, sizeof(last.addr8)); - - ebpf_fill_ip_list(list, store, "socket"); - return; - -cleanipdup: - freez(ipdup); -} - -/** - * Parse IP Range - * - * Parse the IP ranges given and create Network Viewer IP Structure - * - * @param ptr is a pointer with the text to parse. - */ -static void ebpf_parse_ips(char *ptr) -{ - // No value - if (unlikely(!ptr)) - return; - - while (likely(ptr)) { - // Move forward until next valid character - while (isspace(*ptr)) ptr++; - - // No valid value found - if (unlikely(!*ptr)) - return; - - // Find space that ends the list - char *end = strchr(ptr, ' '); - if (end) { - *end++ = '\0'; - } - - int neg = 0; - if (*ptr == '!') { - neg++; - ptr++; - } - - if (isascii(*ptr)) { // Parse port - ebpf_parse_ip_list((!neg)?(void **)&network_viewer_opt.included_ips: - (void **)&network_viewer_opt.excluded_ips, - ptr); - } - - ptr = end; - } -} - - - -/** - * Parse port list - * - * Parse an allocated port list with the range given - * - * @param out a pointer to store the link list - * @param range the informed range for the user. - */ -static void parse_port_list(void **out, char *range) -{ - int first, last; - ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out; - - char *copied = strdupz(range); - if (*range == '*' && *(range+1) == '\0') { - first = 1; - last = 65535; - - clean_port_structure(list); - goto fillenvpl; - } - - char *end = range; - //Move while I cannot find a separator - while (*end && *end != ':' && *end != '-') end++; - - //It has a range - if (likely(*end)) { - *end++ = '\0'; - if (*end == '!') { - netdata_log_info("The exclusion cannot be in the second part of the range, the range %s will be ignored.", copied); - freez(copied); - return; - } - last = str2i((const char *)end); - } else { - last = 0; - } - - first = str2i((const char *)range); - if (first < NETDATA_MINIMUM_PORT_VALUE || first > NETDATA_MAXIMUM_PORT_VALUE) { - netdata_log_info("The first port %d of the range \"%s\" is invalid and it will be ignored!", first, copied); - freez(copied); - return; - } - - if (!last) - last = first; - - if (last < NETDATA_MINIMUM_PORT_VALUE || last > NETDATA_MAXIMUM_PORT_VALUE) { - netdata_log_info("The second port %d of the range \"%s\" is invalid and the whole range will be ignored!", last, copied); - freez(copied); - return; - } - - if (first > last) { - netdata_log_info("The specified order %s is wrong, the smallest value is always the first, it will be ignored!", copied); - freez(copied); - return; - } - - ebpf_network_viewer_port_list_t *w; -fillenvpl: - w = callocz(1, sizeof(ebpf_network_viewer_port_list_t)); - w->value = copied; - w->hash = simple_hash(copied); - w->first = (uint16_t)htons((uint16_t)first); - w->last = (uint16_t)htons((uint16_t)last); - w->cmp_first = (uint16_t)first; - w->cmp_last = (uint16_t)last; - - fill_port_list(list, w); -} - -/** - * Read max dimension. - * - * Netdata plot two dimensions per connection, so it is necessary to adjust the values. 
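/*
 * Illustrative sketch, not from the patch above: parse_port_list(), removed in
 * this hunk, validates a "first-last" (or "first:last") range and rejects
 * reversed or out-of-range values. A stripped-down version of that check,
 * with a hypothetical name and a fixed 1-65535 bound:
 */
#include <stdint.h>
#include <stdlib.h>

static int demo_parse_port_range(const char *txt, uint16_t *first, uint16_t *last)
{
    char *end = NULL;
    long lo = strtol(txt, &end, 10);
    long hi = (*end == '-' || *end == ':') ? strtol(end + 1, NULL, 10) : lo;

    if (lo < 1 || lo > 65535 || hi < 1 || hi > 65535 || lo > hi)
        return -1;                          /* invalid range, caller ignores it */

    *first = (uint16_t)lo;
    *last  = (uint16_t)hi;
    return 0;
}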
- * - * @param cfg the configuration structure - */ -static void read_max_dimension(struct config *cfg) -{ - int maxdim ; - maxdim = (int) appconfig_get_number(cfg, - EBPF_NETWORK_VIEWER_SECTION, - EBPF_MAXIMUM_DIMENSIONS, - NETDATA_NV_CAP_VALUE); - if (maxdim < 0) { - netdata_log_error("'maximum dimensions = %d' must be a positive number, Netdata will change for default value %ld.", - maxdim, NETDATA_NV_CAP_VALUE); - maxdim = NETDATA_NV_CAP_VALUE; - } - - maxdim /= 2; - if (!maxdim) { - netdata_log_info("The number of dimensions is too small (%u), we are setting it to minimum 2", network_viewer_opt.max_dim); - network_viewer_opt.max_dim = 1; - return; - } - - network_viewer_opt.max_dim = (uint32_t)maxdim; -} - -/** - * Parse Port Range - * - * Parse the port ranges given and create Network Viewer Port Structure - * - * @param ptr is a pointer with the text to parse. - */ -static void parse_ports(char *ptr) -{ - // No value - if (unlikely(!ptr)) - return; - - while (likely(ptr)) { - // Move forward until next valid character - while (isspace(*ptr)) ptr++; - - // No valid value found - if (unlikely(!*ptr)) - return; - - // Find space that ends the list - char *end = strchr(ptr, ' '); - if (end) { - *end++ = '\0'; - } - - int neg = 0; - if (*ptr == '!') { - neg++; - ptr++; - } - - if (isdigit(*ptr)) { // Parse port - parse_port_list((!neg)?(void **)&network_viewer_opt.included_port:(void **)&network_viewer_opt.excluded_port, - ptr); - } else if (isalpha(*ptr)) { // Parse service - parse_service_list((!neg)?(void **)&network_viewer_opt.included_port:(void **)&network_viewer_opt.excluded_port, - ptr); - } else if (*ptr == '*') { // All - parse_port_list((!neg)?(void **)&network_viewer_opt.included_port:(void **)&network_viewer_opt.excluded_port, - ptr); - } - - ptr = end; - } -} - -/** - * Link hostname - * - * @param out is the output link list - * @param in the hostname to add to list. - */ -static void link_hostname(ebpf_network_viewer_hostname_list_t **out, ebpf_network_viewer_hostname_list_t *in) -{ - if (likely(*out)) { - ebpf_network_viewer_hostname_list_t *move = *out; - for (; move->next ; move = move->next ) { - if (move->hash == in->hash && !strcmp(move->value, in->value)) { - netdata_log_info("The hostname %s was already inserted, it will be ignored.", in->value); - freez(in->value); - simple_pattern_free(in->value_pattern); - freez(in); - return; - } - } - - move->next = in; - } else { - *out = in; - } -#ifdef NETDATA_INTERNAL_CHECKS - netdata_log_info("Adding value %s to %s hostname list used on network viewer", - in->value, - (*out == network_viewer_opt.included_hostnames)?"included":"excluded"); -#endif -} - -/** - * Link Hostnames - * - * Parse the list of hostnames to create the link list. - * This is not associated with the IP, because simple patterns like *example* cannot be resolved to IP. - * - * @param out is the output link list - * @param parse is a pointer with the text to parser. 
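Both ebpf_parse_ips() and parse_ports() above walk the same kind of space-separated list, where a leading '!' routes the token to the excluded list instead of the included one. A minimal standalone sketch of that tokenizing pattern, with the list handling replaced by printing:

    /* Sketch of the shared "!token means exclude" tokenizer; the collector
     * builds linked lists where this example only prints. */
    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static void parse_filter_list(char *ptr)
    {
        while (ptr) {
            while (isspace((unsigned char) *ptr))   /* skip leading spaces */
                ptr++;
            if (!*ptr)
                return;

            char *end = strchr(ptr, ' ');           /* token ends at next space */
            if (end)
                *end++ = '\0';

            int negated = (*ptr == '!');            /* '!' marks an exclusion */
            if (negated)
                ptr++;

            printf("%s: %s\n", negated ? "excluded" : "included", ptr);
            ptr = end;
        }
    }

    int main(void)
    {
        char config[] = "!127.0.0.1/8 10.0.0.0/8 192.168.0.0/16 !::1/128";
        parse_filter_list(config);
        return 0;
    }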
- */ -static void link_hostnames(char *parse) -{ - // No value - if (unlikely(!parse)) - return; - - while (likely(parse)) { - // Find the first valid value - while (isspace(*parse)) parse++; - - // No valid value found - if (unlikely(!*parse)) - return; - - // Find space that ends the list - char *end = strchr(parse, ' '); - if (end) { - *end++ = '\0'; - } - - int neg = 0; - if (*parse == '!') { - neg++; - parse++; - } - - ebpf_network_viewer_hostname_list_t *hostname = callocz(1 , sizeof(ebpf_network_viewer_hostname_list_t)); - hostname->value = strdupz(parse); - hostname->hash = simple_hash(parse); - hostname->value_pattern = simple_pattern_create(parse, NULL, SIMPLE_PATTERN_EXACT, true); - - link_hostname((!neg)?&network_viewer_opt.included_hostnames:&network_viewer_opt.excluded_hostnames, - hostname); - - parse = end; - } -} - -/** - * Parse network viewer section - * - * @param cfg the configuration structure - */ -void parse_network_viewer_section(struct config *cfg) -{ - read_max_dimension(cfg); - - network_viewer_opt.hostname_resolution_enabled = appconfig_get_boolean(cfg, - EBPF_NETWORK_VIEWER_SECTION, - EBPF_CONFIG_RESOLVE_HOSTNAME, - CONFIG_BOOLEAN_NO); - - network_viewer_opt.service_resolution_enabled = appconfig_get_boolean(cfg, - EBPF_NETWORK_VIEWER_SECTION, - EBPF_CONFIG_RESOLVE_SERVICE, - CONFIG_BOOLEAN_NO); - - char *value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_PORTS, NULL); - parse_ports(value); - - if (network_viewer_opt.hostname_resolution_enabled) { - value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_HOSTNAMES, NULL); - link_hostnames(value); - } else { - netdata_log_info("Name resolution is disabled, collector will not parser \"hostnames\" list."); - } - - value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, - "ips", "!127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7 !::1/128"); - ebpf_parse_ips(value); -} - /** * Link dimension name * @@ -3838,7 +2681,7 @@ void parse_network_viewer_section(struct config *cfg) * @param hash the calculated hash for the dimension name. * @param name the dimension name. */ -static void link_dimension_name(char *port, uint32_t hash, char *value) +static void ebpf_link_dimension_name(char *port, uint32_t hash, char *value) { int test = str2i(port); if (test < NETDATA_MINIMUM_PORT_VALUE || test > NETDATA_MAXIMUM_PORT_VALUE){ @@ -3883,13 +2726,13 @@ static void link_dimension_name(char *port, uint32_t hash, char *value) * * @param cfg the configuration structure */ -void parse_service_name_section(struct config *cfg) +void ebpf_parse_service_name_section(struct config *cfg) { struct section *co = appconfig_get_section(cfg, EBPF_SERVICE_NAME_SECTION); if (co) { struct config_option *cv; for (cv = co->values; cv ; cv = cv->next) { - link_dimension_name(cv->name, cv->hash, cv->value); + ebpf_link_dimension_name(cv->name, cv->hash, cv->value); } } @@ -3910,23 +2753,21 @@ void parse_service_name_section(struct config *cfg) // if variable has an invalid value, we assume netdata is using 19999 int default_port = str2i(port_string); if (default_port > 0 && default_port < 65536) - link_dimension_name(port_string, simple_hash(port_string), "Netdata"); + ebpf_link_dimension_name(port_string, simple_hash(port_string), "Netdata"); } } +/** + * Parse table size options + * + * @param cfg configuration options read from user file. 
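The [service name] section handled by ebpf_parse_service_name_section() above maps a port number to the label used for its chart dimension, updating the label when the port is already present, and labels the agent's own listening port (assumed 19999 when unset) as "Netdata". A simplified sketch of that mapping; the structure and insertion order are stand-ins for the collector's linked list:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct dim_name {
        uint16_t port;                 /* host byte order, 1..65535 */
        char name[32];                 /* label used for the chart dimension */
        struct dim_name *next;
    } dim_name_t;

    /* Insert or update the label for a port, mirroring the "update if the
     * port was already inserted" behaviour of ebpf_link_dimension_name(). */
    static void link_dimension(dim_name_t **head, uint16_t port, const char *name)
    {
        for (dim_name_t *w = *head; w; w = w->next) {
            if (w->port == port) {
                snprintf(w->name, sizeof(w->name), "%s", name);
                return;
            }
        }
        dim_name_t *w = calloc(1, sizeof(*w));
        w->port = port;
        snprintf(w->name, sizeof(w->name), "%s", name);
        w->next = *head;
        *head = w;
    }

    int main(void)
    {
        dim_name_t *head = NULL;
        link_dimension(&head, 53, "DNS");
        link_dimension(&head, 19999, "Netdata");   /* assumed agent port */

        for (dim_name_t *w = head; w; w = w->next)
            printf("port %u -> %s\n", w->port, w->name);

        while (head) {                             /* release the list */
            dim_name_t *next = head->next;
            free(head);
            head = next;
        }
        return 0;
    }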
+ */ void parse_table_size_options(struct config *cfg) { - socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH].user_input = (uint32_t) appconfig_get_number(cfg, - EBPF_GLOBAL_SECTION, - EBPF_CONFIG_BANDWIDTH_SIZE, NETDATA_MAXIMUM_CONNECTIONS_ALLOWED); - - socket_maps[NETDATA_SOCKET_TABLE_IPV4].user_input = (uint32_t) appconfig_get_number(cfg, - EBPF_GLOBAL_SECTION, - EBPF_CONFIG_IPV4_SIZE, NETDATA_MAXIMUM_CONNECTIONS_ALLOWED); - - socket_maps[NETDATA_SOCKET_TABLE_IPV6].user_input = (uint32_t) appconfig_get_number(cfg, - EBPF_GLOBAL_SECTION, - EBPF_CONFIG_IPV6_SIZE, NETDATA_MAXIMUM_CONNECTIONS_ALLOWED); + socket_maps[NETDATA_SOCKET_OPEN_SOCKET].user_input = (uint32_t) appconfig_get_number(cfg, + EBPF_GLOBAL_SECTION, + EBPF_CONFIG_SOCKET_MONITORING_SIZE, + NETDATA_MAXIMUM_CONNECTIONS_ALLOWED); socket_maps[NETDATA_SOCKET_TABLE_UDP].user_input = (uint32_t) appconfig_get_number(cfg, EBPF_GLOBAL_SECTION, @@ -3965,7 +2806,7 @@ static int ebpf_socket_load_bpf(ebpf_module_t *em) #endif if (ret) { - netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name); } return ret; @@ -3985,25 +2826,23 @@ void *ebpf_socket_thread(void *ptr) netdata_thread_cleanup_push(ebpf_socket_exit, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; - em->maps = socket_maps; - - parse_table_size_options(&socket_config); - - if (pthread_mutex_init(&nv_mutex, NULL)) { - netdata_log_error("Cannot initialize local mutex"); - goto endsocket; + if (em->enabled > NETDATA_THREAD_EBPF_FUNCTION_RUNNING) { + collector_error("There is already a thread %s running", em->info.thread_name); + return NULL; } - ebpf_socket_allocate_global_vectors(em->apps_charts); + em->maps = socket_maps; - if (network_viewer_opt.enabled) { - memset(&inbound_vectors.tree, 0, sizeof(avl_tree_lock)); - memset(&outbound_vectors.tree, 0, sizeof(avl_tree_lock)); - avl_init_lock(&inbound_vectors.tree, ebpf_compare_sockets); - avl_init_lock(&outbound_vectors.tree, ebpf_compare_sockets); + rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock); + // It was not enabled from main config file (ebpf.d.conf) + if (!network_viewer_opt.enabled) + network_viewer_opt.enabled = appconfig_get_boolean(&socket_config, EBPF_NETWORK_VIEWER_SECTION, "enabled", + CONFIG_BOOLEAN_YES); + rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock); - initialize_inbound_outbound(); - } + parse_table_size_options(&socket_config); + + ebpf_socket_initialize_global_vectors(); if (running_on_kernel < NETDATA_EBPF_KERNEL_5_0) em->mode = MODE_ENTRY; @@ -4026,8 +2865,15 @@ void *ebpf_socket_thread(void *ptr) socket_aggregated_data, socket_publish_aggregated, socket_dimension_names, socket_id_names, algorithms, NETDATA_MAX_SOCKET_VECTOR); + ebpf_read_socket.thread = mallocz(sizeof(netdata_thread_t)); + netdata_thread_create(ebpf_read_socket.thread, + ebpf_read_socket.name, + NETDATA_THREAD_OPTION_DEFAULT, + ebpf_read_socket_thread, + em); + pthread_mutex_lock(&lock); - ebpf_create_global_charts(em); + ebpf_socket_create_global_charts(em); ebpf_update_stats(&plugin_statistics, em); ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD); diff --git a/collectors/ebpf.plugin/ebpf_socket.h b/collectors/ebpf.plugin/ebpf_socket.h index ae2ee28abcd68f..a6d3e03b6dabeb 100644 --- a/collectors/ebpf.plugin/ebpf_socket.h +++ b/collectors/ebpf.plugin/ebpf_socket.h @@ -4,6 +4,11 @@ #include #include "libnetdata/avl/avl.h" +#include +#ifdef HAVE_NETDB_H +#include +#endif + // Module name & description 
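In ebpf_socket_thread() above, the network viewer's enabled flag may already have been set from ebpf.d.conf, so the module configuration is only consulted while the flag is still off, and all access goes through a reader-writer lock. A small sketch of that pattern, using pthread_rwlock_t as a stand-in for netdata's RW_SPINLOCK and a constant in place of appconfig_get_boolean():

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static struct {
        pthread_rwlock_t lock;
        bool enabled;
    } viewer = { PTHREAD_RWLOCK_INITIALIZER, false };

    /* Value that would come from the module config file in the collector. */
    static bool module_config_enabled(void) { return true; }

    static void maybe_enable_from_module_config(void)
    {
        pthread_rwlock_wrlock(&viewer.lock);
        if (!viewer.enabled)                   /* not enabled by ebpf.d.conf */
            viewer.enabled = module_config_enabled();
        pthread_rwlock_unlock(&viewer.lock);
    }

    static bool viewer_enabled(void)
    {
        pthread_rwlock_rdlock(&viewer.lock);
        bool v = viewer.enabled;
        pthread_rwlock_unlock(&viewer.lock);
        return v;
    }

    int main(void)
    {
        maybe_enable_from_module_config();
        printf("network viewer enabled: %s\n", viewer_enabled() ? "yes" : "no");
        return 0;
    }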
#define NETDATA_EBPF_MODULE_NAME_SOCKET "socket" #define NETDATA_EBPF_SOCKET_MODULE_DESC "Monitors TCP and UDP bandwidth. This thread is integrated with apps and cgroup." @@ -11,8 +16,6 @@ // Vector indexes #define NETDATA_UDP_START 3 -#define NETDATA_SOCKET_READ_SLEEP_MS 800000ULL - // config file #define NETDATA_NETWORK_CONFIG_FILE "network.conf" #define EBPF_NETWORK_VIEWER_SECTION "network connections" @@ -21,18 +24,13 @@ #define EBPF_CONFIG_RESOLVE_SERVICE "resolve service names" #define EBPF_CONFIG_PORTS "ports" #define EBPF_CONFIG_HOSTNAMES "hostnames" -#define EBPF_CONFIG_BANDWIDTH_SIZE "bandwidth table size" -#define EBPF_CONFIG_IPV4_SIZE "ipv4 connection table size" -#define EBPF_CONFIG_IPV6_SIZE "ipv6 connection table size" +#define EBPF_CONFIG_SOCKET_MONITORING_SIZE "socket monitoring table size" #define EBPF_CONFIG_UDP_SIZE "udp connection table size" -#define EBPF_MAXIMUM_DIMENSIONS "maximum dimensions" enum ebpf_socket_table_list { - NETDATA_SOCKET_TABLE_BANDWIDTH, NETDATA_SOCKET_GLOBAL, NETDATA_SOCKET_LPORTS, - NETDATA_SOCKET_TABLE_IPV4, - NETDATA_SOCKET_TABLE_IPV6, + NETDATA_SOCKET_OPEN_SOCKET, NETDATA_SOCKET_TABLE_UDP, NETDATA_SOCKET_TABLE_CTRL }; @@ -122,13 +120,6 @@ typedef enum ebpf_socket_idx { #define NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS "bandwidth_udp_send" #define NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS "bandwidth_udp_recv" -// Network viewer charts -#define NETDATA_NV_OUTBOUND_BYTES "outbound_bytes" -#define NETDATA_NV_OUTBOUND_PACKETS "outbound_packets" -#define NETDATA_NV_OUTBOUND_RETRANSMIT "outbound_retransmit" -#define NETDATA_NV_INBOUND_BYTES "inbound_bytes" -#define NETDATA_NV_INBOUND_PACKETS "inbound_packets" - // Port range #define NETDATA_MINIMUM_PORT_VALUE 1 #define NETDATA_MAXIMUM_PORT_VALUE 65535 @@ -163,6 +154,8 @@ typedef enum ebpf_socket_idx { // ARAL name #define NETDATA_EBPF_SOCKET_ARAL_NAME "ebpf_socket" +#define NETDATA_EBPF_PID_SOCKET_ARAL_TABLE_NAME "ebpf_pid_socket" +#define NETDATA_EBPF_SOCKET_ARAL_TABLE_NAME "ebpf_socket_tbl" typedef struct ebpf_socket_publish_apps { // Data read @@ -246,10 +239,11 @@ typedef struct ebpf_network_viewer_hostname_list { struct ebpf_network_viewer_hostname_list *next; } ebpf_network_viewer_hostname_list_t; -#define NETDATA_NV_CAP_VALUE 50L typedef struct ebpf_network_viewer_options { + RW_SPINLOCK rw_spinlock; + uint32_t enabled; - uint32_t max_dim; // Store value read from 'maximum dimensions' + uint32_t family; // AF_INET, AF_INET6 or AF_UNSPEC (both) uint32_t hostname_resolution_enabled; uint32_t service_resolution_enabled; @@ -275,98 +269,78 @@ extern ebpf_network_viewer_options_t network_viewer_opt; * Structure to store socket information */ typedef struct netdata_socket { - uint64_t recv_packets; - uint64_t sent_packets; - uint64_t recv_bytes; - uint64_t sent_bytes; - uint64_t first; // First timestamp - uint64_t ct; // Current timestamp - uint32_t retransmit; // It is never used with UDP + // Timestamp + uint64_t first_timestamp; + uint64_t current_timestamp; + // Socket additional info uint16_t protocol; - uint16_t reserved; + uint16_t family; + uint32_t external_origin; + struct { + uint32_t call_tcp_sent; + uint32_t call_tcp_received; + uint64_t tcp_bytes_sent; + uint64_t tcp_bytes_received; + uint32_t close; //It is never used with UDP + uint32_t retransmit; //It is never used with UDP + uint32_t ipv4_connect; + uint32_t ipv6_connect; + } tcp; + + struct { + uint32_t call_udp_sent; + uint32_t call_udp_received; + uint64_t udp_bytes_sent; + uint64_t udp_bytes_received; + } udp; } 
netdata_socket_t; -typedef struct netdata_plot_values { - // Values used in the previous iteration - uint64_t recv_packets; - uint64_t sent_packets; - uint64_t recv_bytes; - uint64_t sent_bytes; - uint32_t retransmit; +typedef enum netdata_socket_flags { + NETDATA_SOCKET_FLAGS_ALREADY_OPEN = (1<<0) +} netdata_socket_flags_t; + +typedef enum netdata_socket_src_ip_origin { + NETDATA_EBPF_SRC_IP_ORIGIN_LOCAL, + NETDATA_EBPF_SRC_IP_ORIGIN_EXTERNAL +} netdata_socket_src_ip_origin_t; - uint64_t last_time; +typedef struct netata_socket_plus { + netdata_socket_t data; // Data read from database + uint32_t pid; + time_t last_update; + netdata_socket_flags_t flags; + + struct { + char src_ip[INET6_ADDRSTRLEN + 1]; + // uint16_t src_port; + char dst_ip[INET6_ADDRSTRLEN+ 1]; + char dst_port[NI_MAXSERV + 1]; + } socket_string; +} netdata_socket_plus_t; - // Values used to plot - uint64_t plot_recv_packets; - uint64_t plot_sent_packets; - uint64_t plot_recv_bytes; - uint64_t plot_sent_bytes; - uint16_t plot_retransmit; -} netdata_plot_values_t; +extern ARAL *aral_socket_table; /** * Index used together previous structure */ typedef struct netdata_socket_idx { union netdata_ip_t saddr; - uint16_t sport; + //uint16_t sport; union netdata_ip_t daddr; uint16_t dport; + uint32_t pid; } netdata_socket_idx_t; -// Next values were defined according getnameinfo(3) -#define NETDATA_MAX_NETWORK_COMBINED_LENGTH 1018 -#define NETDATA_DOTS_PROTOCOL_COMBINED_LENGTH 5 // :TCP: -#define NETDATA_DIM_LENGTH_WITHOUT_SERVICE_PROTOCOL 979 - -#define NETDATA_INBOUND_DIRECTION (uint32_t)1 -#define NETDATA_OUTBOUND_DIRECTION (uint32_t)2 -/** - * Allocate the maximum number of structures in the beginning, this can force the collector to use more memory - * in the long term, on the other had it is faster. - */ -typedef struct netdata_socket_plot { - // Search - avl_t avl; - netdata_socket_idx_t index; - - // Current data - netdata_socket_t sock; - - // Previous values and values used to write on chart. - netdata_plot_values_t plot; - - int family; // AF_INET or AF_INET6 - char *resolved_name; // Resolve only in the first call - unsigned char resolved; - - char *dimension_sent; - char *dimension_recv; - char *dimension_retransmit; - - uint32_t flags; -} netdata_socket_plot_t; - -#define NETWORK_VIEWER_CHARTS_CREATED (uint32_t)1 -typedef struct netdata_vector_plot { - netdata_socket_plot_t *plot; // Vector used to plot charts - - avl_tree_lock tree; // AVL tree to speed up search - uint32_t last; // The 'other' dimension, the last chart accepted. - uint32_t next; // The next position to store in the vector. - uint32_t max_plot; // Max number of elements to plot. 
- uint32_t last_plot; // Last element plot - - uint32_t flags; // Flags - -} netdata_vector_plot_t; - -void clean_port_structure(ebpf_network_viewer_port_list_t **clean); +void ebpf_clean_port_structure(ebpf_network_viewer_port_list_t **clean); extern ebpf_network_viewer_port_list_t *listen_ports; void update_listen_table(uint16_t value, uint16_t proto, netdata_passive_connection_t *values); -void parse_network_viewer_section(struct config *cfg); -void ebpf_fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table); -void parse_service_name_section(struct config *cfg); +void ebpf_fill_ip_list_unsafe(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table); +void ebpf_parse_service_name_section(struct config *cfg); +void ebpf_parse_ips_unsafe(char *ptr); +void ebpf_parse_ports(char *ptr); +void ebpf_socket_read_open_connections(BUFFER *buf, struct ebpf_module *em); +void ebpf_socket_fill_publish_apps(uint32_t current_pid, netdata_socket_t *ns); + extern struct config socket_config; extern netdata_ebpf_targets_t socket_targets[]; diff --git a/collectors/ebpf.plugin/ebpf_softirq.c b/collectors/ebpf.plugin/ebpf_softirq.c index 8d8930a109dae9..106ff4f291e1f3 100644 --- a/collectors/ebpf.plugin/ebpf_softirq.c +++ b/collectors/ebpf.plugin/ebpf_softirq.c @@ -71,6 +71,7 @@ static void ebpf_obsolete_softirq_global(ebpf_module_t *em) { ebpf_write_chart_obsolete(NETDATA_EBPF_SYSTEM_GROUP, "softirq_latency", + "", "Software IRQ latency", EBPF_COMMON_DIMENSION_MILLISECONDS, "softirqs", @@ -218,9 +219,9 @@ static void softirq_collector(ebpf_module_t *em) //This will be cancelled by its parent uint32_t running_time = 0; uint32_t lifetime = em->lifetime; - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; @@ -228,9 +229,9 @@ static void softirq_collector(ebpf_module_t *em) pthread_mutex_lock(&lock); // write dims now for all hitherto discovered IRQs. 
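The softirq and swap collectors above share the same main loop: a one-second heartbeat keeps ticking, but data is read and charts are written only every update_every ticks, until ebpf_plugin_exit is raised or the module's lifetime runs out. A standalone sketch of that gating logic, with sleep() standing in for heartbeat_next() and simplified time accounting:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile bool plugin_exit = false;  /* set by the exit handler */

    static void collect_and_send(void)
    {
        puts("collect hash tables, then emit chart begin/dimension/end");
    }

    static void collector_loop(int update_every, unsigned lifetime)
    {
        unsigned running_time = 0;
        int counter = update_every - 1;        /* so the first tick collects */

        while (!plugin_exit && running_time < lifetime) {
            sleep(1);                          /* heartbeat_next() in the plugin */
            if (plugin_exit || ++counter != update_every)
                continue;
            counter = 0;

            collect_and_send();
            running_time += update_every;      /* simplified accounting */
        }
    }

    int main(void)
    {
        collector_loop(1 /* update_every */, 3 /* lifetime, seconds */);
        return 0;
    }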
- write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "softirq_latency"); + ebpf_write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "softirq_latency", ""); softirq_write_dims(); - write_end_chart(); + ebpf_write_end_chart(); pthread_mutex_unlock(&lock); diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c index 359fe230825ba5..fb007f928f3297 100644 --- a/collectors/ebpf.plugin/ebpf_swap.c +++ b/collectors/ebpf.plugin/ebpf_swap.c @@ -124,13 +124,6 @@ static int ebpf_swap_attach_kprobe(struct swap_bpf *obj) if (ret) return -1; - obj->links.netdata_release_task_probe = bpf_program__attach_kprobe(obj->progs.netdata_release_task_probe, - false, - EBPF_COMMON_FNCT_CLEAN_UP); - ret = libbpf_get_error(obj->links.netdata_swap_writepage_probe); - if (ret) - return -1; - return 0; } @@ -176,7 +169,6 @@ static void ebpf_swap_adjust_map(struct swap_bpf *obj, ebpf_module_t *em) static void ebpf_swap_disable_release_task(struct swap_bpf *obj) { bpf_program__set_autoload(obj->progs.netdata_release_task_fentry, false); - bpf_program__set_autoload(obj->progs.netdata_release_task_probe, false); } /** @@ -242,7 +234,8 @@ static void ebpf_obsolete_swap_services(ebpf_module_t *em) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_READ_CHART, - "Calls to function swap_readpage.", + "", + "Calls to function swap_readpage.", EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, @@ -252,7 +245,8 @@ static void ebpf_obsolete_swap_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_WRITE_CHART, - "Calls to function swap_writepage.", + "", + "Calls to function swap_writepage.", EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, @@ -292,25 +286,35 @@ static inline void ebpf_obsolete_swap_cgroup_charts(ebpf_module_t *em) { */ void ebpf_obsolete_swap_apps_charts(struct ebpf_module *em) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_MEM_SWAP_READ_CHART, - "Calls to function swap_readpage.", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_SWAP_SUBMENU, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20191, - em->update_every); + struct ebpf_target *w; + int update_every = em->update_every; + for (w = apps_groups_root_target; w; w = w->next) { + if (unlikely(!(w->charts_created & (1<swap_writepage.", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_SWAP_SUBMENU, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20192, - em->update_every); + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_swap_readpage", + "Calls to function swap_readpage.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_EBPF_MEMORY_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_swap_readpage", + 20070, + update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_swap_writepage", + "Calls to function swap_writepage.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_EBPF_MEMORY_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_swap_writepage", + 20071, + update_every); + w->charts_created &= ~(1<next) { - if (unlikely(w->exposed && w->processes)) { - ebpf_swap_sum_pids(&w->swap, w->root_pid); - } - } + if (unlikely(!(w->charts_created & (1<next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, (long long) w->swap.read); - } - } - write_end_chart(); + ebpf_swap_sum_pids(&w->swap, w->root_pid); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_MEM_SWAP_WRITE_CHART); - for (w = root; w; w = w->next) { - if 
(unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, (long long) w->swap.write); - } + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_swap_readpage"); + write_chart_dimension("calls", (long long) w->swap.read); + ebpf_write_end_chart(); + + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_swap_writepage"); + write_chart_dimension("calls", (long long) w->swap.write); + ebpf_write_end_chart(); } - write_end_chart(); } /** @@ -632,21 +630,21 @@ static void ebpf_swap_sum_cgroup_pids(netdata_publish_swap_t *swap, struct pid_o static void ebpf_send_systemd_swap_charts() { ebpf_cgroup_target_t *ect; - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_READ_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_READ_CHART, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long) ect->publish_systemd_swap.read); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_WRITE_CHART); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_WRITE_CHART, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long) ect->publish_systemd_swap.write); } } - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -660,7 +658,7 @@ static void ebpf_send_systemd_swap_charts() static void ebpf_create_specific_swap_charts(char *type, int update_every) { ebpf_create_chart(type, NETDATA_MEM_SWAP_READ_CHART, - "Calls to function swap_readpage.", + "Calls to function swap_readpage.", EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU, NETDATA_CGROUP_SWAP_READ_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5100, @@ -668,7 +666,7 @@ static void ebpf_create_specific_swap_charts(char *type, int update_every) swap_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_SWAP); ebpf_create_chart(type, NETDATA_MEM_SWAP_WRITE_CHART, - "Calls to function swap_writepage.", + "Calls to function swap_writepage.", EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU, NETDATA_CGROUP_SWAP_WRITE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5101, @@ -687,12 +685,12 @@ static void ebpf_create_specific_swap_charts(char *type, int update_every) */ static void ebpf_obsolete_specific_swap_charts(char *type, int update_every) { - ebpf_write_chart_obsolete(type, NETDATA_MEM_SWAP_READ_CHART,"Calls to function swap_readpage.", + ebpf_write_chart_obsolete(type, NETDATA_MEM_SWAP_READ_CHART, "", "Calls to function swap_readpage.", EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SWAP_READ_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5100, update_every); - ebpf_write_chart_obsolete(type, NETDATA_MEM_SWAP_WRITE_CHART, "Calls to function swap_writepage.", + ebpf_write_chart_obsolete(type, NETDATA_MEM_SWAP_WRITE_CHART, "", "Calls to function swap_writepage.", EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SWAP_WRITE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5101, update_every); @@ -708,13 +706,13 @@ static void ebpf_obsolete_specific_swap_charts(char *type, int update_every) */ static void ebpf_send_specific_swap_data(char *type, netdata_publish_swap_t *values) { - 
write_begin_chart(type, NETDATA_MEM_SWAP_READ_CHART); + ebpf_write_begin_chart(type, NETDATA_MEM_SWAP_READ_CHART, ""); write_chart_dimension(swap_publish_aggregated[NETDATA_KEY_SWAP_READPAGE_CALL].name, (long long) values->read); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_MEM_SWAP_WRITE_CHART); + ebpf_write_begin_chart(type, NETDATA_MEM_SWAP_WRITE_CHART, ""); write_chart_dimension(swap_publish_aggregated[NETDATA_KEY_SWAP_WRITEPAGE_CALL].name, (long long) values->write); - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -727,14 +725,14 @@ static void ebpf_send_specific_swap_data(char *type, netdata_publish_swap_t *val static void ebpf_create_systemd_swap_charts(int update_every) { ebpf_create_charts_on_systemd(NETDATA_MEM_SWAP_READ_CHART, - "Calls to swap_readpage.", + "Calls to swap_readpage.", EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU, NETDATA_EBPF_CHART_TYPE_STACKED, 20191, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_SWAP_READ_CONTEXT, NETDATA_EBPF_MODULE_NAME_SWAP, update_every); ebpf_create_charts_on_systemd(NETDATA_MEM_SWAP_WRITE_CHART, - "Calls to function swap_writepage.", + "Calls to function swap_writepage.", EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU, NETDATA_EBPF_CHART_TYPE_STACKED, 20192, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_SWAP_WRITE_CONTEXT, @@ -804,9 +802,9 @@ static void swap_collector(ebpf_module_t *em) uint32_t lifetime = em->lifetime; netdata_idx_t *stats = em->hash_table_stats; memset(stats, 0, sizeof(em->hash_table_stats)); - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; @@ -859,23 +857,44 @@ static void swap_collector(ebpf_module_t *em) void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr) { struct ebpf_target *root = ptr; - ebpf_create_charts_on_apps(NETDATA_MEM_SWAP_READ_CHART, - "Calls to function swap_readpage.", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_SWAP_SUBMENU, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20191, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); - - ebpf_create_charts_on_apps(NETDATA_MEM_SWAP_WRITE_CHART, - "Calls to function swap_writepage.", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_SWAP_SUBMENU, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20192, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + struct ebpf_target *w; + int update_every = em->update_every; + for (w = root; w; w = w->next) { + if (unlikely(!w->exposed)) + continue; + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_swap_readpage", + "Calls to function swap_readpage.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_EBPF_MEMORY_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_swap_readpage", + 20070, + update_every, + NETDATA_EBPF_MODULE_NAME_SWAP); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_swap_writepage", + "Calls to function swap_writepage.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_EBPF_MEMORY_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_swap_writepage", + 
20071, + update_every, + NETDATA_EBPF_MODULE_NAME_SWAP); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + w->charts_created |= 1<apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } @@ -959,7 +978,7 @@ static int ebpf_swap_load_bpf(ebpf_module_t *em) #endif if (ret) - netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name); return ret; } diff --git a/collectors/ebpf.plugin/ebpf_sync.c b/collectors/ebpf.plugin/ebpf_sync.c index 521d39f31d5232..a16318107cee52 100644 --- a/collectors/ebpf.plugin/ebpf_sync.c +++ b/collectors/ebpf.plugin/ebpf_sync.c @@ -298,7 +298,8 @@ static void ebpf_obsolete_sync_global(ebpf_module_t *em) if (local_syscalls[NETDATA_SYNC_FSYNC_IDX].enabled && local_syscalls[NETDATA_SYNC_FDATASYNC_IDX].enabled) ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP, NETDATA_EBPF_FILE_SYNC_CHART, - "Monitor calls for fsync(2) and fdatasync(2).", + "", + "Monitor calls to fsync(2) and fdatasync(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_EBPF_SYNC_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, @@ -309,7 +310,8 @@ static void ebpf_obsolete_sync_global(ebpf_module_t *em) if (local_syscalls[NETDATA_SYNC_MSYNC_IDX].enabled) ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP, NETDATA_EBPF_MSYNC_CHART, - "Monitor calls for msync(2).", + "", + "Monitor calls to msync(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_EBPF_SYNC_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, @@ -320,7 +322,8 @@ static void ebpf_obsolete_sync_global(ebpf_module_t *em) if (local_syscalls[NETDATA_SYNC_SYNC_IDX].enabled && local_syscalls[NETDATA_SYNC_SYNCFS_IDX].enabled) ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP, NETDATA_EBPF_SYNC_CHART, - "Monitor calls for sync(2) and syncfs(2).", + "", + "Monitor calls to sync(2) and syncfs(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_EBPF_SYNC_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, @@ -331,7 +334,8 @@ static void ebpf_obsolete_sync_global(ebpf_module_t *em) if (local_syscalls[NETDATA_SYNC_SYNC_FILE_RANGE_IDX].enabled) ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP, NETDATA_EBPF_FILE_SEGMENT_CHART, - "Monitor calls for sync_file_range(2).", + "", + "Monitor calls to sync_file_range(2).", EBPF_COMMON_DIMENSION_CALL, NETDATA_EBPF_SYNC_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, @@ -383,7 +387,7 @@ static void ebpf_sync_exit(void *ptr) */ static int ebpf_sync_load_legacy(ebpf_sync_syscalls_t *w, ebpf_module_t *em) { - em->thread_name = w->syscall; + em->info.thread_name = w->syscall; if (!w->probe_links) { w->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &w->objects); if (!w->probe_links) { @@ -413,7 +417,7 @@ static int ebpf_sync_initialize_syscall(ebpf_module_t *em) #endif int i; - const char *saved_name = em->thread_name; + const char *saved_name = em->info.thread_name; int errors = 0; for (i = 0; local_syscalls[i].syscall; i++) { ebpf_sync_syscalls_t *w = &local_syscalls[i]; @@ -424,7 +428,7 @@ static int ebpf_sync_initialize_syscall(ebpf_module_t *em) if (ebpf_sync_load_legacy(w, em)) errors++; - em->thread_name = saved_name; + em->info.thread_name = saved_name; } #ifdef LIBBPF_MAJOR_VERSION else { @@ -446,12 +450,12 @@ static int ebpf_sync_initialize_syscall(ebpf_module_t *em) w->enabled = false; } - em->thread_name = saved_name; + em->info.thread_name = saved_name; } #endif } } - em->thread_name = saved_name; + em->info.thread_name = 
saved_name; memset(sync_counter_aggregated_data, 0 , NETDATA_SYNC_IDX_END * sizeof(netdata_syscall_stat_t)); memset(sync_counter_publish_aggregated, 0 , NETDATA_SYNC_IDX_END * sizeof(netdata_publish_syscall_t)); @@ -507,7 +511,7 @@ static void ebpf_send_sync_chart(char *id, int idx, int end) { - write_begin_chart(NETDATA_EBPF_MEMORY_GROUP, id); + ebpf_write_begin_chart(NETDATA_EBPF_MEMORY_GROUP, id, ""); netdata_publish_syscall_t *move = &sync_counter_publish_aggregated[idx]; @@ -519,7 +523,7 @@ static void ebpf_send_sync_chart(char *id, idx++; } - write_end_chart(); + ebpf_write_end_chart(); } /** @@ -560,9 +564,9 @@ static void sync_collector(ebpf_module_t *em) int maps_per_core = em->maps_per_core; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; @@ -610,7 +614,7 @@ static void ebpf_create_sync_chart(char *id, int end, int update_every) { - ebpf_write_chart_cmd(NETDATA_EBPF_MEMORY_GROUP, id, title, EBPF_COMMON_DIMENSION_CALL, + ebpf_write_chart_cmd(NETDATA_EBPF_MEMORY_GROUP, id, "", title, EBPF_COMMON_DIMENSION_CALL, NETDATA_EBPF_SYNC_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NULL, order, update_every, NETDATA_EBPF_MODULE_NAME_SYNC); @@ -637,22 +641,22 @@ static void ebpf_create_sync_charts(int update_every) { if (local_syscalls[NETDATA_SYNC_FSYNC_IDX].enabled && local_syscalls[NETDATA_SYNC_FDATASYNC_IDX].enabled) ebpf_create_sync_chart(NETDATA_EBPF_FILE_SYNC_CHART, - "Monitor calls for fsync(2) and fdatasync(2).", 21300, + "Monitor calls to fsync(2) and fdatasync(2).", 21300, NETDATA_SYNC_FSYNC_IDX, NETDATA_SYNC_FDATASYNC_IDX, update_every); if (local_syscalls[NETDATA_SYNC_MSYNC_IDX].enabled) ebpf_create_sync_chart(NETDATA_EBPF_MSYNC_CHART, - "Monitor calls for msync(2).", 21301, + "Monitor calls to msync(2).", 21301, NETDATA_SYNC_MSYNC_IDX, NETDATA_SYNC_MSYNC_IDX, update_every); if (local_syscalls[NETDATA_SYNC_SYNC_IDX].enabled && local_syscalls[NETDATA_SYNC_SYNCFS_IDX].enabled) ebpf_create_sync_chart(NETDATA_EBPF_SYNC_CHART, - "Monitor calls for sync(2) and syncfs(2).", 21302, + "Monitor calls to sync(2) and syncfs(2).", 21302, NETDATA_SYNC_SYNC_IDX, NETDATA_SYNC_SYNCFS_IDX, update_every); if (local_syscalls[NETDATA_SYNC_SYNC_FILE_RANGE_IDX].enabled) ebpf_create_sync_chart(NETDATA_EBPF_FILE_SEGMENT_CHART, - "Monitor calls for sync_file_range(2).", 21303, + "Monitor calls to sync_file_range(2).", 21303, NETDATA_SYNC_SYNC_FILE_RANGE_IDX, NETDATA_SYNC_SYNC_FILE_RANGE_IDX, update_every); fflush(stdout); diff --git a/collectors/ebpf.plugin/ebpf_unittest.c b/collectors/ebpf.plugin/ebpf_unittest.c index 3e1443ad3771ed..11b449e03bc24b 100644 --- a/collectors/ebpf.plugin/ebpf_unittest.c +++ b/collectors/ebpf.plugin/ebpf_unittest.c @@ -12,8 +12,8 @@ ebpf_module_t test_em; void ebpf_ut_initialize_structure(netdata_run_mode_t mode) { memset(&test_em, 0, sizeof(ebpf_module_t)); - test_em.thread_name = strdupz("process"); - test_em.config_name = test_em.thread_name; + test_em.info.thread_name = strdupz("process"); + test_em.info.config_name = test_em.info.thread_name; test_em.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10 | NETDATA_V5_14; test_em.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE; @@ -28,7 +28,7 @@ void 
ebpf_ut_initialize_structure(netdata_run_mode_t mode) */ void ebpf_ut_cleanup_memory() { - freez((void *)test_em.thread_name); + freez((void *)test_em.info.thread_name); } /** @@ -70,14 +70,14 @@ int ebpf_ut_load_real_binary() */ int ebpf_ut_load_fake_binary() { - const char *original = test_em.thread_name; + const char *original = test_em.info.thread_name; - test_em.thread_name = strdupz("I_am_not_here"); + test_em.info.thread_name = strdupz("I_am_not_here"); int ret = ebpf_ut_load_binary(); ebpf_ut_cleanup_memory(); - test_em.thread_name = original; + test_em.info.thread_name = original; return !ret; } diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/collectors/ebpf.plugin/ebpf_vfs.c index e566e169da7b06..354901c9ccc484 100644 --- a/collectors/ebpf.plugin/ebpf_vfs.c +++ b/collectors/ebpf.plugin/ebpf_vfs.c @@ -420,6 +420,7 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED, + "", "Files deleted", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, @@ -430,6 +431,7 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, + "", "Write to disk", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, @@ -441,6 +443,7 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em) if (em->mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, + "", "Fails to write", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, @@ -452,6 +455,7 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, + "", "Read from disk", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, @@ -463,6 +467,7 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em) if (em->mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, + "", "Fails to read", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, @@ -474,6 +479,7 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, + "", "Bytes written on disk", EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP, @@ -484,6 +490,7 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, + "", "Bytes read from disk", EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP, @@ -494,7 +501,8 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC, - "Calls to vfs_fsync", + "", + "Calls to vfs_fsync.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, @@ -505,6 +513,7 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em) if (em->mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, + "", "Sync error", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, @@ -515,7 +524,8 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em) } ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN, - "Calls to vfs_open", + "", + "Calls to vfs_open.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, @@ -526,6 +536,7 @@ static void 
ebpf_obsolete_vfs_services(ebpf_module_t *em) if (em->mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, + "", "Open error", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, @@ -537,7 +548,8 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE, - "Calls to vfs_create", + "", + "Calls to vfs_create.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, @@ -548,6 +560,7 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em) if (em->mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, + "", "Create error", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, @@ -589,144 +602,166 @@ static inline void ebpf_obsolete_vfs_cgroup_charts(ebpf_module_t *em) { */ void ebpf_obsolete_vfs_apps_charts(struct ebpf_module *em) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_FILE_DELETED, - "Files deleted", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20065, - em->update_every); - - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, - "Write to disk", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20066, - em->update_every); + int order = 20275; + struct ebpf_target *w; + int update_every = em->update_every; + for (w = apps_groups_root_target; w; w = w->next) { + if (unlikely(!(w->charts_created & (1<mode < MODE_ENTRY) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, - "Fails to write", + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_unlink", + "Files deleted.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20067, - em->update_every); - } - - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_VFS_READ_CALLS, - "Read from disk", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20068, - em->update_every); - - if (em->mode < MODE_ENTRY) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, - "Fails to read", + "app.ebpf_call_vfs_unlink", + order++, + update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_write", + "Write to disk.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20069, - em->update_every); - } - - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, - "Bytes written on disk", - EBPF_COMMON_DIMENSION_BYTES, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20070, - em->update_every); - - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_VFS_READ_BYTES, - "Bytes read from disk", - EBPF_COMMON_DIMENSION_BYTES, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20071, - em->update_every); - - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_VFS_FSYNC, - "Calls for vfs_fsync", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20072, - em->update_every); + "app.ebpf_call_vfs_write", + order++, + update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + 
"_ebpf_call_vfs_write_error", + "Fails to write.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_write_error", + order++, + update_every); + } - if (em->mode < MODE_ENTRY) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, - "Sync error", + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_read", + "Read from disk.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20073, - em->update_every); - } - - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_VFS_OPEN, - "Calls for vfs_open", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20074, - em->update_every); + "app.ebpf_call_vfs_read", + order++, + update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_read_error", + "Fails to read.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_read_error", + order++, + update_every); + } - if (em->mode < MODE_ENTRY) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, - "Open error", + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_write_bytes", + "Bytes written on disk.", + EBPF_COMMON_DIMENSION_BYTES, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_write_bytes", + order++, + update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_read_bytes", + "Bytes read from disk.", + EBPF_COMMON_DIMENSION_BYTES, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_read_bytes", + order++, + update_every); + + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_fsync", + "Calls to vfs_fsync.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20075, - em->update_every); - } + "app.ebpf_call_vfs_fsync", + order++, + update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_fsync_error", + "Fails to sync.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_fsync_error", + order++, + update_every); + } - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_VFS_CREATE, - "Calls for vfs_create", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20076, - em->update_every); + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_open", + "Calls to vfs_open.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_open", + order++, + update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_open_error", + "Fails to open.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_open_error", + order++, + update_every); + } - if (em->mode < MODE_ENTRY) { - ebpf_write_chart_obsolete(NETDATA_APPS_FAMILY, - NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, - "Create error", + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_create", + "Calls to vfs_create.", EBPF_COMMON_DIMENSION_CALL, 
NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, - NULL, - 20077, - em->update_every); + "app.ebpf_call_vfs_create", + order++, + update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_create_error", + "Fails to create.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_create_error", + order++, + update_every); + } + w->charts_created &= ~(1<mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, NETDATA_VFS_FILE_ERR_COUNT, + "", "Fails to write or read", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, @@ -783,7 +822,8 @@ static void ebpf_obsolete_vfs_global(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, NETDATA_VFS_FSYNC, - "Calls for vfs_fsync", + "", + "Calls to vfs_fsync.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, @@ -794,6 +834,7 @@ static void ebpf_obsolete_vfs_global(ebpf_module_t *em) if (em->mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, NETDATA_VFS_FSYNC_ERR, + "", "Fails to synchronize", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, @@ -805,7 +846,8 @@ static void ebpf_obsolete_vfs_global(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, NETDATA_VFS_OPEN, - "Calls for vfs_open", + "", + "Calls to vfs_open.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, @@ -816,6 +858,7 @@ static void ebpf_obsolete_vfs_global(ebpf_module_t *em) if (em->mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, NETDATA_VFS_OPEN_ERR, + "", "Fails to open a file", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, @@ -827,7 +870,8 @@ static void ebpf_obsolete_vfs_global(ebpf_module_t *em) ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, NETDATA_VFS_CREATE, - "Calls for vfs_create", + "", + "Calls to vfs_create.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, @@ -838,6 +882,7 @@ static void ebpf_obsolete_vfs_global(ebpf_module_t *em) if (em->mode < MODE_ENTRY) { ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, NETDATA_VFS_CREATE_ERR, + "", "Fails to create a file.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, @@ -1086,123 +1131,72 @@ void ebpf_vfs_send_apps_data(ebpf_module_t *em, struct ebpf_target *root) { struct ebpf_target *w; for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - ebpf_vfs_sum_pids(&w->vfs, w->root_pid); - } - } + if (unlikely(!(w->charts_created & (1<next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->vfs.unlink_call); - } - } - write_end_chart(); + ebpf_vfs_sum_pids(&w->vfs, w->root_pid); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->vfs.write_call + w->vfs.writev_call); - } - } - write_end_chart(); + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_unlink"); + write_chart_dimension("calls", w->vfs.unlink_call); + ebpf_write_end_chart(); - if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->vfs.write_err + w->vfs.writev_err); - } - } - write_end_chart(); - } + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, 
"_ebpf_call_vfs_write"); + write_chart_dimension("calls", w->vfs.write_call + w->vfs.writev_call); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->vfs.read_call + w->vfs.readv_call); + if (em->mode < MODE_ENTRY) { + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_write_error"); + write_chart_dimension("calls", w->vfs.write_err + w->vfs.writev_err); + ebpf_write_end_chart(); } - } - write_end_chart(); - if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->vfs.read_err + w->vfs.readv_err); - } - } - write_end_chart(); - } + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_read"); + write_chart_dimension("calls", w->vfs.read_call + w->vfs.readv_call); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->vfs.write_bytes + w->vfs.writev_bytes); + if (em->mode < MODE_ENTRY) { + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_read_error"); + write_chart_dimension("calls", w->vfs.read_err + w->vfs.readv_err); + ebpf_write_end_chart(); } - } - write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->vfs.read_bytes + w->vfs.readv_bytes); - } - } - write_end_chart(); + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_write_bytes"); + write_chart_dimension("writes", w->vfs.write_bytes + w->vfs.writev_bytes); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->vfs.fsync_call); - } - } - write_end_chart(); + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_read_bytes"); + write_chart_dimension("reads", w->vfs.read_bytes + w->vfs.readv_bytes); + ebpf_write_end_chart(); - if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->vfs.fsync_err); - } - } - write_end_chart(); - } + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_fsync"); + write_chart_dimension("calls", w->vfs.fsync_call); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->vfs.open_call); + if (em->mode < MODE_ENTRY) { + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_fsync_error"); + write_chart_dimension("calls", w->vfs.fsync_err); + ebpf_write_end_chart(); } - } - write_end_chart(); - if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - 
write_chart_dimension(w->name, w->vfs.open_err); - } - } - write_end_chart(); - } + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_open"); + write_chart_dimension("calls", w->vfs.open_call); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->vfs.create_call); + if (em->mode < MODE_ENTRY) { + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_open_error"); + write_chart_dimension("calls", w->vfs.open_err); + ebpf_write_end_chart(); } - } - write_end_chart(); - if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - write_chart_dimension(w->name, w->vfs.create_err); - } + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_create"); + write_chart_dimension("calls", w->vfs.create_call); + ebpf_write_end_chart(); + + if (em->mode < MODE_ENTRY) { + ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_create_error"); + write_chart_dimension("calls", w->vfs.create_err); + ebpf_write_end_chart(); } - write_end_chart(); } } @@ -1451,7 +1445,7 @@ static void ebpf_create_specific_vfs_charts(char *type, ebpf_module_t *em) ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ], 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); - ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls for vfs_fsync", + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls to vfs_fsync.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_FSYNC_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5507, ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC], @@ -1465,7 +1459,7 @@ static void ebpf_create_specific_vfs_charts(char *type, ebpf_module_t *em) 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); } - ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls for vfs_open", + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls to vfs_open.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_OPEN_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5509, ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN], @@ -1479,7 +1473,7 @@ static void ebpf_create_specific_vfs_charts(char *type, ebpf_module_t *em) 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); } - ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls for vfs_create", + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls to vfs_create.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_CREATE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5511, ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE], @@ -1504,76 +1498,76 @@ static void ebpf_create_specific_vfs_charts(char *type, ebpf_module_t *em) */ static void ebpf_obsolete_specific_vfs_charts(char *type, ebpf_module_t *em) { - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_DELETED, "Files deleted", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_DELETED, "", "Files deleted", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, 
NETDATA_CGROUP_VFS_UNLINK_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5500, em->update_every); - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "Write to disk", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "", "Write to disk", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_WRITE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5501, em->update_every); if (em->mode < MODE_ENTRY) { - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "Fails to write", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "", "Fails to write", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_WRITE_ERROR_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5502, em->update_every); } - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "Read from disk", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "", "Read from disk", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_READ_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5503, em->update_every); if (em->mode < MODE_ENTRY) { - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "Fails to read", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "", "Fails to read", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_READ_ERROR_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5504, em->update_every); } - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "Bytes written on disk", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "", "Bytes written on disk", EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_WRITE_BYTES_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5505, em->update_every); - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "Bytes read from disk", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "", "Bytes read from disk", EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_READ_BYTES_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5506, em->update_every); - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls for vfs_fsync", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_FSYNC, "", "Calls to vfs_fsync.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_FSYNC_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5507, em->update_every); if (em->mode < MODE_ENTRY) { - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "Sync error", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "", "Sync error", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_FSYNC_ERROR_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5508, em->update_every); } - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls for vfs_open", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_OPEN, "", "Calls to vfs_open.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_OPEN_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5509, em->update_every); if (em->mode < MODE_ENTRY) { - 
ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "Open error", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "", "Open error", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_OPEN_ERROR_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5510, em->update_every); } - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls for vfs_create", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_CREATE, "", "Calls to vfs_create.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_CREATE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5511, em->update_every); if (em->mode < MODE_ENTRY) { - ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "Create error", + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "", "Create error", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_CREATE_ERROR_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5512, em->update_every); @@ -1590,78 +1584,78 @@ static void ebpf_obsolete_specific_vfs_charts(char *type, ebpf_module_t *em) */ static void ebpf_send_specific_vfs_data(char *type, netdata_publish_vfs_t *values, ebpf_module_t *em) { - write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_DELETED); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_DELETED, ""); write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].name, (long long)values->unlink_call); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, ""); write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].name, (long long)values->write_call + (long long)values->writev_call); - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, ""); write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].name, (long long)values->write_err + (long long)values->writev_err); - write_end_chart(); + ebpf_write_end_chart(); } - write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, ""); write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].name, (long long)values->read_call + (long long)values->readv_call); - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, ""); write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].name, (long long)values->read_err + (long long)values->readv_err); - write_end_chart(); + ebpf_write_end_chart(); } - write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, ""); write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].name, (long long)values->write_bytes + (long long)values->writev_bytes); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, ""); 
write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].name, (long long)values->read_bytes + (long long)values->readv_bytes); - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC, ""); write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].name, (long long)values->fsync_call); - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, ""); write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].name, (long long)values->fsync_err); - write_end_chart(); + ebpf_write_end_chart(); } - write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN, ""); write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].name, (long long)values->open_call); - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, ""); write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].name, (long long)values->open_err); - write_end_chart(); + ebpf_write_end_chart(); } - write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE, ""); write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].name, (long long)values->create_call); - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR); + ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, ""); write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].name, (long long)values->create_err); - write_end_chart(); + ebpf_write_end_chart(); } } @@ -1722,7 +1716,7 @@ static void ebpf_create_systemd_vfs_charts(ebpf_module_t *em) ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_READ_BYTES_CONTEXT, NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); - ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls to vfs_fsync", + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls to vfs_fsync.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, 20072, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_FSYNC_CONTEXT, @@ -1735,7 +1729,7 @@ static void ebpf_create_systemd_vfs_charts(ebpf_module_t *em) ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_FSYNC_ERROR_CONTEXT, NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); } - ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls to vfs_open", + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls to vfs_open.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, 20074, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_OPEN_CONTEXT, @@ -1749,7 +1743,7 @@ static void ebpf_create_systemd_vfs_charts(ebpf_module_t *em) NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); } - ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls to vfs_create", + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls to vfs_create.", 
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, 20076, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_CREATE_CONTEXT, @@ -1774,125 +1768,124 @@ static void ebpf_create_systemd_vfs_charts(ebpf_module_t *em) static void ebpf_send_systemd_vfs_charts(ebpf_module_t *em) { ebpf_cgroup_target_t *ect; - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.unlink_call); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_call + ect->publish_systemd_vfs.writev_call); } } - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_err + ect->publish_systemd_vfs.writev_err); } } - write_end_chart(); + ebpf_write_end_chart(); } - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_call + ect->publish_systemd_vfs.readv_call); } } - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_err + ect->publish_systemd_vfs.readv_err); } } - write_end_chart(); + ebpf_write_end_chart(); } - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_bytes + ect->publish_systemd_vfs.writev_bytes); } } - write_end_chart(); + ebpf_write_end_chart(); - - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_bytes + ect->publish_systemd_vfs.readv_bytes); } } - write_end_chart(); + ebpf_write_end_chart(); - write_begin_chart(NETDATA_SERVICE_FAMILY, 
NETDATA_SYSCALL_APPS_VFS_FSYNC); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.fsync_call); } } - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.fsync_err); } } - write_end_chart(); + ebpf_write_end_chart(); } - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.open_call); } } - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.open_err); } } - write_end_chart(); + ebpf_write_end_chart(); } - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.create_call); } } - write_end_chart(); + ebpf_write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR); + ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, ""); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.create_err); } } - write_end_chart(); + ebpf_write_end_chart(); } } @@ -1960,9 +1953,9 @@ static void vfs_collector(ebpf_module_t *em) uint32_t lifetime = em->lifetime; netdata_idx_t *stats = em->hash_table_stats; memset(stats, 0, sizeof(em->hash_table_stats)); - while (!ebpf_exit_plugin && running_time < lifetime) { + while (!ebpf_plugin_exit && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); - if (ebpf_exit_plugin || ++counter != update_every) + if (ebpf_plugin_exit || ++counter != update_every) continue; counter = 0; @@ -2098,7 +2091,7 @@ static void ebpf_create_global_charts(ebpf_module_t *em) ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, NETDATA_VFS_FSYNC, - "Calls for vfs_fsync", + "Calls to vfs_fsync.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NULL, @@ -2124,7 +2117,7 @@ static void ebpf_create_global_charts(ebpf_module_t *em) ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, NETDATA_VFS_OPEN, - "Calls for vfs_open", + "Calls to vfs_open.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NULL, @@ -2150,7 +2143,7 @@ static void ebpf_create_global_charts(ebpf_module_t *em) 
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, NETDATA_VFS_CREATE, - "Calls for vfs_create", + "Calls to vfs_create.", EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NULL, @@ -2188,127 +2181,219 @@ static void ebpf_create_global_charts(ebpf_module_t *em) void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr) { struct ebpf_target *root = ptr; + struct ebpf_target *w; + int order = 20275; + int update_every = em->update_every; + for (w = root; w; w = w->next) { + if (unlikely(!w->exposed)) + continue; - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_DELETED, - "Files deleted", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20065, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, - "Write to disk", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20066, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_unlink", + "Files deleted.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_unlink", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_write", + "Write to disk.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_write", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_write_error", + "Fails to write.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_write_error", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + } - if (em->mode < MODE_ENTRY) { - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, - "Fails to write", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20067, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); - } - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS, - "Read from disk", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20068, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_read", + "Read from disk.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_read", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", 
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_read_error", + "Fails to read.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_read_error", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + } - if (em->mode < MODE_ENTRY) { - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, - "Fails to read", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20069, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); - } - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, - "Bytes written on disk", EBPF_COMMON_DIMENSION_BYTES, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20070, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_BYTES, - "Bytes read from disk", EBPF_COMMON_DIMENSION_BYTES, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20071, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_FSYNC, - "Calls for vfs_fsync", EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20072, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_write_bytes", + "Bytes written on disk.", + EBPF_COMMON_DIMENSION_BYTES, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_write_bytes", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION writes '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_read_bytes", + "Bytes read from disk.", + EBPF_COMMON_DIMENSION_BYTES, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_read_bytes", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION reads '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_fsync", + "Calls to vfs_fsync.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_fsync", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_fsync_error", + "Fails to sync.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_fsync_error", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_create_chart_labels("app_group", w->name, 1); + 
ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + } - if (em->mode < MODE_ENTRY) { - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, - "Sync error", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20073, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); - } - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_OPEN, - "Calls for vfs_open", EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20074, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_open", + "Calls to vfs_open.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_open", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_open_error", + "Fails to open.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_open_error", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + } - if (em->mode < MODE_ENTRY) { - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, - "Open error", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20075, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); - } - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_CREATE, - "Calls for vfs_create", EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20076, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_create", + "Calls to vfs_create.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_create", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_cmd(NETDATA_APP_FAMILY, + w->clean_name, + "_ebpf_call_vfs_create_error", + "Fails to create a file.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + "app.ebpf_call_vfs_create_error", + order++, + update_every, + NETDATA_EBPF_MODULE_NAME_VFS); + ebpf_create_chart_labels("app_group", w->name, 1); + ebpf_commit_label(); + fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + } - if (em->mode < MODE_ENTRY) { - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, - "Create error", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20077, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root, 
em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + w->charts_created |= 1<apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; diff --git a/collectors/ebpf.plugin/integrations/ebpf_cachestat.md b/collectors/ebpf.plugin/integrations/ebpf_cachestat.md new file mode 100644 index 00000000000000..5bf0a37749eaf6 --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_cachestat.md @@ -0,0 +1,179 @@ + + +# eBPF Cachestat + + + + + +Plugin: ebpf.plugin +Module: cachestat + + + +## Overview + +Monitor Linux page cache events giving for users a general vision about how his kernel is manipulating files. + +Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per eBPF Cachestat instance + +These metrics show total number of calls to functions inside kernel. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.cachestat_ratio | ratio | % | +| mem.cachestat_dirties | dirty | page/s | +| mem.cachestat_hits | hit | hits/s | +| mem.cachestat_misses | miss | misses/s | + +### Per apps + +These Metrics show grouped information per apps group. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| app_group | The name of the group defined in the configuration. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| app.ebpf_cachestat_hit_ratio | ratio | % | +| app.ebpf_cachestat_dirty_pages | pages | page/s | +| app.ebpf_cachestat_access | hits | hits/s | +| app.ebpf_cachestat_misses | misses | misses/s | + +### Per cgroup + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.cachestat_ratio | ratio | % | +| cgroup.cachestat_dirties | dirty | page/s | +| cgroup.cachestat_hits | hit | hits/s | +| cgroup.cachestat_misses | miss | misses/s | +| services.cachestat_ratio | a dimension per systemd service | % | +| services.cachestat_dirties | a dimension per systemd service | page/s | +| services.cachestat_hits | a dimension per systemd service | hits/s | +| services.cachestat_misses | a dimension per systemd service | misses/s | + + + +## Alerts + +There are no alerts configured by default for this integration. 
+ + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions. +When these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files +with different names. + +Then follow these steps: +1. Copy the configuration file to /usr/src/linux/.config. +2. Select the necessary options: make oldconfig +3. Compile your kernel image: make bzImage +4. Compile your modules: make modules +5. Copy your new kernel image to the boot loader directory +6. Install the new modules: make modules_install +7. Generate an initial ramdisk image (`initrd`) if necessary. +8. Update your boot loader + + + +### Configuration + +#### File + +The configuration file name for this integration is `ebpf.d/cachestat.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config ebpf.d/cachestat.conf +``` +#### Options + +All options are defined inside the `[global]` section. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 5 | no | +| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the function return (`return`). | entry | no | +| apps | Enable or disable integration with apps.plugin. | no | no | +| cgroups | Enable or disable integration with cgroup.plugin. | no | no | +| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no | +| ebpf type format | Define the file type used to load the eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no | +| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (lowest overhead) and `probe` (same method as the legacy code). | trampoline | no | +| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of a single centralized table. | yes | no | +| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no | + +
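The options documented above map onto the `option = value` syntax used by Netdata configuration files. As an editor's illustration only (not a file added by this patch), a `[global]` section for `ebpf.d/cachestat.conf` restating the documented defaults could look like this:

```text
[global]
    update every = 5
    ebpf load mode = entry
    apps = no
    cgroups = no
    pid table size = 32768
    ebpf type format = auto
    ebpf co-re tracing = trampoline
    maps per core = yes
    lifetime = 300
```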
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_dcstat.md b/collectors/ebpf.plugin/integrations/ebpf_dcstat.md new file mode 100644 index 00000000000000..4c5719026ed301 --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_dcstat.md @@ -0,0 +1,177 @@ + + +# eBPF DCstat + + + + + +Plugin: ebpf.plugin +Module: dcstat + + + +## Overview + +Monitor directory cache events per application given an overall vision about files on memory or storage device. + +Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per apps + +These Metrics show grouped information per apps group. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| app_group | The name of the group defined in the configuration. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| app.ebpf_dc_ratio | ratio | % | +| app.ebpf_dc_reference | files | files | +| app.ebpf_dc_not_cache | files | files | +| app.ebpf_dc_not_found | files | files | + +### Per filesystem + +These metrics show total number of calls to functions inside kernel. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| filesystem.dc_reference | reference, slow, miss | files | +| filesystem.dc_hit_ratio | ratio | % | + +### Per cgroup + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.dc_ratio | ratio | % | +| cgroup.dc_reference | reference | files | +| cgroup.dc_not_cache | slow | files | +| cgroup.dc_not_found | miss | files | +| services.dc_ratio | a dimension per systemd service | % | +| services.dc_reference | a dimension per systemd service | files | +| services.dc_not_cache | a dimension per systemd service | files | +| services.dc_not_found | a dimension per systemd service | files | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions. 
+When these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files +with different names. + +Then follow these steps: +1. Copy the configuration file to /usr/src/linux/.config. +2. Select the necessary options: make oldconfig +3. Compile your kernel image: make bzImage +4. Compile your modules: make modules +5. Copy your new kernel image to the boot loader directory +6. Install the new modules: make modules_install +7. Generate an initial ramdisk image (`initrd`) if necessary. +8. Update your boot loader + + + +### Configuration + +#### File + +The configuration file name for this integration is `ebpf.d/dcstat.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config ebpf.d/dcstat.conf +``` +#### Options + +All options are defined inside the `[global]` section. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 5 | no | +| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the function return (`return`). | entry | no | +| apps | Enable or disable integration with apps.plugin. | no | no | +| cgroups | Enable or disable integration with cgroup.plugin. | no | no | +| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no | +| ebpf type format | Define the file type used to load the eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no | +| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (lowest overhead) and `probe` (same method as the legacy code). | trampoline | no | +| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of a single centralized table. | yes | no | +| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no | + +
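As a hypothetical illustration of the table above (not shipped with this patch), enabling the optional apps.plugin and cgroup.plugin integrations in `ebpf.d/dcstat.conf` only requires flipping the two documented toggles; the remaining options can keep their defaults:

```text
[global]
    update every = 5
    ebpf load mode = entry
    apps = yes
    cgroups = yes
```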
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_disk.md b/collectors/ebpf.plugin/integrations/ebpf_disk.md new file mode 100644 index 00000000000000..557da125d8d529 --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_disk.md @@ -0,0 +1,137 @@ + + +# eBPF Disk + + + + + +Plugin: ebpf.plugin +Module: disk + + + +## Overview + +Measure latency for I/O events on disk. + +Attach tracepoints to internal kernel functions. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +This thread will add overhead every time that an internal kernel function monitored by this thread is called. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per disk + +These metrics measure latency for I/O events on every hard disk present on host. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| disk.latency_io | latency | calls/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions. +When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files +with different names. + +Now follow steps: +1. Copy the configuration file to /usr/src/linux/.config. +2. Select the necessary options: make oldconfig +3. Compile your kernel image: make bzImage +4. Compile your modules: make modules +5. Copy your new kernel image for boot loader directory +6. Install the new modules: make modules_install +7. Generate an initial ramdisk image (`initrd`) if it is necessary. +8. Update your boot loader + + +#### Debug Filesystem + +This thread needs to attach a tracepoint to monitor when a process schedule an exit event. To allow this specific feaure, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).` + + + +### Configuration + +#### File + +The configuration file name for this integration is `ebpf.d/disk.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). 
+ +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config ebpf.d/disk.conf +``` +#### Options + +All options are defined inside section `[global]`. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 5 | no | +| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the function return (`return`). | entry | no | +| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no | + +
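Since only three `[global]` options are documented for `ebpf.d/disk.conf`, a complete sketch is short; the block below simply restates the defaults from the table and is an editorial illustration, not part of the patch:

```text
[global]
    update every = 5
    ebpf load mode = entry
    lifetime = 300
```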
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md b/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md new file mode 100644 index 00000000000000..23f5bd26e35773 --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md @@ -0,0 +1,177 @@ + + +# eBPF Filedescriptor + + + + + +Plugin: ebpf.plugin +Module: filedescriptor + + + +## Overview + +Monitor calls for functions responsible to open or close a file descriptor and possible errors. + +Attach tracing (kprobe and trampoline) to internal kernel functions according options used to compile kernel. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netdata sets necessary permissions during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +Depending of kernel version and frequency that files are open and close, this thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per cgroup + +These Metrics show grouped information per cgroup/service. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.fd_open | open | calls/s | +| cgroup.fd_open_error | open | calls/s | +| cgroup.fd_closed | close | calls/s | +| cgroup.fd_close_error | close | calls/s | +| services.file_open | a dimension per systemd service | calls/s | +| services.file_open_error | a dimension per systemd service | calls/s | +| services.file_closed | a dimension per systemd service | calls/s | +| services.file_close_error | a dimension per systemd service | calls/s | + +### Per eBPF Filedescriptor instance + +These metrics show total number of calls to functions inside kernel. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| filesystem.file_descriptor | open, close | calls/s | +| filesystem.file_error | open, close | calls/s | + +### Per apps + +These Metrics show grouped information per apps group. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| app_group | The name of the group defined in the configuration. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| app.ebpf_file_open | calls | calls/s | +| app.ebpf_file_open_error | calls | calls/s | +| app.ebpf_file_closed | calls | calls/s | +| app.ebpf_file_close_error | calls | calls/s | + + + +## Alerts + +There are no alerts configured by default for this integration. 
+ + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions. +When these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files +with different names. + +Then follow these steps: +1. Copy the configuration file to /usr/src/linux/.config. +2. Select the necessary options: make oldconfig +3. Compile your kernel image: make bzImage +4. Compile your modules: make modules +5. Copy your new kernel image to the boot loader directory +6. Install the new modules: make modules_install +7. Generate an initial ramdisk image (`initrd`) if necessary. +8. Update your boot loader + + + +### Configuration + +#### File + +The configuration file name for this integration is `ebpf.d/fd.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config ebpf.d/fd.conf +``` +#### Options + +All options are defined inside the `[global]` section. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 5 | no | +| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the function return (`return`). | entry | no | +| apps | Enable or disable integration with apps.plugin. | no | no | +| cgroups | Enable or disable integration with cgroup.plugin. | no | no | +| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no | +| ebpf type format | Define the file type used to load the eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no | +| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (lowest overhead) and `probe` (same method as the legacy code). | trampoline | no | +| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of a single centralized table. | yes | no | +| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no | + +
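As a hedged example for `ebpf.d/fd.conf` (illustrative only, not shipped configuration), a user who wants the lower-overhead CO-RE path described above could pin the documented `ebpf type format` and `ebpf co-re tracing` options explicitly:

```text
[global]
    update every = 5
    ebpf load mode = entry
    ebpf type format = co-re
    ebpf co-re tracing = trampoline
```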
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_filesystem.md b/collectors/ebpf.plugin/integrations/ebpf_filesystem.md new file mode 100644 index 00000000000000..7a1bb832b6195e --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_filesystem.md @@ -0,0 +1,163 @@ + + +# eBPF Filesystem + + + + + +Plugin: ebpf.plugin +Module: filesystem + + + +## Overview + +Monitor latency for main actions on filesystem like I/O events. + +Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per filesystem + +Latency charts associate with filesystem actions. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| filesystem.read_latency | latency period | calls/s | +| filesystem.open_latency | latency period | calls/s | +| filesystem.sync_latency | latency period | calls/s | + +### Per iilesystem + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| filesystem.write_latency | latency period | calls/s | + +### Per eBPF Filesystem instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| filesystem.attributte_latency | latency period | calls/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions. +When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files +with different names. + +Now follow steps: +1. Copy the configuration file to /usr/src/linux/.config. +2. Select the necessary options: make oldconfig +3. Compile your kernel image: make bzImage +4. Compile your modules: make modules +5. Copy your new kernel image for boot loader directory +6. Install the new modules: make modules_install +7. Generate an initial ramdisk image (`initrd`) if it is necessary. +8. 
Update your boot loader + + + +### Configuration + +#### File + +The configuration file name for this integration is `ebpf.d/filesystem.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config ebpf.d/filesystem.conf +``` +#### Options + +This configuration file has two different sections. The `[global]` section overwrites the default options, while `[filesystem]` allows the user to select which filesystems to monitor. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 5 | no | +| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the function return (`return`). | entry | no | +| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no | +| btrfsdist | Enable or disable latency monitoring for functions associated with the btrfs filesystem. | yes | no | +| ext4dist | Enable or disable latency monitoring for functions associated with the ext4 filesystem. | yes | no | +| nfsdist | Enable or disable latency monitoring for functions associated with the nfs filesystem. | yes | no | +| xfsdist | Enable or disable latency monitoring for functions associated with the xfs filesystem. | yes | no | +| zfsdist | Enable or disable latency monitoring for functions associated with the zfs filesystem. | yes | no | + +
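Because this collector documents both sections, an illustrative `ebpf.d/filesystem.conf` sketch (an editor's example based on the options above, not part of the patch) keeps general tuning in `[global]` and the per-filesystem toggles in `[filesystem]`, here with zfs latency monitoring switched off purely as an example:

```text
[global]
    update every = 5
    ebpf load mode = entry
    lifetime = 300

[filesystem]
    btrfsdist = yes
    ext4dist = yes
    nfsdist = yes
    xfsdist = yes
    zfsdist = no
```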
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_hardirq.md b/collectors/ebpf.plugin/integrations/ebpf_hardirq.md new file mode 100644 index 00000000000000..f9b52962450e31 --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_hardirq.md @@ -0,0 +1,137 @@ + + +# eBPF Hardirq + + + + + +Plugin: ebpf.plugin +Module: hardirq + + + +## Overview + +Monitor latency for each HardIRQ available. + +Attach tracepoints to internal kernel functions. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +This thread will add overhead every time that an internal kernel function monitored by this thread is called. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per eBPF Hardirq instance + +These metrics show latest timestamp for each hardIRQ available on host. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.hardirq_latency | hardirq names | milliseconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions. +When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files +with different names. + +Now follow steps: +1. Copy the configuration file to /usr/src/linux/.config. +2. Select the necessary options: make oldconfig +3. Compile your kernel image: make bzImage +4. Compile your modules: make modules +5. Copy your new kernel image for boot loader directory +6. Install the new modules: make modules_install +7. Generate an initial ramdisk image (`initrd`) if it is necessary. +8. Update your boot loader + + +#### Debug Filesystem + +This thread needs to attach a tracepoint to monitor when a process schedule an exit event. To allow this specific feaure, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`). + + + +### Configuration + +#### File + +The configuration file name for this integration is `ebpf.d/hardirq.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). 
+ +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config ebpf.d/hardirq.conf +``` +#### Options + +All options are defined inside section `[global]`. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 5 | no | +| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no | +| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no | + +
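+Since the Examples section below notes that no official examples exist, the following is only a minimal sketch of `ebpf.d/hardirq.conf` overriding the defaults listed above (the values are illustrative assumptions, not recommendations):
+
+```text
+[global]
+    update every = 2
+    ebpf load mode = entry
+    lifetime = 300
+```
+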
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_mdflush.md b/collectors/ebpf.plugin/integrations/ebpf_mdflush.md new file mode 100644 index 00000000000000..0081b7d834a750 --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_mdflush.md @@ -0,0 +1,132 @@ + + +# eBPF MDflush + + + + + +Plugin: ebpf.plugin +Module: mdflush + + + +## Overview + +Monitor when flush events happen between disks. + +Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +This thread will add overhead every time that `md_flush_request` is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per eBPF MDflush instance + +Number of times md_flush_request was called since last time. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mdstat.mdstat_flush | disk | flushes | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions. +When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files +with different names. + +Now follow steps: +1. Copy the configuration file to /usr/src/linux/.config. +2. Select the necessary options: make oldconfig +3. Compile your kernel image: make bzImage +4. Compile your modules: make modules +5. Copy your new kernel image for boot loader directory +6. Install the new modules: make modules_install +7. Generate an initial ramdisk image (`initrd`) if it is necessary. +8. Update your boot loader + + + +### Configuration + +#### File + +The configuration file name for this integration is `ebpf.d/mdflush.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config ebpf.d/mdflush.conf +``` +#### Options + +All options are defined inside section `[global]`. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 5 | no | +| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no | +| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no | + +
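+No official example is shipped (see the Examples section below); as an illustrative sketch only, `ebpf.d/mdflush.conf` could override the defaults from the table above like this (the values are arbitrary):
+
+```text
+[global]
+    update every = 10
+    ebpf load mode = entry
+    lifetime = 300
+```
+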
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_mount.md b/collectors/ebpf.plugin/integrations/ebpf_mount.md new file mode 100644 index 00000000000000..d19e57809f2316 --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_mount.md @@ -0,0 +1,140 @@ + + +# eBPF Mount + + + + + +Plugin: ebpf.plugin +Module: mount + + + +## Overview + +Monitor calls for mount and umount syscall. + +Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per eBPF Mount instance + +Calls for syscalls mount an umount. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mount_points.call | mount, umount | calls/s | +| mount_points.error | mount, umount | calls/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions. +When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files +with different names. + +Now follow steps: +1. Copy the configuration file to /usr/src/linux/.config. +2. Select the necessary options: make oldconfig +3. Compile your kernel image: make bzImage +4. Compile your modules: make modules +5. Copy your new kernel image for boot loader directory +6. Install the new modules: make modules_install +7. Generate an initial ramdisk image (`initrd`) if it is necessary. +8. Update your boot loader + + +#### Debug Filesystem + +This thread needs to attach a tracepoint to monitor when a process schedule an exit event. To allow this specific feaure, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).` + + + +### Configuration + +#### File + +The configuration file name for this integration is `ebpf.d/mount.conf`. 
+ + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config ebpf.d/mount.conf +``` +#### Options + +All options are defined inside section `[global]`. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 5 | no | +| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no | +| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no | +| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no | +| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no | + +
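+As the Examples section below notes, no official configuration example exists for this module; the following sketch of `ebpf.d/mount.conf` only illustrates how the options above could be combined (the chosen values are assumptions, not recommendations):
+
+```text
+[global]
+    update every = 5
+    ebpf load mode = entry
+    ebpf type format = auto
+    ebpf co-re tracing = trampoline
+    lifetime = 300
+```
+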
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_oomkill.md b/collectors/ebpf.plugin/integrations/ebpf_oomkill.md new file mode 100644 index 00000000000000..897cddfacb3514 --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_oomkill.md @@ -0,0 +1,160 @@ + + +# eBPF OOMkill + + + + + +Plugin: ebpf.plugin +Module: oomkill + + + +## Overview + +Monitor applications that reach out of memory. + +Attach tracepoint to internal kernel functions. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +This thread will add overhead every time that an internal kernel function monitored by this thread is called. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per cgroup + +These metrics show cgroup/service that reached OOM. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.oomkills | cgroup name | kills | +| services.oomkills | a dimension per systemd service | kills | + +### Per apps + +These metrics show cgroup/service that reached OOM. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| app_group | The name of the group defined in the configuration. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| app.oomkill | kills | kills | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions. +When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files +with different names. + +Now follow steps: +1. Copy the configuration file to /usr/src/linux/.config. +2. Select the necessary options: make oldconfig +3. Compile your kernel image: make bzImage +4. Compile your modules: make modules +5. Copy your new kernel image for boot loader directory +6. Install the new modules: make modules_install +7. Generate an initial ramdisk image (`initrd`) if it is necessary. +8. Update your boot loader + + +#### Debug Filesystem + +This thread needs to attach a tracepoint to monitor when a process schedule an exit event. To allow this specific feaure, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`). 
+ + + +### Configuration + +#### File + +The configuration file name for this integration is `ebpf.d/oomkill.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config ebpf.d/oomkill.conf +``` +#### Options + +Overwrite default configuration reducing number of I/O events + + +#### Examples +There are no configuration examples. + + + +## Troubleshooting + +### update every + + + +### ebpf load mode + + + +### lifetime + + + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_process.md b/collectors/ebpf.plugin/integrations/ebpf_process.md new file mode 100644 index 00000000000000..109890139d5a41 --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_process.md @@ -0,0 +1,111 @@ + + +# eBPF Process + + + + + +Plugin: ebpf.plugin +Module: process + + + +## Overview + +Monitor internal memory usage. + +Uses netdata internal statistic to monitor memory management by plugin. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per eBPF Process instance + +How plugin is allocating memory. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| netdata.ebpf_aral_stat_size | memory | bytes | +| netdata.ebpf_aral_stat_alloc | aral | calls | +| netdata.ebpf_threads | total, running | threads | +| netdata.ebpf_load_methods | legacy, co-re | methods | +| netdata.ebpf_kernel_memory | memory_locked | bytes | +| netdata.ebpf_hash_tables_count | hash_table | hash tables | +| netdata.ebpf_aral_stat_size | memory | bytes | +| netdata.ebpf_aral_stat_alloc | aral | calls | +| netdata.ebpf_aral_stat_size | memory | bytes | +| netdata.ebpf_aral_stat_alloc | aral | calls | +| netdata.ebpf_hash_tables_insert_pid_elements | thread | rows | +| netdata.ebpf_hash_tables_remove_pid_elements | thread | rows | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Netdata flags. + +To have these charts you need to compile netdata with flag `NETDATA_DEV_MODE`. + + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. 
+ + diff --git a/collectors/ebpf.plugin/integrations/ebpf_processes.md b/collectors/ebpf.plugin/integrations/ebpf_processes.md new file mode 100644 index 00000000000000..62542359a8d2b2 --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_processes.md @@ -0,0 +1,187 @@ + + +# eBPF Processes + + + + + +Plugin: ebpf.plugin +Module: processes + + + +## Overview + +Monitor calls for function creating tasks (threads and processes) inside Linux kernel. + +Attach tracing (kprobe or tracepoint, and trampoline) to internal kernel functions. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +This thread will add overhead every time that an internal kernel function monitored by this thread is called. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per eBPF Processes instance + +These metrics show total number of calls to functions inside kernel. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.process_thread | process | calls/s | +| system.process_status | process, zombie | difference | +| system.exit | process | calls/s | +| system.task_error | task | calls/s | + +### Per apps + +These Metrics show grouped information per apps group. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| app_group | The name of the group defined in the configuration. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| app.process_create | calls | calls/s | +| app.thread_create | call | calls/s | +| app.task_exit | call | calls/s | +| app.task_close | call | calls/s | +| app.task_error | app | calls/s | + +### Per cgroup + +These Metrics show grouped information per cgroup/service. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.process_create | process | calls/s | +| cgroup.thread_create | thread | calls/s | +| cgroup.task_exit | exit | calls/s | +| cgroup.task_close | process | calls/s | +| cgroup.task_error | process | calls/s | +| services.process_create | a dimension per systemd service | calls/s | +| services.thread_create | a dimension per systemd service | calls/s | +| services.task_close | a dimension per systemd service | calls/s | +| services.task_exit | a dimension per systemd service | calls/s | +| services.task_error | a dimension per systemd service | calls/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. 
Some of the cited names can differ according to the preferences of each Linux distribution.
+When these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files
+with different names.
+
+Then follow these steps:
+1. Copy the configuration file to /usr/src/linux/.config.
+2. Select the necessary options: make oldconfig
+3. Compile your kernel image: make bzImage
+4. Compile your modules: make modules
+5. Copy your new kernel image to the boot loader directory
+6. Install the new modules: make modules_install
+7. Generate an initial ramdisk image (`initrd`) if necessary.
+8. Update your boot loader
+
+
+#### Debug Filesystem
+
+This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `ebpf.d/process.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config ebpf.d/process.conf
+```
+#### Options
+
+All options are defined inside section `[global]`.
+
+
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 5 | no | +| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no | +| apps | Enable or disable integration with apps.plugin | no | no | +| cgroups | Enable or disable integration with cgroup.plugin | no | no | +| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no | +| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no | +| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). This plugin will always try to attach a tracepoint, so option here will impact only function used to monitor task (thread and process) creation. | trampoline | no | +| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information. | yes | no | +| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no | + +
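+No official configuration example is provided (see the Examples section below). As an illustrative sketch only, a `[global]` section in `ebpf.d/process.conf` enabling the apps integration while keeping the other defaults from the table above might look like this (the values are assumptions):
+
+```text
+[global]
+    update every = 5
+    ebpf load mode = entry
+    apps = yes
+    cgroups = no
+    pid table size = 32768
+    ebpf type format = auto
+    ebpf co-re tracing = trampoline
+    maps per core = yes
+    lifetime = 300
+```
+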
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_shm.md b/collectors/ebpf.plugin/integrations/ebpf_shm.md new file mode 100644 index 00000000000000..ffa05c77030944 --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_shm.md @@ -0,0 +1,185 @@ + + +# eBPF SHM + + + + + +Plugin: ebpf.plugin +Module: shm + + + +## Overview + +Monitor syscall responsible to manipulate shared memory. + +Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per cgroup + +These Metrics show grouped information per cgroup/service. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.shmget | get | calls/s | +| cgroup.shmat | at | calls/s | +| cgroup.shmdt | dt | calls/s | +| cgroup.shmctl | ctl | calls/s | +| services.shmget | a dimension per systemd service | calls/s | +| services.shmat | a dimension per systemd service | calls/s | +| services.shmdt | a dimension per systemd service | calls/s | +| services.shmctl | a dimension per systemd service | calls/s | + +### Per apps + +These Metrics show grouped information per apps group. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| app_group | The name of the group defined in the configuration. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| app.ebpf_shmget_call | calls | calls/s | +| app.ebpf_shmat_call | calls | calls/s | +| app.ebpf_shmdt_call | calls | calls/s | +| app.ebpf_shmctl_call | calls | calls/s | + +### Per eBPF SHM instance + +These Metrics show number of calls for specified syscall. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.shared_memory_calls | get, at, dt, ctl | calls/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions. +When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. 
The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files
+with different names.
+
+Then follow these steps:
+1. Copy the configuration file to /usr/src/linux/.config.
+2. Select the necessary options: make oldconfig
+3. Compile your kernel image: make bzImage
+4. Compile your modules: make modules
+5. Copy your new kernel image to the boot loader directory
+6. Install the new modules: make modules_install
+7. Generate an initial ramdisk image (`initrd`) if necessary.
+8. Update your boot loader
+
+
+#### Debug Filesystem
+
+This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `ebpf.d/shm.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config ebpf.d/shm.conf
+```
+#### Options
+
+This configuration file has two different sections: `[global]` overwrites all default options, while `[syscalls]` allows the user to select which syscalls to monitor.
+
+
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 5 | no | +| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no | +| apps | Enable or disable integration with apps.plugin | no | no | +| cgroups | Enable or disable integration with cgroup.plugin | no | no | +| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no | +| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no | +| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no | +| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information. | yes | no | +| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no | +| shmget | Enable or disable monitoring for syscall `shmget` | yes | no | +| shmat | Enable or disable monitoring for syscall `shmat` | yes | no | +| shmdt | Enable or disable monitoring for syscall `shmdt` | yes | no | +| shmctl | Enable or disable monitoring for syscall `shmctl` | yes | no | + +
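+The Examples section below notes that no official example exists; as an illustrative sketch only, and assuming the per-syscall toggles live under `[syscalls]` as described above, `ebpf.d/shm.conf` could look like this (the values are arbitrary):
+
+```text
+[global]
+    update every = 5
+    apps = yes
+    cgroups = no
+    lifetime = 300
+
+[syscalls]
+    shmget = yes
+    shmat = yes
+    shmdt = yes
+    shmctl = no
+```
+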
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_socket.md b/collectors/ebpf.plugin/integrations/ebpf_socket.md new file mode 100644 index 00000000000000..dc7a7d07b5232b --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_socket.md @@ -0,0 +1,201 @@ + + +# eBPF Socket + + + + + +Plugin: ebpf.plugin +Module: socket + + + +## Overview + +Monitor bandwidth consumption per application for protocols TCP and UDP. + +Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per eBPF Socket instance + +These metrics show total number of calls to functions inside kernel. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ip.inbound_conn | connection_tcp | connections/s | +| ip.tcp_outbound_conn | received | connections/s | +| ip.tcp_functions | received, send, closed | calls/s | +| ip.total_tcp_bandwidth | received, send | kilobits/s | +| ip.tcp_error | received, send | calls/s | +| ip.tcp_retransmit | retransmited | calls/s | +| ip.udp_functions | received, send | calls/s | +| ip.total_udp_bandwidth | received, send | kilobits/s | +| ip.udp_error | received, send | calls/s | + +### Per apps + +These metrics show grouped information per apps group. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| app_group | The name of the group defined in the configuration. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| app.ebpf_call_tcp_v4_connection | connections | connections/s | +| app.app.ebpf_call_tcp_v6_connection | connections | connections/s | +| app.ebpf_sock_bytes_sent | bandwidth | kilobits/s | +| app.ebpf_sock_bytes_received | bandwidth | kilobits/s | +| app.ebpf_call_tcp_sendmsg | calls | calls/s | +| app.ebpf_call_tcp_cleanup_rbuf | calls | calls/s | +| app.ebpf_call_tcp_retransmit | calls | calls/s | +| app.ebpf_call_udp_sendmsg | calls | calls/s | +| app.ebpf_call_udp_recvmsg | calls | calls/s | + +### Per cgroup + + + +This scope has no labels. 
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cgroup.net_conn_ipv4 | connected_v4 | connections/s |
+| cgroup.net_conn_ipv6 | connected_v6 | connections/s |
+| cgroup.net_bytes_recv | received | calls/s |
+| cgroup.net_bytes_sent | sent | calls/s |
+| cgroup.net_tcp_recv | received | calls/s |
+| cgroup.net_tcp_send | sent | calls/s |
+| cgroup.net_retransmit | retransmitted | calls/s |
+| cgroup.net_udp_send | sent | calls/s |
+| cgroup.net_udp_recv | received | calls/s |
+| services.net_conn_ipv6 | a dimension per systemd service | connections/s |
+| services.net_bytes_recv | a dimension per systemd service | kilobits/s |
+| services.net_bytes_sent | a dimension per systemd service | kilobits/s |
+| services.net_tcp_recv | a dimension per systemd service | calls/s |
+| services.net_tcp_send | a dimension per systemd service | calls/s |
+| services.net_tcp_retransmit | a dimension per systemd service | calls/s |
+| services.net_udp_send | a dimension per systemd service | calls/s |
+| services.net_udp_recv | a dimension per systemd service | calls/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Compile kernel
+
+Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some of the cited names can differ according to the preferences of each Linux distribution.
+When these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files
+with different names.
+
+Then follow these steps:
+1. Copy the configuration file to /usr/src/linux/.config.
+2. Select the necessary options: make oldconfig
+3. Compile your kernel image: make bzImage
+4. Compile your modules: make modules
+5. Copy your new kernel image to the boot loader directory
+6. Install the new modules: make modules_install
+7. Generate an initial ramdisk image (`initrd`) if necessary.
+8. Update your boot loader
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `ebpf.d/network.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config ebpf.d/network.conf
+```
+#### Options
+
+All options are defined inside section `[global]`. Options inside the `network connections` section are currently ignored.
+
+
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 5 | no | +| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no | +| apps | Enable or disable integration with apps.plugin | no | no | +| cgroups | Enable or disable integration with cgroup.plugin | no | no | +| bandwidth table size | Number of elements stored inside hash tables used to monitor calls per PID. | 16384 | no | +| ipv4 connection table size | Number of elements stored inside hash tables used to monitor calls per IPV4 connections. | 16384 | no | +| ipv6 connection table size | Number of elements stored inside hash tables used to monitor calls per IPV6 connections. | 16384 | no | +| udp connection table size | Number of temporary elements stored inside hash tables used to monitor UDP connections. | 4096 | no | +| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no | +| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no | +| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information. | yes | no | +| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no | + +
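+As the Examples section below notes, no official example ships with this module; the following is only an illustrative sketch of `ebpf.d/network.conf` using the option names from the table above (the table sizes shown are the documented defaults, the remaining values are arbitrary choices):
+
+```text
+[global]
+    update every = 5
+    apps = yes
+    cgroups = no
+    bandwidth table size = 16384
+    ipv4 connection table size = 16384
+    ipv6 connection table size = 16384
+    udp connection table size = 4096
+    lifetime = 300
+```
+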
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_softirq.md b/collectors/ebpf.plugin/integrations/ebpf_softirq.md new file mode 100644 index 00000000000000..6a4312c6ef6fc7 --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_softirq.md @@ -0,0 +1,137 @@ + + +# eBPF SoftIRQ + + + + + +Plugin: ebpf.plugin +Module: softirq + + + +## Overview + +Monitor latency for each SoftIRQ available. + +Attach kprobe to internal kernel functions. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +This thread will add overhead every time that an internal kernel function monitored by this thread is called. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per eBPF SoftIRQ instance + +These metrics show latest timestamp for each softIRQ available on host. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.softirq_latency | soft IRQs | milliseconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions. +When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files +with different names. + +Now follow steps: +1. Copy the configuration file to /usr/src/linux/.config. +2. Select the necessary options: make oldconfig +3. Compile your kernel image: make bzImage +4. Compile your modules: make modules +5. Copy your new kernel image for boot loader directory +6. Install the new modules: make modules_install +7. Generate an initial ramdisk image (`initrd`) if it is necessary. +8. Update your boot loader + + +#### Debug Filesystem + +This thread needs to attach a tracepoint to monitor when a process schedule an exit event. To allow this specific feaure, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).` + + + +### Configuration + +#### File + +The configuration file name for this integration is `ebpf.d/softirq.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). 
+ +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config ebpf.d/softirq.conf +``` +#### Options + +All options are defined inside section `[global]`. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 5 | no | +| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no | +| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no | + +
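+No official configuration example exists (see the Examples section below); a minimal illustrative sketch of `ebpf.d/softirq.conf` overriding the defaults above could be (the values are assumptions):
+
+```text
+[global]
+    update every = 2
+    ebpf load mode = entry
+    lifetime = 300
+```
+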
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_swap.md b/collectors/ebpf.plugin/integrations/ebpf_swap.md new file mode 100644 index 00000000000000..ce2423f8ded240 --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_swap.md @@ -0,0 +1,170 @@ + + +# eBPF SWAP + + + + + +Plugin: ebpf.plugin +Module: swap + + + +## Overview + +Monitors when swap has I/O events and applications executing events. + +Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per cgroup + +These Metrics show grouped information per cgroup/service. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.swap_read | read | calls/s | +| cgroup.swap_write | write | calls/s | +| services.swap_read | a dimension per systemd service | calls/s | +| services.swap_write | a dimension per systemd service | calls/s | + +### Per apps + +These Metrics show grouped information per apps group. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| app_group | The name of the group defined in the configuration. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| app.ebpf_call_swap_readpage | a dimension per app group | calls/s | +| app.ebpf_call_swap_writepage | a dimension per app group | calls/s | + +### Per eBPF SWAP instance + +These metrics show total number of calls to functions inside kernel. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.swapcalls | write, read | calls/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions. +When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files +with different names. + +Now follow steps: +1. Copy the configuration file to /usr/src/linux/.config. +2. 
Select the necessary options: make oldconfig
+3. Compile your kernel image: make bzImage
+4. Compile your modules: make modules
+5. Copy your new kernel image to the boot loader directory
+6. Install the new modules: make modules_install
+7. Generate an initial ramdisk image (`initrd`) if necessary.
+8. Update your boot loader
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `ebpf.d/swap.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config ebpf.d/swap.conf
+```
+#### Options
+
+All options are defined inside section `[global]`.
+
+
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 5 | no | +| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no | +| apps | Enable or disable integration with apps.plugin | no | no | +| cgroups | Enable or disable integration with cgroup.plugin | no | no | +| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no | +| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no | +| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no | +| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information. | yes | no | +| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no | + +
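+As the Examples section below notes, no official example is provided; as an illustrative sketch only, `ebpf.d/swap.conf` could enable the cgroup integration while keeping the remaining defaults from the table above (the values are assumptions):
+
+```text
+[global]
+    update every = 5
+    ebpf load mode = entry
+    apps = no
+    cgroups = yes
+    pid table size = 32768
+    maps per core = yes
+    lifetime = 300
+```
+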
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_sync.md b/collectors/ebpf.plugin/integrations/ebpf_sync.md new file mode 100644 index 00000000000000..6f6c246a7df4fc --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_sync.md @@ -0,0 +1,157 @@ + + +# eBPF Sync + + + + + +Plugin: ebpf.plugin +Module: sync + + + +## Overview + +Monitor syscall responsible to move data from memory to storage device. + +Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per eBPF Sync instance + +These metrics show total number of calls to functions inside kernel. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.file_sync | fsync, fdatasync | calls/s | +| mem.meory_map | msync | calls/s | +| mem.sync | sync, syncfs | calls/s | +| mem.file_segment | sync_file_range | calls/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ sync_freq ](https://github.com/netdata/netdata/blob/master/health/health.d/synchronization.conf) | mem.sync | number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the underlying filesystems. | + + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions. +When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files +with different names. + +Now follow steps: +1. Copy the configuration file to /usr/src/linux/.config. +2. Select the necessary options: make oldconfig +3. Compile your kernel image: make bzImage +4. Compile your modules: make modules +5. Copy your new kernel image for boot loader directory +6. Install the new modules: make modules_install +7. Generate an initial ramdisk image (`initrd`) if it is necessary. +8. 
Update your boot loader
+
+
+#### Debug Filesystem
+
+This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `ebpf.d/sync.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config ebpf.d/sync.conf
+```
+#### Options
+
+This configuration file has two different sections: `[global]` overwrites all default options, while `[syscalls]` allows the user to select which syscalls to monitor.
+
+
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 5 | no | +| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no | +| apps | Enable or disable integration with apps.plugin | no | no | +| cgroups | Enable or disable integration with cgroup.plugin | no | no | +| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no | +| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no | +| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no | +| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information. | yes | no | +| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no | +| sync | Enable or disable monitoring for syscall `sync` | yes | no | +| msync | Enable or disable monitoring for syscall `msync` | yes | no | +| fsync | Enable or disable monitoring for syscall `fsync` | yes | no | +| fdatasync | Enable or disable monitoring for syscall `fdatasync` | yes | no | +| syncfs | Enable or disable monitoring for syscall `syncfs` | yes | no | +| sync_file_range | Enable or disable monitoring for syscall `sync_file_range` | yes | no | + +
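+Since the Examples section below notes that no official example exists, the following is only an illustrative sketch of `ebpf.d/sync.conf`, assuming the per-syscall toggles live under `[syscalls]` as described above (the values are arbitrary):
+
+```text
+[global]
+    update every = 5
+    ebpf load mode = entry
+    lifetime = 300
+
+[syscalls]
+    sync = yes
+    msync = yes
+    fsync = yes
+    fdatasync = yes
+    syncfs = no
+    sync_file_range = no
+```
+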
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/integrations/ebpf_vfs.md b/collectors/ebpf.plugin/integrations/ebpf_vfs.md new file mode 100644 index 00000000000000..4b824e975b96a4 --- /dev/null +++ b/collectors/ebpf.plugin/integrations/ebpf_vfs.md @@ -0,0 +1,212 @@ + + +# eBPF VFS + + + + + +Plugin: ebpf.plugin +Module: vfs + + + +## Overview + +Monitor I/O events on Linux Virtual Filesystem. + +Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time. + +### Default Behavior + +#### Auto-Detection + +The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per cgroup + +These Metrics show grouped information per cgroup/service. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cgroup.vfs_unlink | delete | calls/s | +| cgroup.vfs_write | write | calls/s | +| cgroup.vfs_write_error | write | calls/s | +| cgroup.vfs_read | read | calls/s | +| cgroup.vfs_read_error | read | calls/s | +| cgroup.vfs_write_bytes | write | bytes/s | +| cgroup.vfs_read_bytes | read | bytes/s | +| cgroup.vfs_fsync | fsync | calls/s | +| cgroup.vfs_fsync_error | fsync | calls/s | +| cgroup.vfs_open | open | calls/s | +| cgroup.vfs_open_error | open | calls/s | +| cgroup.vfs_create | create | calls/s | +| cgroup.vfs_create_error | create | calls/s | +| services.vfs_unlink | a dimension per systemd service | calls/s | +| services.vfs_write | a dimension per systemd service | calls/s | +| services.vfs_write_error | a dimension per systemd service | calls/s | +| services.vfs_read | a dimension per systemd service | calls/s | +| services.vfs_read_error | a dimension per systemd service | calls/s | +| services.vfs_write_bytes | a dimension per systemd service | bytes/s | +| services.vfs_read_bytes | a dimension per systemd service | bytes/s | +| services.vfs_fsync | a dimension per systemd service | calls/s | +| services.vfs_fsync_error | a dimension per systemd service | calls/s | +| services.vfs_open | a dimension per systemd service | calls/s | +| services.vfs_open_error | a dimension per systemd service | calls/s | +| services.vfs_create | a dimension per systemd service | calls/s | +| services.vfs_create_error | a dimension per systemd service | calls/s | + +### Per eBPF VFS instance + +These Metrics show grouped information per cgroup/service. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| filesystem.vfs_deleted_objects | delete | calls/s | +| filesystem.vfs_io | read, write | calls/s | +| filesystem.vfs_io_bytes | read, write | bytes/s | +| filesystem.vfs_io_error | read, write | calls/s | +| filesystem.vfs_fsync | fsync | calls/s | +| filesystem.vfs_fsync_error | fsync | calls/s | +| filesystem.vfs_open | open | calls/s | +| filesystem.vfs_open_error | open | calls/s | +| filesystem.vfs_create | create | calls/s | +| filesystem.vfs_create_error | create | calls/s | + +### Per apps + +These Metrics show grouped information per apps group. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| app_group | The name of the group defined in the configuration. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| app.ebpf_call_vfs_unlink | calls | calls/s | +| app.ebpf_call_vfs_write | calls | calls/s | +| app.ebpf_call_vfs_write_error | calls | calls/s | +| app.ebpf_call_vfs_read | calls | calls/s | +| app.ebpf_call_vfs_read_error | calls | calls/s | +| app.ebpf_call_vfs_write_bytes | writes | bytes/s | +| app.ebpf_call_vfs_read_bytes | reads | bytes/s | +| app.ebpf_call_vfs_fsync | calls | calls/s | +| app.ebpf_call_vfs_fsync_error | calls | calls/s | +| app.ebpf_call_vfs_open | calls | calls/s | +| app.ebpf_call_vfs_open_error | calls | calls/s | +| app.ebpf_call_vfs_create | calls | calls/s | +| app.ebpf_call_vfs_create_error | calls | calls/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Compile kernel + +Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions. +When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files +with different names. + +Now follow steps: +1. Copy the configuration file to /usr/src/linux/.config. +2. Select the necessary options: make oldconfig +3. Compile your kernel image: make bzImage +4. Compile your modules: make modules +5. Copy your new kernel image for boot loader directory +6. Install the new modules: make modules_install +7. Generate an initial ramdisk image (`initrd`) if it is necessary. +8. Update your boot loader + + + +### Configuration + +#### File + +The configuration file name for this integration is `ebpf.d/vfs.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config ebpf.d/vfs.conf +``` +#### Options + +All options are defined inside section `[global]`. + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update every | Data collection frequency. | 5 | no |
+| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |
+| apps | Enable or disable integration with apps.plugin | no | no |
+| cgroups | Enable or disable integration with cgroup.plugin | no | no |
+| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |
+| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |
+| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (lowest overhead) and `probe` (the same method as the legacy code). | trampoline | no |
+| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of a single centralized table. | yes | no |
+| lifetime | Set the default lifetime for the thread when it is enabled by cloud. | 300 | no |
+
+
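+As an illustrative sketch only (values shown are the documented defaults, not a shipped example), a `[global]` section of `ebpf.d/vfs.conf` using the options above might look like this:
+
+```ini
+[global]
+    # collection frequency and integration toggles
+    update every = 5
+    apps = no
+    cgroups = no
+    # attach strategy
+    ebpf load mode = entry
+    ebpf type format = auto
+    ebpf co-re tracing = trampoline
+    # per-PID hash table sizing and per-core maps
+    pid table size = 32768
+    maps per core = yes
+    lifetime = 300
+```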
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/ebpf.plugin/metadata.yaml b/collectors/ebpf.plugin/metadata.yaml index 23232677836017..97b5df38940d43 100644 --- a/collectors/ebpf.plugin/metadata.yaml +++ b/collectors/ebpf.plugin/metadata.yaml @@ -196,32 +196,34 @@ modules: - name: close - name: apps description: "These Metrics show grouped information per apps group." - labels: [] + labels: + - name: app_group + description: The name of the group defined in the configuration. metrics: - - name: apps.file_open + - name: app.ebpf_file_open description: Number of open files unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.file_open_error + - name: calls + - name: app.ebpf_file_open_error description: Fails to open files unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.file_closed + - name: calls + - name: app.ebpf_file_closed description: Files closed unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.file_close_error + - name: calls + - name: app.ebpf_file_close_error description: Fails to close files unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group + - name: calls - meta: plugin_name: ebpf.plugin module_name: processes @@ -379,38 +381,40 @@ modules: - name: task - name: apps description: "These Metrics show grouped information per apps group." - labels: [] + labels: + - name: app_group + description: The name of the group defined in the configuration. metrics: - - name: apps.process_create + - name: app.process_create description: Process started unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.thread_create + - name: calls + - name: app.thread_create description: Threads started unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.task_exit + - name: call + - name: app.task_exit description: Tasks starts exit process unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.task_close + - name: call + - name: app.task_close description: Tasks closed unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.task_error + - name: call + - name: app.task_error description: Errors to create process or threads unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group + - name: app - name: cgroup description: "These Metrics show grouped information per cgroup/service." labels: [] @@ -841,32 +845,34 @@ modules: - name: miss - name: apps description: "These Metrics show grouped information per apps group." - labels: [] + labels: + - name: app_group + description: The name of the group defined in the configuration. 
metrics: - - name: apps.cachestat_ratio + - name: app.ebpf_cachestat_hit_ratio description: Hit ratio unit: "%" chart_type: line dimensions: - - name: a dimension per app group - - name: apps.cachestat_dirties + - name: ratio + - name: app.ebpf_cachestat_dirty_pages description: Number of dirty pages unit: "page/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.cachestat_hits + - name: pages + - name: app.ebpf_cachestat_access description: Number of accessed files unit: "hits/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.cachestat_misses + - name: hits + - name: app.ebpf_cachestat_misses description: Files out of page cache unit: "misses/s" chart_type: stacked dimensions: - - name: a dimension per app group + - name: misses - name: cgroup description: "" labels: [] @@ -1076,27 +1082,27 @@ modules: labels: [] metrics: - name: mem.file_sync - description: Monitor calls for fsync(2) and fdatasync(2). + description: Monitor calls to fsync(2) and fdatasync(2). unit: "calls/s" chart_type: stacked dimensions: - name: fsync - name: fdatasync - name: mem.meory_map - description: Monitor calls for msync(2). + description: Monitor calls to msync(2). unit: "calls/s" chart_type: line dimensions: - name: msync - name: mem.sync - description: Monitor calls for sync(2) and syncfs(2). + description: Monitor calls to sync(2) and syncfs(2). unit: "calls/s" chart_type: line dimensions: - name: sync - name: syncfs - name: mem.file_segment - description: Monitor calls for sync_file_range(2). + description: Monitor calls to sync_file_range(2). unit: "calls/s" chart_type: line dimensions: @@ -1333,41 +1339,43 @@ modules: labels: [] metrics: - name: cgroup.swap_read - description: Calls to function swap_readpage. + description: Calls to function swap_readpage. unit: "calls/s" chart_type: line dimensions: - name: read - name: cgroup.swap_write - description: Calls to function swap_writepage. + description: Calls to function swap_writepage. unit: "calls/s" chart_type: line dimensions: - name: write - name: services.swap_read - description: Calls to swap_readpage. + description: Calls to swap_readpage. unit: "calls/s" chart_type: stacked dimensions: - name: a dimension per systemd service - name: services.swap_write - description: Calls to function swap_writepage. + description: Calls to function swap_writepage. unit: "calls/s" chart_type: stacked dimensions: - name: a dimension per systemd service - name: apps description: "These Metrics show grouped information per apps group." - labels: [] + labels: + - name: app_group + description: The name of the group defined in the configuration. metrics: - - name: apps.swap_read_call - description: Calls to function swap_readpage. + - name: app.ebpf_call_swap_readpage + description: Calls to function swap_readpage. unit: "calls/s" chart_type: stacked dimensions: - name: a dimension per app group - - name: apps.swap_write_call - description: Calls to function swap_writepage. + - name: app.ebpf_call_swap_writepage + description: Calls to function swap_writepage. unit: "calls/s" chart_type: stacked dimensions: @@ -1501,14 +1509,16 @@ modules: - name: a dimension per systemd service - name: apps description: "These metrics show cgroup/service that reached OOM." - labels: [] + labels: + - name: app_group + description: The name of the group defined in the configuration. 
metrics: - - name: apps.oomkills + - name: app.oomkill description: OOM kills unit: "kills" chart_type: stacked dimensions: - - name: a dimension per app group + - name: kills - meta: plugin_name: ebpf.plugin module_name: socket @@ -1713,68 +1723,64 @@ modules: - name: send - name: apps description: "These metrics show grouped information per apps group." - labels: [] + labels: + - name: app_group + description: The name of the group defined in the configuration. metrics: - - name: apps.outbound_conn_v4 + - name: app.ebpf_call_tcp_v4_connection description: Calls to tcp_v4_connection unit: "connections/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.outbound_conn_v6 + - name: connections + - name: app.app.ebpf_call_tcp_v6_connection description: Calls to tcp_v6_connection unit: "connections/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.total_bandwidth_sent + - name: connections + - name: app.ebpf_sock_bytes_sent description: Bytes sent unit: "kilobits/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.total_bandwidth_recv + - name: bandwidth + - name: app.ebpf_sock_bytes_received description: bytes received unit: "kilobits/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.bandwidth_tcp_send + - name: bandwidth + - name: app.ebpf_call_tcp_sendmsg description: Calls for tcp_sendmsg unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.bandwidth_tcp_recv + - name: calls + - name: app.ebpf_call_tcp_cleanup_rbuf description: Calls for tcp_cleanup_rbuf unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.bandwidth_tcp_retransmit + - name: calls + - name: app.ebpf_call_tcp_retransmit description: Calls for tcp_retransmit unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.bandwidth_udp_send + - name: calls + - name: app.ebpf_call_udp_sendmsg description: Calls for udp_sendmsg unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.bandwidth_udp_recv + - name: calls + - name: app.ebpf_call_udp_recvmsg description: Calls for udp_recvmsg unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: services.net_conn_ipv4 - description: Calls to tcp_v4_connection - unit: "connections/s" - chart_type: stacked - dimensions: - - name: a dimension per systemd service + - name: calls - name: cgroup description: "" labels: [] @@ -2005,32 +2011,34 @@ modules: scopes: - name: apps description: "These Metrics show grouped information per apps group." - labels: [] + labels: + - name: app_group + description: The name of the group defined in the configuration. 
metrics: - - name: apps.dc_ratio + - name: app.ebpf_dc_ratio description: Percentage of files inside directory cache unit: "%" chart_type: line dimensions: - - name: a dimension per app group - - name: apps.dc_reference + - name: ratio + - name: app.ebpf_dc_reference description: Count file access unit: "files" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.dc_not_cache + - name: files + - name: app.ebpf_dc_not_cache description: Files not present inside directory cache unit: "files" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.dc_not_found + - name: files + - name: app.ebpf_dc_not_found description: Files not found unit: "files" chart_type: stacked dimensions: - - name: a dimension per app group + - name: files - name: filesystem description: "These metrics show total number of calls to functions inside kernel." labels: [] @@ -2409,81 +2417,83 @@ modules: labels: [] metrics: - name: cgroup.shmget - description: Calls to syscall shmget(2). + description: Calls to syscall shmget(2). unit: "calls/s" chart_type: line dimensions: - name: get - name: cgroup.shmat - description: Calls to syscall shmat(2). + description: Calls to syscall shmat(2). unit: "calls/s" chart_type: line dimensions: - name: at - name: cgroup.shmdt - description: Calls to syscall shmdt(2). + description: Calls to syscall shmdt(2). unit: "calls/s" chart_type: line dimensions: - name: dt - name: cgroup.shmctl - description: Calls to syscall shmctl(2). + description: Calls to syscall shmctl(2). unit: "calls/s" chart_type: line dimensions: - name: ctl - name: services.shmget - description: Calls to syscall shmget(2). + description: Calls to syscall shmget(2). unit: "calls/s" chart_type: stacked dimensions: - name: a dimension per systemd service - name: services.shmat - description: Calls to syscall shmat(2). + description: Calls to syscall shmat(2). unit: "calls/s" chart_type: stacked dimensions: - name: a dimension per systemd service - name: services.shmdt - description: Calls to syscall shmdt(2). + description: Calls to syscall shmdt(2). unit: "calls/s" chart_type: stacked dimensions: - name: a dimension per systemd service - name: services.shmctl - description: Calls to syscall shmctl(2). + description: Calls to syscall shmctl(2). unit: "calls/s" chart_type: stacked dimensions: - name: a dimension per systemd service - name: apps description: "These Metrics show grouped information per apps group." - labels: [] + labels: + - name: app_group + description: The name of the group defined in the configuration. metrics: - - name: apps.shmget_call - description: Calls to syscall shmget(2). + - name: app.ebpf_shmget_call + description: Calls to syscall shmget(2). unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.shmat_call - description: Calls to syscall shmat(2). + - name: calls + - name: app.ebpf_shmat_call + description: Calls to syscall shmat(2). unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.shmdt_call - description: Calls to syscall shmdt(2). + - name: calls + - name: app.ebpf_shmdt_call + description: Calls to syscall shmdt(2). unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.shmctl_call - description: Calls to syscall shmctl(2). + - name: calls + - name: app.ebpf_shmctl_call + description: Calls to syscall shmctl(2). 
unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group + - name: calls - name: global description: "These Metrics show number of calls for specified syscall." labels: [] @@ -2898,7 +2908,7 @@ modules: dimensions: - name: read - name: cgroup.vfs_fsync - description: Calls for vfs_fsync + description: Calls to vfs_fsync. unit: "calls/s" chart_type: line dimensions: @@ -2910,7 +2920,7 @@ modules: dimensions: - name: fsync - name: cgroup.vfs_open - description: Calls for vfs_open + description: Calls to vfs_open. unit: "calls/s" chart_type: line dimensions: @@ -2922,7 +2932,7 @@ modules: dimensions: - name: open - name: cgroup.vfs_create - description: Calls for vfs_create + description: Calls to vfs_create. unit: "calls/s" chart_type: line dimensions: @@ -2976,7 +2986,7 @@ modules: dimensions: - name: a dimension per systemd service - name: services.vfs_fsync - description: Calls to vfs_fsync + description: Calls to vfs_fsync. unit: "calls/s" chart_type: stacked dimensions: @@ -2988,7 +2998,7 @@ modules: dimensions: - name: a dimension per systemd service - name: services.vfs_open - description: Calls to vfs_open + description: Calls to vfs_open. unit: "calls/s" chart_type: stacked dimensions: @@ -3000,7 +3010,7 @@ modules: dimensions: - name: a dimension per systemd service - name: services.vfs_create - description: Calls to vfs_create + description: Calls to vfs_create. unit: "calls/s" chart_type: stacked dimensions: @@ -3043,7 +3053,7 @@ modules: - name: read - name: write - name: filesystem.vfs_fsync - description: Calls for vfs_fsync + description: Calls to vfs_fsync. unit: "calls/s" chart_type: line dimensions: @@ -3055,7 +3065,7 @@ modules: dimensions: - name: fsync - name: filesystem.vfs_open - description: Calls for vfs_open + description: Calls to vfs_open. unit: "calls/s" chart_type: line dimensions: @@ -3067,7 +3077,7 @@ modules: dimensions: - name: open - name: filesystem.vfs_create - description: Calls for vfs_create + description: Calls to vfs_create. unit: "calls/s" chart_type: line dimensions: @@ -3080,86 +3090,88 @@ modules: - name: create - name: apps description: "These Metrics show grouped information per apps group." - labels: [] + labels: + - name: app_group + description: The name of the group defined in the configuration. 
metrics: - - name: apps.file_deleted + - name: app.ebpf_call_vfs_unlink description: Files deleted unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.vfs_write_call + - name: calls + - name: app.ebpf_call_vfs_write description: Write to disk unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.vfs_write_error + - name: calls + - name: app.ebpf_call_vfs_write_error description: Fails to write unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.vfs_read_call + - name: calls + - name: app.ebpf_call_vfs_read description: Read from disk unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.vfs_read_error + - name: calls + - name: app.ebpf_call_vfs_read_error description: Fails to read unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.vfs_write_bytes + - name: calls + - name: app.ebpf_call_vfs_write_bytes description: Bytes written on disk unit: "bytes/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.vfs_read_bytes + - name: writes + - name: app.ebpf_call_vfs_read_bytes description: Bytes read on disk unit: "bytes/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.vfs_fsync - description: Calls for vfs_fsync + - name: reads + - name: app.ebpf_call_vfs_fsync + description: Calls to vfs_fsync. unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.vfs_fsync_error + - name: calls + - name: app.ebpf_call_vfs_fsync_error description: Sync error unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.vfs_open - description: Calls for vfs_open + - name: calls + - name: app.ebpf_call_vfs_open + description: Calls to vfs_open. unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.vfs_open_error + - name: calls + - name: app.ebpf_call_vfs_open_error description: Open error unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.vfs_create - description: Calls for vfs_create + - name: calls + - name: app.ebpf_call_vfs_create + description: Calls to vfs_create. 
unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group - - name: apps.vfs_create_error + - name: calls + - name: app.ebpf_call_vfs_create_error description: Create error unit: "calls/s" chart_type: stacked dimensions: - - name: a dimension per app group + - name: calls - meta: plugin_name: ebpf.plugin module_name: process diff --git a/collectors/freebsd.plugin/freebsd_devstat.c b/collectors/freebsd.plugin/freebsd_devstat.c index 65b8a2d5a23314..ca6048a1687fae 100644 --- a/collectors/freebsd.plugin/freebsd_devstat.c +++ b/collectors/freebsd.plugin/freebsd_devstat.c @@ -89,21 +89,21 @@ static size_t disks_added = 0, disks_found = 0; static void disk_free(struct disk *dm) { if (likely(dm->st_io)) - rrdset_is_obsolete(dm->st_io); + rrdset_is_obsolete___safe_from_collector_thread(dm->st_io); if (likely(dm->st_ops)) - rrdset_is_obsolete(dm->st_ops); + rrdset_is_obsolete___safe_from_collector_thread(dm->st_ops); if (likely(dm->st_qops)) - rrdset_is_obsolete(dm->st_qops); + rrdset_is_obsolete___safe_from_collector_thread(dm->st_qops); if (likely(dm->st_util)) - rrdset_is_obsolete(dm->st_util); + rrdset_is_obsolete___safe_from_collector_thread(dm->st_util); if (likely(dm->st_iotime)) - rrdset_is_obsolete(dm->st_iotime); + rrdset_is_obsolete___safe_from_collector_thread(dm->st_iotime); if (likely(dm->st_await)) - rrdset_is_obsolete(dm->st_await); + rrdset_is_obsolete___safe_from_collector_thread(dm->st_await); if (likely(dm->st_avagsz)) - rrdset_is_obsolete(dm->st_avagsz); + rrdset_is_obsolete___safe_from_collector_thread(dm->st_avagsz); if (likely(dm->st_svctm)) - rrdset_is_obsolete(dm->st_svctm); + rrdset_is_obsolete___safe_from_collector_thread(dm->st_svctm); disks_added--; freez(dm->name); diff --git a/collectors/freebsd.plugin/freebsd_getifaddrs.c b/collectors/freebsd.plugin/freebsd_getifaddrs.c index 80a209105fbcd6..36be684226fb4a 100644 --- a/collectors/freebsd.plugin/freebsd_getifaddrs.c +++ b/collectors/freebsd.plugin/freebsd_getifaddrs.c @@ -52,15 +52,15 @@ static size_t network_interfaces_added = 0, network_interfaces_found = 0; static void network_interface_free(struct cgroup_network_interface *ifm) { if (likely(ifm->st_bandwidth)) - rrdset_is_obsolete(ifm->st_bandwidth); + rrdset_is_obsolete___safe_from_collector_thread(ifm->st_bandwidth); if (likely(ifm->st_packets)) - rrdset_is_obsolete(ifm->st_packets); + rrdset_is_obsolete___safe_from_collector_thread(ifm->st_packets); if (likely(ifm->st_errors)) - rrdset_is_obsolete(ifm->st_errors); + rrdset_is_obsolete___safe_from_collector_thread(ifm->st_errors); if (likely(ifm->st_drops)) - rrdset_is_obsolete(ifm->st_drops); + rrdset_is_obsolete___safe_from_collector_thread(ifm->st_drops); if (likely(ifm->st_events)) - rrdset_is_obsolete(ifm->st_events); + rrdset_is_obsolete___safe_from_collector_thread(ifm->st_events); network_interfaces_added--; freez(ifm->name); diff --git a/collectors/freebsd.plugin/freebsd_getmntinfo.c b/collectors/freebsd.plugin/freebsd_getmntinfo.c index cc0abd9060ac86..d55eb3d4a49ce9 100644 --- a/collectors/freebsd.plugin/freebsd_getmntinfo.c +++ b/collectors/freebsd.plugin/freebsd_getmntinfo.c @@ -39,9 +39,9 @@ static size_t mount_points_added = 0, mount_points_found = 0; static void mount_point_free(struct mount_point *m) { if (likely(m->st_space)) - rrdset_is_obsolete(m->st_space); + rrdset_is_obsolete___safe_from_collector_thread(m->st_space); if (likely(m->st_inodes)) - rrdset_is_obsolete(m->st_inodes); + rrdset_is_obsolete___safe_from_collector_thread(m->st_inodes); 
mount_points_added--; freez(m->name); @@ -216,7 +216,7 @@ int do_getmntinfo(int update_every, usec_t dt) { (mntbuf[i].f_blocks > 2 || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!m->st_space)) { - snprintfz(title, 4096, "Disk Space Usage for %s [%s]", + snprintfz(title, sizeof(title) - 1, "Disk Space Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); m->st_space = rrdset_create_localhost("disk_space", mntbuf[i].f_mntonname, @@ -254,7 +254,7 @@ int do_getmntinfo(int update_every, usec_t dt) { (mntbuf[i].f_files > 1 || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { if (unlikely(!m->st_inodes)) { - snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", + snprintfz(title, sizeof(title) - 1, "Disk Files (inodes) Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); m->st_inodes = rrdset_create_localhost("disk_inodes", mntbuf[i].f_mntonname, diff --git a/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md b/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md new file mode 100644 index 00000000000000..5f18661d0188c9 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md @@ -0,0 +1,111 @@ + + +# dev.cpu.0.freq + + + + + +Plugin: freebsd.plugin +Module: dev.cpu.0.freq + + + +## Overview + +Read current CPU Scaling frequency. + +Current CPU Scaling Frequency + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per dev.cpu.0.freq instance + +The metric shows status of CPU frequency, it is direct affected by system load. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cpu.scaling_cur_freq | frequency | MHz | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `Config options`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config Config options +``` +#### Options + + + +
+ +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| dev.cpu.0.freq | Enable or disable CPU Scaling frequency metric. | yes | no | + +
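+A minimal sketch, assuming the toggle is set in the `[plugin:freebsd]` section of `netdata.conf` as with the other freebsd.plugin collectors documented here (the value shown is the documented default):
+
+```ini
+[plugin:freebsd]
+    # enable collection of the CPU scaling frequency metric
+    dev.cpu.0.freq = yes
+```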
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md b/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md new file mode 100644 index 00000000000000..a3736f771a5d93 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md @@ -0,0 +1,120 @@ + + +# dev.cpu.temperature + + + + + +Plugin: freebsd.plugin +Module: dev.cpu.temperature + + + +## Overview + +Get current CPU temperature + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per dev.cpu.temperature instance + +This metric show latest CPU temperature. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cpu.temperature | a dimension per core | Celsius | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| dev.cpu.temperature | Enable or disable CPU temperature metric. | yes | no | + +
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/devstat.md b/collectors/freebsd.plugin/integrations/devstat.md new file mode 100644 index 00000000000000..9d9c6400b0c5c3 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/devstat.md @@ -0,0 +1,155 @@ + + +# devstat + + + + + +Plugin: freebsd.plugin +Module: devstat + + + +## Overview + +Collect information per hard disk available on host. + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per devstat instance + +These metrics give a general vision about I/O events on disks. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.io | io, out | KiB/s | + +### Per disk + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| disk.io | reads, writes, frees | KiB/s | +| disk.ops | reads, writes, other, frees | operations/s | +| disk.qops | operations | operations | +| disk.util | utilization | % of time working | +| disk.iotime | reads, writes, other, frees | milliseconds/s | +| disk.await | reads, writes, other, frees | milliseconds/operation | +| disk.avgsz | reads, writes, frees | KiB/operation | +| disk.svctm | svctm | milliseconds/operation | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd:kern.devstat]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| enable new disks detected at runtime | Enable or disable detection of new disks at runtime. | auto | no |
+| performance metrics for pass devices | Enable or disable metrics for disks with type `PASS`. | auto | no |
+| total bandwidth for all disks | Enable or disable total bandwidth metric for all disks. | yes | no |
+| bandwidth for all disks | Enable or disable bandwidth for all disks metric. | auto | no |
+| operations for all disks | Enable or disable operations for all disks metric. | auto | no |
+| queued operations for all disks | Enable or disable queued operations for all disks metric. | auto | no |
+| utilization percentage for all disks | Enable or disable utilization percentage for all disks metric. | auto | no |
+| i/o time for all disks | Enable or disable I/O time for all disks metric. | auto | no |
+| average completed i/o time for all disks | Enable or disable average completed I/O time for all disks metric. | auto | no |
+| average completed i/o bandwidth for all disks | Enable or disable average completed I/O bandwidth for all disks metric. | auto | no |
+| average service time for all disks | Enable or disable average service time for all disks metric. | auto | no |
+| disable by default disks matching | Do not create charts for the disks listed. | | no |
+
+
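+As a rough illustration only (not a shipped example; values shown are the documented defaults), the `[plugin:freebsd:kern.devstat]` section of `netdata.conf` might look like this:
+
+```ini
+[plugin:freebsd:kern.devstat]
+    enable new disks detected at runtime = auto
+    performance metrics for pass devices = auto
+    total bandwidth for all disks = yes
+    bandwidth for all disks = auto
+    operations for all disks = auto
+    utilization percentage for all disks = auto
+```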
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/getifaddrs.md b/collectors/freebsd.plugin/integrations/getifaddrs.md new file mode 100644 index 00000000000000..63c4ce136c45c4 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/getifaddrs.md @@ -0,0 +1,161 @@ + + +# getifaddrs + + + + + +Plugin: freebsd.plugin +Module: getifaddrs + + + +## Overview + +Collect traffic per network interface. + +The plugin calls `getifaddrs` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per getifaddrs instance + +General overview about network traffic. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.net | received, sent | kilobits/s | +| system.packets | received, sent, multicast_received, multicast_sent | packets/s | +| system.ipv4 | received, sent | kilobits/s | +| system.ipv6 | received, sent | kilobits/s | + +### Per network device + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| net.net | received, sent | kilobits/s | +| net.packets | received, sent, multicast_received, multicast_sent | packets/s | +| net.errors | inbound, outbound | errors/s | +| net.drops | inbound, outbound | drops/s | +| net.events | collisions | events/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ interface_speed ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.net | network interface ${label:device} current speed | +| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes | +| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes | +| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute | +| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute | +| [ interface_inbound_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.errors | number of inbound errors for the network interface ${label:device} in the last 10 minutes | +| [ interface_outbound_errors 
](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.errors | number of outbound errors for the network interface ${label:device} in the last 10 minutes | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd:getifaddrs]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| enable new interfaces detected at runtime | Enable or disable discovery of new interfaces after the plugin starts. | auto | no |
+| total bandwidth for physical interfaces | Enable or disable total bandwidth for physical interfaces metric. | auto | no |
+| total packets for physical interfaces | Enable or disable total packets for physical interfaces metric. | auto | no |
+| total bandwidth for ipv4 interface | Enable or disable total bandwidth for IPv4 interface metric. | auto | no |
+| total bandwidth for ipv6 interfaces | Enable or disable total bandwidth for IPv6 interfaces metric. | auto | no |
+| bandwidth for all interfaces | Enable or disable bandwidth for all interfaces metric. | auto | no |
+| packets for all interfaces | Enable or disable packets for all interfaces metric. | auto | no |
+| errors for all interfaces | Enable or disable errors for all interfaces metric. | auto | no |
+| drops for all interfaces | Enable or disable drops for all interfaces metric. | auto | no |
+| collisions for all interface | Enable or disable collisions for all interfaces metric. | auto | no |
+| disable by default interfaces matching | Do not display data for the interfaces listed. | lo* | no |
+| set physical interfaces for system.net | Do not show network traffic for listed interfaces. | igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc* | no |
+
+
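+As a rough illustration only (values shown are the documented defaults, not a shipped example), the `[plugin:freebsd:getifaddrs]` section of `netdata.conf` might look like this:
+
+```ini
+[plugin:freebsd:getifaddrs]
+    enable new interfaces detected at runtime = auto
+    bandwidth for all interfaces = auto
+    packets for all interfaces = auto
+    errors for all interfaces = auto
+    drops for all interfaces = auto
+    disable by default interfaces matching = lo*
+```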
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/getmntinfo.md b/collectors/freebsd.plugin/integrations/getmntinfo.md new file mode 100644 index 00000000000000..d26ad1c0334815 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/getmntinfo.md @@ -0,0 +1,131 @@ + + +# getmntinfo + + + + + +Plugin: freebsd.plugin +Module: getmntinfo + + + +## Overview + +Collect information per mount point. + +The plugin calls `getmntinfo` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per mount point + +These metrics show detailss about mount point usages. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| disk.space | avail, used, reserved_for_root | GiB | +| disk.inodes | avail, used, reserved_for_root | inodes | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization | +| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd:getmntinfo]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| enable new mount points detected at runtime | Check for new mount points during runtime. | auto | no |
+| space usage for all disks | Enable or disable space usage for all disks metric. | auto | no |
+| inodes usage for all disks | Enable or disable inodes usage for all disks metric. | auto | no |
+| exclude space metrics on paths | Do not show metrics for listed paths. | /proc/* | no |
+| exclude space metrics on filesystems | Do not monitor listed filesystems. | autofs procfs subfs devfs none | no |
+
+
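+As a rough illustration only (values shown are the documented defaults, not a shipped example), the `[plugin:freebsd:getmntinfo]` section of `netdata.conf` might look like this:
+
+```ini
+[plugin:freebsd:getmntinfo]
+    enable new mount points detected at runtime = auto
+    space usage for all disks = auto
+    inodes usage for all disks = auto
+    exclude space metrics on paths = /proc/*
+    exclude space metrics on filesystems = autofs procfs subfs devfs none
+```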
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/hw.intrcnt.md b/collectors/freebsd.plugin/integrations/hw.intrcnt.md new file mode 100644 index 00000000000000..49164c369d1bdf --- /dev/null +++ b/collectors/freebsd.plugin/integrations/hw.intrcnt.md @@ -0,0 +1,121 @@ + + +# hw.intrcnt + + + + + +Plugin: freebsd.plugin +Module: hw.intrcnt + + + +## Overview + +Get total number of interrupts + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per hw.intrcnt instance + +These metrics show system interrupts frequency. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.intr | interrupts | interrupts/s | +| system.interrupts | a dimension per interrupt | interrupts/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| hw.intrcnt | Enable or disable Interrupts metric. | yes | no |
+
+
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/ipfw.md b/collectors/freebsd.plugin/integrations/ipfw.md new file mode 100644 index 00000000000000..84e023bdfebd29 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/ipfw.md @@ -0,0 +1,126 @@ + + +# ipfw + + + + + +Plugin: freebsd.plugin +Module: ipfw + + + +## Overview + +Collect information about FreeBSD firewall. + +The plugin uses RAW socket to communicate with kernel and collect data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per ipfw instance + +Theese metrics show FreeBSD firewall statistics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ipfw.mem | dynamic, static | bytes | +| ipfw.packets | a dimension per static rule | packets/s | +| ipfw.bytes | a dimension per static rule | bytes/s | +| ipfw.active | a dimension per dynamic rule | rules | +| ipfw.expired | a dimension per dynamic rule | rules | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd:ipfw]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| counters for static rules | Enable or disable counters for static rules metric. | yes | no | +| number of dynamic rules | Enable or disable number of dynamic rules metric. | yes | no | +| allocated memory | Enable or disable allocated memory metric. | yes | no | + +
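+As a rough illustration only (values shown are the documented defaults, not a shipped example), the `[plugin:freebsd:ipfw]` section of `netdata.conf` might look like this:
+
+```ini
+[plugin:freebsd:ipfw]
+    counters for static rules = yes
+    number of dynamic rules = yes
+    allocated memory = yes
+```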
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/kern.cp_time.md b/collectors/freebsd.plugin/integrations/kern.cp_time.md new file mode 100644 index 00000000000000..95bdb8d90b497e --- /dev/null +++ b/collectors/freebsd.plugin/integrations/kern.cp_time.md @@ -0,0 +1,139 @@ + + +# kern.cp_time + + + + + +Plugin: freebsd.plugin +Module: kern.cp_time + + + +## Overview + +Total CPU utilization + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per kern.cp_time instance + +These metrics show CPU usage statistics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.cpu | nice, system, user, interrupt, idle | percentage | + +### Per core + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cpu.cpu | nice, system, user, interrupt, idle | percentage | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) | +| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes | +| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes | +| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding nice) | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + +The netdata main configuration file. + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| kern.cp_time | Enable or disable Total CPU usage. | yes | no | + +
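+A minimal sketch, assuming the toggle lives in the `[plugin:freebsd]` section of `netdata.conf` alongside the other freebsd.plugin options documented here (the value shown is the documented default):
+
+```ini
+[plugin:freebsd]
+    kern.cp_time = yes
+```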
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/kern.ipc.msq.md b/collectors/freebsd.plugin/integrations/kern.ipc.msq.md new file mode 100644 index 00000000000000..e7457e0c1e23be --- /dev/null +++ b/collectors/freebsd.plugin/integrations/kern.ipc.msq.md @@ -0,0 +1,122 @@ + + +# kern.ipc.msq + + + + + +Plugin: freebsd.plugin +Module: kern.ipc.msq + + + +## Overview + +Collect number of IPC message Queues + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per kern.ipc.msq instance + +These metrics show statistics IPC messages statistics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.ipc_msq_queues | queues | queues | +| system.ipc_msq_messages | messages | messages | +| system.ipc_msq_size | allocated, used | bytes | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| kern.ipc.msq | Enable or disable IPC message queue metric. | yes | no | + +
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/kern.ipc.sem.md b/collectors/freebsd.plugin/integrations/kern.ipc.sem.md new file mode 100644 index 00000000000000..7bf7235e68ed90 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/kern.ipc.sem.md @@ -0,0 +1,127 @@ + + +# kern.ipc.sem + + + + + +Plugin: freebsd.plugin +Module: kern.ipc.sem + + + +## Overview + +Collect information about semaphore. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per kern.ipc.sem instance + +These metrics shows counters for semaphores on host. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.ipc_semaphores | semaphores | semaphores | +| system.ipc_semaphore_arrays | arrays | arrays | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization | +| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| kern.ipc.sem | Enable or disable semaphore metrics. | yes | no | + +
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/kern.ipc.shm.md b/collectors/freebsd.plugin/integrations/kern.ipc.shm.md new file mode 100644 index 00000000000000..1f10c1e6e5dfa1 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/kern.ipc.shm.md @@ -0,0 +1,121 @@ + + +# kern.ipc.shm + + + + + +Plugin: freebsd.plugin +Module: kern.ipc.shm + + + +## Overview + +Collect shared memory information. + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per kern.ipc.shm instance + +These metrics give status about current shared memory segments. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.ipc_shared_mem_segs | segments | segments | +| system.ipc_shared_mem_size | allocated | KiB | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| kern.ipc.shm | Enable or disable shared memory metric. | yes | no | + +
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md b/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md new file mode 100644 index 00000000000000..29562bc9a3c374 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md @@ -0,0 +1,124 @@ + + +# net.inet.icmp.stats + + + + + +Plugin: freebsd.plugin +Module: net.inet.icmp.stats + + + +## Overview + +Collect information about ICMP traffic. + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per net.inet.icmp.stats instance + +These metrics show ICMP connections statistics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ipv4.icmp | received, sent | packets/s | +| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s | +| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd:net.inet.icmp.stats]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| IPv4 ICMP packets | Enable or disable IPv4 ICMP packets metric. | yes | no | +| IPv4 ICMP error | Enable or disable IPv4 ICMP error metric. | yes | no | +| IPv4 ICMP messages | Enable or disable IPv4 ICMP messages metric. | yes | no | + +
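+
+For illustration, assuming the section and option names documented above, a `netdata.conf` fragment might look like this (values are examples, not recommendations):
+
+```ini
+[plugin:freebsd:net.inet.icmp.stats]
+    # all three options default to "yes" per the table above
+    IPv4 ICMP packets = yes
+    IPv4 ICMP error = yes
+    IPv4 ICMP messages = no
+```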
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md b/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md new file mode 100644 index 00000000000000..785767e8916d4c --- /dev/null +++ b/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md @@ -0,0 +1,126 @@ + + +# net.inet.ip.stats + + + + + +Plugin: freebsd.plugin +Module: net.inet.ip.stats + + + +## Overview + +Collect IP stats + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per net.inet.ip.stats instance + +These metrics show IPv4 connections statistics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ipv4.packets | received, sent, forwarded, delivered | packets/s | +| ipv4.fragsout | ok, failed, created | packets/s | +| ipv4.fragsin | ok, failed, all | packets/s | +| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd:net.inet.ip.stats]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| ipv4 packets | Enable or disable IPv4 packets metric. | yes | no | +| ipv4 fragments sent | Enable or disable IPv4 fragments sent metric. | yes | no | +| ipv4 fragments assembly | Enable or disable IPv4 fragments assembly metric. | yes | no | +| ipv4 errors | Enable or disable IPv4 errors metric. | yes | no | + +
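+
+A hypothetical snippet using the options above (names from this page, values chosen only to show the syntax):
+
+```ini
+[plugin:freebsd:net.inet.ip.stats]
+    ipv4 packets = yes
+    ipv4 errors = yes
+    # illustrative: fragment charts disabled
+    ipv4 fragments sent = no
+    ipv4 fragments assembly = no
+```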
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md b/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md new file mode 100644 index 00000000000000..5b414458025921 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md @@ -0,0 +1,125 @@ + + +# net.inet.tcp.states + + + + + +Plugin: freebsd.plugin +Module: net.inet.tcp.states + + + +## Overview + + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per net.inet.tcp.states instance + +A counter for TCP connections. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ipv4.tcpsock | connections | active connections | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_conn.conf) | ipv4.tcpsock | IPv4 TCP connections utilization | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| net.inet.tcp.states | Enable or disable TCP state metric. | yes | no | + +
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md b/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md new file mode 100644 index 00000000000000..be779740db477d --- /dev/null +++ b/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md @@ -0,0 +1,142 @@ + + +# net.inet.tcp.stats + + + + + +Plugin: freebsd.plugin +Module: net.inet.tcp.stats + + + +## Overview + +Collect overall information about TCP connections. + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per net.inet.tcp.stats instance + +These metrics show TCP connections statistics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ipv4.tcppackets | received, sent | packets/s | +| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s | +| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s | +| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger | connections/s | +| ipv4.tcpofo | inqueue | packets/s | +| ipv4.tcpsyncookies | received, sent, failed | packets/s | +| ipv4.tcplistenissues | overflows | packets/s | +| ipv4.ecnpkts | InCEPkts, InECT0Pkts, InECT1Pkts, OutECT0Pkts, OutECT1Pkts | packets/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ 1m_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last minute | +| [ 10s_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. | +| [ 1m_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last minute | +| [ 10s_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd:net.inet.tcp.stats]` section within that file. + +The file format is a modified INI syntax. 
The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| ipv4 TCP packets | Enable or disable ipv4 TCP packets metric. | yes | no |
+| ipv4 TCP errors | Enable or disable ipv4 TCP errors metric. | yes | no |
+| ipv4 TCP handshake issues | Enable or disable ipv4 TCP handshake issue metric. | yes | no |
+| TCP connection aborts | Enable or disable TCP connection aborts metric. | auto | no |
+| TCP out-of-order queue | Enable or disable TCP out-of-order queue metric. | auto | no |
+| TCP SYN cookies | Enable or disable TCP SYN cookies metric. | auto | no |
+| TCP listen issues | Enable or disable TCP listen issues metric. | auto | no |
+| ECN packets | Enable or disable ECN packets metric. | auto | no |
+

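+
+As a sketch only, the options above could be combined in `netdata.conf` like this; the section and option names are from this page, the values are placeholders:
+
+```ini
+[plugin:freebsd:net.inet.tcp.stats]
+    ipv4 TCP packets = yes
+    ipv4 TCP errors = yes
+    # "auto" keeps the default behavior documented above
+    TCP connection aborts = auto
+    TCP SYN cookies = auto
+    # illustrative: ECN chart switched off
+    ECN packets = no
+```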
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md b/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md new file mode 100644 index 00000000000000..d3da4045562098 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md @@ -0,0 +1,128 @@ + + +# net.inet.udp.stats + + + + + +Plugin: freebsd.plugin +Module: net.inet.udp.stats + + + +## Overview + +Collect information about UDP connections. + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per net.inet.udp.stats instance + +These metrics show UDP connections statistics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ipv4.udppackets | received, sent | packets/s | +| ipv4.udperrors | InErrors, NoPorts, RcvbufErrors, InCsumErrors, IgnoredMulti | events/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute | +| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd:net.inet.udp.stats]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| ipv4 UDP packets | Enable or disable ipv4 UDP packets metric. | yes | no | +| ipv4 UDP errors | Enable or disable ipv4 UDP errors metric. | yes | no | + +
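+
+A minimal illustrative fragment, assuming the section and option names above:
+
+```ini
+[plugin:freebsd:net.inet.udp.stats]
+    ipv4 UDP packets = yes
+    ipv4 UDP errors = yes
+```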
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md b/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md new file mode 100644 index 00000000000000..7344b79b3b3ee1 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md @@ -0,0 +1,132 @@ + + +# net.inet6.icmp6.stats + + + + + +Plugin: freebsd.plugin +Module: net.inet6.icmp6.stats + + + +## Overview + +Collect information abou IPv6 ICMP + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per net.inet6.icmp6.stats instance + +Collect IPv6 ICMP traffic statistics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ipv6.icmp | received, sent | messages/s | +| ipv6.icmpredir | received, sent | redirects/s | +| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s | +| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s | +| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s | +| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s | +| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd:net.inet6.icmp6.stats]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| icmp | Enable or disable ICMP metric. | auto | no | +| icmp redirects | Enable or disable ICMP redirects metric. | auto | no | +| icmp errors | Enable or disable ICMP errors metric. | auto | no | +| icmp echos | Enable or disable ICMP echos metric. | auto | no | +| icmp router | Enable or disable ICMP router metric. | auto | no | +| icmp neighbor | Enable or disable ICMP neighbor metric. | auto | no | +| icmp types | Enable or disable ICMP types metric. | auto | no | + +
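+
+For illustration, a few of the options above set explicitly (names from this page; all of them default to "auto"):
+
+```ini
+[plugin:freebsd:net.inet6.icmp6.stats]
+    icmp = auto
+    icmp errors = yes
+    # illustrative: redirect chart disabled
+    icmp redirects = no
+```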
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md b/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md new file mode 100644 index 00000000000000..d9128b529729a5 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md @@ -0,0 +1,126 @@ + + +# net.inet6.ip6.stats + + + + + +Plugin: freebsd.plugin +Module: net.inet6.ip6.stats + + + +## Overview + +Collect information abou IPv6 stats. + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per net.inet6.ip6.stats instance + +These metrics show general information about IPv6 connections. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ipv6.packets | received, sent, forwarded, delivers | packets/s | +| ipv6.fragsout | ok, failed, all | packets/s | +| ipv6.fragsin | ok, failed, timeout, all | packets/s | +| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd:net.inet6.ip6.stats]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| ipv6 packets | Enable or disable ipv6 packet metric. | auto | no | +| ipv6 fragments sent | Enable or disable ipv6 fragments sent metric. | auto | no | +| ipv6 fragments assembly | Enable or disable ipv6 fragments assembly metric. | auto | no | +| ipv6 errors | Enable or disable ipv6 errors metric. | auto | no | + +
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/net.isr.md b/collectors/freebsd.plugin/integrations/net.isr.md new file mode 100644 index 00000000000000..2d75b825a2fb52 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/net.isr.md @@ -0,0 +1,140 @@ + + +# net.isr + + + + + +Plugin: freebsd.plugin +Module: net.isr + + + +## Overview + +Collect information about system softnet stat. + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per net.isr instance + +These metrics show statistics about softnet stats. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s | + +### Per core + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cpu.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog | +| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) | +| [ 10min_netisr_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf) | system.softnet_stat | average number of drops in the last minute due to exceeded sysctl net.route.netisr_maxqlen (this can be a cause for dropped packets) | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd:net.isr]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| netisr | Enable or disable general vision about softnet stat metrics. | yes | no | +| netisr per core | Enable or disable softnet stat metric per core. | yes | no | + +
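+
+A hypothetical configuration keeping the aggregate chart but dropping the per-core breakdown (option names from the table above):
+
+```ini
+[plugin:freebsd:net.isr]
+    netisr = yes
+    netisr per core = no
+```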
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/system.ram.md b/collectors/freebsd.plugin/integrations/system.ram.md new file mode 100644 index 00000000000000..7d49749224bcf0 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/system.ram.md @@ -0,0 +1,129 @@ + + +# system.ram + + + + + +Plugin: freebsd.plugin +Module: system.ram + + + +## Overview + +Show information about system memory usage. + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per system.ram instance + +This metric shows RAM usage statistics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.ram | free, active, inactive, wired, cache, laundry, buffers | MiB | +| mem.available | avail | MiB | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | system.ram | system memory utilization | +| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | system.ram | system memory utilization | +| [ ram_available ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping | +| [ ram_available ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| system.ram | Enable or disable system RAM metric. | yes | no | + +
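+
+As an illustration, disabling only the RAM charts of the plugin (option name from the table above, the value is an example):
+
+```ini
+[plugin:freebsd]
+    system.ram = no
+```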
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/uptime.md b/collectors/freebsd.plugin/integrations/uptime.md new file mode 100644 index 00000000000000..e3f1db3f1d9dbf --- /dev/null +++ b/collectors/freebsd.plugin/integrations/uptime.md @@ -0,0 +1,120 @@ + + +# uptime + + + + + +Plugin: freebsd.plugin +Module: uptime + + + +## Overview + +Show period of time server is up. + +The plugin calls `clock_gettime` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per uptime instance + +How long the system is running. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.uptime | uptime | seconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| vm.loadavg | Enable or disable load average metric. | yes | no | + +
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/vm.loadavg.md b/collectors/freebsd.plugin/integrations/vm.loadavg.md new file mode 100644 index 00000000000000..88c47b7a4b893c --- /dev/null +++ b/collectors/freebsd.plugin/integrations/vm.loadavg.md @@ -0,0 +1,128 @@ + + +# vm.loadavg + + + + + +Plugin: freebsd.plugin +Module: vm.loadavg + + + +## Overview + +System Load Average + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per vm.loadavg instance + +Monitoring for number of threads running or waiting. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.load | load1, load5, load15 | load | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | number of active CPU cores in the system | +| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system fifteen-minute load average | +| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system five-minute load average | +| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system one-minute load average | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| vm.loadavg | Enable or disable load average metric. | yes | no | + +
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md b/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md new file mode 100644 index 00000000000000..c3e7466e91b2c3 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md @@ -0,0 +1,120 @@ + + +# vm.stats.sys.v_intr + + + + + +Plugin: freebsd.plugin +Module: vm.stats.sys.v_intr + + + +## Overview + +Device interrupts + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per vm.stats.sys.v_intr instance + +The metric show device interrupt frequency. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.dev_intr | interrupts | interrupts/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| vm.stats.sys.v_intr | Enable or disable device interrupts metric. | yes | no |
+

+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md b/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md new file mode 100644 index 00000000000000..ce914bb50c2b7b --- /dev/null +++ b/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md @@ -0,0 +1,120 @@ + + +# vm.stats.sys.v_soft + + + + + +Plugin: freebsd.plugin +Module: vm.stats.sys.v_soft + + + +## Overview + +Software Interrupt + +vm.stats.sys.v_soft + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per vm.stats.sys.v_soft instance + +This metric shows software interrupt frequency. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.soft_intr | interrupts | interrupts/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| vm.stats.sys.v_soft | Enable or disable software interrupts metric. | yes | no |
+

+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md b/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md new file mode 100644 index 00000000000000..cbcee311ff8540 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md @@ -0,0 +1,121 @@ + + +# vm.stats.sys.v_swtch + + + + + +Plugin: freebsd.plugin +Module: vm.stats.sys.v_swtch + + + +## Overview + +CPU context switch + +The plugin calls `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per vm.stats.sys.v_swtch instance + +The metric count the number of context switches happening on host. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.ctxt | switches | context switches/s | +| system.forks | started | processes/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| vm.stats.sys.v_swtch | Enable or disable CPU context switch metric. | yes | no | + +
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md b/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md new file mode 100644 index 00000000000000..19230dd56cc8d0 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md @@ -0,0 +1,120 @@ + + +# vm.stats.vm.v_pgfaults + + + + + +Plugin: freebsd.plugin +Module: vm.stats.vm.v_pgfaults + + + +## Overview + +Collect memory page faults events. + +The plugin calls `sysctl` function to collect necessary data + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per vm.stats.vm.v_pgfaults instance + +The number of page faults happened on host. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.pgfaults | memory, io_requiring, cow, cow_optimized, in_transit | page faults/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| vm.stats.vm.v_pgfaults | Enable or disable Memory page fault metric. | yes | no | + +
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md b/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md new file mode 100644 index 00000000000000..c6caaa68205add --- /dev/null +++ b/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md @@ -0,0 +1,125 @@ + + +# vm.stats.vm.v_swappgs + + + + + +Plugin: freebsd.plugin +Module: vm.stats.vm.v_swappgs + + + +## Overview + +The metric swap amount of data read from and written to SWAP. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per vm.stats.vm.v_swappgs instance + +This metric shows events happening on SWAP. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.swapio | io, out | KiB/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| vm.stats.vm.v_swappgs | Enable or disable information about SWAP I/O metric. | yes | no |
+

+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/vm.swap_info.md b/collectors/freebsd.plugin/integrations/vm.swap_info.md new file mode 100644 index 00000000000000..caa22b3dcd3a91 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/vm.swap_info.md @@ -0,0 +1,125 @@ + + +# vm.swap_info + + + + + +Plugin: freebsd.plugin +Module: vm.swap_info + + + +## Overview + +Collect information about SWAP memory. + +The plugin calls `sysctlnametomib` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per vm.swap_info instance + +This metric shows the SWAP usage. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.swap | free, used | MiB | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ used_swap ](https://github.com/netdata/netdata/blob/master/health/health.d/swap.conf) | mem.swap | swap memory utilization | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| vm.swap_info | Enable or disable SWAP metrics. | yes | no | + +
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/vm.vmtotal.md b/collectors/freebsd.plugin/integrations/vm.vmtotal.md new file mode 100644 index 00000000000000..f3f631af69ba6a --- /dev/null +++ b/collectors/freebsd.plugin/integrations/vm.vmtotal.md @@ -0,0 +1,129 @@ + + +# vm.vmtotal + + + + + +Plugin: freebsd.plugin +Module: vm.vmtotal + + + +## Overview + +Collect Virtual Memory information from host. + +The plugin calls function `sysctl` to collect data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per vm.vmtotal instance + +These metrics show an overall vision about processes running. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.active_processes | active | processes | +| system.processes | running, blocked | processes | +| mem.real | used | MiB | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ active_processes ](https://github.com/netdata/netdata/blob/master/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd:vm.vmtotal]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| enable total processes | Number of active processes. | yes | no |
+| processes running | Show number of processes running or blocked. | yes | no |
+| real memory | Memory used on host. | yes | no |
+

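+
+An illustrative fragment with the options above (values are examples only):
+
+```ini
+[plugin:freebsd:vm.vmtotal]
+    enable total processes = yes
+    processes running = yes
+    # illustrative: skip the real memory chart
+    real memory = no
+```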
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/integrations/zfs.md b/collectors/freebsd.plugin/integrations/zfs.md new file mode 100644 index 00000000000000..99f10026d302f2 --- /dev/null +++ b/collectors/freebsd.plugin/integrations/zfs.md @@ -0,0 +1,152 @@ + + +# zfs + + + + + +Plugin: freebsd.plugin +Module: zfs + + + +## Overview + +Collect metrics for ZFS filesystem + +The plugin uses `sysctl` function to collect necessary data. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per zfs instance + +These metrics show detailed information about ZFS filesystem. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| zfs.arc_size | arcsz, target, min, max | MiB | +| zfs.l2_size | actual, size | MiB | +| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s | +| zfs.bytes | read, write | KiB/s | +| zfs.hits | hits, misses | percentage | +| zfs.hits_rate | hits, misses | events/s | +| zfs.dhits | hits, misses | percentage | +| zfs.dhits_rate | hits, misses | events/s | +| zfs.phits | hits, misses | percentage | +| zfs.phits_rate | hits, misses | events/s | +| zfs.mhits | hits, misses | percentage | +| zfs.mhits_rate | hits, misses | events/s | +| zfs.l2hits | hits, misses | percentage | +| zfs.l2hits_rate | hits, misses | events/s | +| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s | +| zfs.arc_size_breakdown | recent, frequent | percentage | +| zfs.memory_ops | throttled | operations/s | +| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s | +| zfs.actual_hits | hits, misses | percentage | +| zfs.actual_hits_rate | hits, misses | events/s | +| zfs.demand_data_hits | hits, misses | percentage | +| zfs.demand_data_hits_rate | hits, misses | events/s | +| zfs.prefetch_data_hits | hits, misses | percentage | +| zfs.prefetch_data_hits_rate | hits, misses | events/s | +| zfs.hash_elements | current, max | elements | +| zfs.hash_chains | current, max | chains | +| zfs.trim_bytes | TRIMmed | bytes | +| zfs.trim_requests | successful, failed, unsupported | requests | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freebsd:zfs_arcstats]` section within that file. + +The file format is a modified INI syntax. 
The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| show zero charts | Enable or disable showing charts whose metrics are all zero. | no | no |
+
+
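+As an illustration only, a minimal sketch of how this option could be set in the `[plugin:freebsd:zfs_arcstats]` section of `netdata.conf` mentioned above (the value shown is an example, not a recommendation):
+
+```ini
+[plugin:freebsd:zfs_arcstats]
+    # show charts even while all of their metrics are zero
+    show zero charts = yes
+```
+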
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/freebsd.plugin/metadata.yaml b/collectors/freebsd.plugin/metadata.yaml index fca8982f7bbfef..36fba2430b3396 100644 --- a/collectors/freebsd.plugin/metadata.yaml +++ b/collectors/freebsd.plugin/metadata.yaml @@ -323,7 +323,7 @@ modules: link: "https://www.freebsd.org/" categories: - data-collection.freebsd - icon_filename: "freebsd.org" + icon_filename: "freebsd.svg" related_resources: integrations: list: [] @@ -2893,36 +2893,16 @@ modules: metric: net.net info: network interface ${label:device} current speed os: "*" - - name: 1m_received_traffic_overflow - link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf - metric: net.net - info: average inbound utilization for the network interface ${label:device} over the last minute - os: "linux" - - name: 1m_sent_traffic_overflow - link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf - metric: net.net - info: average outbound utilization for the network interface ${label:device} over the last minute - os: "linux" - name: inbound_packets_dropped_ratio link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf - metric: net.packets + metric: net.drops info: ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes - os: "linux" + os: "*" - name: outbound_packets_dropped_ratio link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf - metric: net.packets - info: ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes - os: "linux" - - name: wifi_inbound_packets_dropped_ratio - link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf - metric: net.packets - info: ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes - os: "linux" - - name: wifi_outbound_packets_dropped_ratio - link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf - metric: net.packets + metric: net.drops info: ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes - os: "linux" + os: "*" - name: 1m_received_packets_rate link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf metric: net.packets @@ -2931,9 +2911,7 @@ modules: - name: 10s_received_packets_storm link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf metric: net.packets - info: - ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over - the last minute + info: ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute os: "linux freebsd" - name: interface_inbound_errors link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf @@ -2945,16 +2923,6 @@ modules: metric: net.errors info: number of outbound errors for the network interface ${label:device} in the last 10 minutes os: "freebsd" - - name: inbound_packets_dropped - link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf - metric: net.drops - info: number of inbound dropped packets for the network interface ${label:device} in the last 10 minutes - os: "linux" - - name: outbound_packets_dropped - link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf - metric: net.drops - info: number of outbound 
dropped packets for the network interface ${label:device} in the last 10 minutes - os: "linux" metrics: folding: title: Metrics diff --git a/collectors/freeipmi.plugin/README.md b/collectors/freeipmi.plugin/README.md deleted file mode 100644 index 5a9fd93c030d01..00000000000000 --- a/collectors/freeipmi.plugin/README.md +++ /dev/null @@ -1,287 +0,0 @@ - - -# freeipmi.plugin - -Netdata has a [freeipmi](https://www.gnu.org/software/freeipmi/) plugin. - -> FreeIPMI provides in-band and out-of-band IPMI software based on the IPMI v1.5/2.0 specification. The IPMI -> specification defines a set of interfaces for platform management and is implemented by a number vendors for system -> management. The features of IPMI that most users will be interested in are sensor monitoring, system event monitoring, -> power control, and serial-over-LAN (SOL). - -## Installing the FreeIPMI plugin - -When using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named -`netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. It is not -installed automatically due to the large number of dependencies it requires. - -When using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though -you will still need to have FreeIPMI installed on your system to be able to use the plugin. - -When using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically -called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata. - -### Special Considerations - -Accessing IPMI requires root access, so the FreeIPMI plugin is automatically installed setuid root. - -FreeIPMI does not work correctly on IBM POWER systems, thus Netdata’s FreeIPMI plugin is not usable on such systems. - -If you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root -to initiailze IPMI settings so that the Netdata plugin works correctly. It should return information about available -seensors on the system. - -In some distributions `libipmimonitoring.pc` is located in a non-standard directory, which -can cause building the plugin to fail when building Netdata from source. In that case you -should find the file and link it to the standard pkg-config directory. Usually, running `sudo ln -s -/usr/lib/$(uname -m)-linux-gnu/pkgconfig/libipmimonitoring.pc/libipmimonitoring.pc /usr/lib/pkgconfig/libipmimonitoring.pc` -resolves this issue. - -## Metrics - -The plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. Depending -on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard. - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - -### global - -These metrics refer to the monitored host. - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|----------|:----------:|:------:| -| ipmi.sel | events | events | - -### sensor - -These metrics refer to the sensor. - -Labels: - -| Label | Description | -|-----------|-----------------------------------------------------------------------------------------------------------------| -| sensor | Sensor name. Same value as the "Name" column in the `ipmi-sensors` output. | -| type | Sensor type. Same value as the "Type" column in the `ipmi-sensors` output. 
| -| component | General sensor component. Identified by Netdata based on sensor name and type (e.g. System, Processor, Memory). | - -Metrics: - -| Metric | Dimensions | Unit | -|-----------------------------|:-----------------------------------:|:----------:| -| ipmi.sensor_state | nominal, critical, warning, unknown | state | -| ipmi.sensor_temperature_c | temperature | Celsius | -| ipmi.sensor_temperature_f | temperature | Fahrenheit | -| ipmi.sensor_voltage | voltage | Volts | -| ipmi.sensor_ampere | ampere | Amps | -| ipmi.sensor_fan_speed | rotations | RPM | -| ipmi.sensor_power | power | Watts | -| ipmi.sensor_reading_percent | percentage | % | - -## Alarms - -There are 2 alarms: - -- The sensor is in a warning or critical state. -- System Event Log (SEL) is non-empty. - -## Configuration - -The plugin supports a few options. To see them, run: - -```text -# ./freeipmi.plugin --help - - netdata freeipmi.plugin v1.40.0-137-gf162c25bd - Copyright (C) 2023 Netdata Inc. - Released under GNU General Public License v3 or later. - All rights reserved. - - This program is a data collector plugin for netdata. - - Available command line options: - - SECONDS data collection frequency - minimum: 5 - - debug enable verbose output - default: disabled - - sel - no-sel enable/disable SEL collection - default: enabled - - reread-sdr-cache re-read SDR cache on every iteration - default: disabled - - interpret-oem-data attempt to parse OEM data - default: disabled - - assume-system-event-record - tread illegal SEL events records as normal - default: disabled - - ignore-non-interpretable-sensors - do not read sensors that cannot be interpreted - default: disabled - - bridge-sensors bridge sensors not owned by the BMC - default: disabled - - shared-sensors enable shared sensors, if found - default: disabled - - no-discrete-reading do not read sensors that their event/reading type code is invalid - default: enabled - - ignore-scanning-disabled - Ignore the scanning bit and read sensors no matter what - default: disabled - - assume-bmc-owner assume the BMC is the sensor owner no matter what - (usually bridging is required too) - default: disabled - - hostname HOST - username USER - password PASS connect to remote IPMI host - default: local IPMI processor - - no-auth-code-check - noauthcodecheck don't check the authentication codes returned - - driver-type IPMIDRIVER - Specify the driver type to use instead of doing an auto selection. - The currently available outofband drivers are LAN and LAN_2_0, - which perform IPMI 1.5 and IPMI 2.0 respectively. - The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC. - - sdr-cache-dir PATH directory for SDR cache files - default: /tmp - - sensor-config-file FILE filename to read sensor configuration - default: system default - - sel-config-file FILE filename to read sel configuration - default: system default - - ignore N1,N2,N3,... sensor IDs to ignore - default: none - - ignore-status N1,N2,N3,... sensor IDs to ignore status (nominal/warning/critical) - default: none - - -v - -V - version print version and exit - - Linux kernel module for IPMI is CPU hungry. 
- On Linux run this to lower kipmiN CPU utilization: - # echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us - - or create: /etc/modprobe.d/ipmi.conf with these contents: - options ipmi_si kipmid_max_busy_us=10 - - For more information: - https://github.com/netdata/netdata/tree/master/collectors/freeipmi.plugin -``` - -You can set these options in `/etc/netdata/netdata.conf` at this section: - -``` -[plugin:freeipmi] - update every = 5 - command options = -``` - -Append to `command options =` the settings you need. The minimum `update every` is 5 (enforced internally by the -plugin). IPMI is slow and CPU hungry. So, once every 5 seconds is pretty acceptable. - -## Ignoring specific sensors - -Specific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to -be ignored at `ipmi-sensors-exclude-record-ids`. **However this file is not used by `libipmimonitoring`** (the library -used by Netdata's `freeipmi.plugin`). - -So, `freeipmi.plugin` supports the option `ignore` that accepts a comma separated list of sensor IDs to ignore. To -configure it, edit `/etc/netdata/netdata.conf` and set: - -``` -[plugin:freeipmi] - command options = ignore 1,2,3,4,... -``` - -To find the IDs to ignore, run the command `ipmimonitoring`. The first column is the wanted ID: - -``` -ID | Name | Type | State | Reading | Units | Event -1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK' -2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK' -3 | Avg Power | Current | Nominal | 100.00 | W | 'OK' -4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK' -5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK' -6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK' -7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK' -8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK' -9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK' -10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK' -11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK' -12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK' -13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK' -14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present' -15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present' -... -``` - -## Debugging - -You can run the plugin by hand: - -```sh -# become user netdata -sudo su -s /bin/sh netdata - -# run the plugin in debug mode -/usr/libexec/netdata/plugins.d/freeipmi.plugin 5 debug -``` - -You will get verbose output on what the plugin does. - -## kipmi0 CPU usage - -There have been reports that kipmi is showing increased CPU when the IPMI is queried. To lower the CPU consumption of -the system you can issue this command: - -```sh -echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us -``` - -You can also permanently set the above setting by creating the file `/etc/modprobe.d/ipmi.conf` with this content: - -```sh -# prevent kipmi from consuming 100% CPU -options ipmi_si kipmid_max_busy_us=10 -``` - -This instructs the kernel IPMI module to pause for a tick between checking IPMI. Querying IPMI will be a lot slower -now (e.g. several seconds for IPMI to respond), but `kipmi` will not use any noticeable CPU. You can also use a higher -number (this is the number of microseconds to poll IPMI for a response, before waiting for a tick). 
- -If you need to disable IPMI for Netdata, edit `/etc/netdata/netdata.conf` and set: - -``` -[plugins] - freeipmi = no -``` diff --git a/collectors/freeipmi.plugin/README.md b/collectors/freeipmi.plugin/README.md new file mode 120000 index 00000000000000..f55ebf73d5cd6a --- /dev/null +++ b/collectors/freeipmi.plugin/README.md @@ -0,0 +1 @@ +integrations/intelligent_platform_management_interface_ipmi.md \ No newline at end of file diff --git a/collectors/freeipmi.plugin/freeipmi_plugin.c b/collectors/freeipmi.plugin/freeipmi_plugin.c index 56a1c499892eb2..6ec9b698bf924a 100644 --- a/collectors/freeipmi.plugin/freeipmi_plugin.c +++ b/collectors/freeipmi.plugin/freeipmi_plugin.c @@ -22,6 +22,10 @@ #include "libnetdata/libnetdata.h" #include "libnetdata/required_dummies.h" +#define FREEIPMI_GLOBAL_FUNCTION_SENSORS() do { \ + fprintf(stdout, PLUGINSD_KEYWORD_FUNCTION " GLOBAL \"ipmi-sensors\" %d \"%s\"\n", 5, "Displays current sensor state and readings"); \ + } while(0) + // component names, based on our patterns #define NETDATA_SENSOR_COMPONENT_MEMORY_MODULE "Memory Module" #define NETDATA_SENSOR_COMPONENT_MEMORY "Memory" @@ -83,6 +87,12 @@ static void netdata_update_ipmi_sel_events_count(struct netdata_ipmi_state *stat /* Communication Configuration - Initialize accordingly */ +static netdata_mutex_t stdout_mutex = NETDATA_MUTEX_INITIALIZER; +static bool function_plugin_should_exit = false; + +int update_every = IPMI_SENSORS_MIN_UPDATE_EVERY; // this is the minimum update frequency +int update_every_sel = IPMI_SEL_MIN_UPDATE_EVERY; // this is the minimum update frequency for SEL events + /* Hostname, NULL for In-band communication, non-null for a hostname */ char *hostname = NULL; @@ -707,6 +717,8 @@ struct netdata_ipmi_state { } updates; }; +struct netdata_ipmi_state state = {0}; + // ---------------------------------------------------------------------------- // excluded record ids maintenance (both for sensor data and state) @@ -1146,7 +1158,7 @@ int netdata_ipmi_detect_speed_secs(struct ipmi_monitoring_ipmi_config *ipmi_conf successful++; if(unlikely(state->debug)) - fprintf(stderr, "%s: %s data collection speed was %llu usec\n", + fprintf(stderr, "%s: %s data collection speed was %"PRIu64" usec\n", program_name, netdata_collect_type_to_string(type), end - start); // add it to our total @@ -1297,6 +1309,7 @@ static size_t send_ipmi_sensor_metrics_to_netdata(struct netdata_ipmi_state *sta int update_every = (int)(state->sensors.freq_ut / USEC_PER_SEC); struct sensor *sn; + netdata_mutex_lock(&stdout_mutex); // generate the CHART/DIMENSION lines, if we have to dfe_start_reentrant(state->sensors.dict, sn) { if(unlikely(!sn->do_metric && !sn->do_state)) @@ -1307,7 +1320,7 @@ static size_t send_ipmi_sensor_metrics_to_netdata(struct netdata_ipmi_state *sta if(likely(sn->do_metric)) { if(unlikely(!is_sensor_updated(sn->last_collected_metric_ut, state->updates.now_ut, state->sensors.freq_ut))) { if(unlikely(state->debug)) - fprintf(stderr, "%s: %s() sensor '%s' metric is not UPDATED (last updated %llu, now %llu, freq %llu\n", + fprintf(stderr, "%s: %s() sensor '%s' metric is not UPDATED (last updated %"PRIu64", now %"PRIu64", freq %"PRIu64"\n", program_name, __FUNCTION__, sn->sensor_name, sn->last_collected_metric_ut, state->updates.now_ut, state->sensors.freq_ut); } else { @@ -1360,7 +1373,7 @@ static size_t send_ipmi_sensor_metrics_to_netdata(struct netdata_ipmi_state *sta if(likely(sn->do_state)) { if(unlikely(!is_sensor_updated(sn->last_collected_state_ut, state->updates.now_ut, 
state->sensors.freq_ut))) { if (unlikely(state->debug)) - fprintf(stderr, "%s: %s() sensor '%s' state is not UPDATED (last updated %llu, now %llu, freq %llu\n", + fprintf(stderr, "%s: %s() sensor '%s' state is not UPDATED (last updated %"PRIu64", now %"PRIu64", freq %"PRIu64"\n", program_name, __FUNCTION__, sn->sensor_name, sn->last_collected_state_ut, state->updates.now_ut, state->sensors.freq_ut); } else { @@ -1396,12 +1409,16 @@ static size_t send_ipmi_sensor_metrics_to_netdata(struct netdata_ipmi_state *sta } dfe_done(sn); + netdata_mutex_unlock(&stdout_mutex); + return total_sensors_sent; } static size_t send_ipmi_sel_metrics_to_netdata(struct netdata_ipmi_state *state) { static bool sel_chart_generated = false; + netdata_mutex_lock(&stdout_mutex); + if(likely(state->sel.status == ICS_RUNNING)) { if(unlikely(!sel_chart_generated)) { sel_chart_generated = true; @@ -1422,37 +1439,197 @@ static size_t send_ipmi_sel_metrics_to_netdata(struct netdata_ipmi_state *state) ); } + netdata_mutex_unlock(&stdout_mutex); + return state->sel.events; } // ---------------------------------------------------------------------------- -// main, command line arguments parsing -int main (int argc, char **argv) { - bool netdata_do_sel = IPMI_ENABLE_SEL_BY_DEFAULT; +static const char *get_sensor_state_string(struct sensor *sn) { + switch (sn->sensor_state) { + case IPMI_MONITORING_STATE_NOMINAL: + return "nominal"; + case IPMI_MONITORING_STATE_WARNING: + return "warning"; + case IPMI_MONITORING_STATE_CRITICAL: + return "critical"; + default: + return "unknown"; + } +} - stderror = stderr; - clocks_init(); +static const char *get_sensor_function_priority(struct sensor *sn) { + switch (sn->sensor_state) { + case IPMI_MONITORING_STATE_WARNING: + return "warning"; + case IPMI_MONITORING_STATE_CRITICAL: + return "critical"; + default: + return "normal"; + } +} - int update_every = IPMI_SENSORS_MIN_UPDATE_EVERY; // this is the minimum update frequency - int update_every_sel = IPMI_SEL_MIN_UPDATE_EVERY; // this is the minimum update frequency for SEL events - bool debug = false; +static void freeimi_function_sensors(const char *transaction, char *function __maybe_unused, int timeout __maybe_unused, bool *cancelled __maybe_unused) { + time_t expires = now_realtime_sec() + update_every; - // ------------------------------------------------------------------------ - // initialization of netdata plugin + BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS); + buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_string(wb, "type", "table"); + buffer_json_member_add_time_t(wb, "update_every", update_every); + buffer_json_member_add_string(wb, "help", "View IPMI sensor readings and its state"); + buffer_json_member_add_array(wb, "data"); + + struct sensor *sn; + dfe_start_reentrant(state.sensors.dict, sn) { + if (unlikely(!sn->do_metric && !sn->do_state)) + continue; + + double reading = NAN; + switch (sn->sensor_reading_type) { + case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32: + reading = (double)sn->sensor_reading.uint32_value; + break; + case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE: + reading = (double)(sn->sensor_reading.double_value); + break; + case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL: + reading = (double)sn->sensor_reading.bool_value; + break; + } + + buffer_json_add_array_item_array(wb); + + buffer_json_add_array_item_string(wb, sn->sensor_name); + 
buffer_json_add_array_item_string(wb, sn->type); + buffer_json_add_array_item_string(wb, sn->component); + buffer_json_add_array_item_double(wb, reading); + buffer_json_add_array_item_string(wb, sn->units); + buffer_json_add_array_item_string(wb, get_sensor_state_string(sn)); + + buffer_json_add_array_item_object(wb); + buffer_json_member_add_string(wb, "severity", get_sensor_function_priority(sn)); + buffer_json_object_close(wb); + + buffer_json_array_close(wb); + } + dfe_done(sn); - program_name = "freeipmi.plugin"; + buffer_json_array_close(wb); // data + buffer_json_member_add_object(wb, "columns"); + { + size_t field_id = 0; + + buffer_rrdf_table_add_field(wb, field_id++, "Sensor", "Sensor Name", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY | RRDF_FIELD_OPTS_FULL_WIDTH, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Type", "Sensor Type", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Component", "Sensor Component", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Reading", "Sensor Current Reading", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 2, NULL, 0, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Units", "Sensor Reading Units", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "State", "Sensor State", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + buffer_rrdf_table_add_field( + wb, field_id++, + "rowOptions", "rowOptions", + RRDF_FIELD_TYPE_NONE, + RRDR_FIELD_VISUAL_ROW_OPTIONS, + RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, + RRDF_FIELD_SORT_FIXED, + NULL, + RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_DUMMY, + NULL); + } + + buffer_json_object_close(wb); // columns + buffer_json_member_add_string(wb, "default_sort_column", "Type"); - // disable syslog - error_log_syslog = 0; + buffer_json_member_add_object(wb, "charts"); + { + buffer_json_member_add_object(wb, "Sensors"); + { + buffer_json_member_add_string(wb, "name", "Sensors"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Sensor"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); // charts + + buffer_json_member_add_array(wb, 
"default_charts"); + { + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "Sensors"); + buffer_json_add_array_item_string(wb, "Component"); + buffer_json_array_close(wb); + + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "Sensors"); + buffer_json_add_array_item_string(wb, "State"); + buffer_json_array_close(wb); + } + buffer_json_array_close(wb); + + buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1); + buffer_json_finalize(wb); + + pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", expires, wb); + + buffer_free(wb); +} + +// ---------------------------------------------------------------------------- +// main, command line arguments parsing - // set errors flood protection to 100 logs per hour - error_log_errors_per_period = 100; - error_log_throttle_period = 3600; +static void plugin_exit(int code) { + fflush(stdout); + function_plugin_should_exit = true; + exit(code); +} - // initialize the threads +int main (int argc, char **argv) { + clocks_init(); + nd_log_initialize_for_external_plugins("freeipmi.plugin"); netdata_threads_init_for_external_plugins(0); // set the default threads stack size here + bool netdata_do_sel = IPMI_ENABLE_SEL_BY_DEFAULT; + + bool debug = false; + // ------------------------------------------------------------------------ // parse command line parameters @@ -1726,7 +1903,7 @@ int main (int argc, char **argv) { errno = 0; if(freq_s && freq_s < update_every) - collector_error("%s(): update frequency %d seconds is too small for IPMI. Using %d.", + collector_info("%s(): update frequency %d seconds is too small for IPMI. Using %d.", __FUNCTION__, freq_s, update_every); update_every = freq_s = MAX(freq_s, update_every); @@ -1799,16 +1976,17 @@ int main (int argc, char **argv) { heartbeat_t hb; heartbeat_init(&hb); + for(iteration = 0; 1 ; iteration++) { usec_t dt = heartbeat_next(&hb, step); if (!tty) { + netdata_mutex_lock(&stdout_mutex); fprintf(stdout, "\n"); // keepalive to avoid parser read timeout (2 minutes) during ipmi_detect_speed_secs() fflush(stdout); + netdata_mutex_unlock(&stdout_mutex); } - struct netdata_ipmi_state state = {0 }; - spinlock_lock(&sensors_data.spinlock); state.sensors = sensors_data.state.sensors; spinlock_unlock(&sensors_data.spinlock); @@ -1825,8 +2003,7 @@ int main (int argc, char **argv) { __FUNCTION__, (size_t)((now_monotonic_usec() - state.sensors.last_iteration_ut) / USEC_PER_SEC)); fprintf(stdout, "EXIT\n"); - fflush(stdout); - exit(0); + plugin_exit(0); } break; @@ -1836,14 +2013,12 @@ int main (int argc, char **argv) { case ICS_INIT_FAILED: collector_error("%s(): sensors failed to initialize. Calling DISABLE.", __FUNCTION__); fprintf(stdout, "DISABLE\n"); - fflush(stdout); - exit(0); + plugin_exit(0); case ICS_FAILED: collector_error("%s(): sensors fails repeatedly to collect metrics. 
Exiting to restart.", __FUNCTION__); fprintf(stdout, "EXIT\n"); - fflush(stdout); - exit(0); + plugin_exit(0); } if(netdata_do_sel) { @@ -1863,6 +2038,16 @@ int main (int argc, char **argv) { if(unlikely(debug)) fprintf(stderr, "%s: calling send_ipmi_sensor_metrics_to_netdata()\n", program_name); + static bool add_func_sensors = true; + if (add_func_sensors) { + add_func_sensors = false; + struct functions_evloop_globals *wg = + functions_evloop_init(1, "FREEIPMI", &stdout_mutex, &function_plugin_should_exit); + functions_evloop_add_function( + wg, "ipmi-sensors", freeimi_function_sensors, PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT); + FREEIPMI_GLOBAL_FUNCTION_SENSORS(); + } + state.updates.now_ut = now_monotonic_usec(); send_ipmi_sensor_metrics_to_netdata(&state); @@ -1870,7 +2055,7 @@ int main (int argc, char **argv) { send_ipmi_sel_metrics_to_netdata(&state); if(unlikely(debug)) - fprintf(stderr, "%s: iteration %zu, dt %llu usec, sensors ever collected %zu, sensors last collected %zu \n" + fprintf(stderr, "%s: iteration %zu, dt %"PRIu64" usec, sensors ever collected %zu, sensors last collected %zu \n" , program_name , iteration , dt @@ -1878,6 +2063,8 @@ int main (int argc, char **argv) { , state.sensors.collected ); + netdata_mutex_lock(&stdout_mutex); + if (!global_chart_created) { global_chart_created = true; @@ -1897,10 +2084,11 @@ int main (int argc, char **argv) { if (now_monotonic_sec() - started_t > IPMI_RESTART_EVERY_SECONDS) { collector_info("%s(): reached my lifetime expectancy. Exiting to restart.", __FUNCTION__); fprintf(stdout, "EXIT\n"); - fflush(stdout); - exit(0); + plugin_exit(0); } fflush(stdout); + + netdata_mutex_unlock(&stdout_mutex); } } diff --git a/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md b/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md new file mode 100644 index 00000000000000..c0293fc37d3239 --- /dev/null +++ b/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md @@ -0,0 +1,275 @@ + + +# Intelligent Platform Management Interface (IPMI) + + + + + +Plugin: freeipmi.plugin +Module: freeipmi + + + +## Overview + +"Monitor enterprise server sensor readings, event log entries, and hardware statuses to ensure reliable server operations." + + +The plugin uses open source library IPMImonitoring to communicate with sensors. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid. + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +Linux kernel module for IPMI can create big overhead. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + +The plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard. + + +### Per Intelligent Platform Management Interface (IPMI) instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ipmi.sel | events | events | + +### Per sensor + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| sensor | The sensor name | +| type | One of 45 recognized sensor types (Battery, Voltage...) | +| component | One of 25 recognized components (Processor, Peripheral). | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ipmi.sensor_state | nominal, critical, warning, unknown | state | +| ipmi.sensor_temperature_c | temperature | Celsius | +| ipmi.sensor_temperature_f | temperature | Fahrenheit | +| ipmi.sensor_voltage | voltage | Volts | +| ipmi.sensor_ampere | ampere | Amps | +| ipmi.sensor_fan_speed | rotations | RPM | +| ipmi.sensor_power | power | Watts | +| ipmi.sensor_reading_percent | percentage | % | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ ipmi_sensor_state ](https://github.com/netdata/netdata/blob/master/health/health.d/ipmi.conf) | ipmi.sensor_state | IPMI sensor ${label:sensor} (${label:component}) state | + + +## Setup + +### Prerequisites + +#### Install freeipmi.plugin + +When using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named `netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. It is not installed automatically due to the large number of dependencies it requires. + +When using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though you will still need to have FreeIPMI installed on your system to be able to use the plugin. + +When using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata. + + +#### Preliminary actions + +If you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root +to initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system. + + + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:freeipmi]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + +The configuration is set using command line options: + +``` +# netdata.conf +[plugin:freeipmi] + command options = opt1 opt2 ... optN +``` + +To display a help message listing the available command line options: + +```bash +./usr/libexec/netdata/plugins.d/freeipmi.plugin --help +``` + + +
Command options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| SECONDS | Data collection frequency. | | no | +| debug | Enable verbose output. | disabled | no | +| no-sel | Disable System Event Log (SEL) collection. | disabled | no | +| reread-sdr-cache | Re-read SDR cache on every iteration. | disabled | no | +| interpret-oem-data | Attempt to parse OEM data. | disabled | no | +| assume-system-event-record | treat illegal SEL events records as normal. | disabled | no | +| ignore-non-interpretable-sensors | Do not read sensors that cannot be interpreted. | disabled | no | +| bridge-sensors | Bridge sensors not owned by the BMC. | disabled | no | +| shared-sensors | Enable shared sensors if found. | disabled | no | +| no-discrete-reading | Do not read sensors if their event/reading type code is invalid. | enabled | no | +| ignore-scanning-disabled | Ignore the scanning bit and read sensors no matter what. | disabled | no | +| assume-bmc-owner | Assume the BMC is the sensor owner no matter what (usually bridging is required too). | disabled | no | +| hostname HOST | Remote IPMI hostname or IP address. | local | no | +| username USER | Username that will be used when connecting to the remote host. | | no | +| password PASS | Password that will be used when connecting to the remote host. | | no | +| noauthcodecheck / no-auth-code-check | Don't check the authentication codes returned. | | no | +| driver-type IPMIDRIVER | Specify the driver type to use instead of doing an auto selection. The currently available outofband drivers are LAN and LAN_2_0, which perform IPMI 1.5 and IPMI 2.0 respectively. The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC. | | no | +| sdr-cache-dir PATH | SDR cache files directory. | /tmp | no | +| sensor-config-file FILE | Sensors configuration filename. | system default | no | +| sel-config-file FILE | SEL configuration filename. | system default | no | +| ignore N1,N2,N3,... | Sensor IDs to ignore. | | no | +| ignore-status N1,N2,N3,... | Sensor IDs to ignore status (nominal/warning/critical). | | no | +| -v | Print version and exit. | | no | +| --help | Print usage message and exit. | | no | + +
+ +#### Examples + +##### Decrease data collection frequency + +Basic example decreasing data collection frequency. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU hungry. So, once every 5 seconds is pretty acceptable. + +```yaml +[plugin:freeipmi] + update every = 10 + +``` +##### Disable SEL collection + +Append to `command options =` the options you need. + +
Config + +```yaml +[plugin:freeipmi] + command options = no-sel + +``` +
+ +##### Ignore specific sensors + +Specific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`. + +**However this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`). + +To find the IDs to ignore, run the command `ipmimonitoring`. The first column is the wanted ID: + +ID | Name | Type | State | Reading | Units | Event +1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK' +2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK' +3 | Avg Power | Current | Nominal | 100.00 | W | 'OK' +4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK' +5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK' +6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK' +7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK' +8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK' +9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK' +10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK' +11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK' +12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK' +13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK' +14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present' +15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present' +... + +`freeipmi.plugin` supports the option `ignore` that accepts a comma separated list of sensor IDs to ignore. To configure it set on `netdata.conf`: + + +
Config + +```yaml +[plugin:freeipmi] + command options = ignore 1,2,3,4,... + +``` +
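+##### Monitor a remote IPMI host
+
+A sketch only, combining the `hostname`, `username` and `password` options documented above; the address and credentials below are placeholders, not working values.
+
+```yaml
+[plugin:freeipmi]
+  command options = hostname 192.0.2.1 username admin password mysecret
+```
+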
+ + + +## Troubleshooting + +### Debug Mode + + + +### kimpi0 CPU usage + + + + diff --git a/collectors/freeipmi.plugin/metadata.yaml b/collectors/freeipmi.plugin/metadata.yaml index 9540410bf46c44..f8c75c2cb144f3 100644 --- a/collectors/freeipmi.plugin/metadata.yaml +++ b/collectors/freeipmi.plugin/metadata.yaml @@ -2,7 +2,7 @@ plugin_name: freeipmi.plugin modules: - meta: plugin_name: freeipmi.plugin - module_name: sensors + module_name: freeipmi monitored_instance: name: Intelligent Platform Management Interface (IPMI) link: "https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface" @@ -42,34 +42,225 @@ modules: setup: prerequisites: list: - - title: Preliminary actions + - title: Install freeipmi.plugin description: | - If you have not previously used IPMI on your system, you will probably need to run the ipmimonitoring command as root to initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system. + When using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named `netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. It is not installed automatically due to the large number of dependencies it requires. + + When using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though you will still need to have FreeIPMI installed on your system to be able to use the plugin. - In some distributions libipmimonitoring.pc is located in a non-standard directory, which can cause building the plugin to fail when building Netdata from source. In that case you should find the file and link it to the standard pkg-config directory. Usually, running sudo ln -s /usr/lib/$(uname -m)-linux-gnu/pkgconfig/libipmimonitoring.pc/libipmimonitoring.pc /usr/lib/pkgconfig/libipmimonitoring.pc resolves this issue. + When using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata. + - title: Preliminary actions + description: | + If you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root + to initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system. configuration: file: name: "netdata.conf" - section_name: '[plugin:freeipmi]' - description: "This is netdata main configuration file" + section_name: "[plugin:freeipmi]" options: - description: "This tool receives command line options that are visible when user run: `./usr/libexec/netdata/plugins.d/freeipmi.plugin --help`" + description: | + The configuration is set using command line options: + + ``` + # netdata.conf + [plugin:freeipmi] + command options = opt1 opt2 ... optN + ``` + + To display a help message listing the available command line options: + + ```bash + ./usr/libexec/netdata/plugins.d/freeipmi.plugin --help + ``` folding: - title: "Config options" + title: "Command options" enabled: true list: - - name: command options - description: Variable used to pass arguments for the plugin. - default_value: 1 + - name: SECONDS + description: Data collection frequency. + default_value: "" + required: false + - name: debug + description: Enable verbose output. 
+ default_value: disabled + required: false + - name: no-sel + description: Disable System Event Log (SEL) collection. + default_value: disabled + required: false + - name: reread-sdr-cache + description: Re-read SDR cache on every iteration. + default_value: disabled + required: false + - name: interpret-oem-data + description: Attempt to parse OEM data. + default_value: disabled + required: false + - name: assume-system-event-record + description: treat illegal SEL events records as normal. + default_value: disabled + required: false + - name: ignore-non-interpretable-sensors + description: Do not read sensors that cannot be interpreted. + default_value: disabled + required: false + - name: bridge-sensors + description: Bridge sensors not owned by the BMC. + default_value: disabled + required: false + - name: shared-sensors + description: Enable shared sensors if found. + default_value: disabled + required: false + - name: no-discrete-reading + description: Do not read sensors if their event/reading type code is invalid. + default_value: enabled + required: false + - name: ignore-scanning-disabled + description: Ignore the scanning bit and read sensors no matter what. + default_value: disabled + required: false + - name: assume-bmc-owner + description: Assume the BMC is the sensor owner no matter what (usually bridging is required too). + default_value: disabled + required: false + - name: hostname HOST + description: Remote IPMI hostname or IP address. + default_value: local + required: false + - name: username USER + description: Username that will be used when connecting to the remote host. + default_value: "" + required: false + - name: password PASS + description: Password that will be used when connecting to the remote host. + default_value: "" + required: false + - name: noauthcodecheck / no-auth-code-check + description: Don't check the authentication codes returned. + default_value: "" + required: false + - name: driver-type IPMIDRIVER + description: Specify the driver type to use instead of doing an auto selection. The currently available outofband drivers are LAN and LAN_2_0, which perform IPMI 1.5 and IPMI 2.0 respectively. The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC. + default_value: "" + required: false + - name: sdr-cache-dir PATH + description: SDR cache files directory. + default_value: /tmp + required: false + - name: sensor-config-file FILE + description: Sensors configuration filename. + default_value: system default + required: false + - name: sel-config-file FILE + description: SEL configuration filename. + default_value: system default + required: false + - name: ignore N1,N2,N3,... + description: Sensor IDs to ignore. + default_value: "" + required: false + - name: ignore-status N1,N2,N3,... + description: Sensor IDs to ignore status (nominal/warning/critical). + default_value: "" + required: false + - name: -v + description: Print version and exit. + default_value: "" + required: false + - name: --help + description: Print usage message and exit. + default_value: "" required: false examples: folding: enabled: true - title: "" - list: [] + title: "Config" + list: + - name: Decrease data collection frequency + description: Basic example decreasing data collection frequency. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU hungry. So, once every 5 seconds is pretty acceptable. 
+ config: | + [plugin:freeipmi] + update every = 10 + folding: + enabled: false + - name: Disable SEL collection + description: Append to `command options =` the options you need. + config: | + [plugin:freeipmi] + command options = no-sel + - name: Ignore specific sensors + description: | + Specific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`. + + **However this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`). + + To find the IDs to ignore, run the command `ipmimonitoring`. The first column is the wanted ID: + + ID | Name | Type | State | Reading | Units | Event + 1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK' + 2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK' + 3 | Avg Power | Current | Nominal | 100.00 | W | 'OK' + 4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK' + 5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK' + 6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK' + 7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK' + 8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK' + 9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK' + 10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK' + 11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK' + 12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK' + 13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK' + 14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present' + 15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present' + ... + + `freeipmi.plugin` supports the option `ignore` that accepts a comma separated list of sensor IDs to ignore. To configure it set on `netdata.conf`: + config: | + [plugin:freeipmi] + command options = ignore 1,2,3,4,... troubleshooting: problems: - list: [] + list: + - name: Debug Mode + description: | + You can run `freeipmi.plugin` with the debug option enabled, to troubleshoot issues with it. The output should give you clues as to why the collector isn't working. + + - Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + + - Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + + - Run the `freeipmi.plugin` in debug mode: + + ```bash + ./freeipmi.plugin 5 debug + ``` + - name: kimpi0 CPU usage + description: | + There have been reports that kipmi is showing increased CPU when the IPMI is queried. To lower the CPU consumption of the system you can issue this command: + + ```sh + echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us + ``` + + You can also permanently set the above setting by creating the file `/etc/modprobe.d/ipmi.conf` with this content: + + ```sh + # prevent kipmi from consuming 100% CPU + options ipmi_si kipmid_max_busy_us=10 + ``` + + This instructs the kernel IPMI module to pause for a tick between checking IPMI. Querying IPMI will be a lot slower now (e.g. several seconds for IPMI to respond), but `kipmi` will not use any noticeable CPU. + + You can also use a higher number (this is the number of microseconds to poll IPMI for a response, before waiting for a tick). 
alerts: - name: ipmi_sensor_state link: https://github.com/netdata/netdata/blob/master/health/health.d/ipmi.conf @@ -79,9 +270,20 @@ modules: folding: title: Metrics enabled: false - description: "" + description: | + The plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard. availability: [] scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: ipmi.sel + description: IPMI Events + unit: "events" + chart_type: area + dimensions: + - name: events - name: sensor description: "" labels: @@ -92,12 +294,6 @@ modules: - name: component description: One of 25 recognized components (Processor, Peripheral). metrics: - - name: ipmi.sel - description: IPMI Events - unit: "events" - chart_type: area - dimensions: - - name: events - name: ipmi.sensor_state description: IPMI Sensors State unit: "state" diff --git a/collectors/idlejitter.plugin/README.md b/collectors/idlejitter.plugin/README.md deleted file mode 100644 index 9474a2b97f3843..00000000000000 --- a/collectors/idlejitter.plugin/README.md +++ /dev/null @@ -1,36 +0,0 @@ - - -# idlejitter.plugin - -Idle jitter is a measure of delays in timing for user processes caused by scheduling limitations. - -## How Netdata measures idle jitter - -A thread is spawned that requests to sleep for 20000 microseconds (20ms). -When the system wakes it up, it measures how many microseconds have passed. -The difference between the requested and the actual duration of the sleep, is the idle jitter. -This is done at most 50 times per second, to ensure we have a good average. - -This number is useful: - -- In multimedia-streaming environments such as VoIP gateways, where the CPU jitter can affect the quality of the service. -- On time servers and other systems that require very precise timing, where CPU jitter can actively interfere with timing precision. -- On gaming systems, where CPU jitter can cause frame drops and stuttering. -- In cloud infrastructure that can pause the VM or container for a small duration to perform operations at the host. - -## Charts - -idlejitter.plugin generates the idlejitter chart which measures CPU idle jitter in milliseconds lost per second. - -## Configuration - -This chart is available without any configuration. - - diff --git a/collectors/idlejitter.plugin/README.md b/collectors/idlejitter.plugin/README.md new file mode 120000 index 00000000000000..1ce460b6262442 --- /dev/null +++ b/collectors/idlejitter.plugin/README.md @@ -0,0 +1 @@ +integrations/idle_os_jitter.md \ No newline at end of file diff --git a/collectors/idlejitter.plugin/integrations/idle_os_jitter.md b/collectors/idlejitter.plugin/integrations/idle_os_jitter.md new file mode 100644 index 00000000000000..44463f6f574225 --- /dev/null +++ b/collectors/idlejitter.plugin/integrations/idle_os_jitter.md @@ -0,0 +1,118 @@ + + +# Idle OS Jitter + + + + + +Plugin: idlejitter.plugin +Module: idlejitter.plugin + + + +## Overview + +Monitor delays in timing for user processes caused by scheduling limitations to optimize the system to run latency sensitive applications with minimal jitter, improving consistency and quality of service. + + +A thread is spawned that requests to sleep for fixed amount of time. When the system wakes it up, it measures how many microseconds have passed. 
The difference between the requested and the actual duration of the sleep, is the idle jitter. This is done dozens of times per second to ensure we have a representative sample. + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +This integration will run by default on all supported systems. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Idle OS Jitter instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.idlejitter | min, max, average | microseconds lost/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + +This integration only supports a single configuration option, and most users will not need to change it. + + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in miliseconds. | 20 | no | + +#### Examples +There are no configuration examples. + + diff --git a/collectors/ioping.plugin/README.md b/collectors/ioping.plugin/README.md deleted file mode 100644 index 73fc35fb00b8d4..00000000000000 --- a/collectors/ioping.plugin/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# Monitor I/O latency using ioping.plugin - -The ioping plugin supports monitoring I/O latency for any number of directories/files/devices, by pinging them with `ioping`. - -A recent version of `ioping` is required (one that supports option `-N`). -The supplied plugin can install it, by running: - -```sh -/usr/libexec/netdata/plugins.d/ioping.plugin install -``` - -The `-e` option can be supplied to indicate where the Netdata environment file is installed. The default path is `/etc/netdata/.environment`. - -The above will download, build and install the right version as `/usr/libexec/netdata/plugins.d/ioping`. 
- -Then you need to edit `/etc/netdata/ioping.conf` (to edit it on your system run -`/etc/netdata/edit-config ioping.conf`) like this: - -```sh -# uncomment the following line - it should already be there -ioping="/usr/libexec/netdata/plugins.d/ioping" - -# set here the directory/file/device, you need to ping -destination="destination" - -# override the chart update frequency - the default is inherited from Netdata -update_every="1s" - -# the request size in bytes to ping the destination -request_size="4k" - -# other iping options - these are the defaults -ioping_opts="-T 1000000 -R" -``` - -## alarms - -Netdata will automatically attach a few alarms for each host. -Check the [latest versions of the ioping alarms](https://raw.githubusercontent.com/netdata/netdata/master/health/health.d/ioping.conf) - -## Multiple ioping Plugins With Different Settings - -You may need to run multiple ioping plugins with different settings or different end points. -For example, you may need to ping one destination once per 10 seconds, and another once per second. - -Netdata allows you to add as many `ioping` plugins as you like. - -Follow this procedure: - -**1. Create New ioping Configuration File** - -```sh -# Step Into Configuration Directory -cd /etc/netdata - -# Copy Original ioping Configuration File To New Configuration File -cp ioping.conf ioping2.conf -``` - -Edit `ioping2.conf` and set the settings and the destination you need for the seconds instance. - -**2. Soft Link Original ioping Plugin to New Plugin File** - -```sh -# Become root (If The Step Step Is Performed As Non-Root User) -sudo su - -# Step Into The Plugins Directory -cd /usr/libexec/netdata/plugins.d - -# Link ioping.plugin to ioping2.plugin -ln -s ioping.plugin ioping2.plugin -``` - -That's it. Netdata will detect the new plugin and start it. - -You can name the new plugin any name you like. -Just make sure the plugin and the configuration file have the same name. - - diff --git a/collectors/ioping.plugin/README.md b/collectors/ioping.plugin/README.md new file mode 120000 index 00000000000000..cb660f13b39f05 --- /dev/null +++ b/collectors/ioping.plugin/README.md @@ -0,0 +1 @@ +integrations/ioping.md \ No newline at end of file diff --git a/collectors/ioping.plugin/integrations/ioping.md b/collectors/ioping.plugin/integrations/ioping.md new file mode 100644 index 00000000000000..39a07ed62e10a9 --- /dev/null +++ b/collectors/ioping.plugin/integrations/ioping.md @@ -0,0 +1,133 @@ + + +# IOPing + + + + + +Plugin: ioping.plugin +Module: ioping.plugin + + + +## Overview + +Monitor IOPing metrics for efficient disk I/O latency tracking. Keep track of read/write speeds, latency, and error rates for optimized disk operations. + +Plugin uses `ioping` command. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per disk + + + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ioping.latency | latency | microseconds | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ ioping_disk_latency ](https://github.com/netdata/netdata/blob/master/health/health.d/ioping.conf) | ioping.latency | average I/O latency over the last 10 seconds | + + +## Setup + +### Prerequisites + +#### Install ioping + +You can install the command by passing the argument `install` to the plugin (`/usr/libexec/netdata/plugins.d/ioping.plugin install`). + + + +### Configuration + +#### File + +The configuration file name for this integration is `ioping.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config ioping.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1s | no | +| destination | The directory/file/device to ioping. | | yes | +| request_size | The request size in bytes to ioping the destination (symbolic modifiers are supported) | 4k | no | +| ioping_opts | Options passed to `ioping` commands. | -T 1000000 | no | + +
+ +#### Examples + +##### Basic Configuration + +This example has the minimum configuration necessary to have the plugin running. + +
Config + +```yaml +destination="/dev/sda" + +``` +
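+
+##### Custom request size and frequency
+
+A sketch of a fuller configuration exercising the other documented options. The destination path and the values shown are illustrative assumptions, not defaults shipped with the plugin.
+
+```yaml
+# directory to ping (illustrative path)
+destination="/var/cache/netdata"
+# collect a sample every 5 seconds instead of every second
+update_every="5s"
+# use 8 KiB requests instead of the default 4k
+request_size="8k"
+# keep the plugin's default extra ioping options
+ioping_opts="-T 1000000"
+```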
+ + diff --git a/collectors/ioping.plugin/ioping.plugin.in b/collectors/ioping.plugin/ioping.plugin.in index 1d79eb70646b66..171e384dbf3b37 100755 --- a/collectors/ioping.plugin/ioping.plugin.in +++ b/collectors/ioping.plugin/ioping.plugin.in @@ -9,7 +9,7 @@ # This plugin requires a latest version of ioping. # You can compile it from source, by running me with option: install -export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" +export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin:@sbindir_POST@" export LC_ALL=C usage="$(basename "$0") [install] [-h] [-e] @@ -93,42 +93,103 @@ if [ "$INSTALL" == "1" ] fi # ----------------------------------------------------------------------------- +# logging PROGRAM_NAME="$(basename "${0}")" -logdate() { - date "+%Y-%m-%d %H:%M:%S" +# these should be the same with syslog() priorities +NDLP_EMERG=0 # system is unusable +NDLP_ALERT=1 # action must be taken immediately +NDLP_CRIT=2 # critical conditions +NDLP_ERR=3 # error conditions +NDLP_WARN=4 # warning conditions +NDLP_NOTICE=5 # normal but significant condition +NDLP_INFO=6 # informational +NDLP_DEBUG=7 # debug-level messages + +# the max (numerically) log level we will log +LOG_LEVEL=$NDLP_INFO + +set_log_min_priority() { + case "${NETDATA_LOG_LEVEL,,}" in + "emerg" | "emergency") + LOG_LEVEL=$NDLP_EMERG + ;; + + "alert") + LOG_LEVEL=$NDLP_ALERT + ;; + + "crit" | "critical") + LOG_LEVEL=$NDLP_CRIT + ;; + + "err" | "error") + LOG_LEVEL=$NDLP_ERR + ;; + + "warn" | "warning") + LOG_LEVEL=$NDLP_WARN + ;; + + "notice") + LOG_LEVEL=$NDLP_NOTICE + ;; + + "info") + LOG_LEVEL=$NDLP_INFO + ;; + + "debug") + LOG_LEVEL=$NDLP_DEBUG + ;; + esac } +set_log_min_priority + log() { - local status="${1}" - shift + local level="${1}" + shift 1 + + [[ -n "$level" && -n "$LOG_LEVEL" && "$level" -gt "$LOG_LEVEL" ]] && return - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" + systemd-cat-native --log-as-netdata --newline="--NEWLINE--" <this command will tail all `*.log` files in `/var/log/nginx/`. We use `-F` instead of `-f` to ensure that files will still be tailed after log rotation. +2. `log2joural` is a Netdata program. It reads log entries and extracts fields, according to the PCRE2 pattern it accepts. It can also apply some basic operations on the fields, like injecting new fields or duplicating existing ones or rewriting their values. The output of `log2journal` is in Systemd Journal Export Format, and it looks like this: + ```bash + KEY1=VALUE1 # << start of the first log line + KEY2=VALUE2 + # << log lines separator + KEY1=VALUE1 # << start of the second log line + KEY2=VALUE2 + ``` +3. `systemd-cat-native` is a Netdata program. I can send the logs to a local `systemd-journald` (journal namespaces supported), or to a remote `systemd-journal-remote`. + + +## Processing pipeline + +The sequence of processing in Netdata's `log2journal` is designed to methodically transform and prepare log data for export in the systemd Journal Export Format. This transformation occurs through a pipeline of stages, each with a specific role in processing the log entries. Here's a description of each stage in the sequence: + +1. **Input**
+ The tool reads one log line at a time from the input source. It supports different input formats such as JSON, logfmt, and free-form logs defined by PCRE2 patterns. + +2. **Extract Fields and Values**
+ Based on the input format (JSON, logfmt, or custom pattern), it extracts fields and their values from each log line. In the case of JSON and logfmt, it automatically extracts all fields. For custom patterns, it uses PCRE2 regular expressions, and fields are extracted based on sub-expressions defined in the pattern. + +3. **Transliteration**
+ Extracted fields are transliterated to the limited character set accepted by systemd-journal: capitals A-Z, digits 0-9, underscores. + +4. **Apply Optional Prefix**
+ If a prefix is specified, it is added to all keys. This happens before any other processing so that all subsequent matches and manipulations take the prefix into account. + +5. **Rename Fields**
+ Renames fields as specified in the configuration. This is used to change the names of the fields to match desired or required naming conventions. + +6. **Inject New Fields**
+ New fields are injected into the log data. This can include constants or values derived from other fields, using variable substitution. + +7. **Rewrite Field Values**
+ Applies rewriting rules to alter the values of the fields. This can involve complex transformations, including regular expressions and variable substitutions. The rewrite rules can also inject new fields into the data. + +8. **Filter Fields**
+ Fields are filtered based on include and exclude patterns. This stage selects which fields are to be sent to the journal, allowing for selective logging. + +9. **Output**
+ Finally, the processed log data is output in the Journal Export Format. This format is compatible with systemd's journaling system and can be sent to local or remote systemd journal systems, by piping the output of `log2journal` to `systemd-cat-native`. + +This pipeline ensures a flexible and comprehensive approach to log processing, allowing for a wide range of modifications and customizations to fit various logging requirements. Each stage builds upon the previous one, enabling complex log transformations and enrichments before the data is exported to the systemd journal. + +## Real-life example + +We have an nginx server logging in this standard combined log format: + +```bash + log_format combined '$remote_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent"'; +``` + +### Extracting fields with a pattern + +First, let's find the right pattern for `log2journal`. We ask ChatGPT: + +``` +My nginx log uses this log format: + +log_format access '$remote_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent"'; + +I want to use `log2joural` to convert this log for systemd-journal. +`log2journal` accepts a PCRE2 regular expression, using the named groups +in the pattern as the journal fields to extract from the logs. + +Please give me the PCRE2 pattern to extract all the fields from my nginx +log files. +``` + +ChatGPT replies with this: + +```regexp + (?x) # Enable PCRE2 extended mode + ^ + (?[^ ]+) \s - \s + (?[^ ]+) \s + \[ + (?[^\]]+) + \] + \s+ " + (? + (?[A-Z]+) \s+ + (?[^ ]+) \s+ + (?[^"]+) + ) + " \s+ + (?\d+) \s+ + (?\d+) \s+ + "(?[^"]*)" \s+ + "(?[^"]*)" +``` + +Let's see what the above says: + +1. `(?x)`: enable PCRE2 extended mode. In this mode spaces and newlines in the pattern are ignored. To match a space you have to use `\s`. This mode allows us to split the pattern is multiple lines and add comments to it. +1. `^`: match the beginning of the line +2. `(?[^ ]+) \s - \s + (?[^ ]+) \s + \[ + (?[^\]]+) + \] + \s+ " + (? + (?[A-Z]+) \s+ + (?[^ ]+) \s+ + (?[^"]+) + ) + " \s+ + (?\d+) \s+ + (?\d+) \s+ + "(?[^"]*)" \s+ + "(?[^"]*)" +``` + +Let's test it with a sample line (instead of `tail`): + +```bash +# echo '1.2.3.4 - - [19/Nov/2023:00:24:43 +0000] "GET /index.html HTTP/1.1" 200 4172 104 0.001 "-" "Go-http-client/1.1"' | log2journal -f nginx.yaml +BODY_BYTES_SENT=4172 +HTTP_REFERER=- +HTTP_USER_AGENT=Go-http-client/1.1 +REMOTE_ADDR=1.2.3.4 +REMOTE_USER=- +REQUEST=GET /index.html HTTP/1.1 +REQUEST_METHOD=GET +REQUEST_URI=/index.html +SERVER_PROTOCOL=HTTP/1.1 +STATUS=200 +TIME_LOCAL=19/Nov/2023:00:24:43 +0000 + +``` + +As you can see, it extracted all the fields and made them capitals, as systemd-journal expects them. + +### Prefixing field names + +To make sure the fields are unique for nginx and do not interfere with other applications, we should prefix them with `NGINX_`: + +```yaml +pattern: | + (?x) # Enable PCRE2 extended mode + ^ + (?[^ ]+) \s - \s + (?[^ ]+) \s + \[ + (?[^\]]+) + \] + \s+ " + (? 
+ (?[A-Z]+) \s+ + (?[^ ]+) \s+ + (?[^"]+) + ) + " \s+ + (?\d+) \s+ + (?\d+) \s+ + "(?[^"]*)" \s+ + "(?[^"]*)" + +prefix: 'NGINX_' # <<< we added this +``` + +And let's try it: + +```bash +# echo '1.2.3.4 - - [19/Nov/2023:00:24:43 +0000] "GET /index.html HTTP/1.1" 200 4172 "-" "Go-http-client/1.1"' | log2journal -f nginx.yaml +NGINX_BODY_BYTES_SENT=4172 +NGINX_HTTP_REFERER=- +NGINX_HTTP_USER_AGENT=Go-http-client/1.1 +NGINX_REMOTE_ADDR=1.2.3.4 +NGINX_REMOTE_USER=- +NGINX_REQUEST=GET /index.html HTTP/1.1 +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_URI=/index.html +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_TIME_LOCAL=19/Nov/2023:00:24:43 +0000 + +``` + +### Renaming fields + +Now, all fields start with `NGINX_` but we want `NGINX_REQUEST` to be the `MESSAGE` of the log line, as we will see it by default in `journalctl` and the Netdata dashboard. Let's rename it: + +```yaml +pattern: | + (?x) # Enable PCRE2 extended mode + ^ + (?[^ ]+) \s - \s + (?[^ ]+) \s + \[ + (?[^\]]+) + \] + \s+ " + (? + (?[A-Z]+) \s+ + (?[^ ]+) \s+ + (?[^"]+) + ) + " \s+ + (?\d+) \s+ + (?\d+) \s+ + "(?[^"]*)" \s+ + "(?[^"]*)" + +prefix: 'NGINX_' + +rename: # <<< we added this + - new_key: MESSAGE # <<< we added this + old_key: NGINX_REQUEST # <<< we added this +``` + +Let's test it: + +```bash +# echo '1.2.3.4 - - [19/Nov/2023:00:24:43 +0000] "GET /index.html HTTP/1.1" 200 4172 "-" "Go-http-client/1.1"' | log2journal -f nginx.yaml +MESSAGE=GET /index.html HTTP/1.1 # <<< renamed ! +NGINX_BODY_BYTES_SENT=4172 +NGINX_HTTP_REFERER=- +NGINX_HTTP_USER_AGENT=Go-http-client/1.1 +NGINX_REMOTE_ADDR=1.2.3.4 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_URI=/index.html +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_TIME_LOCAL=19/Nov/2023:00:24:43 +0000 + +``` + +### Injecting new fields + +To have a complete message in journals we need 3 fields: `MESSAGE`, `PRIORITY` and `SYSLOG_IDENTIFIER`. We have already added `MESSAGE` by renaming `NGINX_REQUEST`. We can also inject a `SYSLOG_IDENTIFIER` and `PRIORITY`. + +Ideally, we would want the 5xx errors to be red in our `journalctl` output and the dashboard. To achieve that we need to set the `PRIORITY` field to the right log level. Log priorities are numeric and follow the `syslog` priorities. Checking `/usr/include/sys/syslog.h` we can see these: + +```c +#define LOG_EMERG 0 /* system is unusable */ +#define LOG_ALERT 1 /* action must be taken immediately */ +#define LOG_CRIT 2 /* critical conditions */ +#define LOG_ERR 3 /* error conditions */ +#define LOG_WARNING 4 /* warning conditions */ +#define LOG_NOTICE 5 /* normal but significant condition */ +#define LOG_INFO 6 /* informational */ +#define LOG_DEBUG 7 /* debug-level messages */ +``` + +Avoid setting priority to 0 (`LOG_EMERG`), because these will be on your terminal (the journal uses `wall` to let you know of such events). A good priority for errors is 3 (red), or 4 (yellow). + +To set the PRIORITY field in the output, we can use `NGINX_STATUS`. We will do this in 2 steps: a) inject the priority field as a copy is `NGINX_STATUS` and then b) use a pattern on its value to rewrite it to the priority level we want. + +First, let's inject `SYSLOG_IDENTIFIER` and `PRIORITY`: + +```yaml +pattern: | + (?x) # Enable PCRE2 extended mode + ^ + (?[^ ]+) \s - \s + (?[^ ]+) \s + \[ + (?[^\]]+) + \] + \s+ " + (? 
+ (?[A-Z]+) \s+ + (?[^ ]+) \s+ + (?[^"]+) + ) + " \s+ + (?\d+) \s+ + (?\d+) \s+ + "(?[^"]*)" \s+ + "(?[^"]*)" + +prefix: 'NGINX_' + +rename: + - new_key: MESSAGE + old_key: NGINX_REQUEST + +inject: # <<< we added this + - key: PRIORITY # <<< we added this + value: '${NGINX_STATUS}' # <<< we added this + + - key: SYSLOG_IDENTIFIER # <<< we added this + value: 'nginx-log' # <<< we added this +``` + +Let's see what this does: + +```bash +# echo '1.2.3.4 - - [19/Nov/2023:00:24:43 +0000] "GET /index.html HTTP/1.1" 200 4172 "-" "Go-http-client/1.1"' | log2journal -f nginx.yaml +MESSAGE=GET /index.html HTTP/1.1 +NGINX_BODY_BYTES_SENT=4172 +NGINX_HTTP_REFERER=- +NGINX_HTTP_USER_AGENT=Go-http-client/1.1 +NGINX_REMOTE_ADDR=1.2.3.4 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_URI=/index.html +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_TIME_LOCAL=19/Nov/2023:00:24:43 +0000 +PRIORITY=200 # <<< PRIORITY added +SYSLOG_IDENTIFIER=nginx-log # <<< SYSLOG_IDENTIFIER added + +``` + +### Rewriting field values + +Now we need to rewrite `PRIORITY` to the right syslog level based on its value (`NGINX_STATUS`). We will assign the priority 6 (info) when the status is 1xx, 2xx, 3xx, priority 5 (notice) when status is 4xx, priority 3 (error) when status is 5xx and anything else will go to priority 4 (warning). Let's do it: + +```yaml +pattern: | + (?x) # Enable PCRE2 extended mode + ^ + (?[^ ]+) \s - \s + (?[^ ]+) \s + \[ + (?[^\]]+) + \] + \s+ " + (? + (?[A-Z]+) \s+ + (?[^ ]+) \s+ + (?[^"]+) + ) + " \s+ + (?\d+) \s+ + (?\d+) \s+ + "(?[^"]*)" \s+ + "(?[^"]*)" + +prefix: 'NGINX_' + +rename: + - new_key: MESSAGE + old_key: NGINX_REQUEST + +inject: + - key: PRIORITY + value: '${NGINX_STATUS}' + +rewrite: # <<< we added this + - key: PRIORITY # <<< we added this + match: '^[123]' # <<< we added this + value: 6 # <<< we added this + + - key: PRIORITY # <<< we added this + match: '^4' # <<< we added this + value: 5 # <<< we added this + + - key: PRIORITY # <<< we added this + match: '^5' # <<< we added this + value: 3 # <<< we added this + + - key: PRIORITY # <<< we added this + match: '.*' # <<< we added this + value: 4 # <<< we added this +``` + +Rewrite rules are processed in order and the first matching a field, stops by default processing for this field. This is why the last rule, that matches everything does not always change the priority to 4. + +Let's test it: + +```bash +# echo '1.2.3.4 - - [19/Nov/2023:00:24:43 +0000] "GET /index.html HTTP/1.1" 200 4172 "-" "Go-http-client/1.1"' | log2journal -f nginx.yaml +MESSAGE=GET /index.html HTTP/1.1 +NGINX_BODY_BYTES_SENT=4172 +NGINX_HTTP_REFERER=- +NGINX_HTTP_USER_AGENT=Go-http-client/1.1 +NGINX_REMOTE_ADDR=1.2.3.4 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_URI=/index.html +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_TIME_LOCAL=19/Nov/2023:00:24:43 +0000 +PRIORITY=6 # <<< PRIORITY rewritten here +SYSLOG_IDENTIFIER=nginx-log + +``` + +Rewrite rules are powerful. You can have named groups in them, like in the main pattern, to extract sub-fields from them, which you can then use in variable substitution. You can use rewrite rules to anonymize the URLs, e.g to remove customer IDs or transaction details from them. + +### Sending logs to systemd-journal + +Now the message is ready to be sent to a systemd-journal. For this we use `systemd-cat-native`. This command can send such messages to a journal running on the localhost, a local journal namespace, or a `systemd-journal-remote` running on another server. 
By just appending `| systemd-cat-native` to the command, the message will be sent to the local journal. + + +```bash +# echo '1.2.3.4 - - [19/Nov/2023:00:24:43 +0000] "GET /index.html HTTP/1.1" 200 4172 "-" "Go-http-client/1.1"' | log2journal -f nginx.yaml | systemd-cat-native +# no output + +# let's find the message +# journalctl -r -o verbose SYSLOG_IDENTIFIER=nginx-log +Wed 2023-12-06 13:23:07.083299 EET [s=5290f0133f25407aaa1e2c451c0e4756;i=57194;b=0dfa96ecc2094cecaa8ec0efcb93b865;m=b133308867;t=60bd59346a289;x=5c1bdacf2b9c4bbd] + PRIORITY=6 + _UID=0 + _GID=0 + _CAP_EFFECTIVE=1ffffffffff + _SELINUX_CONTEXT=unconfined + _BOOT_ID=0dfa96ecc2094cecaa8ec0efcb93b865 + _MACHINE_ID=355c8eca894d462bbe4c9422caf7a8bb + _HOSTNAME=lab-logtest-src + _RUNTIME_SCOPE=system + _TRANSPORT=journal + MESSAGE=GET /index.html HTTP/1.1 + NGINX_BODY_BYTES_SENT=4172 + NGINX_HTTP_REFERER=- + NGINX_HTTP_USER_AGENT=Go-http-client/1.1 + NGINX_REMOTE_ADDR=1.2.3.4 + NGINX_REMOTE_USER=- + NGINX_REQUEST_METHOD=GET + NGINX_REQUEST_URI=/index.html + NGINX_SERVER_PROTOCOL=HTTP/1.1 + NGINX_STATUS=200 + NGINX_TIME_LOCAL=19/Nov/2023:00:24:43 +0000 + SYSLOG_IDENTIFIER=nginx-log + _PID=114343 + _COMM=systemd-cat-nat + _AUDIT_SESSION=253 + _AUDIT_LOGINUID=1000 + _SYSTEMD_CGROUP=/user.slice/user-1000.slice/session-253.scope + _SYSTEMD_SESSION=253 + _SYSTEMD_OWNER_UID=1000 + _SYSTEMD_UNIT=session-253.scope + _SYSTEMD_SLICE=user-1000.slice + _SYSTEMD_USER_SLICE=-.slice + _SYSTEMD_INVOCATION_ID=c59e33ead8c24880b027e317b89f9f76 + _SOURCE_REALTIME_TIMESTAMP=1701861787083299 + +``` + +So, the log line, with all its fields parsed, ended up in systemd-journal. Now we can send all the nginx logs to systemd-journal like this: + +```bash +tail -F /var/log/nginx/access.log |\ + log2journal -f nginx.yaml |\ + systemd-cat-native +``` + +## Best practices + +**Create a systemd service unit**: Add the above commands to a systemd unit file. When you run it in a systemd unit file you will be able to start/stop it and also see its status. Furthermore you can use the `LogNamespace=` directive of systemd service units to isolate your nginx logs from the logs of the rest of the system. Here is how to do it: + +Create the file `/etc/systemd/system/nginx-logs.service` (change `/path/to/nginx.yaml` to the right path): + +``` +[Unit] +Description=NGINX Log to Systemd Journal +After=network.target + +[Service] +ExecStart=/bin/sh -c 'tail -F /var/log/nginx/access.log | log2journal -f /path/to/nginx.yaml' | systemd-cat-native +LogNamespace=nginx-logs +Restart=always +RestartSec=3 + +[Install] +WantedBy=multi-user.target +``` + +Reload systemd to grab this file: + +```bash +sudo systemctl daemon-reload +``` + +Enable and start the service: + +```bash +sudo systemctl enable nginx-logs.service +sudo systemctl start nginx-logs.service +``` + +To see the logs of the namespace, use: + +```bash +journalctl -f --namespace=nginx-logs +``` + +Netdata will automatically pick the new namespace and present it at the list of sources of the dashboard. + +You can also instruct `systemd-cat-native` to log to a remote system, sending the logs to a `systemd-journal-remote` instance running on another server. Check [the manual of systemd-cat-native](https://github.com/netdata/netdata/blob/master/libnetdata/log/systemd-cat-native.md). + + +## Performance + +`log2journal` and `systemd-cat-native` have been designed to process hundreds of thousands of log lines per second. 
They both utilize high-performance indexing hashtables to speed up lookups, and queues that dynamically adapt to the number of log lines offered, providing a smooth and fast experience under all conditions.
+
+In our tests, the combined CPU utilization of `log2journal` and `systemd-cat-native` versus `promtail` with a similar configuration is 1 to 5. So, `log2journal` and `systemd-cat-native` combined are 5 times faster than `promtail`.
+
+### PCRE2 patterns
+
+The key factor that influences the performance of a log processing pipeline built with these tools is the quality of the PCRE2 patterns used. Poorly written PCRE2 patterns can make processing significantly slower and more CPU intensive.
+
+The pattern `.*` in particular has the biggest impact on CPU consumption, especially when multiple `.*` appear in the same pattern.
+
+Usually we use `.*` to indicate that we need to match everything up to a character, e.g. `.* ` to match up to a space. By replacing it with `[^ ]+` (meaning: match one or more characters up to a space), the regular expression engine can be a lot more efficient, reducing the overall CPU utilization significantly.
+
+### Performance of systemd journals
+
+The log ingestion pipeline, from `tail` to `systemd-journald` or `systemd-journal-remote`, is very efficient in all aspects. CPU utilization is better than any other system we tested, and RAM usage is independent of the number of fields indexed, making systemd-journal one of the most efficient log management engines for ingesting high volumes of structured logs.
+
+High field cardinality does not have a noticeable impact on systemd-journal. The number of fields indexed and the number of unique values per field have a linear and predictable effect on the resource utilization of `systemd-journald` and `systemd-journal-remote`. This is unlike other log management solutions, like Loki, whose RAM requirements grow exponentially as cardinality increases, making it impractical for them to index the amount of information systemd journals can.
+
+However, the number of fields added to journals influences the overall disk footprint. Fewer fields mean more log entries per journal file, a smaller overall disk footprint, and faster queries.
+
+systemd-journal files are primarily designed for security and reliability. This comes at the cost of disk footprint. The internal structure of journal files is such that, in case of corruption, data loss is kept to a minimum. To achieve this, certain data within the files needs to be aligned at predefined boundaries, so that the non-corrupted parts of a damaged journal file can still be recovered.
+
+Although systemd-journald employs several techniques to optimize disk footprint, such as deduplication of log entries, shared indexes for fields and their values, and compression of long log entries, the disk footprint of journal files is generally about 10x larger compared to other monitoring solutions, like Loki.
+
+This can be improved by storing journal files on a compressed filesystem. In our tests, a compressed filesystem can save up to 75% of the space required by journal files. The journal files will still be bigger than the overall disk footprint of other solutions, but the flexibility (index any number of fields), reliability (minimal potential data loss) and security (tampering protection and sealing) features of systemd-journal justify the difference.
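+
+For example, here is a minimal sketch of how a centralization server could keep its journal files on a btrfs volume with transparent zstd compression. The device, mount point and compression level below are illustrative assumptions, not Netdata recommendations:
+
+```bash
+# /etc/fstab entry (illustrative): mount a dedicated btrfs volume with
+# transparent zstd compression at the directory where systemd-journal-remote
+# stores the journals it receives
+/dev/sdb1  /var/log/journal/remote  btrfs  compress=zstd:3,noatime  0  0
+```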
+ +When using versions of systemd prior to 254 and you are centralizing logs to a remote system, `systemd-journal-remote` creates very small files (32MB). This results in increased duplication of information across the files, increasing the overall disk footprint. systemd versions 254+, added options to `systemd-journal-remote` to control the max size per file. This can significantly reduce the duplication of information. + +Another limitation of the `systemd-journald` ecosystem is the uncompressed transmission of logs across systems. `systemd-journal-remote` up to version 254 that we tested, accepts encrypted, but uncompressed data. This means that when centralizing logs to a logs server, the bandwidth required will be increased compared to other log management solution. + +## Security Considerations + +`log2journal` and `systemd-cat-native` are used to convert log files to structured logs in the systemd-journald ecosystem. + +Systemd-journal is a logs management solution designed primarily for security and reliability. When configured properly, it can reliably and securely store your logs, ensuring they will available and unchanged for as long as you need them. + +When sending logs to a remote system, `systemd-cat-native` can be configured the same way `systemd-journal-upload` is configured, using HTTPS and private keys to encrypt and secure their transmission over the network. + +When dealing with sensitive logs, organizations usually follow 2 strategies: + +1. Anonymize the logs before storing them, so that the stored logs do not have any sensitive information. +2. Store the logs in full, including sensitive information, and carefully control who and how has access to them. + +Netdata can help in both cases. + +If you want to anonymize the logs before storing them, use rewriting rules at the `log2journal` phase to remove sensitive information from them. This process usually means matching the sensitive part and replacing with `XXX` or `CUSTOMER_ID`, or `CREDIT_CARD_NUMBER`, so that the resulting log entries stored in journal files will not include any such sensitive information. + +If on other hand your organization prefers to maintain the full logs and control who and how has access on them, use Netdata Cloud to assign roles to your team members and control which roles can access the journal logs in your environment. + +## `log2journal` options + +``` + +Netdata log2journal v1.43.0-341-gdac4df856 + +Convert logs to systemd Journal Export Format. + + - JSON logs: extracts all JSON fields. + - logfmt logs: extracts all logfmt fields. + - free-form logs: uses PCRE2 patterns to extracts fields. + +Usage: ./log2journal [OPTIONS] PATTERN|json + +Options: + + --file /path/to/file.yaml or -f /path/to/file.yaml + Read yaml configuration file for instructions. + + --config CONFIG_NAME or -c CONFIG_NAME + Run with the internal YAML configuration named CONFIG_NAME. + Available internal YAML configs: + + nginx-combined nginx-json default + +-------------------------------------------------------------------------------- + INPUT PROCESSING + + PATTERN + PATTERN should be a valid PCRE2 regular expression. + RE2 regular expressions (like the ones usually used in Go applications), + are usually valid PCRE2 patterns too. + Sub-expressions without named groups are evaluated, but their matches are + not added to the output. 
+ + - JSON mode + JSON mode is enabled when the pattern is set to: json + Field names are extracted from the JSON logs and are converted to the + format expected by Journal Export Format (all caps, only _ is allowed). + + - logfmt mode + logfmt mode is enabled when the pattern is set to: logfmt + Field names are extracted from the logfmt logs and are converted to the + format expected by Journal Export Format (all caps, only _ is allowed). + + All keys extracted from the input, are transliterated to match Journal + semantics (capital A-Z, digits 0-9, underscore). + + In a YAML file: + ```yaml + pattern: 'PCRE2 pattern | json | logfmt' + ``` + +-------------------------------------------------------------------------------- + GLOBALS + + --prefix PREFIX + Prefix all fields with PREFIX. The PREFIX is added before any other + processing, so that the extracted keys have to be matched with the PREFIX in + them. PREFIX is NOT transliterated and it is assumed to be systemd-journal + friendly. + + In a YAML file: + ```yaml + prefix: 'PREFIX_' # prepend all keys with this prefix. + ``` + + --filename-key KEY + Add a field with KEY as the key and the current filename as value. + Automatically detects filenames when piped after 'tail -F', + and tail matches multiple filenames. + To inject the filename when tailing a single file, use --inject. + + In a YAML file: + ```yaml + filename: + key: KEY + ``` + +-------------------------------------------------------------------------------- + RENAMING OF KEYS + + --rename NEW=OLD + Rename fields. OLD has been transliterated and PREFIX has been added. + NEW is assumed to be systemd journal friendly. + + Up to 512 renaming rules are allowed. + + In a YAML file: + ```yaml + rename: + - new_key: KEY1 + old_key: KEY2 # transliterated with PREFIX added + - new_key: KEY3 + old_key: KEY4 # transliterated with PREFIX added + # add as many as required + ``` + +-------------------------------------------------------------------------------- + INJECTING NEW KEYS + + --inject KEY=VALUE + Inject constant fields to the output (both matched and unmatched logs). + --inject entries are added to unmatched lines too, when their key is + not used in --inject-unmatched (--inject-unmatched override --inject). + VALUE can use variable like ${OTHER_KEY} to be replaced with the values + of other keys available. + + Up to 512 fields can be injected. + + In a YAML file: + ```yaml + inject: + - key: KEY1 + value: 'VALUE1' + - key: KEY2 + value: '${KEY3}${KEY4}' # gets the values of KEY3 and KEY4 + # add as many as required + ``` + +-------------------------------------------------------------------------------- + REWRITING KEY VALUES + + --rewrite KEY=/MATCH/REPLACE[/OPTIONS] + Apply a rewrite rule to the values of a specific key. + The first character after KEY= is the separator, which should also + be used between the MATCH, REPLACE and OPTIONS. + + OPTIONS can be a comma separated list of `non-empty`, `dont-stop` and + `inject`. + + When `non-empty` is given, MATCH is expected to be a variable + substitution using `${KEY1}${KEY2}`. Once the substitution is completed + the rule is matching the KEY only if the result is not empty. + When `non-empty` is not set, the MATCH string is expected to be a PCRE2 + regular expression to be checked against the KEY value. This PCRE2 + pattern may include named groups to extract parts of the KEY's value. 
+ + REPLACE supports variable substitution like `${variable}` against MATCH + named groups (when MATCH is a PCRE2 pattern) and `${KEY}` against the + keys defined so far. + + Example: + --rewrite DATE=/^(?\d{4})-(?\d{2})-(?\d{2})$/ + ${day}/${month}/${year} + The above will rewrite dates in the format YYYY-MM-DD to DD/MM/YYYY. + + Only one rewrite rule is applied per key; the sequence of rewrites for a + given key, stops once a rule matches it. This allows providing a sequence + of independent rewriting rules for the same key, matching the different + values the key may get, and also provide a catch-all rewrite rule at the + end, for setting the key value if no other rule matched it. The rewrite + rule can allow processing more rewrite rules when OPTIONS includes + the keyword 'dont-stop'. + + Up to 512 rewriting rules are allowed. + + In a YAML file: + ```yaml + rewrite: + # the order if these rules in important - processed top to bottom + - key: KEY1 + match: 'PCRE2 PATTERN WITH NAMED GROUPS' + value: 'all match fields and input keys as ${VARIABLE}' + inject: BOOLEAN # yes = inject the field, don't just rewrite it + stop: BOOLEAN # no = continue processing, don't stop if matched + - key: KEY2 + non_empty: '${KEY3}${KEY4}' # match only if this evaluates to non empty + value: 'all input keys as ${VARIABLE}' + inject: BOOLEAN # yes = inject the field, don't just rewrite it + stop: BOOLEAN # no = continue processing, don't stop if matched + # add as many rewrites as required + ``` + + By default rewrite rules are applied only on fields already defined. + This allows shipping YAML files that include more rewrites than are + required for a specific input file. + Rewrite rules however allow injecting new fields when OPTIONS include + the keyword `inject` or in YAML `inject: yes` is given. + + MATCH on the command line can be empty to define an unconditional rule. + Similarly, `match` and `non_empty` can be omitted in the YAML file. +-------------------------------------------------------------------------------- + UNMATCHED LINES + + --unmatched-key KEY + Include unmatched log entries in the output with KEY as the field name. + Use this to include unmatched entries to the output stream. + Usually it should be set to --unmatched-key=MESSAGE so that the + unmatched entry will appear as the log message in the journals. + Use --inject-unmatched to inject additional fields to unmatched lines. + + In a YAML file: + ```yaml + unmatched: + key: MESSAGE # inject the error log as MESSAGE + ``` + + --inject-unmatched LINE + Inject lines into the output for each unmatched log entry. + Usually, --inject-unmatched=PRIORITY=3 is needed to mark the unmatched + lines as errors, so that they can easily be spotted in the journals. + + Up to 512 such lines can be injected. + + In a YAML file: + ```yaml + unmatched: + key: MESSAGE # inject the error log as MESSAGE + inject:: + - key: KEY1 + value: 'VALUE1' + # add as many constants as required + ``` + +-------------------------------------------------------------------------------- + FILTERING + + --include PATTERN + Include only keys matching the PCRE2 PATTERN. + Useful when parsing JSON of logfmt logs, to include only the keys given. + The keys are matched after the PREFIX has been added to them. + + --exclude PATTERN + Exclude the keys matching the PCRE2 PATTERN. + Useful when parsing JSON of logfmt logs, to exclude some of the keys given. + The keys are matched after the PREFIX has been added to them. 
+ + When both include and exclude patterns are set and both match a key, + exclude wins and the key will not be added, like a pipeline, we first + include it and then exclude it. + + In a YAML file: + ```yaml + filter: + include: 'PCRE2 PATTERN MATCHING KEY NAMES TO INCLUDE' + exclude: 'PCRE2 PATTERN MATCHING KEY NAMES TO EXCLUDE' + ``` + +-------------------------------------------------------------------------------- + OTHER + + -h, or --help + Display this help and exit. + + --show-config + Show the configuration in YAML format before starting the job. + This is also an easy way to convert command line parameters to yaml. + +The program accepts all parameters as both --option=value and --option value. + +The maximum log line length accepted is 1048576 characters. + +PIPELINE AND SEQUENCE OF PROCESSING + +This is a simple diagram of the pipeline taking place: + + +---------------------------------------------------+ + | INPUT | + | read one log line at a time | + +---------------------------------------------------+ + v v v v v v + +---------------------------------------------------+ + | EXTRACT FIELDS AND VALUES | + | JSON, logfmt, or pattern based | + | (apply optional PREFIX - all keys use capitals) | + +---------------------------------------------------+ + v v v v v v + +---------------------------------------------------+ + | RENAME FIELDS | + | change the names of the fields | + +---------------------------------------------------+ + v v v v v v + +---------------------------------------------------+ + | INJECT NEW FIELDS | + | constants, or other field values as variables | + +---------------------------------------------------+ + v v v v v v + +---------------------------------------------------+ + | REWRITE FIELD VALUES | + | pipeline multiple rewriting rules to alter | + | the values of the fields | + +---------------------------------------------------+ + v v v v v v + +---------------------------------------------------+ + | FILTER FIELDS | + | use include and exclude patterns on the field | + | names, to select which fields are sent to journal | + +---------------------------------------------------+ + v v v v v v + +---------------------------------------------------+ + | OUTPUT | + | generate Journal Export Format | + +---------------------------------------------------+ + +-------------------------------------------------------------------------------- +JOURNAL FIELDS RULES (enforced by systemd-journald) + + - field names can be up to 64 characters + - the only allowed field characters are A-Z, 0-9 and underscore + - the first character of fields cannot be a digit + - protected journal fields start with underscore: + * they are accepted by systemd-journal-remote + * they are NOT accepted by a local systemd-journald + + For best results, always include these fields: + + MESSAGE=TEXT + The MESSAGE is the body of the log entry. + This field is what we usually see in our logs. + + PRIORITY=NUMBER + PRIORITY sets the severity of the log entry. + 0=emerg, 1=alert, 2=crit, 3=err, 4=warn, 5=notice, 6=info, 7=debug + - Emergency events (0) are usually broadcast to all terminals. + - Emergency, alert, critical, and error (0-3) are usually colored red. + - Warning (4) entries are usually colored yellow. + - Notice (5) entries are usually bold or have a brighter white color. + - Info (6) entries are the default. + - Debug (7) entries are usually grayed or dimmed. + + SYSLOG_IDENTIFIER=NAME + SYSLOG_IDENTIFIER sets the name of application. 
+ Use something descriptive, like: SYSLOG_IDENTIFIER=nginx-logs + +You can find the most common fields at 'man systemd.journal-fields'. + +``` + +`log2journal` supports YAML configuration files, like the ones found [in this directory](https://github.com/netdata/netdata/tree/master/collectors/log2journal/log2journal.d). + +## `systemd-cat-native` options + +Read [the manual of systemd-cat-native](https://github.com/netdata/netdata/blob/master/libnetdata/log/systemd-cat-native.md). diff --git a/collectors/log2journal/log2journal-help.c b/collectors/log2journal/log2journal-help.c new file mode 100644 index 00000000000000..21be948e8a974b --- /dev/null +++ b/collectors/log2journal/log2journal-help.c @@ -0,0 +1,377 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "log2journal.h" + +static void config_dir_print_available(void) { + const char *path = LOG2JOURNAL_CONFIG_PATH; + DIR *dir; + struct dirent *entry; + + dir = opendir(path); + + if (dir == NULL) { + log2stderr(" >>> Cannot open directory:\n %s", path); + return; + } + + size_t column_width = 80; + size_t current_columns = 7; // Start with 7 spaces for the first line + + while ((entry = readdir(dir))) { + if (entry->d_type == DT_REG) { // Check if it's a regular file + const char *file_name = entry->d_name; + size_t len = strlen(file_name); + if (len >= 5 && strcmp(file_name + len - 5, ".yaml") == 0) { + // Remove the ".yaml" extension + len -= 5; + if (current_columns == 7) { + printf(" "); // Print 7 spaces at the beginning of a new line + } + if (current_columns + len + 1 > column_width) { + // Start a new line if the current line is full + printf("\n "); // Print newline and 7 spaces + current_columns = 7; + } + printf("%.*s ", (int)len, file_name); // Print the filename without extension + current_columns += len + 1; // Add filename length and a space + } + } + } + + closedir(dir); + printf("\n"); // Add a newline at the end +} + +void log_job_command_line_help(const char *name) { + printf("\n"); + printf("Netdata log2journal " PACKAGE_VERSION "\n"); + printf("\n"); + printf("Convert logs to systemd Journal Export Format.\n"); + printf("\n"); + printf(" - JSON logs: extracts all JSON fields.\n"); + printf(" - logfmt logs: extracts all logfmt fields.\n"); + printf(" - free-form logs: uses PCRE2 patterns to extracts fields.\n"); + printf("\n"); + printf("Usage: %s [OPTIONS] PATTERN|json\n", name); + printf("\n"); + printf("Options:\n"); + printf("\n"); +#ifdef HAVE_LIBYAML + printf(" --file /path/to/file.yaml or -f /path/to/file.yaml\n"); + printf(" Read yaml configuration file for instructions.\n"); + printf("\n"); + printf(" --config CONFIG_NAME or -c CONFIG_NAME\n"); + printf(" Run with the internal YAML configuration named CONFIG_NAME.\n"); + printf(" Available internal YAML configs:\n"); + printf("\n"); + config_dir_print_available(); + printf("\n"); +#else + printf(" IMPORTANT:\n"); + printf(" YAML configuration parsing is not compiled in this binary.\n"); + printf("\n"); +#endif + printf("--------------------------------------------------------------------------------\n"); + printf(" INPUT PROCESSING\n"); + printf("\n"); + printf(" PATTERN\n"); + printf(" PATTERN should be a valid PCRE2 regular expression.\n"); + printf(" RE2 regular expressions (like the ones usually used in Go applications),\n"); + printf(" are usually valid PCRE2 patterns too.\n"); + printf(" Sub-expressions without named groups are evaluated, but their matches are\n"); + printf(" not added to the output.\n"); + printf("\n"); + printf(" - JSON 
mode\n"); + printf(" JSON mode is enabled when the pattern is set to: json\n"); + printf(" Field names are extracted from the JSON logs and are converted to the\n"); + printf(" format expected by Journal Export Format (all caps, only _ is allowed).\n"); + printf("\n"); + printf(" - logfmt mode\n"); + printf(" logfmt mode is enabled when the pattern is set to: logfmt\n"); + printf(" Field names are extracted from the logfmt logs and are converted to the\n"); + printf(" format expected by Journal Export Format (all caps, only _ is allowed).\n"); + printf("\n"); + printf(" All keys extracted from the input, are transliterated to match Journal\n"); + printf(" semantics (capital A-Z, digits 0-9, underscore).\n"); + printf("\n"); + printf(" In a YAML file:\n"); + printf(" ```yaml\n"); + printf(" pattern: 'PCRE2 pattern | json | logfmt'\n"); + printf(" ```\n"); + printf("\n"); + printf("--------------------------------------------------------------------------------\n"); + printf(" GLOBALS\n"); + printf("\n"); + printf(" --prefix PREFIX\n"); + printf(" Prefix all fields with PREFIX. The PREFIX is added before any other\n"); + printf(" processing, so that the extracted keys have to be matched with the PREFIX in\n"); + printf(" them. PREFIX is NOT transliterated and it is assumed to be systemd-journal\n"); + printf(" friendly.\n"); + printf("\n"); + printf(" In a YAML file:\n"); + printf(" ```yaml\n"); + printf(" prefix: 'PREFIX_' # prepend all keys with this prefix.\n"); + printf(" ```\n"); + printf("\n"); + printf(" --filename-key KEY\n"); + printf(" Add a field with KEY as the key and the current filename as value.\n"); + printf(" Automatically detects filenames when piped after 'tail -F',\n"); + printf(" and tail matches multiple filenames.\n"); + printf(" To inject the filename when tailing a single file, use --inject.\n"); + printf("\n"); + printf(" In a YAML file:\n"); + printf(" ```yaml\n"); + printf(" filename:\n"); + printf(" key: KEY\n"); + printf(" ```\n"); + printf("\n"); + printf("--------------------------------------------------------------------------------\n"); + printf(" RENAMING OF KEYS\n"); + printf("\n"); + printf(" --rename NEW=OLD\n"); + printf(" Rename fields. 
OLD has been transliterated and PREFIX has been added.\n"); + printf(" NEW is assumed to be systemd journal friendly.\n"); + printf("\n"); + printf(" Up to %d renaming rules are allowed.\n", MAX_RENAMES); + printf("\n"); + printf(" In a YAML file:\n"); + printf(" ```yaml\n"); + printf(" rename:\n"); + printf(" - new_key: KEY1\n"); + printf(" old_key: KEY2 # transliterated with PREFIX added\n"); + printf(" - new_key: KEY3\n"); + printf(" old_key: KEY4 # transliterated with PREFIX added\n"); + printf(" # add as many as required\n"); + printf(" ```\n"); + printf("\n"); + printf("--------------------------------------------------------------------------------\n"); + printf(" INJECTING NEW KEYS\n"); + printf("\n"); + printf(" --inject KEY=VALUE\n"); + printf(" Inject constant fields to the output (both matched and unmatched logs).\n"); + printf(" --inject entries are added to unmatched lines too, when their key is\n"); + printf(" not used in --inject-unmatched (--inject-unmatched override --inject).\n"); + printf(" VALUE can use variable like ${OTHER_KEY} to be replaced with the values\n"); + printf(" of other keys available.\n"); + printf("\n"); + printf(" Up to %d fields can be injected.\n", MAX_INJECTIONS); + printf("\n"); + printf(" In a YAML file:\n"); + printf(" ```yaml\n"); + printf(" inject:\n"); + printf(" - key: KEY1\n"); + printf(" value: 'VALUE1'\n"); + printf(" - key: KEY2\n"); + printf(" value: '${KEY3}${KEY4}' # gets the values of KEY3 and KEY4\n"); + printf(" # add as many as required\n"); + printf(" ```\n"); + printf("\n"); + printf("--------------------------------------------------------------------------------\n"); + printf(" REWRITING KEY VALUES\n"); + printf("\n"); + printf(" --rewrite KEY=/MATCH/REPLACE[/OPTIONS]\n"); + printf(" Apply a rewrite rule to the values of a specific key.\n"); + printf(" The first character after KEY= is the separator, which should also\n"); + printf(" be used between the MATCH, REPLACE and OPTIONS.\n"); + printf("\n"); + printf(" OPTIONS can be a comma separated list of `non-empty`, `dont-stop` and\n"); + printf(" `inject`.\n"); + printf("\n"); + printf(" When `non-empty` is given, MATCH is expected to be a variable\n"); + printf(" substitution using `${KEY1}${KEY2}`. Once the substitution is completed\n"); + printf(" the rule is matching the KEY only if the result is not empty.\n"); + printf(" When `non-empty` is not set, the MATCH string is expected to be a PCRE2\n"); + printf(" regular expression to be checked against the KEY value. This PCRE2\n"); + printf(" pattern may include named groups to extract parts of the KEY's value.\n"); + printf("\n"); + printf(" REPLACE supports variable substitution like `${variable}` against MATCH\n"); + printf(" named groups (when MATCH is a PCRE2 pattern) and `${KEY}` against the\n"); + printf(" keys defined so far.\n"); + printf("\n"); + printf(" Example:\n"); + printf(" --rewrite DATE=/^(?\\d{4})-(?\\d{2})-(?\\d{2})$/\n"); + printf(" ${day}/${month}/${year}\n"); + printf(" The above will rewrite dates in the format YYYY-MM-DD to DD/MM/YYYY.\n"); + printf("\n"); + printf(" Only one rewrite rule is applied per key; the sequence of rewrites for a\n"); + printf(" given key, stops once a rule matches it. This allows providing a sequence\n"); + printf(" of independent rewriting rules for the same key, matching the different\n"); + printf(" values the key may get, and also provide a catch-all rewrite rule at the\n"); + printf(" end, for setting the key value if no other rule matched it. 
The rewrite\n"); + printf(" rule can allow processing more rewrite rules when OPTIONS includes\n"); + printf(" the keyword 'dont-stop'.\n"); + printf("\n"); + printf(" Up to %d rewriting rules are allowed.\n", MAX_REWRITES); + printf("\n"); + printf(" In a YAML file:\n"); + printf(" ```yaml\n"); + printf(" rewrite:\n"); + printf(" # the order if these rules in important - processed top to bottom\n"); + printf(" - key: KEY1\n"); + printf(" match: 'PCRE2 PATTERN WITH NAMED GROUPS'\n"); + printf(" value: 'all match fields and input keys as ${VARIABLE}'\n"); + printf(" inject: BOOLEAN # yes = inject the field, don't just rewrite it\n"); + printf(" stop: BOOLEAN # no = continue processing, don't stop if matched\n"); + printf(" - key: KEY2\n"); + printf(" non_empty: '${KEY3}${KEY4}' # match only if this evaluates to non empty\n"); + printf(" value: 'all input keys as ${VARIABLE}'\n"); + printf(" inject: BOOLEAN # yes = inject the field, don't just rewrite it\n"); + printf(" stop: BOOLEAN # no = continue processing, don't stop if matched\n"); + printf(" # add as many rewrites as required\n"); + printf(" ```\n"); + printf("\n"); + printf(" By default rewrite rules are applied only on fields already defined.\n"); + printf(" This allows shipping YAML files that include more rewrites than are\n"); + printf(" required for a specific input file.\n"); + printf(" Rewrite rules however allow injecting new fields when OPTIONS include\n"); + printf(" the keyword `inject` or in YAML `inject: yes` is given.\n"); + printf("\n"); + printf(" MATCH on the command line can be empty to define an unconditional rule.\n"); + printf(" Similarly, `match` and `non_empty` can be omitted in the YAML file."); + printf("\n"); + printf("--------------------------------------------------------------------------------\n"); + printf(" UNMATCHED LINES\n"); + printf("\n"); + printf(" --unmatched-key KEY\n"); + printf(" Include unmatched log entries in the output with KEY as the field name.\n"); + printf(" Use this to include unmatched entries to the output stream.\n"); + printf(" Usually it should be set to --unmatched-key=MESSAGE so that the\n"); + printf(" unmatched entry will appear as the log message in the journals.\n"); + printf(" Use --inject-unmatched to inject additional fields to unmatched lines.\n"); + printf("\n"); + printf(" In a YAML file:\n"); + printf(" ```yaml\n"); + printf(" unmatched:\n"); + printf(" key: MESSAGE # inject the error log as MESSAGE\n"); + printf(" ```\n"); + printf("\n"); + printf(" --inject-unmatched LINE\n"); + printf(" Inject lines into the output for each unmatched log entry.\n"); + printf(" Usually, --inject-unmatched=PRIORITY=3 is needed to mark the unmatched\n"); + printf(" lines as errors, so that they can easily be spotted in the journals.\n"); + printf("\n"); + printf(" Up to %d such lines can be injected.\n", MAX_INJECTIONS); + printf("\n"); + printf(" In a YAML file:\n"); + printf(" ```yaml\n"); + printf(" unmatched:\n"); + printf(" key: MESSAGE # inject the error log as MESSAGE\n"); + printf(" inject::\n"); + printf(" - key: KEY1\n"); + printf(" value: 'VALUE1'\n"); + printf(" # add as many constants as required\n"); + printf(" ```\n"); + printf("\n"); + printf("--------------------------------------------------------------------------------\n"); + printf(" FILTERING\n"); + printf("\n"); + printf(" --include PATTERN\n"); + printf(" Include only keys matching the PCRE2 PATTERN.\n"); + printf(" Useful when parsing JSON of logfmt logs, to include only the keys given.\n"); + printf(" The 
keys are matched after the PREFIX has been added to them.\n"); + printf("\n"); + printf(" --exclude PATTERN\n"); + printf(" Exclude the keys matching the PCRE2 PATTERN.\n"); + printf(" Useful when parsing JSON of logfmt logs, to exclude some of the keys given.\n"); + printf(" The keys are matched after the PREFIX has been added to them.\n"); + printf("\n"); + printf(" When both include and exclude patterns are set and both match a key,\n"); + printf(" exclude wins and the key will not be added, like a pipeline, we first\n"); + printf(" include it and then exclude it.\n"); + printf("\n"); + printf(" In a YAML file:\n"); + printf(" ```yaml\n"); + printf(" filter:\n"); + printf(" include: 'PCRE2 PATTERN MATCHING KEY NAMES TO INCLUDE'\n"); + printf(" exclude: 'PCRE2 PATTERN MATCHING KEY NAMES TO EXCLUDE'\n"); + printf(" ```\n"); + printf("\n"); + printf("--------------------------------------------------------------------------------\n"); + printf(" OTHER\n"); + printf("\n"); + printf(" -h, or --help\n"); + printf(" Display this help and exit.\n"); + printf("\n"); + printf(" --show-config\n"); + printf(" Show the configuration in YAML format before starting the job.\n"); + printf(" This is also an easy way to convert command line parameters to yaml.\n"); + printf("\n"); + printf("The program accepts all parameters as both --option=value and --option value.\n"); + printf("\n"); + printf("The maximum log line length accepted is %d characters.\n", MAX_LINE_LENGTH); + printf("\n"); + printf("PIPELINE AND SEQUENCE OF PROCESSING\n"); + printf("\n"); + printf("This is a simple diagram of the pipeline taking place:\n"); + printf(" \n"); + printf(" +---------------------------------------------------+ \n"); + printf(" | INPUT | \n"); + printf(" | read one log line at a time | \n"); + printf(" +---------------------------------------------------+ \n"); + printf(" v v v v v v \n"); + printf(" +---------------------------------------------------+ \n"); + printf(" | EXTRACT FIELDS AND VALUES | \n"); + printf(" | JSON, logfmt, or pattern based | \n"); + printf(" | (apply optional PREFIX - all keys use capitals) | \n"); + printf(" +---------------------------------------------------+ \n"); + printf(" v v v v v v \n"); + printf(" +---------------------------------------------------+ \n"); + printf(" | RENAME FIELDS | \n"); + printf(" | change the names of the fields | \n"); + printf(" +---------------------------------------------------+ \n"); + printf(" v v v v v v \n"); + printf(" +---------------------------------------------------+ \n"); + printf(" | INJECT NEW FIELDS | \n"); + printf(" | constants, or other field values as variables | \n"); + printf(" +---------------------------------------------------+ \n"); + printf(" v v v v v v \n"); + printf(" +---------------------------------------------------+ \n"); + printf(" | REWRITE FIELD VALUES | \n"); + printf(" | pipeline multiple rewriting rules to alter | \n"); + printf(" | the values of the fields | \n"); + printf(" +---------------------------------------------------+ \n"); + printf(" v v v v v v \n"); + printf(" +---------------------------------------------------+ \n"); + printf(" | FILTER FIELDS | \n"); + printf(" | use include and exclude patterns on the field | \n"); + printf(" | names, to select which fields are sent to journal | \n"); + printf(" +---------------------------------------------------+ \n"); + printf(" v v v v v v \n"); + printf(" +---------------------------------------------------+ \n"); + printf(" | OUTPUT | \n"); + printf(" | 
generate Journal Export Format | \n"); + printf(" +---------------------------------------------------+ \n"); + printf(" \n"); + printf("--------------------------------------------------------------------------------\n"); + printf("JOURNAL FIELDS RULES (enforced by systemd-journald)\n"); + printf("\n"); + printf(" - field names can be up to 64 characters\n"); + printf(" - the only allowed field characters are A-Z, 0-9 and underscore\n"); + printf(" - the first character of fields cannot be a digit\n"); + printf(" - protected journal fields start with underscore:\n"); + printf(" * they are accepted by systemd-journal-remote\n"); + printf(" * they are NOT accepted by a local systemd-journald\n"); + printf("\n"); + printf(" For best results, always include these fields:\n"); + printf("\n"); + printf(" MESSAGE=TEXT\n"); + printf(" The MESSAGE is the body of the log entry.\n"); + printf(" This field is what we usually see in our logs.\n"); + printf("\n"); + printf(" PRIORITY=NUMBER\n"); + printf(" PRIORITY sets the severity of the log entry.\n"); + printf(" 0=emerg, 1=alert, 2=crit, 3=err, 4=warn, 5=notice, 6=info, 7=debug\n"); + printf(" - Emergency events (0) are usually broadcast to all terminals.\n"); + printf(" - Emergency, alert, critical, and error (0-3) are usually colored red.\n"); + printf(" - Warning (4) entries are usually colored yellow.\n"); + printf(" - Notice (5) entries are usually bold or have a brighter white color.\n"); + printf(" - Info (6) entries are the default.\n"); + printf(" - Debug (7) entries are usually grayed or dimmed.\n"); + printf("\n"); + printf(" SYSLOG_IDENTIFIER=NAME\n"); + printf(" SYSLOG_IDENTIFIER sets the name of application.\n"); + printf(" Use something descriptive, like: SYSLOG_IDENTIFIER=nginx-logs\n"); + printf("\n"); + printf("You can find the most common fields at 'man systemd.journal-fields'.\n"); + printf("\n"); +} diff --git a/collectors/log2journal/log2journal-inject.c b/collectors/log2journal/log2journal-inject.c new file mode 100644 index 00000000000000..45158066bf23e0 --- /dev/null +++ b/collectors/log2journal/log2journal-inject.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "log2journal.h" + +void injection_cleanup(INJECTION *inj) { + hashed_key_cleanup(&inj->key); + replace_pattern_cleanup(&inj->value); +} + +static inline bool log_job_injection_replace(INJECTION *inj, const char *key, size_t key_len, const char *value, size_t value_len) { + if(key_len > JOURNAL_MAX_KEY_LEN) + log2stderr("WARNING: injection key '%.*s' is too long for journal. Will be truncated.", (int)key_len, key); + + if(value_len > JOURNAL_MAX_VALUE_LEN) + log2stderr("WARNING: injection value of key '%.*s' is too long for journal. Will be truncated.", (int)key_len, key); + + hashed_key_len_set(&inj->key, key, key_len); + char *v = strndupz(value, value_len); + bool ret = replace_pattern_set(&inj->value, v); + freez(v); + + return ret; +} + +bool log_job_injection_add(LOG_JOB *jb, const char *key, size_t key_len, const char *value, size_t value_len, bool unmatched) { + if (unmatched) { + if (jb->unmatched.injections.used >= MAX_INJECTIONS) { + log2stderr("Error: too many unmatched injections. You can inject up to %d lines.", MAX_INJECTIONS); + return false; + } + } + else { + if (jb->injections.used >= MAX_INJECTIONS) { + log2stderr("Error: too many injections. 
You can inject up to %d lines.", MAX_INJECTIONS); + return false; + } + } + + bool ret; + if (unmatched) { + ret = log_job_injection_replace(&jb->unmatched.injections.keys[jb->unmatched.injections.used++], + key, key_len, value, value_len); + } else { + ret = log_job_injection_replace(&jb->injections.keys[jb->injections.used++], + key, key_len, value, value_len); + } + + return ret; +} diff --git a/collectors/log2journal/log2journal-json.c b/collectors/log2journal/log2journal-json.c new file mode 100644 index 00000000000000..2ca294e4db3280 --- /dev/null +++ b/collectors/log2journal/log2journal-json.c @@ -0,0 +1,630 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "log2journal.h" + +#define JSON_ERROR_LINE_MAX 1024 +#define JSON_KEY_MAX 1024 +#define JSON_DEPTH_MAX 100 + +struct log_json_state { + LOG_JOB *jb; + + const char *line; + uint32_t pos; + uint32_t depth; + char *stack[JSON_DEPTH_MAX]; + + char key[JSON_KEY_MAX]; + char msg[JSON_ERROR_LINE_MAX]; +}; + +static inline bool json_parse_object(LOG_JSON_STATE *js); +static inline bool json_parse_array(LOG_JSON_STATE *js); + +#define json_current_pos(js) &(js)->line[(js)->pos] +#define json_consume_char(js) ++(js)->pos + +static inline void json_process_key_value(LOG_JSON_STATE *js, const char *value, size_t len) { + log_job_send_extracted_key_value(js->jb, js->key, value, len); +} + +static inline void json_skip_spaces(LOG_JSON_STATE *js) { + const char *s = json_current_pos(js); + const char *start = s; + + while(isspace(*s)) s++; + + js->pos += s - start; +} + +static inline bool json_expect_char_after_white_space(LOG_JSON_STATE *js, const char *expected) { + json_skip_spaces(js); + + const char *s = json_current_pos(js); + for(const char *e = expected; *e ;e++) { + if (*s == *e) + return true; + } + + snprintf(js->msg, sizeof(js->msg), + "JSON PARSER: character '%c' is not one of the expected characters (%s), at pos %zu", + *s ? 
*s : '?', expected, js->pos); + + return false; +} + +static inline bool json_parse_null(LOG_JSON_STATE *js) { + const char *s = json_current_pos(js); + if (strncmp(s, "null", 4) == 0) { + json_process_key_value(js, "null", 4); + js->pos += 4; + return true; + } + else { + snprintf(js->msg, sizeof(js->msg), + "JSON PARSER: expected 'null', found '%.4s' at position %zu", s, js->pos); + return false; + } +} + +static inline bool json_parse_true(LOG_JSON_STATE *js) { + const char *s = json_current_pos(js); + if (strncmp(s, "true", 4) == 0) { + json_process_key_value(js, "true", 4); + js->pos += 4; + return true; + } + else { + snprintf(js->msg, sizeof(js->msg), + "JSON PARSER: expected 'true', found '%.4s' at position %zu", s, js->pos); + return false; + } +} + +static inline bool json_parse_false(LOG_JSON_STATE *js) { + const char *s = json_current_pos(js); + if (strncmp(s, "false", 5) == 0) { + json_process_key_value(js, "false", 5); + js->pos += 5; + return true; + } + else { + snprintf(js->msg, sizeof(js->msg), + "JSON PARSER: expected 'false', found '%.4s' at position %zu", s, js->pos); + return false; + } +} + +static inline bool json_parse_number(LOG_JSON_STATE *js) { + static __thread char value[8192]; + + value[0] = '\0'; + char *d = value; + const char *s = json_current_pos(js); + size_t remaining = sizeof(value) - 1; // Reserve space for null terminator + + // Optional minus sign + if (*s == '-') { + *d++ = *s++; + remaining--; + } + + // Digits before decimal point + while (*s >= '0' && *s <= '9') { + if (remaining < 2) { + snprintf(js->msg, sizeof(js->msg), "JSON PARSER: truncated number value at pos %zu", js->pos); + return false; + } + *d++ = *s++; + remaining--; + } + + // Decimal point and fractional part + if (*s == '.') { + *d++ = *s++; + remaining--; + + while (*s >= '0' && *s <= '9') { + if (remaining < 2) { + snprintf(js->msg, sizeof(js->msg), "JSON PARSER: truncated fractional part at pos %zu", js->pos); + return false; + } + *d++ = *s++; + remaining--; + } + } + + // Exponent part + if (*s == 'e' || *s == 'E') { + *d++ = *s++; + remaining--; + + // Optional sign in exponent + if (*s == '+' || *s == '-') { + *d++ = *s++; + remaining--; + } + + while (*s >= '0' && *s <= '9') { + if (remaining < 2) { + snprintf(js->msg, sizeof(js->msg), "JSON PARSER: truncated exponent at pos %zu", js->pos); + return false; + } + *d++ = *s++; + remaining--; + } + } + + *d = '\0'; + js->pos += d - value; + + if (d > value) { + json_process_key_value(js, value, d - value); + return true; + } else { + snprintf(js->msg, sizeof(js->msg), "JSON PARSER: invalid number format at pos %zu", js->pos); + return false; + } +} + +static inline bool encode_utf8(unsigned codepoint, char **d, size_t *remaining) { + if (codepoint <= 0x7F) { + // 1-byte sequence + if (*remaining < 2) return false; // +1 for the null + *(*d)++ = (char)codepoint; + (*remaining)--; + } + else if (codepoint <= 0x7FF) { + // 2-byte sequence + if (*remaining < 3) return false; // +1 for the null + *(*d)++ = (char)(0xC0 | ((codepoint >> 6) & 0x1F)); + *(*d)++ = (char)(0x80 | (codepoint & 0x3F)); + (*remaining) -= 2; + } + else if (codepoint <= 0xFFFF) { + // 3-byte sequence + if (*remaining < 4) return false; // +1 for the null + *(*d)++ = (char)(0xE0 | ((codepoint >> 12) & 0x0F)); + *(*d)++ = (char)(0x80 | ((codepoint >> 6) & 0x3F)); + *(*d)++ = (char)(0x80 | (codepoint & 0x3F)); + (*remaining) -= 3; + } + else if (codepoint <= 0x10FFFF) { + // 4-byte sequence + if (*remaining < 5) return false; // +1 for the null + *(*d)++ = 
(char)(0xF0 | ((codepoint >> 18) & 0x07)); + *(*d)++ = (char)(0x80 | ((codepoint >> 12) & 0x3F)); + *(*d)++ = (char)(0x80 | ((codepoint >> 6) & 0x3F)); + *(*d)++ = (char)(0x80 | (codepoint & 0x3F)); + (*remaining) -= 4; + } + else + // Invalid code point + return false; + + return true; +} + +size_t parse_surrogate(const char *s, char *d, size_t *remaining) { + if (s[0] != '\\' || (s[1] != 'u' && s[1] != 'U')) { + return 0; // Not a valid Unicode escape sequence + } + + char hex[9] = {0}; // Buffer for the hexadecimal value + unsigned codepoint; + + if (s[1] == 'u') { + // Handle \uXXXX + if (!isxdigit(s[2]) || !isxdigit(s[3]) || !isxdigit(s[4]) || !isxdigit(s[5])) { + return 0; // Not a valid \uXXXX sequence + } + + hex[0] = s[2]; + hex[1] = s[3]; + hex[2] = s[4]; + hex[3] = s[5]; + codepoint = (unsigned)strtoul(hex, NULL, 16); + + if (codepoint >= 0xD800 && codepoint <= 0xDBFF) { + // Possible start of surrogate pair + if (s[6] == '\\' && s[7] == 'u' && isxdigit(s[8]) && isxdigit(s[9]) && + isxdigit(s[10]) && isxdigit(s[11])) { + // Valid low surrogate + unsigned low_surrogate = strtoul(&s[8], NULL, 16); + if (low_surrogate < 0xDC00 || low_surrogate > 0xDFFF) { + return 0; // Invalid low surrogate + } + codepoint = 0x10000 + ((codepoint - 0xD800) << 10) + (low_surrogate - 0xDC00); + return encode_utf8(codepoint, &d, remaining) ? 12 : 0; // \uXXXX\uXXXX + } + } + + // Single \uXXXX + return encode_utf8(codepoint, &d, remaining) ? 6 : 0; + } + else { + // Handle \UXXXXXXXX + for (int i = 2; i < 10; i++) { + if (!isxdigit(s[i])) { + return 0; // Not a valid \UXXXXXXXX sequence + } + hex[i - 2] = s[i]; + } + codepoint = (unsigned)strtoul(hex, NULL, 16); + return encode_utf8(codepoint, &d, remaining) ? 10 : 0; // \UXXXXXXXX + } +} + +static inline void copy_newline(LOG_JSON_STATE *js __maybe_unused, char **d, size_t *remaining) { + if(*remaining > 3) { + *(*d)++ = '\\'; + *(*d)++ = 'n'; + (*remaining) -= 2; + } +} + +static inline void copy_tab(LOG_JSON_STATE *js __maybe_unused, char **d, size_t *remaining) { + if(*remaining > 3) { + *(*d)++ = '\\'; + *(*d)++ = 't'; + (*remaining) -= 2; + } +} + +static inline bool json_parse_string(LOG_JSON_STATE *js) { + static __thread char value[JOURNAL_MAX_VALUE_LEN]; + + if(!json_expect_char_after_white_space(js, "\"")) + return false; + + json_consume_char(js); + + value[0] = '\0'; + char *d = value; + const char *s = json_current_pos(js); + size_t remaining = sizeof(value); + + while (*s && *s != '"') { + char c; + + if (*s == '\\') { + s++; + + switch (*s) { + case 'n': + copy_newline(js, &d, &remaining); + s++; + continue; + + case 't': + copy_tab(js, &d, &remaining); + s++; + continue; + + case 'f': + case 'b': + case 'r': + c = ' '; + s++; + break; + + case 'u': { + size_t old_remaining = remaining; + size_t consumed = parse_surrogate(s - 1, d, &remaining); + if (consumed > 0) { + s += consumed - 1; // -1 because we already incremented s after '\\' + d += old_remaining - remaining; + continue; + } + else { + *d++ = '\\'; + remaining--; + c = *s++; + } + } + break; + + default: + c = *s++; + break; + } + } + else + c = *s++; + + if(remaining < 2) { + snprintf(js->msg, sizeof(js->msg), + "JSON PARSER: truncated string value at pos %zu", js->pos); + return false; + } + else { + *d++ = c; + remaining--; + } + } + *d = '\0'; + js->pos += s - json_current_pos(js); + + if(!json_expect_char_after_white_space(js, "\"")) + return false; + + json_consume_char(js); + + if(d > value) + json_process_key_value(js, value, d - value); + + return true; +} + +static 
inline bool json_parse_key_and_push(LOG_JSON_STATE *js) { + if (!json_expect_char_after_white_space(js, "\"")) + return false; + + if(js->depth >= JSON_DEPTH_MAX - 1) { + snprintf(js->msg, sizeof(js->msg), + "JSON PARSER: object too deep, at pos %zu", js->pos); + return false; + } + + json_consume_char(js); + + char *d = js->stack[js->depth]; + if(js->depth) + *d++ = '_'; + + size_t remaining = sizeof(js->key) - (d - js->key); + + const char *s = json_current_pos(js); + char last_c = '\0'; + while(*s && *s != '\"') { + char c; + + if (*s == '\\') { + s++; + c = (char)((*s == 'u') ? '_' : journal_key_characters_map[(unsigned char)*s]); + s += (*s == 'u') ? 5 : 1; + } + else + c = journal_key_characters_map[(unsigned char)*s++]; + + if(c == '_' && last_c == '_') + continue; + else { + if(remaining < 2) { + snprintf(js->msg, sizeof(js->msg), + "JSON PARSER: key buffer full - keys are too long, at pos %zu", js->pos); + return false; + } + *d++ = c; + remaining--; + } + + last_c = c; + } + *d = '\0'; + js->pos += s - json_current_pos(js); + + if (!json_expect_char_after_white_space(js, "\"")) + return false; + + json_consume_char(js); + + js->stack[++js->depth] = d; + + return true; +} + +static inline bool json_key_pop(LOG_JSON_STATE *js) { + if(js->depth <= 0) { + snprintf(js->msg, sizeof(js->msg), + "JSON PARSER: cannot pop a key at depth %zu, at pos %zu", js->depth, js->pos); + return false; + } + + char *k = js->stack[js->depth--]; + *k = '\0'; + return true; +} + +static inline bool json_parse_value(LOG_JSON_STATE *js) { + if(!json_expect_char_after_white_space(js, "-.0123456789tfn\"{[")) + return false; + + const char *s = json_current_pos(js); + switch(*s) { + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + return json_parse_number(js); + + case 't': + return json_parse_true(js); + + case 'f': + return json_parse_false(js); + + case 'n': + return json_parse_null(js); + + case '"': + return json_parse_string(js); + + case '{': + return json_parse_object(js); + + case '[': + return json_parse_array(js); + } + + snprintf(js->msg, sizeof(js->msg), + "JSON PARSER: unexpected character at pos %zu", js->pos); + return false; +} + +static inline bool json_key_index_and_push(LOG_JSON_STATE *js, size_t index) { + char *d = js->stack[js->depth]; + if(js->depth > 0) { + *d++ = '_'; + } + + // Convert index to string manually + char temp[32]; + char *t = temp + sizeof(temp) - 1; // Start at the end of the buffer + *t = '\0'; + + do { + *--t = (char)((index % 10) + '0'); + index /= 10; + } while (index > 0); + + size_t remaining = sizeof(js->key) - (d - js->key); + + // Append the index to the key + while (*t) { + if(remaining < 2) { + snprintf(js->msg, sizeof(js->msg), + "JSON PARSER: key buffer full - keys are too long, at pos %zu", js->pos); + return false; + } + + *d++ = *t++; + remaining--; + } + + *d = '\0'; // Null-terminate the key + js->stack[++js->depth] = d; + + return true; +} + +static inline bool json_parse_array(LOG_JSON_STATE *js) { + if(!json_expect_char_after_white_space(js, "[")) + return false; + + json_consume_char(js); + + size_t index = 0; + do { + if(!json_key_index_and_push(js, index)) + return false; + + if(!json_parse_value(js)) + return false; + + json_key_pop(js); + + if(!json_expect_char_after_white_space(js, ",]")) + return false; + + const char *s = json_current_pos(js); + json_consume_char(js); + if(*s == ',') { + index++; + continue; + } + else // } + break; + + } while(true); + + 
return true; +} + +static inline bool json_parse_object(LOG_JSON_STATE *js) { + if(!json_expect_char_after_white_space(js, "{")) + return false; + + json_consume_char(js); + + do { + if (!json_expect_char_after_white_space(js, "\"")) + return false; + + if(!json_parse_key_and_push(js)) + return false; + + if(!json_expect_char_after_white_space(js, ":")) + return false; + + json_consume_char(js); + + if(!json_parse_value(js)) + return false; + + json_key_pop(js); + + if(!json_expect_char_after_white_space(js, ",}")) + return false; + + const char *s = json_current_pos(js); + json_consume_char(js); + if(*s == ',') + continue; + else // } + break; + + } while(true); + + return true; +} + +LOG_JSON_STATE *json_parser_create(LOG_JOB *jb) { + LOG_JSON_STATE *js = mallocz(sizeof(LOG_JSON_STATE)); + memset(js, 0, sizeof(LOG_JSON_STATE)); + js->jb = jb; + + if(jb->prefix) + copy_to_buffer(js->key, sizeof(js->key), js->jb->prefix, strlen(js->jb->prefix)); + + js->stack[0] = &js->key[strlen(js->key)]; + + return js; +} + +void json_parser_destroy(LOG_JSON_STATE *js) { + if(js) + freez(js); +} + +const char *json_parser_error(LOG_JSON_STATE *js) { + return js->msg; +} + +bool json_parse_document(LOG_JSON_STATE *js, const char *txt) { + js->line = txt; + js->pos = 0; + js->msg[0] = '\0'; + js->stack[0][0] = '\0'; + js->depth = 0; + + if(!json_parse_object(js)) + return false; + + json_skip_spaces(js); + const char *s = json_current_pos(js); + + if(*s) { + snprintf(js->msg, sizeof(js->msg), + "JSON PARSER: excess characters found after document is finished, at pos %zu", js->pos); + return false; + } + + return true; +} + +void json_test(void) { + LOG_JOB jb = { .prefix = "NIGNX_" }; + LOG_JSON_STATE *json = json_parser_create(&jb); + + json_parse_document(json, "{\"value\":\"\\u\\u039A\\u03B1\\u03BB\\u03B7\\u03BC\\u03AD\\u03C1\\u03B1\"}"); + + json_parser_destroy(json); +} diff --git a/collectors/log2journal/log2journal-logfmt.c b/collectors/log2journal/log2journal-logfmt.c new file mode 100644 index 00000000000000..5966cce901b6a5 --- /dev/null +++ b/collectors/log2journal/log2journal-logfmt.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "log2journal.h" + +#define LOGFMT_ERROR_LINE_MAX 1024 +#define LOGFMT_KEY_MAX 1024 + +struct logfmt_state { + LOG_JOB *jb; + + const char *line; + uint32_t pos; + uint32_t key_start; + + char key[LOGFMT_KEY_MAX]; + char msg[LOGFMT_ERROR_LINE_MAX]; +}; + +#define logfmt_current_pos(lfs) &(lfs)->line[(lfs)->pos] +#define logfmt_consume_char(lfs) ++(lfs)->pos + +static inline void logfmt_process_key_value(LOGFMT_STATE *lfs, const char *value, size_t len) { + log_job_send_extracted_key_value(lfs->jb, lfs->key, value, len); +} + +static inline void logfmt_skip_spaces(LOGFMT_STATE *lfs) { + const char *s = logfmt_current_pos(lfs); + const char *start = s; + + while(isspace(*s)) s++; + + lfs->pos += s - start; +} + +static inline void copy_newline(LOGFMT_STATE *lfs __maybe_unused, char **d, size_t *remaining) { + if(*remaining > 3) { + *(*d)++ = '\\'; + *(*d)++ = 'n'; + (*remaining) -= 2; + } +} + +static inline void copy_tab(LOGFMT_STATE *lfs __maybe_unused, char **d, size_t *remaining) { + if(*remaining > 3) { + *(*d)++ = '\\'; + *(*d)++ = 't'; + (*remaining) -= 2; + } +} + +static inline bool logftm_parse_value(LOGFMT_STATE *lfs) { + static __thread char value[JOURNAL_MAX_VALUE_LEN]; + + char quote = '\0'; + const char *s = logfmt_current_pos(lfs); + if(*s == '\"' || *s == '\'') { + quote = *s; + logfmt_consume_char(lfs); + } + + value[0] = 
'\0'; + char *d = value; + s = logfmt_current_pos(lfs); + size_t remaining = sizeof(value); + + char end_char = (char)(quote == '\0' ? ' ' : quote); + while (*s && *s != end_char) { + char c; + + if (*s == '\\') { + s++; + + switch (*s) { + case 'n': + copy_newline(lfs, &d, &remaining); + s++; + continue; + + case 't': + copy_tab(lfs, &d, &remaining); + s++; + continue; + + case 'f': + case 'b': + case 'r': + c = ' '; + s++; + break; + + default: + c = *s++; + break; + } + } + else + c = *s++; + + if(remaining < 2) { + snprintf(lfs->msg, sizeof(lfs->msg), + "LOGFMT PARSER: truncated string value at pos %zu", lfs->pos); + return false; + } + else { + *d++ = c; + remaining--; + } + } + *d = '\0'; + lfs->pos += s - logfmt_current_pos(lfs); + + s = logfmt_current_pos(lfs); + + if(quote != '\0') { + if (*s != quote) { + snprintf(lfs->msg, sizeof(lfs->msg), + "LOGFMT PARSER: missing quote at pos %zu: '%s'", + lfs->pos, s); + return false; + } + else + logfmt_consume_char(lfs); + } + + if(d > value) + logfmt_process_key_value(lfs, value, d - value); + + return true; +} + +static inline bool logfmt_parse_key(LOGFMT_STATE *lfs) { + logfmt_skip_spaces(lfs); + + char *d = &lfs->key[lfs->key_start]; + + size_t remaining = sizeof(lfs->key) - (d - lfs->key); + + const char *s = logfmt_current_pos(lfs); + char last_c = '\0'; + while(*s && *s != '=') { + char c; + + if (*s == '\\') + s++; + + c = journal_key_characters_map[(unsigned char)*s++]; + + if(c == '_' && last_c == '_') + continue; + else { + if(remaining < 2) { + snprintf(lfs->msg, sizeof(lfs->msg), + "LOGFMT PARSER: key buffer full - keys are too long, at pos %zu", lfs->pos); + return false; + } + *d++ = c; + remaining--; + } + + last_c = c; + } + *d = '\0'; + lfs->pos += s - logfmt_current_pos(lfs); + + s = logfmt_current_pos(lfs); + if(*s != '=') { + snprintf(lfs->msg, sizeof(lfs->msg), + "LOGFMT PARSER: key is missing the equal sign, at pos %zu", lfs->pos); + return false; + } + + logfmt_consume_char(lfs); + + return true; +} + +LOGFMT_STATE *logfmt_parser_create(LOG_JOB *jb) { + LOGFMT_STATE *lfs = mallocz(sizeof(LOGFMT_STATE)); + memset(lfs, 0, sizeof(LOGFMT_STATE)); + lfs->jb = jb; + + if(jb->prefix) + lfs->key_start = copy_to_buffer(lfs->key, sizeof(lfs->key), lfs->jb->prefix, strlen(lfs->jb->prefix)); + + return lfs; +} + +void logfmt_parser_destroy(LOGFMT_STATE *lfs) { + if(lfs) + freez(lfs); +} + +const char *logfmt_parser_error(LOGFMT_STATE *lfs) { + return lfs->msg; +} + +bool logfmt_parse_document(LOGFMT_STATE *lfs, const char *txt) { + lfs->line = txt; + lfs->pos = 0; + lfs->msg[0] = '\0'; + + const char *s; + do { + if(!logfmt_parse_key(lfs)) + return false; + + if(!logftm_parse_value(lfs)) + return false; + + logfmt_skip_spaces(lfs); + + s = logfmt_current_pos(lfs); + } while(*s); + + return true; +} + + +void logfmt_test(void) { + LOG_JOB jb = { .prefix = "NIGNX_" }; + LOGFMT_STATE *logfmt = logfmt_parser_create(&jb); + + logfmt_parse_document(logfmt, "x=1 y=2 z=\"3 \\ 4\" 5 "); + + logfmt_parser_destroy(logfmt); +} diff --git a/collectors/log2journal/log2journal-params.c b/collectors/log2journal/log2journal-params.c new file mode 100644 index 00000000000000..a7bb3e263c6040 --- /dev/null +++ b/collectors/log2journal/log2journal-params.c @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "log2journal.h" + +// ---------------------------------------------------------------------------- + +void log_job_init(LOG_JOB *jb) { + memset(jb, 0, sizeof(*jb)); + simple_hashtable_init_KEY(&jb->hashtable, 32); + 
hashed_key_set(&jb->line.key, "LINE"); +} + +static void simple_hashtable_cleanup_allocated_keys(SIMPLE_HASHTABLE_KEY *ht) { + SIMPLE_HASHTABLE_FOREACH_READ_ONLY(ht, sl, _KEY) { + HASHED_KEY *k = SIMPLE_HASHTABLE_FOREACH_READ_ONLY_VALUE(sl); + if(k && k->flags & HK_HASHTABLE_ALLOCATED) { + // the order of these statements is important! + simple_hashtable_del_slot_KEY(ht, sl); // remove any references to n + hashed_key_cleanup(k); // cleanup the internals of n + freez(k); // free n + } + } +} + +void log_job_cleanup(LOG_JOB *jb) { + hashed_key_cleanup(&jb->line.key); + + if(jb->prefix) { + freez((void *) jb->prefix); + jb->prefix = NULL; + } + + if(jb->pattern) { + freez((void *) jb->pattern); + jb->pattern = NULL; + } + + for(size_t i = 0; i < jb->injections.used ;i++) + injection_cleanup(&jb->injections.keys[i]); + + for(size_t i = 0; i < jb->unmatched.injections.used ;i++) + injection_cleanup(&jb->unmatched.injections.keys[i]); + + for(size_t i = 0; i < jb->renames.used ;i++) + rename_cleanup(&jb->renames.array[i]); + + for(size_t i = 0; i < jb->rewrites.used; i++) + rewrite_cleanup(&jb->rewrites.array[i]); + + txt_cleanup(&jb->rewrites.tmp); + txt_cleanup(&jb->filename.current); + + simple_hashtable_cleanup_allocated_keys(&jb->hashtable); + simple_hashtable_destroy_KEY(&jb->hashtable); + + // remove references to everything else, to reveal them in valgrind + memset(jb, 0, sizeof(*jb)); +} + +// ---------------------------------------------------------------------------- + +bool log_job_filename_key_set(LOG_JOB *jb, const char *key, size_t key_len) { + if(!key || !*key) { + log2stderr("filename key cannot be empty."); + return false; + } + + hashed_key_len_set(&jb->filename.key, key, key_len); + + return true; +} + +bool log_job_key_prefix_set(LOG_JOB *jb, const char *prefix, size_t prefix_len) { + if(!prefix || !*prefix) { + log2stderr("key prefix cannot be empty."); + return false; + } + + if(jb->prefix) + freez((char*)jb->prefix); + + jb->prefix = strndupz(prefix, prefix_len); + + return true; +} + +bool log_job_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len) { + if(!pattern || !*pattern) { + log2stderr("pattern cannot be empty."); + return false; + } + + if(jb->pattern) + freez((char*)jb->pattern); + + jb->pattern = strndupz(pattern, pattern_len); + + return true; +} + +bool log_job_include_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len) { + if(jb->filter.include.re) { + log2stderr("FILTER INCLUDE: there is already an include filter set"); + return false; + } + + if(!search_pattern_set(&jb->filter.include, pattern, pattern_len)) { + log2stderr("FILTER INCLUDE: failed: %s", jb->filter.include.error.txt); + return false; + } + + return true; +} + +bool log_job_exclude_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len) { + if(jb->filter.exclude.re) { + log2stderr("FILTER EXCLUDE: there is already an exclude filter set"); + return false; + } + + if(!search_pattern_set(&jb->filter.exclude, pattern, pattern_len)) { + log2stderr("FILTER EXCLUDE: failed: %s", jb->filter.exclude.error.txt); + return false; + } + + return true; +} + +// ---------------------------------------------------------------------------- + +static bool parse_rename(LOG_JOB *jb, const char *param) { + // Search for '=' in param + const char *equal_sign = strchr(param, '='); + if (!equal_sign || equal_sign == param) { + log2stderr("Error: Invalid rename format, '=' not found in %s", param); + return false; + } + + const char *new_key = param; + size_t new_key_len 
= equal_sign - new_key; + + const char *old_key = equal_sign + 1; + size_t old_key_len = strlen(old_key); + + return log_job_rename_add(jb, new_key, new_key_len, old_key, old_key_len); +} + +static bool is_symbol(char c) { + return !isalpha(c) && !isdigit(c) && !iscntrl(c); +} + +struct { + const char *keyword; + int action; + RW_FLAGS flag; +} rewrite_flags[] = { + {"match", 1, RW_MATCH_PCRE2}, + {"match", 0, RW_MATCH_NON_EMPTY}, + + {"regex", 1, RW_MATCH_PCRE2}, + {"regex", 0, RW_MATCH_NON_EMPTY}, + + {"pcre2", 1, RW_MATCH_PCRE2}, + {"pcre2", 0, RW_MATCH_NON_EMPTY}, + + {"non_empty", 1, RW_MATCH_NON_EMPTY}, + {"non_empty", 0, RW_MATCH_PCRE2}, + + {"non-empty", 1, RW_MATCH_NON_EMPTY}, + {"non-empty", 0, RW_MATCH_PCRE2}, + + {"not_empty", 1, RW_MATCH_NON_EMPTY}, + {"not_empty", 0, RW_MATCH_PCRE2}, + + {"not-empty", 1, RW_MATCH_NON_EMPTY}, + {"not-empty", 0, RW_MATCH_PCRE2}, + + {"stop", 0, RW_DONT_STOP}, + {"no-stop", 1, RW_DONT_STOP}, + {"no_stop", 1, RW_DONT_STOP}, + {"dont-stop", 1, RW_DONT_STOP}, + {"dont_stop", 1, RW_DONT_STOP}, + {"continue", 1, RW_DONT_STOP}, + {"inject", 1, RW_INJECT}, + {"existing", 0, RW_INJECT}, +}; + +RW_FLAGS parse_rewrite_flags(const char *options) { + RW_FLAGS flags = RW_MATCH_PCRE2; // Default option + + // Tokenize the input options using "," + char *token; + char *optionsCopy = strdup(options); // Make a copy to avoid modifying the original + token = strtok(optionsCopy, ","); + + while (token != NULL) { + // Find the keyword-action mapping + bool found = false; + + for (size_t i = 0; i < sizeof(rewrite_flags) / sizeof(rewrite_flags[0]); i++) { + if (strcmp(token, rewrite_flags[i].keyword) == 0) { + if (rewrite_flags[i].action == 1) { + flags |= rewrite_flags[i].flag; // Set the flag + } else { + flags &= ~rewrite_flags[i].flag; // Unset the flag + } + + found = true; + } + } + + if(!found) + log2stderr("Warning: rewrite options '%s' is not understood.", token); + + // Get the next token + token = strtok(NULL, ","); + } + + free(optionsCopy); // Free the copied string + + return flags; +} + + +static bool parse_rewrite(LOG_JOB *jb, const char *param) { + // Search for '=' in param + const char *equal_sign = strchr(param, '='); + if (!equal_sign || equal_sign == param) { + log2stderr("Error: Invalid rewrite format, '=' not found in %s", param); + return false; + } + + // Get the next character as the separator + char separator = *(equal_sign + 1); + if (!separator || !is_symbol(separator)) { + log2stderr("Error: rewrite separator not found after '=', or is not one of /\\|-# in: %s", param); + return false; + } + + // Find the next occurrence of the separator + const char *second_separator = strchr(equal_sign + 2, separator); + if (!second_separator) { + log2stderr("Error: rewrite second separator not found in: %s", param); + return false; + } + + // Check if the search pattern is empty + if (equal_sign + 1 == second_separator) { + log2stderr("Error: rewrite search pattern is empty in: %s", param); + return false; + } + + // Check if the replacement pattern is empty + if (*(second_separator + 1) == '\0') { + log2stderr("Error: rewrite replacement pattern is empty in: %s", param); + return false; + } + + RW_FLAGS flags = RW_MATCH_PCRE2; + const char *third_separator = strchr(second_separator + 1, separator); + if(third_separator) + flags = parse_rewrite_flags(third_separator + 1); + + // Extract key, search pattern, and replacement pattern + char *key = strndupz(param, equal_sign - param); + char *search_pattern = strndupz(equal_sign + 2, second_separator - 
(equal_sign + 2)); + char *replace_pattern = third_separator ? strndup(second_separator + 1, third_separator - (second_separator + 1)) : strdupz(second_separator + 1); + + if(!*search_pattern) + flags &= ~RW_MATCH_PCRE2; + + bool ret = log_job_rewrite_add(jb, key, flags, search_pattern, replace_pattern); + + freez(key); + freez(search_pattern); + freez(replace_pattern); + + return ret; +} + +static bool parse_inject(LOG_JOB *jb, const char *value, bool unmatched) { + const char *equal = strchr(value, '='); + if (!equal) { + log2stderr("Error: injection '%s' does not have an equal sign.", value); + return false; + } + + const char *key = value; + const char *val = equal + 1; + log_job_injection_add(jb, key, equal - key, val, strlen(val), unmatched); + + return true; +} + +bool log_job_command_line_parse_parameters(LOG_JOB *jb, int argc, char **argv) { + for (int i = 1; i < argc; i++) { + char *arg = argv[i]; + if (strcmp(arg, "--help") == 0 || strcmp(arg, "-h") == 0) { + log_job_command_line_help(argv[0]); + exit(0); + } +#if defined(NETDATA_DEV_MODE) || defined(NETDATA_INTERNAL_CHECKS) + else if(strcmp(arg, "--test") == 0) { + // logfmt_test(); + json_test(); + exit(1); + } +#endif + else if (strcmp(arg, "--show-config") == 0) { + jb->show_config = true; + } + else { + char buffer[1024]; + char *param = NULL; + char *value = NULL; + + char *equal_sign = strchr(arg, '='); + if (equal_sign) { + copy_to_buffer(buffer, sizeof(buffer), arg, equal_sign - arg); + param = buffer; + value = equal_sign + 1; + } + else { + param = arg; + if (i + 1 < argc) { + value = argv[++i]; + } + else { + if (!jb->pattern) { + log_job_pattern_set(jb, arg, strlen(arg)); + continue; + } else { + log2stderr("Error: Multiple patterns detected. Specify only one pattern. The first is '%s', the second is '%s'", jb->pattern, arg); + return false; + } + } + } + + if (strcmp(param, "--filename-key") == 0) { + if(!log_job_filename_key_set(jb, value, value ? strlen(value) : 0)) + return false; + } + else if (strcmp(param, "--prefix") == 0) { + if(!log_job_key_prefix_set(jb, value, value ? strlen(value) : 0)) + return false; + } +#ifdef HAVE_LIBYAML + else if (strcmp(param, "-f") == 0 || strcmp(param, "--file") == 0) { + if (!yaml_parse_file(value, jb)) + return false; + } + else if (strcmp(param, "-c") == 0 || strcmp(param, "--config") == 0) { + if (!yaml_parse_config(value, jb)) + return false; + } +#endif + else if (strcmp(param, "--unmatched-key") == 0) + hashed_key_set(&jb->unmatched.key, value); + else if (strcmp(param, "--inject") == 0) { + if (!parse_inject(jb, value, false)) + return false; + } + else if (strcmp(param, "--inject-unmatched") == 0) { + if (!parse_inject(jb, value, true)) + return false; + } + else if (strcmp(param, "--rewrite") == 0) { + if (!parse_rewrite(jb, value)) + return false; + } + else if (strcmp(param, "--rename") == 0) { + if (!parse_rename(jb, value)) + return false; + } + else if (strcmp(param, "--include") == 0) { + if (!log_job_include_pattern_set(jb, value, strlen(value))) + return false; + } + else if (strcmp(param, "--exclude") == 0) { + if (!log_job_exclude_pattern_set(jb, value, strlen(value))) + return false; + } + else { + i--; + if (!jb->pattern) { + log_job_pattern_set(jb, arg, strlen(arg)); + continue; + } else { + log2stderr("Error: Multiple patterns detected. Specify only one pattern. 
The first is '%s', the second is '%s'", jb->pattern, arg); + return false; + } + } + } + } + + // Check if a pattern is set and exactly one pattern is specified + if (!jb->pattern) { + log2stderr("Warning: pattern not specified. Try the default config with: -c default"); + log_job_command_line_help(argv[0]); + return false; + } + + return true; +} diff --git a/collectors/log2journal/log2journal-pattern.c b/collectors/log2journal/log2journal-pattern.c new file mode 100644 index 00000000000000..4b7e9026b3a4c7 --- /dev/null +++ b/collectors/log2journal/log2journal-pattern.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "log2journal.h" + +void search_pattern_cleanup(SEARCH_PATTERN *sp) { + if(sp->pattern) { + freez((void *)sp->pattern); + sp->pattern = NULL; + } + + if(sp->re) { + pcre2_code_free(sp->re); + sp->re = NULL; + } + + if(sp->match_data) { + pcre2_match_data_free(sp->match_data); + sp->match_data = NULL; + } + + txt_cleanup(&sp->error); +} + +static void pcre2_error_message(SEARCH_PATTERN *sp, int rc, int pos) { + char msg[1024]; + pcre2_get_error_in_buffer(msg, sizeof(msg), rc, pos); + txt_replace(&sp->error, msg, strlen(msg)); +} + +static inline bool compile_pcre2(SEARCH_PATTERN *sp) { + int error_number; + PCRE2_SIZE error_offset; + PCRE2_SPTR pattern_ptr = (PCRE2_SPTR)sp->pattern; + + sp->re = pcre2_compile(pattern_ptr, PCRE2_ZERO_TERMINATED, 0, &error_number, &error_offset, NULL); + if (!sp->re) { + pcre2_error_message(sp, error_number, (int) error_offset); + return false; + } + + return true; +} + +bool search_pattern_set(SEARCH_PATTERN *sp, const char *search_pattern, size_t search_pattern_len) { + search_pattern_cleanup(sp); + + sp->pattern = strndupz(search_pattern, search_pattern_len); + if (!compile_pcre2(sp)) + return false; + + sp->match_data = pcre2_match_data_create_from_pattern(sp->re, NULL); + + return true; +} diff --git a/collectors/log2journal/log2journal-pcre2.c b/collectors/log2journal/log2journal-pcre2.c new file mode 100644 index 00000000000000..185e6910864aa4 --- /dev/null +++ b/collectors/log2journal/log2journal-pcre2.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "log2journal.h" + +#define PCRE2_ERROR_LINE_MAX 1024 +#define PCRE2_KEY_MAX 1024 + +struct pcre2_state { + LOG_JOB *jb; + + const char *line; + uint32_t pos; + uint32_t key_start; + + pcre2_code *re; + pcre2_match_data *match_data; + + char key[PCRE2_KEY_MAX]; + char msg[PCRE2_ERROR_LINE_MAX]; +}; + +static inline void copy_and_convert_key(PCRE2_STATE *pcre2, const char *key) { + char *d = &pcre2->key[pcre2->key_start]; + size_t remaining = sizeof(pcre2->key) - pcre2->key_start; + + while(remaining >= 2 && *key) { + *d = journal_key_characters_map[(unsigned) (*key)]; + remaining--; + key++; + d++; + } + + *d = '\0'; +} + +static inline void jb_traverse_pcre2_named_groups_and_send_keys(PCRE2_STATE *pcre2, pcre2_code *re, pcre2_match_data *match_data, char *line) { + PCRE2_SIZE *ovector = pcre2_get_ovector_pointer(match_data); + uint32_t names_count; + pcre2_pattern_info(re, PCRE2_INFO_NAMECOUNT, &names_count); + + if (names_count > 0) { + PCRE2_SPTR name_table; + pcre2_pattern_info(re, PCRE2_INFO_NAMETABLE, &name_table); + uint32_t name_entry_size; + pcre2_pattern_info(re, PCRE2_INFO_NAMEENTRYSIZE, &name_entry_size); + + const unsigned char *table_ptr = name_table; + for (uint32_t i = 0; i < names_count; i++) { + int n = (table_ptr[0] << 8) | table_ptr[1]; + const char *group_name = (const char *)(table_ptr + 2); + + PCRE2_SIZE 
start_offset = ovector[2 * n]; + PCRE2_SIZE end_offset = ovector[2 * n + 1]; + PCRE2_SIZE group_length = end_offset - start_offset; + + copy_and_convert_key(pcre2, group_name); + log_job_send_extracted_key_value(pcre2->jb, pcre2->key, line + start_offset, group_length); + + table_ptr += name_entry_size; + } + } +} + +void pcre2_get_error_in_buffer(char *msg, size_t msg_len, int rc, int pos) { + int l; + + if(pos >= 0) + l = snprintf(msg, msg_len, "PCRE2 error %d at pos %d on: ", rc, pos); + else + l = snprintf(msg, msg_len, "PCRE2 error %d on: ", rc); + + pcre2_get_error_message(rc, (PCRE2_UCHAR *)&msg[l], msg_len - l); +} + +static void pcre2_error_message(PCRE2_STATE *pcre2, int rc, int pos) { + pcre2_get_error_in_buffer(pcre2->msg, sizeof(pcre2->msg), rc, pos); +} + +bool pcre2_has_error(PCRE2_STATE *pcre2) { + return !pcre2->re || pcre2->msg[0]; +} + +PCRE2_STATE *pcre2_parser_create(LOG_JOB *jb) { + PCRE2_STATE *pcre2 = mallocz(sizeof(PCRE2_STATE)); + memset(pcre2, 0, sizeof(PCRE2_STATE)); + pcre2->jb = jb; + + if(jb->prefix) + pcre2->key_start = copy_to_buffer(pcre2->key, sizeof(pcre2->key), pcre2->jb->prefix, strlen(pcre2->jb->prefix)); + + int rc; + PCRE2_SIZE pos; + pcre2->re = pcre2_compile((PCRE2_SPTR)jb->pattern, PCRE2_ZERO_TERMINATED, 0, &rc, &pos, NULL); + if (!pcre2->re) { + pcre2_error_message(pcre2, rc, pos); + return pcre2; + } + + pcre2->match_data = pcre2_match_data_create_from_pattern(pcre2->re, NULL); + + return pcre2; +} + +void pcre2_parser_destroy(PCRE2_STATE *pcre2) { + if(pcre2) + freez(pcre2); +} + +const char *pcre2_parser_error(PCRE2_STATE *pcre2) { + return pcre2->msg; +} + +bool pcre2_parse_document(PCRE2_STATE *pcre2, const char *txt, size_t len) { + pcre2->line = txt; + pcre2->pos = 0; + pcre2->msg[0] = '\0'; + + if(!len) + len = strlen(txt); + + int rc = pcre2_match(pcre2->re, (PCRE2_SPTR)pcre2->line, len, 0, 0, pcre2->match_data, NULL); + if(rc < 0) { + pcre2_error_message(pcre2, rc, -1); + return false; + } + + jb_traverse_pcre2_named_groups_and_send_keys(pcre2, pcre2->re, pcre2->match_data, (char *)pcre2->line); + + return true; +} + +void pcre2_test(void) { + LOG_JOB jb = { .prefix = "NIGNX_" }; + PCRE2_STATE *pcre2 = pcre2_parser_create(&jb); + + pcre2_parse_document(pcre2, "{\"value\":\"\\u\\u039A\\u03B1\\u03BB\\u03B7\\u03BC\\u03AD\\u03C1\\u03B1\"}", 0); + + pcre2_parser_destroy(pcre2); +} diff --git a/collectors/log2journal/log2journal-rename.c b/collectors/log2journal/log2journal-rename.c new file mode 100644 index 00000000000000..c6975779f6885d --- /dev/null +++ b/collectors/log2journal/log2journal-rename.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "log2journal.h" + +void rename_cleanup(RENAME *rn) { + hashed_key_cleanup(&rn->new_key); + hashed_key_cleanup(&rn->old_key); +} + +bool log_job_rename_add(LOG_JOB *jb, const char *new_key, size_t new_key_len, const char *old_key, size_t old_key_len) { + if(jb->renames.used >= MAX_RENAMES) { + log2stderr("Error: too many renames. 
You can rename up to %d fields.", MAX_RENAMES); + return false; + } + + RENAME *rn = &jb->renames.array[jb->renames.used++]; + hashed_key_len_set(&rn->new_key, new_key, new_key_len); + hashed_key_len_set(&rn->old_key, old_key, old_key_len); + + return true; +} diff --git a/collectors/log2journal/log2journal-replace.c b/collectors/log2journal/log2journal-replace.c new file mode 100644 index 00000000000000..429d615da5919f --- /dev/null +++ b/collectors/log2journal/log2journal-replace.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "log2journal.h" + +void replace_node_free(REPLACE_NODE *rpn) { + hashed_key_cleanup(&rpn->name); + rpn->next = NULL; + freez(rpn); +} + +void replace_pattern_cleanup(REPLACE_PATTERN *rp) { + if(rp->pattern) { + freez((void *)rp->pattern); + rp->pattern = NULL; + } + + while(rp->nodes) { + REPLACE_NODE *rpn = rp->nodes; + rp->nodes = rpn->next; + replace_node_free(rpn); + } +} + +static REPLACE_NODE *replace_pattern_add_node(REPLACE_NODE **head, bool is_variable, const char *text) { + REPLACE_NODE *new_node = callocz(1, sizeof(REPLACE_NODE)); + if (!new_node) + return NULL; + + hashed_key_set(&new_node->name, text); + new_node->is_variable = is_variable; + new_node->next = NULL; + + if (*head == NULL) + *head = new_node; + + else { + REPLACE_NODE *current = *head; + + // append it + while (current->next != NULL) + current = current->next; + + current->next = new_node; + } + + return new_node; +} + +bool replace_pattern_set(REPLACE_PATTERN *rp, const char *pattern) { + replace_pattern_cleanup(rp); + + rp->pattern = strdupz(pattern); + const char *current = rp->pattern; + + while (*current != '\0') { + if (*current == '$' && *(current + 1) == '{') { + // Start of a variable + const char *end = strchr(current, '}'); + if (!end) { + log2stderr("Error: Missing closing brace in replacement pattern: %s", rp->pattern); + return false; + } + + size_t name_length = end - current - 2; // Length of the variable name + char *variable_name = strndupz(current + 2, name_length); + if (!variable_name) { + log2stderr("Error: Memory allocation failed for variable name."); + return false; + } + + REPLACE_NODE *node = replace_pattern_add_node(&(rp->nodes), true, variable_name); + if (!node) { + freez(variable_name); + log2stderr("Error: Failed to add replacement node for variable."); + return false; + } + + current = end + 1; // Move past the variable + } + else { + // Start of literal text + const char *start = current; + while (*current != '\0' && !(*current == '$' && *(current + 1) == '{')) { + current++; + } + + size_t text_length = current - start; + char *text = strndupz(start, text_length); + if (!text) { + log2stderr("Error: Memory allocation failed for literal text."); + return false; + } + + REPLACE_NODE *node = replace_pattern_add_node(&(rp->nodes), false, text); + if (!node) { + freez(text); + log2stderr("Error: Failed to add replacement node for text."); + return false; + } + } + } + + for(REPLACE_NODE *node = rp->nodes; node; node = node->next) { + if(node->is_variable) { + rp->has_variables = true; + break; + } + } + + return true; +} diff --git a/collectors/log2journal/log2journal-rewrite.c b/collectors/log2journal/log2journal-rewrite.c new file mode 100644 index 00000000000000..112391bf035191 --- /dev/null +++ b/collectors/log2journal/log2journal-rewrite.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "log2journal.h" + +void rewrite_cleanup(REWRITE *rw) { + hashed_key_cleanup(&rw->key); + + if(rw->flags & 
RW_MATCH_PCRE2) + search_pattern_cleanup(&rw->match_pcre2); + else if(rw->flags & RW_MATCH_NON_EMPTY) + replace_pattern_cleanup(&rw->match_non_empty); + + replace_pattern_cleanup(&rw->value); + rw->flags = RW_NONE; +} + +bool log_job_rewrite_add(LOG_JOB *jb, const char *key, RW_FLAGS flags, const char *search_pattern, const char *replace_pattern) { + if(jb->rewrites.used >= MAX_REWRITES) { + log2stderr("Error: too many rewrites. You can add up to %d rewrite rules.", MAX_REWRITES); + return false; + } + + if((flags & (RW_MATCH_PCRE2|RW_MATCH_NON_EMPTY)) && (!search_pattern || !*search_pattern)) { + log2stderr("Error: rewrite for key '%s' does not specify a search pattern.", key); + return false; + } + + REWRITE *rw = &jb->rewrites.array[jb->rewrites.used++]; + rw->flags = flags; + + hashed_key_set(&rw->key, key); + + if((flags & RW_MATCH_PCRE2) && !search_pattern_set(&rw->match_pcre2, search_pattern, strlen(search_pattern))) { + rewrite_cleanup(rw); + jb->rewrites.used--; + return false; + } + else if((flags & RW_MATCH_NON_EMPTY) && !replace_pattern_set(&rw->match_non_empty, search_pattern)) { + rewrite_cleanup(rw); + jb->rewrites.used--; + return false; + } + + if(replace_pattern && *replace_pattern && !replace_pattern_set(&rw->value, replace_pattern)) { + rewrite_cleanup(rw); + jb->rewrites.used--; + return false; + } + + return true; +} diff --git a/collectors/log2journal/log2journal-yaml.c b/collectors/log2journal/log2journal-yaml.c new file mode 100644 index 00000000000000..862e7bf4b77a70 --- /dev/null +++ b/collectors/log2journal/log2journal-yaml.c @@ -0,0 +1,964 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "log2journal.h" + +// ---------------------------------------------------------------------------- +// yaml configuration file + +#ifdef HAVE_LIBYAML + +static const char *yaml_event_name(yaml_event_type_t type) { + switch (type) { + case YAML_NO_EVENT: + return "YAML_NO_EVENT"; + + case YAML_SCALAR_EVENT: + return "YAML_SCALAR_EVENT"; + + case YAML_ALIAS_EVENT: + return "YAML_ALIAS_EVENT"; + + case YAML_MAPPING_START_EVENT: + return "YAML_MAPPING_START_EVENT"; + + case YAML_MAPPING_END_EVENT: + return "YAML_MAPPING_END_EVENT"; + + case YAML_SEQUENCE_START_EVENT: + return "YAML_SEQUENCE_START_EVENT"; + + case YAML_SEQUENCE_END_EVENT: + return "YAML_SEQUENCE_END_EVENT"; + + case YAML_STREAM_START_EVENT: + return "YAML_STREAM_START_EVENT"; + + case YAML_STREAM_END_EVENT: + return "YAML_STREAM_END_EVENT"; + + case YAML_DOCUMENT_START_EVENT: + return "YAML_DOCUMENT_START_EVENT"; + + case YAML_DOCUMENT_END_EVENT: + return "YAML_DOCUMENT_END_EVENT"; + + default: + return "UNKNOWN"; + } +} + +#define yaml_error(parser, event, fmt, args...) yaml_error_with_trace(parser, event, __LINE__, __FUNCTION__, __FILE__, fmt, ##args) +static void yaml_error_with_trace(yaml_parser_t *parser, yaml_event_t *event, size_t line, const char *function, const char *file, const char *format, ...) __attribute__ ((format(__printf__, 6, 7))); +static void yaml_error_with_trace(yaml_parser_t *parser, yaml_event_t *event, size_t line, const char *function, const char *file, const char *format, ...) 
{ + char buf[1024] = ""; // Initialize buf to an empty string + const char *type = ""; + + if(event) { + type = yaml_event_name(event->type); + + switch (event->type) { + case YAML_SCALAR_EVENT: + copy_to_buffer(buf, sizeof(buf), (char *)event->data.scalar.value, event->data.scalar.length); + break; + + case YAML_ALIAS_EVENT: + snprintf(buf, sizeof(buf), "%s", event->data.alias.anchor); + break; + + default: + break; + } + } + + fprintf(stderr, "YAML %zu@%s, %s(): (line %d, column %d, %s%s%s): ", + line, file, function, + (int)(parser->mark.line + 1), (int)(parser->mark.column + 1), + type, buf[0]? ", near ": "", buf); + + va_list args; + va_start(args, format); + vfprintf(stderr, format, args); + va_end(args); + fprintf(stderr, "\n"); +} + +#define yaml_parse(parser, event) yaml_parse_with_trace(parser, event, __LINE__, __FUNCTION__, __FILE__) +static bool yaml_parse_with_trace(yaml_parser_t *parser, yaml_event_t *event, size_t line __maybe_unused, const char *function __maybe_unused, const char *file __maybe_unused) { + if (!yaml_parser_parse(parser, event)) { + yaml_error(parser, NULL, "YAML parser error %d", parser->error); + return false; + } + +// fprintf(stderr, ">>> %s >>> %.*s\n", +// yaml_event_name(event->type), +// event->type == YAML_SCALAR_EVENT ? event->data.scalar.length : 0, +// event->type == YAML_SCALAR_EVENT ? (char *)event->data.scalar.value : ""); + + return true; +} + +#define yaml_parse_expect_event(parser, type) yaml_parse_expect_event_with_trace(parser, type, __LINE__, __FUNCTION__, __FILE__) +static bool yaml_parse_expect_event_with_trace(yaml_parser_t *parser, yaml_event_type_t type, size_t line, const char *function, const char *file) { + yaml_event_t event; + if (!yaml_parse(parser, &event)) + return false; + + bool ret = true; + if(event.type != type) { + yaml_error_with_trace(parser, &event, line, function, file, "unexpected event - expecting: %s", yaml_event_name(type)); + ret = false; + } +// else +// fprintf(stderr, "OK (%zu@%s, %s()\n", line, file, function); + + yaml_event_delete(&event); + return ret; +} + +#define yaml_scalar_matches(event, s, len) yaml_scalar_matches_with_trace(event, s, len, __LINE__, __FUNCTION__, __FILE__) +static bool yaml_scalar_matches_with_trace(yaml_event_t *event, const char *s, size_t len, size_t line __maybe_unused, const char *function __maybe_unused, const char *file __maybe_unused) { + if(event->type != YAML_SCALAR_EVENT) + return false; + + if(len != event->data.scalar.length) + return false; +// else +// fprintf(stderr, "OK (%zu@%s, %s()\n", line, file, function); + + return strcmp((char *)event->data.scalar.value, s) == 0; +} + +// ---------------------------------------------------------------------------- + +static size_t yaml_parse_filename_injection(yaml_parser_t *parser, LOG_JOB *jb) { + yaml_event_t event; + size_t errors = 0; + + if(!yaml_parse_expect_event(parser, YAML_MAPPING_START_EVENT)) + return 1; + + if (!yaml_parse(parser, &event)) + return 1; + + if (yaml_scalar_matches(&event, "key", strlen("key"))) { + yaml_event_t sub_event; + if (!yaml_parse(parser, &sub_event)) + errors++; + + else { + if (sub_event.type == YAML_SCALAR_EVENT) { + if(!log_job_filename_key_set(jb, (char *) sub_event.data.scalar.value, + sub_event.data.scalar.length)) + errors++; + } + + else { + yaml_error(parser, &sub_event, "expected the filename as %s", yaml_event_name(YAML_SCALAR_EVENT)); + errors++; + } + + yaml_event_delete(&sub_event); + } + } + + if(!yaml_parse_expect_event(parser, YAML_MAPPING_END_EVENT)) + errors++; + + 
yaml_event_delete(&event); + return errors; +} + +static size_t yaml_parse_filters(yaml_parser_t *parser, LOG_JOB *jb) { + if(!yaml_parse_expect_event(parser, YAML_MAPPING_START_EVENT)) + return 1; + + size_t errors = 0; + bool finished = false; + + while(!errors && !finished) { + yaml_event_t event; + + if(!yaml_parse(parser, &event)) + return 1; + + if(event.type == YAML_SCALAR_EVENT) { + if(yaml_scalar_matches(&event, "include", strlen("include"))) { + yaml_event_t sub_event; + if(!yaml_parse(parser, &sub_event)) + errors++; + + else { + if(sub_event.type == YAML_SCALAR_EVENT) { + if(!log_job_include_pattern_set(jb, (char *) sub_event.data.scalar.value, + sub_event.data.scalar.length)) + errors++; + } + + else { + yaml_error(parser, &sub_event, "expected the include as %s", + yaml_event_name(YAML_SCALAR_EVENT)); + errors++; + } + + yaml_event_delete(&sub_event); + } + } + else if(yaml_scalar_matches(&event, "exclude", strlen("exclude"))) { + yaml_event_t sub_event; + if(!yaml_parse(parser, &sub_event)) + errors++; + + else { + if(sub_event.type == YAML_SCALAR_EVENT) { + if(!log_job_exclude_pattern_set(jb,(char *) sub_event.data.scalar.value, + sub_event.data.scalar.length)) + errors++; + } + + else { + yaml_error(parser, &sub_event, "expected the exclude as %s", + yaml_event_name(YAML_SCALAR_EVENT)); + errors++; + } + + yaml_event_delete(&sub_event); + } + } + } + else if(event.type == YAML_MAPPING_END_EVENT) + finished = true; + else { + yaml_error(parser, &event, "expected %s or %s", + yaml_event_name(YAML_SCALAR_EVENT), + yaml_event_name(YAML_MAPPING_END_EVENT)); + errors++; + } + + yaml_event_delete(&event); + } + + return errors; +} + +static size_t yaml_parse_prefix(yaml_parser_t *parser, LOG_JOB *jb) { + yaml_event_t event; + size_t errors = 0; + + if (!yaml_parse(parser, &event)) + return 1; + + if (event.type == YAML_SCALAR_EVENT) { + if(!log_job_key_prefix_set(jb, (char *) event.data.scalar.value, event.data.scalar.length)) + errors++; + } + + yaml_event_delete(&event); + return errors; +} + +static bool yaml_parse_constant_field_injection(yaml_parser_t *parser, LOG_JOB *jb, bool unmatched) { + yaml_event_t event; + if (!yaml_parse(parser, &event) || event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &event, "Expected scalar for constant field injection key"); + yaml_event_delete(&event); + return false; + } + + char *key = strndupz((char *)event.data.scalar.value, event.data.scalar.length); + char *value = NULL; + bool ret = false; + + yaml_event_delete(&event); + + if (!yaml_parse(parser, &event) || event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &event, "Expected scalar for constant field injection value"); + goto cleanup; + } + + if(!yaml_scalar_matches(&event, "value", strlen("value"))) { + yaml_error(parser, &event, "Expected scalar 'value'"); + goto cleanup; + } + + if (!yaml_parse(parser, &event) || event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &event, "Expected scalar for constant field injection value"); + goto cleanup; + } + + value = strndupz((char *)event.data.scalar.value, event.data.scalar.length); + + ret = log_job_injection_add(jb, key, strlen(key), value, strlen(value), unmatched); + +cleanup: + yaml_event_delete(&event); + freez(key); + freez(value); + return !ret ? 
1 : 0; +} + +static bool yaml_parse_injection_mapping(yaml_parser_t *parser, LOG_JOB *jb, bool unmatched) { + yaml_event_t event; + size_t errors = 0; + bool finished = false; + + while (!errors && !finished) { + if (!yaml_parse(parser, &event)) { + errors++; + continue; + } + + switch (event.type) { + case YAML_SCALAR_EVENT: + if (yaml_scalar_matches(&event, "key", strlen("key"))) { + errors += yaml_parse_constant_field_injection(parser, jb, unmatched); + } else { + yaml_error(parser, &event, "Unexpected scalar in injection mapping"); + errors++; + } + break; + + case YAML_MAPPING_END_EVENT: + finished = true; + break; + + default: + yaml_error(parser, &event, "Unexpected event in injection mapping"); + errors++; + break; + } + + yaml_event_delete(&event); + } + + return errors == 0; +} + +static size_t yaml_parse_injections(yaml_parser_t *parser, LOG_JOB *jb, bool unmatched) { + yaml_event_t event; + size_t errors = 0; + bool finished = false; + + if (!yaml_parse_expect_event(parser, YAML_SEQUENCE_START_EVENT)) + return 1; + + while (!errors && !finished) { + if (!yaml_parse(parser, &event)) { + errors++; + continue; + } + + switch (event.type) { + case YAML_MAPPING_START_EVENT: + if (!yaml_parse_injection_mapping(parser, jb, unmatched)) + errors++; + break; + + case YAML_SEQUENCE_END_EVENT: + finished = true; + break; + + default: + yaml_error(parser, &event, "Unexpected event in injections sequence"); + errors++; + break; + } + + yaml_event_delete(&event); + } + + return errors; +} + +static size_t yaml_parse_unmatched(yaml_parser_t *parser, LOG_JOB *jb) { + size_t errors = 0; + bool finished = false; + + if (!yaml_parse_expect_event(parser, YAML_MAPPING_START_EVENT)) + return 1; + + while (!errors && !finished) { + yaml_event_t event; + if (!yaml_parse(parser, &event)) { + errors++; + continue; + } + + switch (event.type) { + case YAML_SCALAR_EVENT: + if (yaml_scalar_matches(&event, "key", strlen("key"))) { + yaml_event_t sub_event; + if (!yaml_parse(parser, &sub_event)) { + errors++; + } else { + if (sub_event.type == YAML_SCALAR_EVENT) { + hashed_key_len_set(&jb->unmatched.key, (char *)sub_event.data.scalar.value, sub_event.data.scalar.length); + } else { + yaml_error(parser, &sub_event, "expected a scalar value for 'key'"); + errors++; + } + yaml_event_delete(&sub_event); + } + } else if (yaml_scalar_matches(&event, "inject", strlen("inject"))) { + errors += yaml_parse_injections(parser, jb, true); + } else { + yaml_error(parser, &event, "Unexpected scalar in unmatched section"); + errors++; + } + break; + + case YAML_MAPPING_END_EVENT: + finished = true; + break; + + default: + yaml_error(parser, &event, "Unexpected event in unmatched section"); + errors++; + break; + } + + yaml_event_delete(&event); + } + + return errors; +} + +static size_t yaml_parse_rewrites(yaml_parser_t *parser, LOG_JOB *jb) { + size_t errors = 0; + + if (!yaml_parse_expect_event(parser, YAML_SEQUENCE_START_EVENT)) + return 1; + + bool finished = false; + while (!errors && !finished) { + yaml_event_t event; + if (!yaml_parse(parser, &event)) { + errors++; + continue; + } + + switch (event.type) { + case YAML_MAPPING_START_EVENT: + { + RW_FLAGS flags = RW_NONE; + char *key = NULL; + char *search_pattern = NULL; + char *replace_pattern = NULL; + + bool mapping_finished = false; + while (!errors && !mapping_finished) { + yaml_event_t sub_event; + if (!yaml_parse(parser, &sub_event)) { + errors++; + continue; + } + + switch (sub_event.type) { + case YAML_SCALAR_EVENT: + if (yaml_scalar_matches(&sub_event, "key", 
strlen("key"))) { + if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &sub_event, "Expected scalar for rewrite key"); + errors++; + } else { + key = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length); + yaml_event_delete(&sub_event); + } + } else if (yaml_scalar_matches(&sub_event, "match", strlen("match"))) { + if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &sub_event, "Expected scalar for rewrite match PCRE2 pattern"); + errors++; + } + else { + if(search_pattern) + freez(search_pattern); + flags |= RW_MATCH_PCRE2; + flags &= ~RW_MATCH_NON_EMPTY; + search_pattern = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length); + yaml_event_delete(&sub_event); + } + } else if (yaml_scalar_matches(&sub_event, "not_empty", strlen("not_empty"))) { + if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &sub_event, "Expected scalar for rewrite not empty condition"); + errors++; + } + else { + if(search_pattern) + freez(search_pattern); + flags |= RW_MATCH_NON_EMPTY; + flags &= ~RW_MATCH_PCRE2; + search_pattern = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length); + yaml_event_delete(&sub_event); + } + } else if (yaml_scalar_matches(&sub_event, "value", strlen("value"))) { + if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &sub_event, "Expected scalar for rewrite value"); + errors++; + } else { + replace_pattern = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length); + yaml_event_delete(&sub_event); + } + } else if (yaml_scalar_matches(&sub_event, "stop", strlen("stop"))) { + if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &sub_event, "Expected scalar for rewrite stop boolean"); + errors++; + } else { + if(strncmp((char*)sub_event.data.scalar.value, "no", 2) == 0 || + strncmp((char*)sub_event.data.scalar.value, "false", 5) == 0) + flags |= RW_DONT_STOP; + else + flags &= ~RW_DONT_STOP; + + yaml_event_delete(&sub_event); + } + } else if (yaml_scalar_matches(&sub_event, "inject", strlen("inject"))) { + if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &sub_event, "Expected scalar for rewrite inject boolean"); + errors++; + } else { + if(strncmp((char*)sub_event.data.scalar.value, "yes", 3) == 0 || + strncmp((char*)sub_event.data.scalar.value, "true", 4) == 0) + flags |= RW_INJECT; + else + flags &= ~RW_INJECT; + + yaml_event_delete(&sub_event); + } + } else { + yaml_error(parser, &sub_event, "Unexpected scalar in rewrite mapping"); + errors++; + } + break; + + case YAML_MAPPING_END_EVENT: + if(key) { + if (!log_job_rewrite_add(jb, key, flags, search_pattern, replace_pattern)) + errors++; + } + + freez(key); + key = NULL; + + freez(search_pattern); + search_pattern = NULL; + + freez(replace_pattern); + replace_pattern = NULL; + + flags = RW_NONE; + + mapping_finished = true; + break; + + default: + yaml_error(parser, &sub_event, "Unexpected event in rewrite mapping"); + errors++; + break; + } + + yaml_event_delete(&sub_event); + } + } + break; + + case YAML_SEQUENCE_END_EVENT: + finished = true; + break; + + default: + yaml_error(parser, &event, "Unexpected event in rewrites sequence"); + errors++; + break; + } + + yaml_event_delete(&event); + } + + return errors; +} + +static size_t yaml_parse_renames(yaml_parser_t 
*parser, LOG_JOB *jb) { + size_t errors = 0; + + if (!yaml_parse_expect_event(parser, YAML_SEQUENCE_START_EVENT)) + return 1; + + bool finished = false; + while (!errors && !finished) { + yaml_event_t event; + if (!yaml_parse(parser, &event)) { + errors++; + continue; + } + + switch (event.type) { + case YAML_MAPPING_START_EVENT: + { + struct key_rename rn = { 0 }; + + bool mapping_finished = false; + while (!errors && !mapping_finished) { + yaml_event_t sub_event; + if (!yaml_parse(parser, &sub_event)) { + errors++; + continue; + } + + switch (sub_event.type) { + case YAML_SCALAR_EVENT: + if (yaml_scalar_matches(&sub_event, "new_key", strlen("new_key"))) { + if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &sub_event, "Expected scalar for rename new_key"); + errors++; + } else { + hashed_key_len_set(&rn.new_key, (char *)sub_event.data.scalar.value, sub_event.data.scalar.length); + yaml_event_delete(&sub_event); + } + } else if (yaml_scalar_matches(&sub_event, "old_key", strlen("old_key"))) { + if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &sub_event, "Expected scalar for rename old_key"); + errors++; + } else { + hashed_key_len_set(&rn.old_key, (char *)sub_event.data.scalar.value, sub_event.data.scalar.length); + yaml_event_delete(&sub_event); + } + } else { + yaml_error(parser, &sub_event, "Unexpected scalar in rewrite mapping"); + errors++; + } + break; + + case YAML_MAPPING_END_EVENT: + if(rn.old_key.key && rn.new_key.key) { + if (!log_job_rename_add(jb, rn.new_key.key, rn.new_key.len, + rn.old_key.key, rn.old_key.len)) + errors++; + } + rename_cleanup(&rn); + + mapping_finished = true; + break; + + default: + yaml_error(parser, &sub_event, "Unexpected event in rewrite mapping"); + errors++; + break; + } + + yaml_event_delete(&sub_event); + } + } + break; + + case YAML_SEQUENCE_END_EVENT: + finished = true; + break; + + default: + yaml_error(parser, &event, "Unexpected event in rewrites sequence"); + errors++; + break; + } + + yaml_event_delete(&event); + } + + return errors; +} + +static size_t yaml_parse_pattern(yaml_parser_t *parser, LOG_JOB *jb) { + yaml_event_t event; + size_t errors = 0; + + if (!yaml_parse(parser, &event)) + return 1; + + if(event.type == YAML_SCALAR_EVENT) + log_job_pattern_set(jb, (char *) event.data.scalar.value, event.data.scalar.length); + else { + yaml_error(parser, &event, "unexpected event type"); + errors++; + } + + yaml_event_delete(&event); + return errors; +} + +static size_t yaml_parse_initialized(yaml_parser_t *parser, LOG_JOB *jb) { + size_t errors = 0; + + if(!yaml_parse_expect_event(parser, YAML_STREAM_START_EVENT)) { + errors++; + goto cleanup; + } + + if(!yaml_parse_expect_event(parser, YAML_DOCUMENT_START_EVENT)) { + errors++; + goto cleanup; + } + + if(!yaml_parse_expect_event(parser, YAML_MAPPING_START_EVENT)) { + errors++; + goto cleanup; + } + + bool finished = false; + while (!errors && !finished) { + yaml_event_t event; + if(!yaml_parse(parser, &event)) { + errors++; + continue; + } + + switch(event.type) { + default: + yaml_error(parser, &event, "unexpected type"); + errors++; + break; + + case YAML_MAPPING_END_EVENT: + finished = true; + break; + + case YAML_SCALAR_EVENT: + if (yaml_scalar_matches(&event, "pattern", strlen("pattern"))) + errors += yaml_parse_pattern(parser, jb); + + else if (yaml_scalar_matches(&event, "prefix", strlen("prefix"))) + errors += yaml_parse_prefix(parser, jb); + + else if (yaml_scalar_matches(&event, 
"filename", strlen("filename"))) + errors += yaml_parse_filename_injection(parser, jb); + + else if (yaml_scalar_matches(&event, "filter", strlen("filter"))) + errors += yaml_parse_filters(parser, jb); + + else if (yaml_scalar_matches(&event, "inject", strlen("inject"))) + errors += yaml_parse_injections(parser, jb, false); + + else if (yaml_scalar_matches(&event, "unmatched", strlen("unmatched"))) + errors += yaml_parse_unmatched(parser, jb); + + else if (yaml_scalar_matches(&event, "rewrite", strlen("rewrite"))) + errors += yaml_parse_rewrites(parser, jb); + + else if (yaml_scalar_matches(&event, "rename", strlen("rename"))) + errors += yaml_parse_renames(parser, jb); + + else { + yaml_error(parser, &event, "unexpected scalar"); + errors++; + } + break; + } + + yaml_event_delete(&event); + } + + if(!errors && !yaml_parse_expect_event(parser, YAML_DOCUMENT_END_EVENT)) { + errors++; + goto cleanup; + } + + if(!errors && !yaml_parse_expect_event(parser, YAML_STREAM_END_EVENT)) { + errors++; + goto cleanup; + } + +cleanup: + return errors; +} + +bool yaml_parse_file(const char *config_file_path, LOG_JOB *jb) { + if(!config_file_path || !*config_file_path) { + log2stderr("yaml configuration filename cannot be empty."); + return false; + } + + FILE *fp = fopen(config_file_path, "r"); + if (!fp) { + log2stderr("Error opening config file: %s", config_file_path); + return false; + } + + yaml_parser_t parser; + yaml_parser_initialize(&parser); + yaml_parser_set_input_file(&parser, fp); + + size_t errors = yaml_parse_initialized(&parser, jb); + + yaml_parser_delete(&parser); + fclose(fp); + return errors == 0; +} + +bool yaml_parse_config(const char *config_name, LOG_JOB *jb) { + char filename[FILENAME_MAX + 1]; + + snprintf(filename, sizeof(filename), "%s/%s.yaml", LOG2JOURNAL_CONFIG_PATH, config_name); + return yaml_parse_file(filename, jb); +} + +#endif // HAVE_LIBYAML + +// ---------------------------------------------------------------------------- +// printing yaml + +static void yaml_print_multiline_value(const char *s, size_t depth) { + if (!s) + s = ""; + + do { + const char* next = strchr(s, '\n'); + if(next) next++; + + size_t len = next ? (size_t)(next - s) : strlen(s); + char buf[len + 1]; + copy_to_buffer(buf, sizeof(buf), s, len); + + fprintf(stderr, "%.*s%s%s", + (int)(depth * 2), " ", + buf, next ? "" : "\n"); + + s = next; + } while(s && *s); +} + +static bool needs_quotes_in_yaml(const char *str) { + // Lookup table for special YAML characters + static bool special_chars[256] = { false }; + static bool table_initialized = false; + + if (!table_initialized) { + // Initialize the lookup table + const char *special_chars_str = ":{}[],&*!|>'\"%@`^"; + for (const char *c = special_chars_str; *c; ++c) { + special_chars[(unsigned char)*c] = true; + } + table_initialized = true; + } + + while (*str) { + if (special_chars[(unsigned char)*str]) { + return true; + } + str++; + } + return false; +} + +static void yaml_print_node(const char *key, const char *value, size_t depth, bool dash) { + if(depth > 10) depth = 10; + const char *quote = "'"; + + const char *second_line = NULL; + if(value && strchr(value, '\n')) { + second_line = value; + value = "|"; + quote = ""; + } + else if(!value || !needs_quotes_in_yaml(value)) + quote = ""; + + fprintf(stderr, "%.*s%s%s%s%s%s%s\n", + (int)(depth * 2), " ", dash ? "- ": "", + key ? key : "", key ? ": " : "", + quote, value ? 
value : "", quote); + + if(second_line) { + yaml_print_multiline_value(second_line, depth + 1); + } +} + +void log_job_configuration_to_yaml(LOG_JOB *jb) { + if(jb->pattern) + yaml_print_node("pattern", jb->pattern, 0, false); + + if(jb->prefix) { + fprintf(stderr, "\n"); + yaml_print_node("prefix", jb->prefix, 0, false); + } + + if(jb->filename.key.key) { + fprintf(stderr, "\n"); + yaml_print_node("filename", NULL, 0, false); + yaml_print_node("key", jb->filename.key.key, 1, false); + } + + if(jb->filter.include.pattern || jb->filter.exclude.pattern) { + fprintf(stderr, "\n"); + yaml_print_node("filter", NULL, 0, false); + + if(jb->filter.include.pattern) + yaml_print_node("include", jb->filter.include.pattern, 1, false); + + if(jb->filter.exclude.pattern) + yaml_print_node("exclude", jb->filter.exclude.pattern, 1, false); + } + + if(jb->renames.used) { + fprintf(stderr, "\n"); + yaml_print_node("rename", NULL, 0, false); + + for(size_t i = 0; i < jb->renames.used ;i++) { + yaml_print_node("new_key", jb->renames.array[i].new_key.key, 1, true); + yaml_print_node("old_key", jb->renames.array[i].old_key.key, 2, false); + } + } + + if(jb->injections.used) { + fprintf(stderr, "\n"); + yaml_print_node("inject", NULL, 0, false); + + for (size_t i = 0; i < jb->injections.used; i++) { + yaml_print_node("key", jb->injections.keys[i].key.key, 1, true); + yaml_print_node("value", jb->injections.keys[i].value.pattern, 2, false); + } + } + + if(jb->rewrites.used) { + fprintf(stderr, "\n"); + yaml_print_node("rewrite", NULL, 0, false); + + for(size_t i = 0; i < jb->rewrites.used ;i++) { + REWRITE *rw = &jb->rewrites.array[i]; + + yaml_print_node("key", rw->key.key, 1, true); + + if(rw->flags & RW_MATCH_PCRE2) + yaml_print_node("match", rw->match_pcre2.pattern, 2, false); + + else if(rw->flags & RW_MATCH_NON_EMPTY) + yaml_print_node("not_empty", rw->match_non_empty.pattern, 2, false); + + yaml_print_node("value", rw->value.pattern, 2, false); + + if(rw->flags & RW_INJECT) + yaml_print_node("inject", "yes", 2, false); + + if(rw->flags & RW_DONT_STOP) + yaml_print_node("stop", "no", 2, false); + } + } + + if(jb->unmatched.key.key || jb->unmatched.injections.used) { + fprintf(stderr, "\n"); + yaml_print_node("unmatched", NULL, 0, false); + + if(jb->unmatched.key.key) + yaml_print_node("key", jb->unmatched.key.key, 1, false); + + if(jb->unmatched.injections.used) { + fprintf(stderr, "\n"); + yaml_print_node("inject", NULL, 1, false); + + for (size_t i = 0; i < jb->unmatched.injections.used; i++) { + yaml_print_node("key", jb->unmatched.injections.keys[i].key.key, 2, true); + yaml_print_node("value", jb->unmatched.injections.keys[i].value.pattern, 3, false); + } + } + } +} diff --git a/collectors/log2journal/log2journal.c b/collectors/log2journal/log2journal.c new file mode 100644 index 00000000000000..c3204939cda9c5 --- /dev/null +++ b/collectors/log2journal/log2journal.c @@ -0,0 +1,569 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "log2journal.h" + +// ---------------------------------------------------------------------------- + +const char journal_key_characters_map[256] = { + // control characters + [0] = '\0', [1] = '_', [2] = '_', [3] = '_', [4] = '_', [5] = '_', [6] = '_', [7] = '_', + [8] = '_', [9] = '_', [10] = '_', [11] = '_', [12] = '_', [13] = '_', [14] = '_', [15] = '_', + [16] = '_', [17] = '_', [18] = '_', [19] = '_', [20] = '_', [21] = '_', [22] = '_', [23] = '_', + [24] = '_', [25] = '_', [26] = '_', [27] = '_', [28] = '_', [29] = '_', [30] = '_', [31] = '_', + + // symbols + 
[' '] = '_', ['!'] = '_', ['"'] = '_', ['#'] = '_', ['$'] = '_', ['%'] = '_', ['&'] = '_', ['\''] = '_', + ['('] = '_', [')'] = '_', ['*'] = '_', ['+'] = '_', [','] = '_', ['-'] = '_', ['.'] = '_', ['/'] = '_', + + // numbers + ['0'] = '0', ['1'] = '1', ['2'] = '2', ['3'] = '3', ['4'] = '4', ['5'] = '5', ['6'] = '6', ['7'] = '7', + ['8'] = '8', ['9'] = '9', + + // symbols + [':'] = '_', [';'] = '_', ['<'] = '_', ['='] = '_', ['>'] = '_', ['?'] = '_', ['@'] = '_', + + // capitals + ['A'] = 'A', ['B'] = 'B', ['C'] = 'C', ['D'] = 'D', ['E'] = 'E', ['F'] = 'F', ['G'] = 'G', ['H'] = 'H', + ['I'] = 'I', ['J'] = 'J', ['K'] = 'K', ['L'] = 'L', ['M'] = 'M', ['N'] = 'N', ['O'] = 'O', ['P'] = 'P', + ['Q'] = 'Q', ['R'] = 'R', ['S'] = 'S', ['T'] = 'T', ['U'] = 'U', ['V'] = 'V', ['W'] = 'W', ['X'] = 'X', + ['Y'] = 'Y', ['Z'] = 'Z', + + // symbols + ['['] = '_', ['\\'] = '_', [']'] = '_', ['^'] = '_', ['_'] = '_', ['`'] = '_', + + // lower to upper + ['a'] = 'A', ['b'] = 'B', ['c'] = 'C', ['d'] = 'D', ['e'] = 'E', ['f'] = 'F', ['g'] = 'G', ['h'] = 'H', + ['i'] = 'I', ['j'] = 'J', ['k'] = 'K', ['l'] = 'L', ['m'] = 'M', ['n'] = 'N', ['o'] = 'O', ['p'] = 'P', + ['q'] = 'Q', ['r'] = 'R', ['s'] = 'S', ['t'] = 'T', ['u'] = 'U', ['v'] = 'V', ['w'] = 'W', ['x'] = 'X', + ['y'] = 'Y', ['z'] = 'Z', + + // symbols + ['{'] = '_', ['|'] = '_', ['}'] = '_', ['~'] = '_', [127] = '_', // Delete (DEL) + + // Extended ASCII characters (128-255) set to underscore + [128] = '_', [129] = '_', [130] = '_', [131] = '_', [132] = '_', [133] = '_', [134] = '_', [135] = '_', + [136] = '_', [137] = '_', [138] = '_', [139] = '_', [140] = '_', [141] = '_', [142] = '_', [143] = '_', + [144] = '_', [145] = '_', [146] = '_', [147] = '_', [148] = '_', [149] = '_', [150] = '_', [151] = '_', + [152] = '_', [153] = '_', [154] = '_', [155] = '_', [156] = '_', [157] = '_', [158] = '_', [159] = '_', + [160] = '_', [161] = '_', [162] = '_', [163] = '_', [164] = '_', [165] = '_', [166] = '_', [167] = '_', + [168] = '_', [169] = '_', [170] = '_', [171] = '_', [172] = '_', [173] = '_', [174] = '_', [175] = '_', + [176] = '_', [177] = '_', [178] = '_', [179] = '_', [180] = '_', [181] = '_', [182] = '_', [183] = '_', + [184] = '_', [185] = '_', [186] = '_', [187] = '_', [188] = '_', [189] = '_', [190] = '_', [191] = '_', + [192] = '_', [193] = '_', [194] = '_', [195] = '_', [196] = '_', [197] = '_', [198] = '_', [199] = '_', + [200] = '_', [201] = '_', [202] = '_', [203] = '_', [204] = '_', [205] = '_', [206] = '_', [207] = '_', + [208] = '_', [209] = '_', [210] = '_', [211] = '_', [212] = '_', [213] = '_', [214] = '_', [215] = '_', + [216] = '_', [217] = '_', [218] = '_', [219] = '_', [220] = '_', [221] = '_', [222] = '_', [223] = '_', + [224] = '_', [225] = '_', [226] = '_', [227] = '_', [228] = '_', [229] = '_', [230] = '_', [231] = '_', + [232] = '_', [233] = '_', [234] = '_', [235] = '_', [236] = '_', [237] = '_', [238] = '_', [239] = '_', + [240] = '_', [241] = '_', [242] = '_', [243] = '_', [244] = '_', [245] = '_', [246] = '_', [247] = '_', + [248] = '_', [249] = '_', [250] = '_', [251] = '_', [252] = '_', [253] = '_', [254] = '_', [255] = '_', +}; + +// ---------------------------------------------------------------------------- + +static inline HASHED_KEY *get_key_from_hashtable(LOG_JOB *jb, HASHED_KEY *k) { + if(k->flags & HK_HASHTABLE_ALLOCATED) + return k; + + if(!k->hashtable_ptr) { + HASHED_KEY *ht_key; + SIMPLE_HASHTABLE_SLOT_KEY *slot = simple_hashtable_get_slot_KEY(&jb->hashtable, k->hash, true); + if((ht_key = 
SIMPLE_HASHTABLE_SLOT_DATA(slot))) { + if(!(ht_key->flags & HK_COLLISION_CHECKED)) { + ht_key->flags |= HK_COLLISION_CHECKED; + + if(strcmp(ht_key->key, k->key) != 0) + log2stderr("Hashtable collision detected on key '%s' (hash %lx) and '%s' (hash %lx). " + "Please file a bug report.", ht_key->key, (unsigned long) ht_key->hash, k->key + , (unsigned long) k->hash + ); + } + } + else { + ht_key = callocz(1, sizeof(HASHED_KEY)); + ht_key->key = strdupz(k->key); + ht_key->len = k->len; + ht_key->hash = k->hash; + ht_key->flags = HK_HASHTABLE_ALLOCATED; + + simple_hashtable_set_slot_KEY(&jb->hashtable, slot, ht_key->hash, ht_key); + } + + k->hashtable_ptr = ht_key; + } + + return k->hashtable_ptr; +} + +static inline HASHED_KEY *get_key_from_hashtable_with_char_ptr(LOG_JOB *jb, const char *key) { + HASHED_KEY find = { + .key = key, + .len = strlen(key), + }; + find.hash = XXH3_64bits(key, find.len); + + return get_key_from_hashtable(jb, &find); +} + +// ---------------------------------------------------------------------------- + +static inline void validate_key(LOG_JOB *jb __maybe_unused, HASHED_KEY *k) { + if(k->len > JOURNAL_MAX_KEY_LEN) + log2stderr("WARNING: key '%s' has length %zu, which is more than %zu, the max systemd-journal allows", + k->key, (size_t)k->len, (size_t)JOURNAL_MAX_KEY_LEN); + + for(size_t i = 0; i < k->len ;i++) { + char c = k->key[i]; + + if((c < 'A' || c > 'Z') && !isdigit(c) && c != '_') { + log2stderr("WARNING: key '%s' contains characters that are not allowed by systemd-journal.", k->key); + break; + } + } + + if(isdigit(k->key[0])) + log2stderr("WARNING: key '%s' starts with a digit and may not be accepted by systemd-journal.", k->key); + + if(k->key[0] == '_') + log2stderr("WARNING: key '%s' starts with an underscore, which makes it a systemd-journal trusted field. 
" + "Such fields are accepted by systemd-journal-remote, but not by systemd-journald.", k->key); +} + +// ---------------------------------------------------------------------------- + +static inline size_t replace_evaluate_to_buffer(LOG_JOB *jb, HASHED_KEY *k __maybe_unused, REPLACE_PATTERN *rp, char *dst, size_t dst_size) { + size_t remaining = dst_size; + char *copy_to = dst; + + for(REPLACE_NODE *node = rp->nodes; node != NULL && remaining > 1; node = node->next) { + if(node->is_variable) { + if(hashed_keys_match(&node->name, &jb->line.key)) { + size_t copied = copy_to_buffer(copy_to, remaining, jb->line.trimmed, jb->line.trimmed_len); + copy_to += copied; + remaining -= copied; + } + else { + HASHED_KEY *ktmp = get_key_from_hashtable_with_char_ptr(jb, node->name.key); + if(ktmp->value.len) { + size_t copied = copy_to_buffer(copy_to, remaining, ktmp->value.txt, ktmp->value.len); + copy_to += copied; + remaining -= copied; + } + } + } + else { + size_t copied = copy_to_buffer(copy_to, remaining, node->name.key, node->name.len); + copy_to += copied; + remaining -= copied; + } + } + + return copy_to - dst; +} + +static inline void replace_evaluate(LOG_JOB *jb, HASHED_KEY *k, REPLACE_PATTERN *rp) { + HASHED_KEY *ht_key = get_key_from_hashtable(jb, k); + + // set it to empty value + k->value.len = 0; + + for(REPLACE_NODE *node = rp->nodes; node != NULL; node = node->next) { + if(node->is_variable) { + if(hashed_keys_match(&node->name, &jb->line.key)) + txt_expand_and_append(&ht_key->value, jb->line.trimmed, jb->line.trimmed_len); + + else { + HASHED_KEY *ktmp = get_key_from_hashtable_with_char_ptr(jb, node->name.key); + if(ktmp->value.len) + txt_expand_and_append(&ht_key->value, ktmp->value.txt, ktmp->value.len); + } + } + else + txt_expand_and_append(&ht_key->value, node->name.key, node->name.len); + } +} + +static inline void replace_evaluate_from_pcre2(LOG_JOB *jb, HASHED_KEY *k, REPLACE_PATTERN *rp, SEARCH_PATTERN *sp) { + assert(k->flags & HK_HASHTABLE_ALLOCATED); + + // set the temporary TEXT to zero length + jb->rewrites.tmp.len = 0; + + PCRE2_SIZE *ovector = pcre2_get_ovector_pointer(sp->match_data); + + // Iterate through the linked list of replacement nodes + for(REPLACE_NODE *node = rp->nodes; node != NULL; node = node->next) { + if(node->is_variable) { + int group_number = pcre2_substring_number_from_name( + sp->re, (PCRE2_SPTR) node->name.key); + + if(group_number >= 0) { + PCRE2_SIZE start_offset = ovector[2 * group_number]; + PCRE2_SIZE end_offset = ovector[2 * group_number + 1]; + PCRE2_SIZE length = end_offset - start_offset; + + txt_expand_and_append(&jb->rewrites.tmp, k->value.txt + start_offset, length); + } + else { + if(hashed_keys_match(&node->name, &jb->line.key)) + txt_expand_and_append(&jb->rewrites.tmp, jb->line.trimmed, jb->line.trimmed_len); + + else { + HASHED_KEY *ktmp = get_key_from_hashtable_with_char_ptr(jb, node->name.key); + if(ktmp->value.len) + txt_expand_and_append(&jb->rewrites.tmp, ktmp->value.txt, ktmp->value.len); + } + } + } + else { + txt_expand_and_append(&jb->rewrites.tmp, node->name.key, node->name.len); + } + } + + // swap the values of the temporary TEXT and the key value + TEXT tmp = k->value; + k->value = jb->rewrites.tmp; + jb->rewrites.tmp = tmp; +} + +static inline bool rewrite_conditions_satisfied(LOG_JOB *jb, HASHED_KEY *k, REWRITE *rw) { + assert(k->flags & HK_HASHTABLE_ALLOCATED); + + if(rw->flags & RW_MATCH_PCRE2) { + return search_pattern_matches(&rw->match_pcre2, k->value.txt, k->value.len); + } + else if(rw->flags & 
RW_MATCH_NON_EMPTY) { + char buffer[2]; // we don't need a big buffer - we just check if anything is written + if(replace_evaluate_to_buffer(jb, k, &rw->match_non_empty, buffer, sizeof(buffer))) + // it copied something + return true; + else + // it copied nothing + return false; + } + else + // no conditions + return true; +} + +// ---------------------------------------------------------------------------- + +static inline HASHED_KEY *rename_key(LOG_JOB *jb, HASHED_KEY *k) { + if(!(k->flags & HK_RENAMES_CHECKED) || k->flags & HK_HAS_RENAMES) { + k->flags |= HK_RENAMES_CHECKED; + + for(size_t i = 0; i < jb->renames.used; i++) { + RENAME *rn = &jb->renames.array[i]; + + if(hashed_keys_match(&rn->old_key, k)) { + k->flags |= HK_HAS_RENAMES; + + return get_key_from_hashtable(jb, &rn->new_key); + } + } + } + + return k; +} + +// ---------------------------------------------------------------------------- + +static inline void send_key_value_constant(LOG_JOB *jb __maybe_unused, HASHED_KEY *key, const char *value, size_t len) { + HASHED_KEY *ht_key = get_key_from_hashtable(jb, key); + + txt_replace(&ht_key->value, value, len); + ht_key->flags |= HK_VALUE_FROM_LOG; + + // fprintf(stderr, "SET %s=%.*s\n", ht_key->key, (int)ht_key->value.len, ht_key->value.txt); +} + +static inline void send_key_value_error(LOG_JOB *jb, HASHED_KEY *key, const char *format, ...) __attribute__ ((format(__printf__, 3, 4))); +static inline void send_key_value_error(LOG_JOB *jb, HASHED_KEY *key, const char *format, ...) { + HASHED_KEY *ht_key = get_key_from_hashtable(jb, key); + + printf("%s=", ht_key->key); + va_list args; + va_start(args, format); + vprintf(format, args); + va_end(args); + printf("\n"); +} + +inline void log_job_send_extracted_key_value(LOG_JOB *jb, const char *key, const char *value, size_t len) { + HASHED_KEY *ht_key = get_key_from_hashtable_with_char_ptr(jb, key); + HASHED_KEY *nk = rename_key(jb, ht_key); + txt_replace(&nk->value, value, len); + ht_key->flags |= HK_VALUE_FROM_LOG; + +// fprintf(stderr, "SET %s=%.*s\n", ht_key->key, (int)ht_key->value.len, ht_key->value.txt); +} + +static inline void log_job_process_rewrites(LOG_JOB *jb) { + for(size_t i = 0; i < jb->rewrites.used ;i++) { + REWRITE *rw = &jb->rewrites.array[i]; + + HASHED_KEY *k = get_key_from_hashtable(jb, &rw->key); + + if(!(rw->flags & RW_INJECT) && !(k->flags & HK_VALUE_FROM_LOG) && !k->value.len) + continue; + + if(!(k->flags & HK_VALUE_REWRITTEN) && rewrite_conditions_satisfied(jb, k, rw)) { + if(rw->flags & RW_MATCH_PCRE2) + replace_evaluate_from_pcre2(jb, k, &rw->value, &rw->match_pcre2); + else + replace_evaluate(jb, k, &rw->value); + + if(!(rw->flags & RW_DONT_STOP)) + k->flags |= HK_VALUE_REWRITTEN; + +// fprintf(stderr, "REWRITE %s=%.*s\n", k->key, (int)k->value.len, k->value.txt); + } + } +} + +static inline void send_all_fields(LOG_JOB *jb) { + SIMPLE_HASHTABLE_SORTED_FOREACH_READ_ONLY(&jb->hashtable, kptr, HASHED_KEY, _KEY) { + HASHED_KEY *k = SIMPLE_HASHTABLE_SORTED_FOREACH_READ_ONLY_VALUE(kptr); + + if(k->value.len) { + // the key exists and has some value + + if(!(k->flags & HK_FILTERED)) { + k->flags |= HK_FILTERED; + + bool included = jb->filter.include.re ? search_pattern_matches(&jb->filter.include, k->key, k->len) : true; + bool excluded = jb->filter.exclude.re ? 
search_pattern_matches(&jb->filter.exclude, k->key, k->len) : false; + + if(included && !excluded) + k->flags |= HK_FILTERED_INCLUDED; + else + k->flags &= ~HK_FILTERED_INCLUDED; + + // log some error if the key does not comply to journal standards + validate_key(jb, k); + } + + if(k->flags & HK_FILTERED_INCLUDED) + printf("%s=%.*s\n", k->key, (int)k->value.len, k->value.txt); + + // reset it for the next round + k->value.txt[0] = '\0'; + k->value.len = 0; + } + + k->flags &= ~(HK_VALUE_REWRITTEN | HK_VALUE_FROM_LOG); + } +} + +// ---------------------------------------------------------------------------- +// injection of constant fields + +static void select_which_injections_should_be_injected_on_unmatched(LOG_JOB *jb) { + // mark all injections to be added to unmatched logs + for(size_t i = 0; i < jb->injections.used ; i++) + jb->injections.keys[i].on_unmatched = true; + + if(jb->injections.used && jb->unmatched.injections.used) { + // we have both injections and injections on unmatched + + // we find all the injections that are also configured as injections on unmatched, + // and we disable them, so that the output will not have the same key twice + + for(size_t i = 0; i < jb->injections.used ;i++) { + for(size_t u = 0; u < jb->unmatched.injections.used ; u++) { + if(strcmp(jb->injections.keys[i].key.key, jb->unmatched.injections.keys[u].key.key) == 0) + jb->injections.keys[i].on_unmatched = false; + } + } + } +} + + +static inline void jb_finalize_injections(LOG_JOB *jb, bool line_is_matched) { + for (size_t j = 0; j < jb->injections.used; j++) { + if(!line_is_matched && !jb->injections.keys[j].on_unmatched) + continue; + + INJECTION *inj = &jb->injections.keys[j]; + + replace_evaluate(jb, &inj->key, &inj->value); + } +} + +// ---------------------------------------------------------------------------- +// filename injection + +static inline void jb_inject_filename(LOG_JOB *jb) { + if (jb->filename.key.key && jb->filename.current.len) + send_key_value_constant(jb, &jb->filename.key, jb->filename.current.txt, jb->filename.current.len); +} + +static inline bool jb_switched_filename(LOG_JOB *jb, const char *line, size_t len) { + // IMPORTANT: + // Return TRUE when the caller should skip this line (because it is ours). + // Unfortunately, we have to consume empty lines too. + + // IMPORTANT: + // filename may not be NULL terminated and have more data than the filename. 
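+    // Illustration (not part of the original comment): when tail follows more
+    // than one file (e.g. `tail -F /var/log/a.log /var/log/b.log`, hypothetical
+    // paths), it typically separates the streams with an empty line followed by
+    // a header of the form:
+    //
+    //     ==> /var/log/a.log <==
+    //
+    // The checks below detect exactly this sequence (an empty line, then a line
+    // starting with "==> " and containing " <==") and store the path between
+    // the two markers in jb->filename.current.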
+ + if (!len) { + jb->filename.last_line_was_empty = true; + return true; + } + + // Check if it's a log file change line + if (jb->filename.last_line_was_empty && line[0] == '=' && strncmp(line, "==> ", 4) == 0) { + const char *start = line + 4; + const char *end = strstr(line, " <=="); + while (*start == ' ') start++; + if (*start != '\n' && *start != '\0' && end) { + txt_replace(&jb->filename.current, start, end - start); + return true; + } + } + + jb->filename.last_line_was_empty = false; + return false; +} + +static inline bool jb_send_unmatched_line(LOG_JOB *jb, const char *line) { + if (!jb->unmatched.key.key) + return false; + + // we are sending errors to systemd-journal + send_key_value_error(jb, &jb->unmatched.key, "Parsing error on: %s", line); + + for (size_t j = 0; j < jb->unmatched.injections.used; j++) { + INJECTION *inj = &jb->unmatched.injections.keys[j]; + + replace_evaluate(jb, &inj->key, &inj->value); + } + + return true; +} + +// ---------------------------------------------------------------------------- +// running a job + +static char *get_next_line(LOG_JOB *jb __maybe_unused, char *buffer, size_t size, size_t *line_length) { + if(!fgets(buffer, (int)size, stdin)) { + *line_length = 0; + return NULL; + } + + char *line = buffer; + size_t len = strlen(line); + + // remove trailing newlines and spaces + while(len > 1 && (line[len - 1] == '\n' || isspace(line[len - 1]))) + line[--len] = '\0'; + + // skip leading spaces + while(isspace(*line)) { + line++; + len--; + } + + *line_length = len; + return line; +} + +int log_job_run(LOG_JOB *jb) { + select_which_injections_should_be_injected_on_unmatched(jb); + + PCRE2_STATE *pcre2 = NULL; + LOG_JSON_STATE *json = NULL; + LOGFMT_STATE *logfmt = NULL; + + if(strcmp(jb->pattern, "json") == 0) { + json = json_parser_create(jb); + // never fails + } + else if(strcmp(jb->pattern, "logfmt") == 0) { + logfmt = logfmt_parser_create(jb); + // never fails + } + else if(strcmp(jb->pattern, "none") != 0) { + pcre2 = pcre2_parser_create(jb); + if(pcre2_has_error(pcre2)) { + log2stderr("%s", pcre2_parser_error(pcre2)); + pcre2_parser_destroy(pcre2); + return 1; + } + } + + jb->line.buffer = mallocz(MAX_LINE_LENGTH + 1); + jb->line.size = MAX_LINE_LENGTH + 1; + jb->line.trimmed_len = 0; + jb->line.trimmed = jb->line.buffer; + + while ((jb->line.trimmed = get_next_line(jb, (char *)jb->line.buffer, jb->line.size, &jb->line.trimmed_len))) { + const char *line = jb->line.trimmed; + size_t len = jb->line.trimmed_len; + + if(jb_switched_filename(jb, line, len)) + continue; + + bool line_is_matched = true; + + if(json) + line_is_matched = json_parse_document(json, line); + else if(logfmt) + line_is_matched = logfmt_parse_document(logfmt, line); + else if(pcre2) + line_is_matched = pcre2_parse_document(pcre2, line, len); + + if(!line_is_matched) { + if(json) + log2stderr("%s", json_parser_error(json)); + else if(logfmt) + log2stderr("%s", logfmt_parser_error(logfmt)); + else if(pcre2) + log2stderr("%s", pcre2_parser_error(pcre2)); + + if(!jb_send_unmatched_line(jb, line)) + // just logging to stderr, not sending unmatched lines + continue; + } + + jb_inject_filename(jb); + jb_finalize_injections(jb, line_is_matched); + + log_job_process_rewrites(jb); + send_all_fields(jb); + printf("\n"); + fflush(stdout); + } + + if(json) + json_parser_destroy(json); + + else if(logfmt) + logfmt_parser_destroy(logfmt); + + else if(pcre2) + pcre2_parser_destroy(pcre2); + + freez((void *)jb->line.buffer); + + return 0; +} + +// 
---------------------------------------------------------------------------- + +int main(int argc, char *argv[]) { + LOG_JOB log_job; + + log_job_init(&log_job); + + if(!log_job_command_line_parse_parameters(&log_job, argc, argv)) + exit(1); + + if(log_job.show_config) + log_job_configuration_to_yaml(&log_job); + + int ret = log_job_run(&log_job); + + log_job_cleanup(&log_job); + return ret; +} diff --git a/collectors/log2journal/log2journal.d/default.yaml b/collectors/log2journal/log2journal.d/default.yaml new file mode 100644 index 00000000000000..d41efc4abb251d --- /dev/null +++ b/collectors/log2journal/log2journal.d/default.yaml @@ -0,0 +1,15 @@ +pattern: none + +filename: + key: LOG_FILENAME + +inject: + - key: MESSAGE + value: '${LINE}' # a special variable that resolves to the whole line read from the log + + - key: PRIORITY + value: 6 # Valid PRIORITIES: 0=emerg, 1=alert, 2=crit, 3=error, 4=warn, 5=notice, 6=info, 7=debug + + - key: SYSLOG_IDENTIFIER + value: log2journal # the name of the application sending the logs + diff --git a/collectors/log2journal/log2journal.d/nginx-combined.yaml b/collectors/log2journal/log2journal.d/nginx-combined.yaml new file mode 100644 index 00000000000000..003c774d7bd263 --- /dev/null +++ b/collectors/log2journal/log2journal.d/nginx-combined.yaml @@ -0,0 +1,91 @@ +# Netdata log2journal Configuration +# The following parses nginx log files using the combined format. + +# The PCRE2 pattern to match log entries and give names to the fields. +# The journal will have these names, so follow their rules. You can +# initiate an extended PCRE2 pattern by starting the pattern with (?x) +pattern: | + (?x) # Enable PCRE2 extended mode + ^ + (?[^ ]+) \s - \s # NGINX_REMOTE_ADDR + (?[^ ]+) \s # NGINX_REMOTE_USER + \[ + (?[^\]]+) # NGINX_TIME_LOCAL + \] + \s+ " + (? + (?[A-Z]+) \s+ # NGINX_METHOD + (?[^ ]+) \s+ + (?[^"]+) + ) + " \s+ + (?\d+) \s+ # NGINX_STATUS + (?\d+) \s+ # NGINX_BODY_BYTES_SENT + "(?[^"]*)" \s+ # NGINX_HTTP_REFERER + "(?[^"]*)" # NGINX_HTTP_USER_AGENT + +# When log2journal can detect the filename of each log entry (tail gives it +# only when it tails multiple files), this key will be used to send the +# filename to the journals. +filename: + key: NGINX_LOG_FILENAME + +rename: + - new_key: MESSAGE + old_key: NGINX_REQUEST + +# Inject constant fields into the journal logs. +inject: + - key: SYSLOG_IDENTIFIER + value: nginx-log + + # inject PRIORITY is a duplicate of NGINX_STATUS + - key: PRIORITY + value: '${NGINX_STATUS}' + + # Inject NGINX_STATUS_FAMILY is a duplicate of NGINX_STATUS + - key: NGINX_STATUS_FAMILY + value: '${NGINX_STATUS}' + +# Rewrite the value of fields (including the duplicated ones). +# The search pattern can have named groups, and the replace pattern can use +# them as ${name}. +rewrite: + # PRIORITY is a duplicate of NGINX_STATUS + # Valid PRIORITIES: 0=emerg, 1=alert, 2=crit, 3=error, 4=warn, 5=notice, 6=info, 7=debug + - key: PRIORITY + match: '^[123]' + value: 6 + + - key: PRIORITY + match: '^4' + value: 5 + + - key: PRIORITY + match: '^5' + value: 3 + + - key: PRIORITY + match: '.*' + value: 4 + + # NGINX_STATUS_FAMILY is a duplicate of NGINX_STATUS + - key: NGINX_STATUS_FAMILY + match: '^(?[1-5])' + value: '${first_digit}xx' + + - key: NGINX_STATUS_FAMILY + match: '.*' + value: 'UNKNOWN' + +# Control what to do when input logs do not match the main PCRE2 pattern. +unmatched: + # The journal key to log the PCRE2 error message to. + # Set this to MESSAGE, so you to see the error in the log. 
+ key: MESSAGE + + # Inject static fields to the unmatched entries. + # Set PRIORITY=1 (alert) to help you spot unmatched entries in the logs. + inject: + - key: PRIORITY + value: 1 diff --git a/collectors/log2journal/log2journal.d/nginx-json.yaml b/collectors/log2journal/log2journal.d/nginx-json.yaml new file mode 100644 index 00000000000000..7fdc4be58459c4 --- /dev/null +++ b/collectors/log2journal/log2journal.d/nginx-json.yaml @@ -0,0 +1,164 @@ +# For all nginx variables, check this: +# https://nginx.org/en/docs/http/ngx_http_core_module.html#var_connection_requests + +pattern: json + +prefix: NGINX_ + +# When log2journal can detect the filename of each log entry (tail gives it +# only when it tails multiple files), this key will be used to send the +# filename to the journals. +filename: + key: NGINX_LOG_FILENAME + +filter: + exclude: '^(NGINX_BINARY_REMOTE_ADDR)$' + +rename: + - new_key: MESSAGE + old_key: NGINX_REQUEST + + # args is an alias for query_string + - new_key: NGINX_QUERY_STRING + old_key: NGINX_ARGS + + # document_uri is an alias for uri + - new_key: NGINX_URI + old_key: NGINX_DOCUMENT_URI + + # is_args states if the request had a query string or not + - new_key: NGINX_HAS_QUERY_STRING + old_key: NGINX_IS_ARGS + + # msec is the timestamp in seconds, with fractional digits for milliseconds + - new_key: NGINX_TIMESTAMP_SEC + old_key: NGINX_MSEC + + # nginx_version is already prefixed with nginx, let's remove one of them + - new_key: NGINX_VERSION + old_key: NGINX_NGINX_VERSION + + # pipe states if the request was pipelined or not + - new_key: NGINX_PIPELINED + old_key: NGINX_PIPE + + # rename numeric TLVs to their names + - new_key: NGINX_PROXY_PROTOCOL_TLV_ALPN + old_key: NGINX_PROXY_PROTOCOL_TLV_0X01 + - new_key: NGINX_PROXY_PROTOCOL_TLV_AUTHORITY + old_key: NGINX_PROXY_PROTOCOL_TLV_0X02 + - new_key: NGINX_PROXY_PROTOCOL_TLV_UNIQUE_ID + old_key: NGINX_PROXY_PROTOCOL_TLV_0X05 + - new_key: NGINX_PROXY_PROTOCOL_TLV_SSL + old_key: NGINX_PROXY_PROTOCOL_TLV_0X20 + - new_key: NGINX_PROXY_PROTOCOL_TLV_NETNS + old_key: NGINX_PROXY_PROTOCOL_TLV_0X30 + + # rename numeric SSL TLVs to their names + - new_key: NGINX_PROXY_PROTOCOL_TLV_SSL_VERSION + old_key: NGINX_PROXY_PROTOCOL_TLV_SSL_0X21 + - new_key: NGINX_PROXY_PROTOCOL_TLV_SSL_CN + old_key: NGINX_PROXY_PROTOCOL_TLV_SSL_0X22 + - new_key: NGINX_PROXY_PROTOCOL_TLV_SSL_CIPHER + old_key: NGINX_PROXY_PROTOCOL_TLV_SSL_0X23 + - new_key: NGINX_PROXY_PROTOCOL_TLV_SSL_SIG_ALG + old_key: NGINX_PROXY_PROTOCOL_TLV_SSL_0X24 + - new_key: NGINX_PROXY_PROTOCOL_TLV_SSL_KEY_ALG + old_key: NGINX_PROXY_PROTOCOL_TLV_SSL_0X25 + +# Inject constant fields into the journal logs. +inject: + - key: SYSLOG_IDENTIFIER + value: nginx-log + + # inject PRIORITY is a duplicate of NGINX_STATUS + - key: PRIORITY + value: '${NGINX_STATUS}' + + # Inject NGINX_STATUS_FAMILY is a duplicate of NGINX_STATUS + - key: NGINX_STATUS_FAMILY + value: '${NGINX_STATUS}' + + +# Rewrite the value of fields (including the duplicated ones). +# The search pattern can have named groups, and the replace pattern can use +# them as ${name}. +rewrite: + # a ? 
means it has query string, everything else means it does not + - key: NGINX_HAS_QUERY_STRING + match: '^\?$' + value: yes + - key: NGINX_HAS_QUERY_STRING + match: '.*' + value: no + + # 'on' means it was HTTPS, everything else means it was not + - key: NGINX_HTTPS + match: '^on$' + value: yes + - key: NGINX_HTTPS + match: '.*' + value: no + + # 'p' means it was pipelined, everything else means it was not + - key: NGINX_PIPELINED + match: '^p$' + value: yes + - key: NGINX_PIPELINED + match: '.*' + value: no + + # zero means client sent a certificate and it was verified, non-zero means otherwise + - key: NGINX_PROXY_PROTOCOL_TLV_SSL_VERIFY + match: '^0$' + value: yes + - key: NGINX_PROXY_PROTOCOL_TLV_SSL_VERIFY + match: '.*' + value: no + + # 'OK' means request completed, everything else means it didn't + - key: NGINX_REQUEST_COMPLETION + match: '^OK$' + value: 'completed' + - key: NGINX_REQUEST_COMPLETION + match: '.*' + value: 'not completed' + + # PRIORTY is a duplicate of NGINX_STATUS + # Valid PRIORITIES: 0=emerg, 1=alert, 2=crit, 3=error, 4=warn, 5=notice, 6=info, 7=debug + - key: PRIORITY + match: '^[123]' + value: 6 + + - key: PRIORITY + match: '^4' + value: 5 + + - key: PRIORITY + match: '^5' + value: 3 + + - key: PRIORITY + match: '.*' + value: 4 + + # NGINX_STATUS_FAMILY is a duplicate of NGINX_STATUS + - key: NGINX_STATUS_FAMILY + match: '^(?[1-5])' + value: '${first_digit}xx' + + - key: NGINX_STATUS_FAMILY + match: '.*' + value: 'UNKNOWN' + +# Control what to do when input logs do not match the main PCRE2 pattern. +unmatched: + # The journal key to log the PCRE2 error message to. + # Set this to MESSAGE, so you to see the error in the log. + key: MESSAGE + + # Inject static fields to the unmatched entries. + # Set PRIORITY=1 (alert) to help you spot unmatched entries in the logs. + inject: + - key: PRIORITY + value: 1 diff --git a/collectors/log2journal/log2journal.h b/collectors/log2journal/log2journal.h new file mode 100644 index 00000000000000..834a5b135d8a3c --- /dev/null +++ b/collectors/log2journal/log2journal.h @@ -0,0 +1,501 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_LOG2JOURNAL_H +#define NETDATA_LOG2JOURNAL_H + +// only for PACKAGE_VERSION +#include "config.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// ---------------------------------------------------------------------------- +// logging + +// enable the compiler to check for printf like errors on our log2stderr() function +static inline void log2stderr(const char *format, ...) __attribute__ ((format(__printf__, 1, 2))); +static inline void log2stderr(const char *format, ...) { + va_list args; + va_start(args, format); + vfprintf(stderr, format, args); + va_end(args); + fprintf(stderr, "\n"); +} + +// ---------------------------------------------------------------------------- +// allocation functions abstraction + +static inline void *mallocz(size_t size) { + void *ptr = malloc(size); + if (!ptr) { + log2stderr("Fatal Error: Memory allocation failed. Requested size: %zu bytes.", size); + exit(EXIT_FAILURE); + } + return ptr; +} + +static inline void *callocz(size_t elements, size_t size) { + void *ptr = calloc(elements, size); + if (!ptr) { + log2stderr("Fatal Error: Memory allocation failed. 
Requested size: %zu bytes.", elements * size); + exit(EXIT_FAILURE); + } + return ptr; +} + +static inline void *reallocz(void *ptr, size_t size) { + void *new_ptr = realloc(ptr, size); + if (!new_ptr) { + log2stderr("Fatal Error: Memory reallocation failed. Requested size: %zu bytes.", size); + exit(EXIT_FAILURE); + } + return new_ptr; +} + +static inline char *strdupz(const char *s) { + char *ptr = strdup(s); + if (!ptr) { + log2stderr("Fatal Error: Memory allocation failed in strdup."); + exit(EXIT_FAILURE); + } + return ptr; +} + +static inline char *strndupz(const char *s, size_t n) { + char *ptr = strndup(s, n); + if (!ptr) { + log2stderr("Fatal Error: Memory allocation failed in strndup. Requested size: %zu bytes.", n); + exit(EXIT_FAILURE); + } + return ptr; +} + +static inline void freez(void *ptr) { + if (ptr) + free(ptr); +} + +// ---------------------------------------------------------------------------- + +#define XXH_INLINE_ALL +#include "../../libnetdata/xxhash.h" + +#define PCRE2_CODE_UNIT_WIDTH 8 +#include + +#ifdef HAVE_LIBYAML +#include +#endif + +// ---------------------------------------------------------------------------- +// hashtable for HASHED_KEY + +// cleanup hashtable defines +#undef SIMPLE_HASHTABLE_SORT_FUNCTION +#undef SIMPLE_HASHTABLE_VALUE_TYPE +#undef SIMPLE_HASHTABLE_NAME +#undef NETDATA_SIMPLE_HASHTABLE_H + +struct hashed_key; +static inline int compare_keys(struct hashed_key *k1, struct hashed_key *k2); +#define SIMPLE_HASHTABLE_SORT_FUNCTION compare_keys +#define SIMPLE_HASHTABLE_VALUE_TYPE struct hashed_key +#define SIMPLE_HASHTABLE_NAME _KEY +#include "../../libnetdata/simple_hashtable.h" + +// ---------------------------------------------------------------------------- + +#define MAX_OUTPUT_KEYS 1024 +#define MAX_LINE_LENGTH (1024 * 1024) +#define MAX_INJECTIONS (MAX_OUTPUT_KEYS / 2) +#define MAX_REWRITES (MAX_OUTPUT_KEYS / 2) +#define MAX_RENAMES (MAX_OUTPUT_KEYS / 2) + +#define JOURNAL_MAX_KEY_LEN 64 // according to systemd-journald +#define JOURNAL_MAX_VALUE_LEN (48 * 1024) // according to systemd-journald + +#define LOG2JOURNAL_CONFIG_PATH LIBCONFIG_DIR "/log2journal.d" + +// ---------------------------------------------------------------------------- +// character conversion for journal keys + +extern const char journal_key_characters_map[256]; + +// ---------------------------------------------------------------------------- +// copy to buffer, while ensuring there is no buffer overflow + +static inline size_t copy_to_buffer(char *dst, size_t dst_size, const char *src, size_t src_len) { + if(dst_size < 2) { + if(dst_size == 1) + *dst = '\0'; + + return 0; + } + + if(src_len <= dst_size - 1) { + memcpy(dst, src, src_len); + dst[src_len] = '\0'; + return src_len; + } + else { + memcpy(dst, src, dst_size - 1); + dst[dst_size - 1] = '\0'; + return dst_size - 1; + } +} + +// ---------------------------------------------------------------------------- +// A dynamically sized, reusable text buffer, +// allowing us to be fast (no allocations during iterations) while having the +// smallest possible allocations. 
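+// Illustrative sketch (not part of the original header), assuming typical use
+// by the parsers in this project:
+//
+//     TEXT v = { 0 };
+//     txt_replace(&v, "first value", 11);   // first call allocates the buffer
+//     txt_replace(&v, "next", 4);           // later calls reuse it if it fits
+//     txt_cleanup(&v);                      // freed once, when the job ends
+//
+// txt_replace() keeps the existing allocation whenever the new value fits in
+// it; txt_expand_and_append() grows the buffer to at least twice its current
+// size (or enough to hold the appended text, whichever is larger).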
+ +typedef struct txt { + char *txt; + uint32_t size; + uint32_t len; +} TEXT; + +static inline void txt_cleanup(TEXT *t) { + if(!t) + return; + + if(t->txt) + freez(t->txt); + + t->txt = NULL; + t->size = 0; + t->len = 0; +} + +static inline void txt_replace(TEXT *t, const char *s, size_t len) { + if(!s || !*s || len == 0) { + s = ""; + len = 0; + } + + if(len + 1 <= t->size) { + // the existing value allocation, fits our value + + memcpy(t->txt, s, len); + t->txt[len] = '\0'; + t->len = len; + } + else { + // no existing value allocation, or too small for our value + // cleanup and increase the buffer + + txt_cleanup(t); + + t->txt = strndupz(s, len); + t->size = len + 1; + t->len = len; + } +} + +static inline void txt_expand_and_append(TEXT *t, const char *s, size_t len) { + if(len + 1 > (t->size - t->len)) { + size_t new_size = t->len + len + 1; + if(new_size < t->size * 2) + new_size = t->size * 2; + + t->txt = reallocz(t->txt, new_size); + t->size = new_size; + } + + char *copy_to = &t->txt[t->len]; + memcpy(copy_to, s, len); + copy_to[len] = '\0'; + t->len += len; +} + +// ---------------------------------------------------------------------------- + +typedef enum __attribute__((__packed__)) { + HK_NONE = 0, + + // permanent flags - they are set once to optimize various decisions and lookups + + HK_HASHTABLE_ALLOCATED = (1 << 0), // this is key object allocated in the hashtable + // objects that do not have this, have a pointer to a key in the hashtable + // objects that have this, value a value allocated + + HK_FILTERED = (1 << 1), // we checked once if this key in filtered + HK_FILTERED_INCLUDED = (1 << 2), // the result of the filtering was to include it in the output + + HK_COLLISION_CHECKED = (1 << 3), // we checked once for collision check of this key + + HK_RENAMES_CHECKED = (1 << 4), // we checked once if there are renames on this key + HK_HAS_RENAMES = (1 << 5), // and we found there is a rename rule related to it + + // ephemeral flags - they are unset at the end of each log line + + HK_VALUE_FROM_LOG = (1 << 14), // the value of this key has been read from the log (or from injection, duplication) + HK_VALUE_REWRITTEN = (1 << 15), // the value of this key has been rewritten due to one of our rewrite rules + +} HASHED_KEY_FLAGS; + +typedef struct hashed_key { + const char *key; + uint32_t len; + HASHED_KEY_FLAGS flags; + XXH64_hash_t hash; + union { + struct hashed_key *hashtable_ptr; // HK_HASHTABLE_ALLOCATED is not set + TEXT value; // HK_HASHTABLE_ALLOCATED is set + }; +} HASHED_KEY; + +static inline void hashed_key_cleanup(HASHED_KEY *k) { + if(k->key) { + freez((void *)k->key); + k->key = NULL; + } + + if(k->flags & HK_HASHTABLE_ALLOCATED) + txt_cleanup(&k->value); + else + k->hashtable_ptr = NULL; +} + +static inline void hashed_key_set(HASHED_KEY *k, const char *name) { + hashed_key_cleanup(k); + + k->key = strdupz(name); + k->len = strlen(k->key); + k->hash = XXH3_64bits(k->key, k->len); + k->flags = HK_NONE; +} + +static inline void hashed_key_len_set(HASHED_KEY *k, const char *name, size_t len) { + hashed_key_cleanup(k); + + k->key = strndupz(name, len); + k->len = len; + k->hash = XXH3_64bits(k->key, k->len); + k->flags = HK_NONE; +} + +static inline bool hashed_keys_match(HASHED_KEY *k1, HASHED_KEY *k2) { + return ((k1 == k2) || (k1->hash == k2->hash && strcmp(k1->key, k2->key) == 0)); +} + +static inline int compare_keys(struct hashed_key *k1, struct hashed_key *k2) { + return strcmp(k1->key, k2->key); +} + +// 
---------------------------------------------------------------------------- + +typedef struct search_pattern { + const char *pattern; + pcre2_code *re; + pcre2_match_data *match_data; + TEXT error; +} SEARCH_PATTERN; + +void search_pattern_cleanup(SEARCH_PATTERN *sp); +bool search_pattern_set(SEARCH_PATTERN *sp, const char *search_pattern, size_t search_pattern_len); + +static inline bool search_pattern_matches(SEARCH_PATTERN *sp, const char *value, size_t value_len) { + return pcre2_match(sp->re, (PCRE2_SPTR)value, value_len, 0, 0, sp->match_data, NULL) >= 0; +} + +// ---------------------------------------------------------------------------- + +typedef struct replacement_node { + HASHED_KEY name; + bool is_variable; + bool logged_error; + + struct replacement_node *next; +} REPLACE_NODE; + +void replace_node_free(REPLACE_NODE *rpn); + +typedef struct replace_pattern { + const char *pattern; + REPLACE_NODE *nodes; + bool has_variables; +} REPLACE_PATTERN; + +void replace_pattern_cleanup(REPLACE_PATTERN *rp); +bool replace_pattern_set(REPLACE_PATTERN *rp, const char *pattern); + +// ---------------------------------------------------------------------------- + +typedef struct injection { + bool on_unmatched; + HASHED_KEY key; + REPLACE_PATTERN value; +} INJECTION; + +void injection_cleanup(INJECTION *inj); + +// ---------------------------------------------------------------------------- + +typedef struct key_rename { + HASHED_KEY new_key; + HASHED_KEY old_key; +} RENAME; + +void rename_cleanup(RENAME *rn); + +// ---------------------------------------------------------------------------- + +typedef enum __attribute__((__packed__)) { + RW_NONE = 0, + RW_MATCH_PCRE2 = (1 << 1), // a rewrite rule + RW_MATCH_NON_EMPTY = (1 << 2), // a rewrite rule + RW_DONT_STOP = (1 << 3), + RW_INJECT = (1 << 4), +} RW_FLAGS; + +typedef struct key_rewrite { + RW_FLAGS flags; + HASHED_KEY key; + union { + SEARCH_PATTERN match_pcre2; + REPLACE_PATTERN match_non_empty; + }; + REPLACE_PATTERN value; +} REWRITE; + +void rewrite_cleanup(REWRITE *rw); + +// ---------------------------------------------------------------------------- +// A job configuration and runtime structures + +typedef struct log_job { + bool show_config; + + const char *pattern; + const char *prefix; + + SIMPLE_HASHTABLE_KEY hashtable; + + struct { + const char *buffer; + const char *trimmed; + size_t trimmed_len; + size_t size; + HASHED_KEY key; + } line; + + struct { + SEARCH_PATTERN include; + SEARCH_PATTERN exclude; + } filter; + + struct { + bool last_line_was_empty; + HASHED_KEY key; + TEXT current; + } filename; + + struct { + uint32_t used; + INJECTION keys[MAX_INJECTIONS]; + } injections; + + struct { + HASHED_KEY key; + struct { + uint32_t used; + INJECTION keys[MAX_INJECTIONS]; + } injections; + } unmatched; + + struct { + uint32_t used; + REWRITE array[MAX_REWRITES]; + TEXT tmp; + } rewrites; + + struct { + uint32_t used; + RENAME array[MAX_RENAMES]; + } renames; +} LOG_JOB; + +// initialize a log job +void log_job_init(LOG_JOB *jb); + +// free all resources consumed by the log job +void log_job_cleanup(LOG_JOB *jb); + +// ---------------------------------------------------------------------------- + +// the entry point to send key value pairs to the output +// this implements the pipeline of processing renames, rewrites and duplications +void log_job_send_extracted_key_value(LOG_JOB *jb, const char *key, const char *value, size_t len); + +// ---------------------------------------------------------------------------- +// 
configuration related + +// management of configuration to set settings +bool log_job_filename_key_set(LOG_JOB *jb, const char *key, size_t key_len); +bool log_job_key_prefix_set(LOG_JOB *jb, const char *prefix, size_t prefix_len); +bool log_job_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len); +bool log_job_injection_add(LOG_JOB *jb, const char *key, size_t key_len, const char *value, size_t value_len, bool unmatched); +bool log_job_rewrite_add(LOG_JOB *jb, const char *key, RW_FLAGS flags, const char *search_pattern, const char *replace_pattern); +bool log_job_rename_add(LOG_JOB *jb, const char *new_key, size_t new_key_len, const char *old_key, size_t old_key_len); +bool log_job_include_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len); +bool log_job_exclude_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len); + +// entry point to parse command line parameters +bool log_job_command_line_parse_parameters(LOG_JOB *jb, int argc, char **argv); +void log_job_command_line_help(const char *name); + +// ---------------------------------------------------------------------------- +// YAML configuration related + +#ifdef HAVE_LIBYAML +bool yaml_parse_file(const char *config_file_path, LOG_JOB *jb); +bool yaml_parse_config(const char *config_name, LOG_JOB *jb); +#endif + +void log_job_configuration_to_yaml(LOG_JOB *jb); + +// ---------------------------------------------------------------------------- +// JSON parser + +typedef struct log_json_state LOG_JSON_STATE; +LOG_JSON_STATE *json_parser_create(LOG_JOB *jb); +void json_parser_destroy(LOG_JSON_STATE *js); +const char *json_parser_error(LOG_JSON_STATE *js); +bool json_parse_document(LOG_JSON_STATE *js, const char *txt); +void json_test(void); + +size_t parse_surrogate(const char *s, char *d, size_t *remaining); + +// ---------------------------------------------------------------------------- +// logfmt parser + +typedef struct logfmt_state LOGFMT_STATE; +LOGFMT_STATE *logfmt_parser_create(LOG_JOB *jb); +void logfmt_parser_destroy(LOGFMT_STATE *lfs); +const char *logfmt_parser_error(LOGFMT_STATE *lfs); +bool logfmt_parse_document(LOGFMT_STATE *js, const char *txt); +void logfmt_test(void); + +// ---------------------------------------------------------------------------- +// pcre2 parser + +typedef struct pcre2_state PCRE2_STATE; +PCRE2_STATE *pcre2_parser_create(LOG_JOB *jb); +void pcre2_parser_destroy(PCRE2_STATE *pcre2); +const char *pcre2_parser_error(PCRE2_STATE *pcre2); +bool pcre2_parse_document(PCRE2_STATE *pcre2, const char *txt, size_t len); +bool pcre2_has_error(PCRE2_STATE *pcre2); +void pcre2_test(void); + +void pcre2_get_error_in_buffer(char *msg, size_t msg_len, int rc, int pos); + +#endif //NETDATA_LOG2JOURNAL_H diff --git a/collectors/log2journal/tests.d/default.output b/collectors/log2journal/tests.d/default.output new file mode 100644 index 00000000000000..ef17cb2c7c78f6 --- /dev/null +++ b/collectors/log2journal/tests.d/default.output @@ -0,0 +1,20 @@ +MESSAGE=key1=value01 key2=value02 key3=value03 key4=value04 +PRIORITY=6 +SYSLOG_IDENTIFIER=log2journal + +MESSAGE=key1=value11 key2=value12 key3=value13 key4= +PRIORITY=6 +SYSLOG_IDENTIFIER=log2journal + +MESSAGE=key1=value21 key2=value22 key3=value23 key4=value24 +PRIORITY=6 +SYSLOG_IDENTIFIER=log2journal + +MESSAGE=key1=value31 key2=value32 key3=value33 key4= +PRIORITY=6 +SYSLOG_IDENTIFIER=log2journal + +MESSAGE=key1=value41 key2=value42 key3=value43 key4=value44 +PRIORITY=6 +SYSLOG_IDENTIFIER=log2journal + diff --git 
a/collectors/log2journal/tests.d/full.output b/collectors/log2journal/tests.d/full.output new file mode 100644 index 00000000000000..074092d4ed1f74 --- /dev/null +++ b/collectors/log2journal/tests.d/full.output @@ -0,0 +1,77 @@ +pattern: | + (?x) # Enable PCRE2 extended mode + ^ + (?[^ ]+) \s - \s # NGINX_REMOTE_ADDR + (?[^ ]+) \s # NGINX_REMOTE_USER + \[ + (?[^\]]+) # NGINX_TIME_LOCAL + \] + \s+ " + (? + (?[A-Z]+) \s+ # NGINX_METHOD + (?[^ ]+) \s+ + HTTP/(?[^"]+) + ) + " \s+ + (?\d+) \s+ # NGINX_STATUS + (?\d+) \s+ # NGINX_BODY_BYTES_SENT + "(?[^"]*)" \s+ # NGINX_HTTP_REFERER + "(?[^"]*)" # NGINX_HTTP_USER_AGENT + +prefix: NGINX_ + +filename: + key: NGINX_LOG_FILENAME + +filter: + include: '.*' + exclude: '.*HELLO.*WORLD.*' + +rename: + - new_key: TEST1 + old_key: TEST2 + - new_key: TEST3 + old_key: TEST4 + +inject: + - key: SYSLOG_IDENTIFIER + value: nginx-log + - key: SYSLOG_IDENTIFIER2 + value: nginx-log2 + - key: PRIORITY + value: '${NGINX_STATUS}' + - key: NGINX_STATUS_FAMILY + value: '${NGINX_STATUS}${NGINX_METHOD}' + +rewrite: + - key: PRIORITY + value: '${NGINX_STATUS}' + inject: yes + stop: no + - key: PRIORITY + match: '^[123]' + value: 6 + - key: PRIORITY + match: '^4' + value: 5 + - key: PRIORITY + match: '^5' + value: 3 + - key: PRIORITY + match: '.*' + value: 4 + - key: NGINX_STATUS_FAMILY + match: '^(?[1-5])' + value: '${first_digit}xx' + - key: NGINX_STATUS_FAMILY + match: '.*' + value: UNKNOWN + +unmatched: + key: MESSAGE + + inject: + - key: PRIORITY + value: 1 + - key: PRIORITY2 + value: 2 diff --git a/collectors/log2journal/tests.d/full.yaml b/collectors/log2journal/tests.d/full.yaml new file mode 100644 index 00000000000000..86cafb5a2a1189 --- /dev/null +++ b/collectors/log2journal/tests.d/full.yaml @@ -0,0 +1,76 @@ +pattern: | + (?x) # Enable PCRE2 extended mode + ^ + (?[^ ]+) \s - \s # NGINX_REMOTE_ADDR + (?[^ ]+) \s # NGINX_REMOTE_USER + \[ + (?[^\]]+) # NGINX_TIME_LOCAL + \] + \s+ " + (? 
+ (?[A-Z]+) \s+ # NGINX_METHOD + (?[^ ]+) \s+ + HTTP/(?[^"]+) + ) + " \s+ + (?\d+) \s+ # NGINX_STATUS + (?\d+) \s+ # NGINX_BODY_BYTES_SENT + "(?[^"]*)" \s+ # NGINX_HTTP_REFERER + "(?[^"]*)" # NGINX_HTTP_USER_AGENT + +prefix: NGINX_ + +filename: + key: NGINX_LOG_FILENAME + +filter: + include: '.*' + exclude: '.*HELLO.*WORLD.*' + +rename: + - new_key: TEST1 + old_key: TEST2 + - new_key: TEST3 + old_key: TEST4 + +inject: + - key: SYSLOG_IDENTIFIER + value: 'nginx-log' + - key: SYSLOG_IDENTIFIER2 + value: 'nginx-log2' + - key: PRIORITY + value: '${NGINX_STATUS}' + - key: NGINX_STATUS_FAMILY + value: '${NGINX_STATUS}${NGINX_METHOD}' + +rewrite: + - key: "PRIORITY" + value: "${NGINX_STATUS}" + inject: yes + stop: no + - key: "PRIORITY" + match: "^[123]" + value: 6 + - key: "PRIORITY" + match: "^4" + value: 5 + - key: "PRIORITY" + match: "^5" + value: 3 + - key: "PRIORITY" + match: ".*" + value: 4 + - key: "NGINX_STATUS_FAMILY" + match: "^(?[1-5])" + value: "${first_digit}xx" + - key: "NGINX_STATUS_FAMILY" + match: ".*" + value: "UNKNOWN" + +unmatched: + key: MESSAGE + inject: + - key: PRIORITY + value: 1 + - key: PRIORITY2 + value: 2 diff --git a/collectors/log2journal/tests.d/json-exclude.output b/collectors/log2journal/tests.d/json-exclude.output new file mode 100644 index 00000000000000..a8f6f83e6ce2d5 --- /dev/null +++ b/collectors/log2journal/tests.d/json-exclude.output @@ -0,0 +1,153 @@ +ARRAY2_0=1 +ARRAY2_1=-2.345 +ARRAY2_2=Array Element +ARRAY2_3=true +ARRAY2_4=false +ARRAY2_5=null +ARRAY2_6_BOOLEANFALSE=false +ARRAY2_6_BOOLEANTRUE=true +ARRAY2_6_FLOATNEGATIVE=-0.123 +ARRAY2_6_FLOATPOSITIVE=0.987 +ARRAY2_6_NULLVALUE=null +ARRAY2_6_NUMERICNEGATIVE=-456 +ARRAY2_6_NUMERICPOSITIVE=123 +ARRAY2_6_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +ARRAY2_6_SCIENTIFICINTPOSITIVE=6e4 +ARRAY2_6_SCIENTIFICSMALLPOSITIVE=5e-5 +ARRAY2_6_STRING=Nested Object in Array2 +ARRAY2_7_BOOLEANFALSE=false +ARRAY2_7_BOOLEANTRUE=true +ARRAY2_7_FLOATNEGATIVE=-2.71828 +ARRAY2_7_FLOATPOSITIVE=3.14159 +ARRAY2_7_NULLVALUE=null +ARRAY2_7_NUMERICNEGATIVE=-123 +ARRAY2_7_NUMERICPOSITIVE=42 +ARRAY2_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3 +ARRAY2_7_SCIENTIFICINTPOSITIVE=1e5 +ARRAY2_7_SCIENTIFICSMALLPOSITIVE=1e-4 +ARRAY2_7_STRING=Array Element with Object in Array2 +BOOLEANFALSE=false +BOOLEANTRUE=true +FLOATNEGATIVE=-2.71828 +FLOATPOSITIVE=3.14159 +NULLVALUE=null +NUMERICNEGATIVE=-123 +NUMERICPOSITIVE=42 +OBJECT_BOOLEANFALSE=false +OBJECT_BOOLEANTRUE=true +OBJECT_FLOATNEGATIVE=-0.123 +OBJECT_FLOATPOSITIVE=0.987 +OBJECT_NULLVALUE=null +OBJECT_NUMERICNEGATIVE=-456 +OBJECT_NUMERICPOSITIVE=123 +OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +OBJECT_SCIENTIFICINTPOSITIVE=6e4 +OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5 +OBJECT_STRING=Nested Object +SCIENTIFICFLOATNEGATIVE=-2.5e-3 +SCIENTIFICINTPOSITIVE=1e5 +SCIENTIFICSMALLPOSITIVE=1e-4 +STRING=Hello, World! 
+ +ARRAY2_0=1 +ARRAY2_1=-2.345 +ARRAY2_2=Array Element +ARRAY2_3=true +ARRAY2_4=false +ARRAY2_5=null +ARRAY2_6_BOOLEANFALSE=false +ARRAY2_6_BOOLEANTRUE=true +ARRAY2_6_FLOATNEGATIVE=-0.123 +ARRAY2_6_FLOATPOSITIVE=0.987 +ARRAY2_6_NULLVALUE=null +ARRAY2_6_NUMERICNEGATIVE=-456 +ARRAY2_6_NUMERICPOSITIVE=123 +ARRAY2_6_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +ARRAY2_6_SCIENTIFICINTPOSITIVE=6e4 +ARRAY2_6_SCIENTIFICSMALLPOSITIVE=5e-5 +ARRAY2_6_STRING=Nested Object in Array2 +ARRAY2_7_BOOLEANFALSE=false +ARRAY2_7_BOOLEANTRUE=true +ARRAY2_7_FLOATNEGATIVE=-2.71828 +ARRAY2_7_FLOATPOSITIVE=3.14159 +ARRAY2_7_NULLVALUE=null +ARRAY2_7_NUMERICNEGATIVE=-123 +ARRAY2_7_NUMERICPOSITIVE=42 +ARRAY2_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3 +ARRAY2_7_SCIENTIFICINTPOSITIVE=1e5 +ARRAY2_7_SCIENTIFICSMALLPOSITIVE=1e-4 +ARRAY2_7_STRING=Array Element with Object in Array2 +BOOLEANFALSE=false +BOOLEANTRUE=true +FLOATNEGATIVE=-2.71828 +FLOATPOSITIVE=3.14159 +NULLVALUE=null +NUMERICNEGATIVE=-123 +NUMERICPOSITIVE=42 +OBJECT_BOOLEANFALSE=false +OBJECT_BOOLEANTRUE=true +OBJECT_FLOATNEGATIVE=-0.123 +OBJECT_FLOATPOSITIVE=0.987 +OBJECT_NULLVALUE=null +OBJECT_NUMERICNEGATIVE=-456 +OBJECT_NUMERICPOSITIVE=123 +OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +OBJECT_SCIENTIFICINTPOSITIVE=6e4 +OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5 +OBJECT_STRING=Nested Object +SCIENTIFICFLOATNEGATIVE=-2.5e-3 +SCIENTIFICINTPOSITIVE=1e5 +SCIENTIFICSMALLPOSITIVE=1e-4 +STRING=Hello, World! + +ARRAY2_0=1 +ARRAY2_1=-2.345 +ARRAY2_2=Array Element +ARRAY2_3=true +ARRAY2_4=false +ARRAY2_5=null +ARRAY2_6_BOOLEANFALSE=false +ARRAY2_6_BOOLEANTRUE=true +ARRAY2_6_FLOATNEGATIVE=-0.123 +ARRAY2_6_FLOATPOSITIVE=0.987 +ARRAY2_6_NULLVALUE=null +ARRAY2_6_NUMERICNEGATIVE=-456 +ARRAY2_6_NUMERICPOSITIVE=123 +ARRAY2_6_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +ARRAY2_6_SCIENTIFICINTPOSITIVE=6e4 +ARRAY2_6_SCIENTIFICSMALLPOSITIVE=5e-5 +ARRAY2_6_STRING=Nested Object in Array2 +ARRAY2_7_BOOLEANFALSE=false +ARRAY2_7_BOOLEANTRUE=true +ARRAY2_7_FLOATNEGATIVE=-2.71828 +ARRAY2_7_FLOATPOSITIVE=3.14159 +ARRAY2_7_NULLVALUE=null +ARRAY2_7_NUMERICNEGATIVE=-123 +ARRAY2_7_NUMERICPOSITIVE=42 +ARRAY2_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3 +ARRAY2_7_SCIENTIFICINTPOSITIVE=1e5 +ARRAY2_7_SCIENTIFICSMALLPOSITIVE=1e-4 +ARRAY2_7_STRING=Array Element with Object in Array2 +BOOLEANFALSE=false +BOOLEANTRUE=true +FLOATNEGATIVE=-2.71828 +FLOATPOSITIVE=3.14159 +NULLVALUE=null +NUMERICNEGATIVE=-123 +NUMERICPOSITIVE=42 +OBJECT_BOOLEANFALSE=false +OBJECT_BOOLEANTRUE=true +OBJECT_FLOATNEGATIVE=-0.123 +OBJECT_FLOATPOSITIVE=0.987 +OBJECT_NULLVALUE=null +OBJECT_NUMERICNEGATIVE=-456 +OBJECT_NUMERICPOSITIVE=123 +OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +OBJECT_SCIENTIFICINTPOSITIVE=6e4 +OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5 +OBJECT_STRING=Nested Object +SCIENTIFICFLOATNEGATIVE=-2.5e-3 +SCIENTIFICINTPOSITIVE=1e5 +SCIENTIFICSMALLPOSITIVE=1e-4 +STRING=Hello, World! 
+ diff --git a/collectors/log2journal/tests.d/json-include.output b/collectors/log2journal/tests.d/json-include.output new file mode 100644 index 00000000000000..326c58da22edb5 --- /dev/null +++ b/collectors/log2journal/tests.d/json-include.output @@ -0,0 +1,54 @@ +OBJECT_ARRAY_0=1 +OBJECT_ARRAY_1=-2 +OBJECT_ARRAY_2=3 +OBJECT_ARRAY_3=Nested Array +OBJECT_ARRAY_4=true +OBJECT_ARRAY_5=null +OBJECT_BOOLEANFALSE=false +OBJECT_BOOLEANTRUE=true +OBJECT_FLOATNEGATIVE=-0.123 +OBJECT_FLOATPOSITIVE=0.987 +OBJECT_NULLVALUE=null +OBJECT_NUMERICNEGATIVE=-456 +OBJECT_NUMERICPOSITIVE=123 +OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +OBJECT_SCIENTIFICINTPOSITIVE=6e4 +OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5 +OBJECT_STRING=Nested Object + +OBJECT_ARRAY_0=1 +OBJECT_ARRAY_1=-2 +OBJECT_ARRAY_2=3 +OBJECT_ARRAY_3=Nested Array +OBJECT_ARRAY_4=true +OBJECT_ARRAY_5=null +OBJECT_BOOLEANFALSE=false +OBJECT_BOOLEANTRUE=true +OBJECT_FLOATNEGATIVE=-0.123 +OBJECT_FLOATPOSITIVE=0.987 +OBJECT_NULLVALUE=null +OBJECT_NUMERICNEGATIVE=-456 +OBJECT_NUMERICPOSITIVE=123 +OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +OBJECT_SCIENTIFICINTPOSITIVE=6e4 +OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5 +OBJECT_STRING=Nested Object + +OBJECT_ARRAY_0=1 +OBJECT_ARRAY_1=-2 +OBJECT_ARRAY_2=3 +OBJECT_ARRAY_3=Nested Array +OBJECT_ARRAY_4=true +OBJECT_ARRAY_5=null +OBJECT_BOOLEANFALSE=false +OBJECT_BOOLEANTRUE=true +OBJECT_FLOATNEGATIVE=-0.123 +OBJECT_FLOATPOSITIVE=0.987 +OBJECT_NULLVALUE=null +OBJECT_NUMERICNEGATIVE=-456 +OBJECT_NUMERICPOSITIVE=123 +OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +OBJECT_SCIENTIFICINTPOSITIVE=6e4 +OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5 +OBJECT_STRING=Nested Object + diff --git a/collectors/log2journal/tests.d/json.log b/collectors/log2journal/tests.d/json.log new file mode 100644 index 00000000000000..3f133496050948 --- /dev/null +++ b/collectors/log2journal/tests.d/json.log @@ -0,0 +1,3 @@ +{ "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Hello, World!", "nullValue": null, "object": { "numericPositive": 123, "numericNegative": -456, "floatPositive": 0.987, "floatNegative": -0.123, "scientificIntPositive": 6e4, "scientificFloatNegative": -1.5e-2, "scientificSmallPositive": 5e-5, "booleanTrue": true, "booleanFalse": false, "string": "Nested Object", "nullValue": null, "array": [1, -2, 3, "Nested Array", true, null] }, "array": [ 1, -2.345, "Array Element", true, false, null, { "numericPositive": 987, "numericNegative": -654, "string": "Nested Object in Array", "array": [null, false, true] }, { "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Array Element with Object", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object", true, null] } ], "array2": [ 1, -2.345, "Array Element", true, false, null, { "numericPositive": 123, "numericNegative": -456, "floatPositive": 0.987, "floatNegative": -0.123, "scientificIntPositive": 6e4, "scientificFloatNegative": -1.5e-2, "scientificSmallPositive": 5e-5, "booleanTrue": true, "booleanFalse": false, "string": "Nested Object in Array2", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object2", true, null] }, { "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, 
"floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Array Element with Object in Array2", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object2", true, null]}]} +{ "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Hello, World!", "nullValue": null, "object": { "numericPositive": 123, "numericNegative": -456, "floatPositive": 0.987, "floatNegative": -0.123, "scientificIntPositive": 6e4, "scientificFloatNegative": -1.5e-2, "scientificSmallPositive": 5e-5, "booleanTrue": true, "booleanFalse": false, "string": "Nested Object", "nullValue": null, "array": [1, -2, 3, "Nested Array", true, null] }, "array": [ 1, -2.345, "Array Element", true, false, null, { "numericPositive": 987, "numericNegative": -654, "string": "Nested Object in Array", "array": [null, false, true] }, { "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Array Element with Object", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object", true, null] } ], "array2": [ 1, -2.345, "Array Element", true, false, null, { "numericPositive": 123, "numericNegative": -456, "floatPositive": 0.987, "floatNegative": -0.123, "scientificIntPositive": 6e4, "scientificFloatNegative": -1.5e-2, "scientificSmallPositive": 5e-5, "booleanTrue": true, "booleanFalse": false, "string": "Nested Object in Array2", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object2", true, null] }, { "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Array Element with Object in Array2", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object2", true, null]}]} +{ "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Hello, World!", "nullValue": null, "object": { "numericPositive": 123, "numericNegative": -456, "floatPositive": 0.987, "floatNegative": -0.123, "scientificIntPositive": 6e4, "scientificFloatNegative": -1.5e-2, "scientificSmallPositive": 5e-5, "booleanTrue": true, "booleanFalse": false, "string": "Nested Object", "nullValue": null, "array": [1, -2, 3, "Nested Array", true, null] }, "array": [ 1, -2.345, "Array Element", true, false, null, { "numericPositive": 987, "numericNegative": -654, "string": "Nested Object in Array", "array": [null, false, true] }, { "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Array Element with Object", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object", true, null] } ], "array2": [ 1, -2.345, "Array Element", true, false, null, { "numericPositive": 123, 
"numericNegative": -456, "floatPositive": 0.987, "floatNegative": -0.123, "scientificIntPositive": 6e4, "scientificFloatNegative": -1.5e-2, "scientificSmallPositive": 5e-5, "booleanTrue": true, "booleanFalse": false, "string": "Nested Object in Array2", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object2", true, null] }, { "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Array Element with Object in Array2", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object2", true, null]}]} diff --git a/collectors/log2journal/tests.d/json.output b/collectors/log2journal/tests.d/json.output new file mode 100644 index 00000000000000..83499cc55f428f --- /dev/null +++ b/collectors/log2journal/tests.d/json.output @@ -0,0 +1,294 @@ +ARRAY2_0=1 +ARRAY2_1=-2.345 +ARRAY2_2=Array Element +ARRAY2_3=true +ARRAY2_4=false +ARRAY2_5=null +ARRAY2_6_ARRAY_0=1 +ARRAY2_6_ARRAY_1=-2 +ARRAY2_6_ARRAY_2=3 +ARRAY2_6_ARRAY_3=Nested Array in Object2 +ARRAY2_6_ARRAY_4=true +ARRAY2_6_ARRAY_5=null +ARRAY2_6_BOOLEANFALSE=false +ARRAY2_6_BOOLEANTRUE=true +ARRAY2_6_FLOATNEGATIVE=-0.123 +ARRAY2_6_FLOATPOSITIVE=0.987 +ARRAY2_6_NULLVALUE=null +ARRAY2_6_NUMERICNEGATIVE=-456 +ARRAY2_6_NUMERICPOSITIVE=123 +ARRAY2_6_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +ARRAY2_6_SCIENTIFICINTPOSITIVE=6e4 +ARRAY2_6_SCIENTIFICSMALLPOSITIVE=5e-5 +ARRAY2_6_STRING=Nested Object in Array2 +ARRAY2_7_ARRAY_0=1 +ARRAY2_7_ARRAY_1=-2 +ARRAY2_7_ARRAY_2=3 +ARRAY2_7_ARRAY_3=Nested Array in Object2 +ARRAY2_7_ARRAY_4=true +ARRAY2_7_ARRAY_5=null +ARRAY2_7_BOOLEANFALSE=false +ARRAY2_7_BOOLEANTRUE=true +ARRAY2_7_FLOATNEGATIVE=-2.71828 +ARRAY2_7_FLOATPOSITIVE=3.14159 +ARRAY2_7_NULLVALUE=null +ARRAY2_7_NUMERICNEGATIVE=-123 +ARRAY2_7_NUMERICPOSITIVE=42 +ARRAY2_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3 +ARRAY2_7_SCIENTIFICINTPOSITIVE=1e5 +ARRAY2_7_SCIENTIFICSMALLPOSITIVE=1e-4 +ARRAY2_7_STRING=Array Element with Object in Array2 +ARRAY_0=1 +ARRAY_1=-2.345 +ARRAY_2=Array Element +ARRAY_3=true +ARRAY_4=false +ARRAY_5=null +ARRAY_6_ARRAY_0=null +ARRAY_6_ARRAY_1=false +ARRAY_6_ARRAY_2=true +ARRAY_6_NUMERICNEGATIVE=-654 +ARRAY_6_NUMERICPOSITIVE=987 +ARRAY_6_STRING=Nested Object in Array +ARRAY_7_ARRAY_0=1 +ARRAY_7_ARRAY_1=-2 +ARRAY_7_ARRAY_2=3 +ARRAY_7_ARRAY_3=Nested Array in Object +ARRAY_7_ARRAY_4=true +ARRAY_7_ARRAY_5=null +ARRAY_7_BOOLEANFALSE=false +ARRAY_7_BOOLEANTRUE=true +ARRAY_7_FLOATNEGATIVE=-2.71828 +ARRAY_7_FLOATPOSITIVE=3.14159 +ARRAY_7_NULLVALUE=null +ARRAY_7_NUMERICNEGATIVE=-123 +ARRAY_7_NUMERICPOSITIVE=42 +ARRAY_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3 +ARRAY_7_SCIENTIFICINTPOSITIVE=1e5 +ARRAY_7_SCIENTIFICSMALLPOSITIVE=1e-4 +ARRAY_7_STRING=Array Element with Object +BOOLEANFALSE=false +BOOLEANTRUE=true +FLOATNEGATIVE=-2.71828 +FLOATPOSITIVE=3.14159 +NULLVALUE=null +NUMERICNEGATIVE=-123 +NUMERICPOSITIVE=42 +OBJECT_ARRAY_0=1 +OBJECT_ARRAY_1=-2 +OBJECT_ARRAY_2=3 +OBJECT_ARRAY_3=Nested Array +OBJECT_ARRAY_4=true +OBJECT_ARRAY_5=null +OBJECT_BOOLEANFALSE=false +OBJECT_BOOLEANTRUE=true +OBJECT_FLOATNEGATIVE=-0.123 +OBJECT_FLOATPOSITIVE=0.987 +OBJECT_NULLVALUE=null +OBJECT_NUMERICNEGATIVE=-456 +OBJECT_NUMERICPOSITIVE=123 +OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +OBJECT_SCIENTIFICINTPOSITIVE=6e4 +OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5 +OBJECT_STRING=Nested Object +SCIENTIFICFLOATNEGATIVE=-2.5e-3 +SCIENTIFICINTPOSITIVE=1e5 +SCIENTIFICSMALLPOSITIVE=1e-4 +STRING=Hello, World! 
+ +ARRAY2_0=1 +ARRAY2_1=-2.345 +ARRAY2_2=Array Element +ARRAY2_3=true +ARRAY2_4=false +ARRAY2_5=null +ARRAY2_6_ARRAY_0=1 +ARRAY2_6_ARRAY_1=-2 +ARRAY2_6_ARRAY_2=3 +ARRAY2_6_ARRAY_3=Nested Array in Object2 +ARRAY2_6_ARRAY_4=true +ARRAY2_6_ARRAY_5=null +ARRAY2_6_BOOLEANFALSE=false +ARRAY2_6_BOOLEANTRUE=true +ARRAY2_6_FLOATNEGATIVE=-0.123 +ARRAY2_6_FLOATPOSITIVE=0.987 +ARRAY2_6_NULLVALUE=null +ARRAY2_6_NUMERICNEGATIVE=-456 +ARRAY2_6_NUMERICPOSITIVE=123 +ARRAY2_6_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +ARRAY2_6_SCIENTIFICINTPOSITIVE=6e4 +ARRAY2_6_SCIENTIFICSMALLPOSITIVE=5e-5 +ARRAY2_6_STRING=Nested Object in Array2 +ARRAY2_7_ARRAY_0=1 +ARRAY2_7_ARRAY_1=-2 +ARRAY2_7_ARRAY_2=3 +ARRAY2_7_ARRAY_3=Nested Array in Object2 +ARRAY2_7_ARRAY_4=true +ARRAY2_7_ARRAY_5=null +ARRAY2_7_BOOLEANFALSE=false +ARRAY2_7_BOOLEANTRUE=true +ARRAY2_7_FLOATNEGATIVE=-2.71828 +ARRAY2_7_FLOATPOSITIVE=3.14159 +ARRAY2_7_NULLVALUE=null +ARRAY2_7_NUMERICNEGATIVE=-123 +ARRAY2_7_NUMERICPOSITIVE=42 +ARRAY2_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3 +ARRAY2_7_SCIENTIFICINTPOSITIVE=1e5 +ARRAY2_7_SCIENTIFICSMALLPOSITIVE=1e-4 +ARRAY2_7_STRING=Array Element with Object in Array2 +ARRAY_0=1 +ARRAY_1=-2.345 +ARRAY_2=Array Element +ARRAY_3=true +ARRAY_4=false +ARRAY_5=null +ARRAY_6_ARRAY_0=null +ARRAY_6_ARRAY_1=false +ARRAY_6_ARRAY_2=true +ARRAY_6_NUMERICNEGATIVE=-654 +ARRAY_6_NUMERICPOSITIVE=987 +ARRAY_6_STRING=Nested Object in Array +ARRAY_7_ARRAY_0=1 +ARRAY_7_ARRAY_1=-2 +ARRAY_7_ARRAY_2=3 +ARRAY_7_ARRAY_3=Nested Array in Object +ARRAY_7_ARRAY_4=true +ARRAY_7_ARRAY_5=null +ARRAY_7_BOOLEANFALSE=false +ARRAY_7_BOOLEANTRUE=true +ARRAY_7_FLOATNEGATIVE=-2.71828 +ARRAY_7_FLOATPOSITIVE=3.14159 +ARRAY_7_NULLVALUE=null +ARRAY_7_NUMERICNEGATIVE=-123 +ARRAY_7_NUMERICPOSITIVE=42 +ARRAY_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3 +ARRAY_7_SCIENTIFICINTPOSITIVE=1e5 +ARRAY_7_SCIENTIFICSMALLPOSITIVE=1e-4 +ARRAY_7_STRING=Array Element with Object +BOOLEANFALSE=false +BOOLEANTRUE=true +FLOATNEGATIVE=-2.71828 +FLOATPOSITIVE=3.14159 +NULLVALUE=null +NUMERICNEGATIVE=-123 +NUMERICPOSITIVE=42 +OBJECT_ARRAY_0=1 +OBJECT_ARRAY_1=-2 +OBJECT_ARRAY_2=3 +OBJECT_ARRAY_3=Nested Array +OBJECT_ARRAY_4=true +OBJECT_ARRAY_5=null +OBJECT_BOOLEANFALSE=false +OBJECT_BOOLEANTRUE=true +OBJECT_FLOATNEGATIVE=-0.123 +OBJECT_FLOATPOSITIVE=0.987 +OBJECT_NULLVALUE=null +OBJECT_NUMERICNEGATIVE=-456 +OBJECT_NUMERICPOSITIVE=123 +OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +OBJECT_SCIENTIFICINTPOSITIVE=6e4 +OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5 +OBJECT_STRING=Nested Object +SCIENTIFICFLOATNEGATIVE=-2.5e-3 +SCIENTIFICINTPOSITIVE=1e5 +SCIENTIFICSMALLPOSITIVE=1e-4 +STRING=Hello, World! 
+ +ARRAY2_0=1 +ARRAY2_1=-2.345 +ARRAY2_2=Array Element +ARRAY2_3=true +ARRAY2_4=false +ARRAY2_5=null +ARRAY2_6_ARRAY_0=1 +ARRAY2_6_ARRAY_1=-2 +ARRAY2_6_ARRAY_2=3 +ARRAY2_6_ARRAY_3=Nested Array in Object2 +ARRAY2_6_ARRAY_4=true +ARRAY2_6_ARRAY_5=null +ARRAY2_6_BOOLEANFALSE=false +ARRAY2_6_BOOLEANTRUE=true +ARRAY2_6_FLOATNEGATIVE=-0.123 +ARRAY2_6_FLOATPOSITIVE=0.987 +ARRAY2_6_NULLVALUE=null +ARRAY2_6_NUMERICNEGATIVE=-456 +ARRAY2_6_NUMERICPOSITIVE=123 +ARRAY2_6_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +ARRAY2_6_SCIENTIFICINTPOSITIVE=6e4 +ARRAY2_6_SCIENTIFICSMALLPOSITIVE=5e-5 +ARRAY2_6_STRING=Nested Object in Array2 +ARRAY2_7_ARRAY_0=1 +ARRAY2_7_ARRAY_1=-2 +ARRAY2_7_ARRAY_2=3 +ARRAY2_7_ARRAY_3=Nested Array in Object2 +ARRAY2_7_ARRAY_4=true +ARRAY2_7_ARRAY_5=null +ARRAY2_7_BOOLEANFALSE=false +ARRAY2_7_BOOLEANTRUE=true +ARRAY2_7_FLOATNEGATIVE=-2.71828 +ARRAY2_7_FLOATPOSITIVE=3.14159 +ARRAY2_7_NULLVALUE=null +ARRAY2_7_NUMERICNEGATIVE=-123 +ARRAY2_7_NUMERICPOSITIVE=42 +ARRAY2_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3 +ARRAY2_7_SCIENTIFICINTPOSITIVE=1e5 +ARRAY2_7_SCIENTIFICSMALLPOSITIVE=1e-4 +ARRAY2_7_STRING=Array Element with Object in Array2 +ARRAY_0=1 +ARRAY_1=-2.345 +ARRAY_2=Array Element +ARRAY_3=true +ARRAY_4=false +ARRAY_5=null +ARRAY_6_ARRAY_0=null +ARRAY_6_ARRAY_1=false +ARRAY_6_ARRAY_2=true +ARRAY_6_NUMERICNEGATIVE=-654 +ARRAY_6_NUMERICPOSITIVE=987 +ARRAY_6_STRING=Nested Object in Array +ARRAY_7_ARRAY_0=1 +ARRAY_7_ARRAY_1=-2 +ARRAY_7_ARRAY_2=3 +ARRAY_7_ARRAY_3=Nested Array in Object +ARRAY_7_ARRAY_4=true +ARRAY_7_ARRAY_5=null +ARRAY_7_BOOLEANFALSE=false +ARRAY_7_BOOLEANTRUE=true +ARRAY_7_FLOATNEGATIVE=-2.71828 +ARRAY_7_FLOATPOSITIVE=3.14159 +ARRAY_7_NULLVALUE=null +ARRAY_7_NUMERICNEGATIVE=-123 +ARRAY_7_NUMERICPOSITIVE=42 +ARRAY_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3 +ARRAY_7_SCIENTIFICINTPOSITIVE=1e5 +ARRAY_7_SCIENTIFICSMALLPOSITIVE=1e-4 +ARRAY_7_STRING=Array Element with Object +BOOLEANFALSE=false +BOOLEANTRUE=true +FLOATNEGATIVE=-2.71828 +FLOATPOSITIVE=3.14159 +NULLVALUE=null +NUMERICNEGATIVE=-123 +NUMERICPOSITIVE=42 +OBJECT_ARRAY_0=1 +OBJECT_ARRAY_1=-2 +OBJECT_ARRAY_2=3 +OBJECT_ARRAY_3=Nested Array +OBJECT_ARRAY_4=true +OBJECT_ARRAY_5=null +OBJECT_BOOLEANFALSE=false +OBJECT_BOOLEANTRUE=true +OBJECT_FLOATNEGATIVE=-0.123 +OBJECT_FLOATPOSITIVE=0.987 +OBJECT_NULLVALUE=null +OBJECT_NUMERICNEGATIVE=-456 +OBJECT_NUMERICPOSITIVE=123 +OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2 +OBJECT_SCIENTIFICINTPOSITIVE=6e4 +OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5 +OBJECT_STRING=Nested Object +SCIENTIFICFLOATNEGATIVE=-2.5e-3 +SCIENTIFICINTPOSITIVE=1e5 +SCIENTIFICSMALLPOSITIVE=1e-4 +STRING=Hello, World! + diff --git a/collectors/log2journal/tests.d/logfmt.log b/collectors/log2journal/tests.d/logfmt.log new file mode 100644 index 00000000000000..e55a83bbbbb37c --- /dev/null +++ b/collectors/log2journal/tests.d/logfmt.log @@ -0,0 +1,5 @@ +key1=value01 key2=value02 key3=value03 key4=value04 +key1=value11 key2=value12 key3=value13 key4= +key1=value21 key2=value22 key3=value23 key4=value24 +key1=value31 key2=value32 key3=value33 key4= +key1=value41 key2=value42 key3=value43 key4=value44 diff --git a/collectors/log2journal/tests.d/logfmt.output b/collectors/log2journal/tests.d/logfmt.output new file mode 100644 index 00000000000000..4291c966507eea --- /dev/null +++ b/collectors/log2journal/tests.d/logfmt.output @@ -0,0 +1,37 @@ +INJECTED=Key INJECTED had value 'value01 - value02' and now has this, but only on the first row of the log. 
+KEY1=value01 +KEY2=value02 +KEY3=value03 +KEY4=value04 +SIMPLE_INJECTION=An unset variable looks like '', while the value of KEY2 is 'value02' +YET_ANOTHER_INJECTION=value01 - value02 - Key INJECTED had value 'value01 - value02' and now has this, but only on the first row of the log. - this should work because inject is yes + +INJECTED=value11 - value12 +KEY1=value11 +KEY2=value12 +KEY3=value13 +SIMPLE_INJECTION=An unset variable looks like '', while the value of KEY2 is 'value12' +YET_ANOTHER_INJECTION=value11 - value12 - value11 - value12 - this should work because inject is yes + +INJECTED=KEY4 has the value 'value24'; it is not empty, so INJECTED has been rewritten. +KEY1=value21 +KEY2=value22 +KEY3=value23 +KEY4=value24 +SIMPLE_INJECTION=An unset variable looks like '', while the value of KEY2 is 'value22' +YET_ANOTHER_INJECTION=value21 - value22 - KEY4 has the value 'value24'; it is not empty, so INJECTED has been rewritten. - this should work because inject is yes + +INJECTED=value31 - value32 +KEY1=value31 +KEY2=value32 +KEY3=value33 +YET_ANOTHER_INJECTION=value31 - value32 - value31 - value32 - this should work because inject is yes + +INJECTED=KEY4 has the value 'value44'; it is not empty, so INJECTED has been rewritten. +KEY1=value41 +KEY2=value42 +KEY3=value43 +KEY4=value44 +SIMPLE_INJECTION=An unset variable looks like '', while the value of KEY2 is 'value42' +YET_ANOTHER_INJECTION=value41 - value42 - KEY4 has the value 'value44'; it is not empty, so INJECTED has been rewritten. - this should work because inject is yes + diff --git a/collectors/log2journal/tests.d/logfmt.yaml b/collectors/log2journal/tests.d/logfmt.yaml new file mode 100644 index 00000000000000..91e93a71ecf9f4 --- /dev/null +++ b/collectors/log2journal/tests.d/logfmt.yaml @@ -0,0 +1,34 @@ +pattern: logfmt + +inject: + - key: SIMPLE_INJECTION + value: "An unset variable looks like '${this}', while the value of KEY2 is '${KEY2}'" + +rewrite: + - key: INJECTED + value: "${KEY1} - ${KEY2}" + inject: yes + stop: no + + - key: INJECTED + match: '^value01' + value: "Key INJECTED had value '${INJECTED}' and now has this, but only on the first row of the log." + + - key: INJECTED + not_empty: "${KEY4}" + value: "KEY4 has the value '${KEY4}'; it is not empty, so INJECTED has been rewritten." + + - key: INJECTED + match: '^KEY4 has the value' + value: "This value should not appear in the logs, because the previous one matched and stopped the pipeline." + + - key: ANOTHER_INJECTION + value: "${KEY1} - ${KEY2} - ${INJECTED} - should not work because inject is not true amd ANOTHER_INJECTION is not in the log file." 
+ + - key: YET_ANOTHER_INJECTION + value: "${KEY1} - ${KEY2} - ${INJECTED} - this should work because inject is yes" + inject: yes + + - key: SIMPLE_INJECTION + match: "KEY2 is 'value32'" + value: "" # empty, so SIMPLE_INJECTION should not be available on row 3 diff --git a/collectors/log2journal/tests.d/nginx-combined.log b/collectors/log2journal/tests.d/nginx-combined.log new file mode 100644 index 00000000000000..b0faa81e906bc2 --- /dev/null +++ b/collectors/log2journal/tests.d/nginx-combined.log @@ -0,0 +1,14 @@ +2a02:169:1210::2000 - - [30/Nov/2023:19:35:27 +0000] "GET /api/v1/data?chart=system.net&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775349 HTTP/1.1" 200 4844 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36" +2a02:169:1210::2000 - - [30/Nov/2023:19:35:27 +0000] "OPTIONS /api/v1/data?chart=netdata.clients&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775358 HTTP/1.1" 200 29 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36" +2a02:169:1210::2000 - - [30/Nov/2023:19:35:27 +0000] "OPTIONS /api/v1/data?chart=netdata.net&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=out&_=1701372775359 HTTP/1.1" 200 29 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36" +2a02:169:1210::2000 - - [30/Nov/2023:19:35:27 +0000] "OPTIONS /api/v1/data?chart=netdata.requests&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775357 HTTP/1.1" 200 29 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36" +127.0.0.1 - - [30/Nov/2023:19:35:28 +0000] "GET /stub_status HTTP/1.1" 200 120 "-" "Go-http-client/1.1" +2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "GET /api/v1/data?chart=netdata.net&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=out&_=1701372775359 HTTP/1.1" 200 1918 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36" +2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "GET /api/v1/data?chart=netdata.requests&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775357 HTTP/1.1" 200 1632 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36" +2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "GET /api/v1/data?chart=netdata.clients&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775358 HTTP/1.1" 200 588 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36" +2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "OPTIONS /api/v1/data?chart=system.cpu&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775360 HTTP/1.1" 200 29 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like 
Gecko) Chrome/92.0.4515.98 Safari/537.36" +2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "OPTIONS /api/v1/data?chart=netdata.net&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=in&_=1701372775361 HTTP/1.1" 200 29 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36" +2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "GET /api/v1/data?chart=system.cpu&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775360 HTTP/1.1" 200 6085 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36" +2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "GET /api/v1/data?chart=netdata.net&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=in&_=1701372775361 HTTP/1.1" 200 1918 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36" +2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "OPTIONS /api/v1/data?chart=system.io&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775362 HTTP/1.1" 200 29 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36" +2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "GET /api/v1/data?chart=system.io&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775362 HTTP/1.1" 200 3503 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36" diff --git a/collectors/log2journal/tests.d/nginx-combined.output b/collectors/log2journal/tests.d/nginx-combined.output new file mode 100644 index 00000000000000..07fd110144df0c --- /dev/null +++ b/collectors/log2journal/tests.d/nginx-combined.output @@ -0,0 +1,210 @@ +MESSAGE=GET /api/v1/data?chart=system.net&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775349 HTTP/1.1 +NGINX_BODY_BYTES_SENT=4844 +NGINX_HTTP_REFERER=http://192.168.69.5:19999/ +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36 +NGINX_REMOTE_ADDR=2a02:169:1210::2000 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_URI=/api/v1/data?chart=system.net&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775349 +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIME_LOCAL=30/Nov/2023:19:35:27 +0000 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=OPTIONS /api/v1/data?chart=netdata.clients&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775358 HTTP/1.1 +NGINX_BODY_BYTES_SENT=29 +NGINX_HTTP_REFERER=http://192.168.69.5:19999/ +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36 +NGINX_REMOTE_ADDR=2a02:169:1210::2000 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=OPTIONS 
+NGINX_REQUEST_URI=/api/v1/data?chart=netdata.clients&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775358 +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIME_LOCAL=30/Nov/2023:19:35:27 +0000 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=OPTIONS /api/v1/data?chart=netdata.net&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=out&_=1701372775359 HTTP/1.1 +NGINX_BODY_BYTES_SENT=29 +NGINX_HTTP_REFERER=http://192.168.69.5:19999/ +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36 +NGINX_REMOTE_ADDR=2a02:169:1210::2000 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=OPTIONS +NGINX_REQUEST_URI=/api/v1/data?chart=netdata.net&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=out&_=1701372775359 +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIME_LOCAL=30/Nov/2023:19:35:27 +0000 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=OPTIONS /api/v1/data?chart=netdata.requests&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775357 HTTP/1.1 +NGINX_BODY_BYTES_SENT=29 +NGINX_HTTP_REFERER=http://192.168.69.5:19999/ +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36 +NGINX_REMOTE_ADDR=2a02:169:1210::2000 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=OPTIONS +NGINX_REQUEST_URI=/api/v1/data?chart=netdata.requests&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775357 +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIME_LOCAL=30/Nov/2023:19:35:27 +0000 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=GET /stub_status HTTP/1.1 +NGINX_BODY_BYTES_SENT=120 +NGINX_HTTP_REFERER=- +NGINX_HTTP_USER_AGENT=Go-http-client/1.1 +NGINX_REMOTE_ADDR=127.0.0.1 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_URI=/stub_status +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=GET /api/v1/data?chart=netdata.net&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=out&_=1701372775359 HTTP/1.1 +NGINX_BODY_BYTES_SENT=1918 +NGINX_HTTP_REFERER=http://192.168.69.5:19999/ +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36 +NGINX_REMOTE_ADDR=2a02:169:1210::2000 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_URI=/api/v1/data?chart=netdata.net&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=out&_=1701372775359 +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=GET /api/v1/data?chart=netdata.requests&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775357 HTTP/1.1 +NGINX_BODY_BYTES_SENT=1632 +NGINX_HTTP_REFERER=http://192.168.69.5:19999/ +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36 
+NGINX_REMOTE_ADDR=2a02:169:1210::2000 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_URI=/api/v1/data?chart=netdata.requests&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775357 +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=GET /api/v1/data?chart=netdata.clients&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775358 HTTP/1.1 +NGINX_BODY_BYTES_SENT=588 +NGINX_HTTP_REFERER=http://192.168.69.5:19999/ +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36 +NGINX_REMOTE_ADDR=2a02:169:1210::2000 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_URI=/api/v1/data?chart=netdata.clients&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775358 +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=OPTIONS /api/v1/data?chart=system.cpu&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775360 HTTP/1.1 +NGINX_BODY_BYTES_SENT=29 +NGINX_HTTP_REFERER=http://192.168.69.5:19999/ +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36 +NGINX_REMOTE_ADDR=2a02:169:1210::2000 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=OPTIONS +NGINX_REQUEST_URI=/api/v1/data?chart=system.cpu&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775360 +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=OPTIONS /api/v1/data?chart=netdata.net&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=in&_=1701372775361 HTTP/1.1 +NGINX_BODY_BYTES_SENT=29 +NGINX_HTTP_REFERER=http://192.168.69.5:19999/ +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36 +NGINX_REMOTE_ADDR=2a02:169:1210::2000 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=OPTIONS +NGINX_REQUEST_URI=/api/v1/data?chart=netdata.net&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=in&_=1701372775361 +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=GET /api/v1/data?chart=system.cpu&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775360 HTTP/1.1 +NGINX_BODY_BYTES_SENT=6085 +NGINX_HTTP_REFERER=http://192.168.69.5:19999/ +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36 +NGINX_REMOTE_ADDR=2a02:169:1210::2000 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_URI=/api/v1/data?chart=system.cpu&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775360 +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000 
+PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=GET /api/v1/data?chart=netdata.net&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=in&_=1701372775361 HTTP/1.1 +NGINX_BODY_BYTES_SENT=1918 +NGINX_HTTP_REFERER=http://192.168.69.5:19999/ +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36 +NGINX_REMOTE_ADDR=2a02:169:1210::2000 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_URI=/api/v1/data?chart=netdata.net&format=array&points=300&group=average>ime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=in&_=1701372775361 +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=OPTIONS /api/v1/data?chart=system.io&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775362 HTTP/1.1 +NGINX_BODY_BYTES_SENT=29 +NGINX_HTTP_REFERER=http://192.168.69.5:19999/ +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36 +NGINX_REMOTE_ADDR=2a02:169:1210::2000 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=OPTIONS +NGINX_REQUEST_URI=/api/v1/data?chart=system.io&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775362 +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=GET /api/v1/data?chart=system.io&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775362 HTTP/1.1 +NGINX_BODY_BYTES_SENT=3503 +NGINX_HTTP_REFERER=http://192.168.69.5:19999/ +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36 +NGINX_REMOTE_ADDR=2a02:169:1210::2000 +NGINX_REMOTE_USER=- +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_URI=/api/v1/data?chart=system.io&format=json&points=267&group=average>ime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775362 +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + diff --git a/collectors/log2journal/tests.d/nginx-json.log b/collectors/log2journal/tests.d/nginx-json.log new file mode 100644 index 00000000000000..7e2b5d5f59ea02 --- /dev/null +++ b/collectors/log2journal/tests.d/nginx-json.log @@ -0,0 +1,9 @@ +{"msec":"1644997905.123","connection":12345,"connection_requests":5,"pid":9876,"request_id":"8f3ebc1e38fbb92f","request_length":345,"remote_addr":"192.168.1.100","remote_user":"john_doe","remote_port":54321,"time_local":"19/Feb/2023:14:15:05 +0000","request":"GET /index.html HTTP/1.1","request_uri":"/index.html?param=value","args":"param=value","status":200,"body_bytes_sent":5432,"bytes_sent":6543,"http_referer":"https://example.com","http_user_agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64)","http_x_forwarded_for":"192.168.1.50, 
10.0.0.1","host":"example.com","request_time":0.123,"upstream":"10.0.0.2:8080","upstream_connect_time":0.045,"upstream_header_time":0.020,"upstream_response_time":0.058,"upstream_response_length":7890,"upstream_cache_status":"MISS","ssl_protocol":"TLSv1.2","ssl_cipher":"AES256-SHA256","scheme":"https","request_method":"GET","server_protocol":"HTTP/1.1","pipe":".","gzip_ratio":"2.1","http_cf_ray":"abc123def456","geoip_country_code":"US"} +{"msec":"1644997910.789","connection":54321,"connection_requests":10,"pid":5432,"request_id":"4a7bca5e19d3f8e7","request_length":432,"remote_addr":"10.0.0.3","remote_user":"","remote_port":12345,"time_local":"19/Feb/2023:14:15:10 +0000","request":"POST /api/update HTTP/1.1","request_uri":"/api/update","args":"","status":204,"body_bytes_sent":0,"bytes_sent":123,"http_referer":"","http_user_agent":"curl/7.68.0","http_x_forwarded_for":"","host":"api.example.com","request_time":0.032,"upstream":"backend-server-1:8080","upstream_connect_time":0.012,"upstream_header_time":0.020,"upstream_response_time":0.010,"upstream_response_length":0,"upstream_cache_status":"","ssl_protocol":"","ssl_cipher":"","scheme":"http","request_method":"POST","server_protocol":"HTTP/1.1","pipe":"p","gzip_ratio":"","http_cf_ray":"","geoip_country_code":""} +{"msec":"1644997920.456","connection":98765,"connection_requests":15,"pid":1234,"request_id":"63f8ad2c3e1b4090","request_length":567,"remote_addr":"2001:0db8:85a3:0000:0000:8a2e:0370:7334","remote_user":"alice","remote_port":6789,"time_local":"19/Feb/2023:14:15:20 +0000","request":"GET /page?param1=value1¶m2=value2 HTTP/2.0","request_uri":"/page?param1=value1¶m2=value2","args":"param1=value1¶m2=value2","status":404,"body_bytes_sent":0,"bytes_sent":0,"http_referer":"","http_user_agent":"Mozilla/5.0 (Linux; Android 10; Pixel 3)","http_x_forwarded_for":"","host":"example.org","request_time":0.045,"upstream":"","upstream_connect_time":0.0,"upstream_header_time":0.0,"upstream_response_time":0.0,"upstream_response_length":0,"upstream_cache_status":"","ssl_protocol":"","ssl_cipher":"","scheme":"https","request_method":"GET","server_protocol":"HTTP/2.0","pipe":".","gzip_ratio":"","http_cf_ray":"","geoip_country_code":"GB"} +{"msec":"1644997930.987","connection":123,"connection_requests":3,"pid":5678,"request_id":"9e632a5b24c18f76","request_length":234,"remote_addr":"192.168.0.1","remote_user":"jane_doe","remote_port":9876,"time_local":"19/Feb/2023:14:15:30 +0000","request":"PUT /api/update HTTP/1.1","request_uri":"/api/update","args":"","status":500,"body_bytes_sent":543,"bytes_sent":876,"http_referer":"https://example.com/page","http_user_agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64)","http_x_forwarded_for":"","host":"api.example.com","request_time":0.123,"upstream":"backend-server-2:8080","upstream_connect_time":0.045,"upstream_header_time":0.020,"upstream_response_time":0.058,"upstream_response_length":7890,"upstream_cache_status":"HIT","ssl_protocol":"TLSv1.2","ssl_cipher":"AES256-SHA256","scheme":"https","request_method":"PUT","server_protocol":"HTTP/1.1","pipe":"p","gzip_ratio":"1.8","http_cf_ray":"xyz789abc123","geoip_country_code":"CA"} +{"msec":"1644997940.234","connection":9876,"connection_requests":8,"pid":4321,"request_id":"1b6c59c8aef7d24a","request_length":456,"remote_addr":"203.0.113.1","remote_user":"","remote_port":5432,"time_local":"19/Feb/2023:14:15:40 +0000","request":"DELETE /api/resource 
HTTP/2.0","request_uri":"/api/resource","args":"","status":204,"body_bytes_sent":0,"bytes_sent":123,"http_referer":"","http_user_agent":"curl/7.68.0","http_x_forwarded_for":"","host":"api.example.com","request_time":0.032,"upstream":"backend-server-1:8080","upstream_connect_time":0.012,"upstream_header_time":0.020,"upstream_response_time":0.010,"upstream_response_length":0,"upstream_cache_status":"","ssl_protocol":"","ssl_cipher":"","scheme":"http","request_method":"DELETE","server_protocol":"HTTP/2.0","pipe":".","gzip_ratio":"","http_cf_ray":"","geoip_country_code":""} +{"msec":"1644997950.789","connection":5432,"connection_requests":12,"pid":6543,"request_id":"72692d781d0b8a4f","request_length":789,"remote_addr":"198.51.100.2","remote_user":"bob","remote_port":8765,"time_local":"19/Feb/2023:14:15:50 +0000","request":"GET /profile?user=bob HTTP/1.1","request_uri":"/profile?user=bob","args":"user=bob","status":200,"body_bytes_sent":1234,"bytes_sent":2345,"http_referer":"","http_user_agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64)","http_x_forwarded_for":"","host":"example.com","request_time":0.065,"upstream":"10.0.0.2:8080","upstream_connect_time":0.045,"upstream_header_time":0.020,"upstream_response_time":0.058,"upstream_response_length":7890,"upstream_cache_status":"MISS","ssl_protocol":"TLSv1.3","ssl_cipher":"AES128-GCM-SHA256","scheme":"https","request_method":"GET","server_protocol":"HTTP/1.1","pipe":"p","gzip_ratio":"","http_cf_ray":"","geoip_country_code":"US"} +{"msec":"1644997960.321","connection":65432,"connection_requests":7,"pid":7890,"request_id":"c3e158d41e75a9d7","request_length":321,"remote_addr":"203.0.113.2","remote_user":"","remote_port":9876,"time_local":"19/Feb/2023:14:15:60 +0000","request":"GET /dashboard HTTP/2.0","request_uri":"/dashboard","args":"","status":301,"body_bytes_sent":0,"bytes_sent":123,"http_referer":"","http_user_agent":"Mozilla/5.0 (Linux; Android 10; Pixel 3)","http_x_forwarded_for":"","host":"dashboard.example.org","request_time":0.032,"upstream":"","upstream_connect_time":0.0,"upstream_header_time":0.0,"upstream_response_time":0.0,"upstream_response_length":0,"upstream_cache_status":"","ssl_protocol":"","ssl_cipher":"","scheme":"https","request_method":"GET","server_protocol":"HTTP/2.0","pipe":".","gzip_ratio":"","http_cf_ray":"","geoip_country_code":""} +{"msec":"1644997970.555","connection":8765,"connection_requests":9,"pid":8765,"request_id":"f9f6e8235de54af4","request_length":654,"remote_addr":"10.0.0.4","remote_user":"","remote_port":12345,"time_local":"19/Feb/2023:14:15:70 +0000","request":"POST /submit-form HTTP/1.1","request_uri":"/submit-form","args":"","status":201,"body_bytes_sent":876,"bytes_sent":987,"http_referer":"","http_user_agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64)","http_x_forwarded_for":"","host":"example.com","request_time":0.045,"upstream":"backend-server-3:8080","upstream_connect_time":0.012,"upstream_header_time":0.020,"upstream_response_time":0.010,"upstream_response_length":0,"upstream_cache_status":"","ssl_protocol":"","ssl_cipher":"","scheme":"http","request_method":"POST","server_protocol":"HTTP/1.1","pipe":"p","gzip_ratio":"","http_cf_ray":"","geoip_country_code":""} +{"msec":"1644997980.987","connection":23456,"connection_requests":6,"pid":3456,"request_id":"2ec3e8859e7a406c","request_length":432,"remote_addr":"198.51.100.3","remote_user":"mary","remote_port":5678,"time_local":"19/Feb/2023:14:15:80 +0000","request":"GET /contact 
HTTP/1.1","request_uri":"/contact","args":"","status":404,"body_bytes_sent":0,"bytes_sent":0,"http_referer":"","http_user_agent":"Mozilla/5.0 (Linux; Android 10; Pixel 3)","http_x_forwarded_for":"","host":"example.org","request_time":0.032,"upstream":"","upstream_connect_time":0.0,"upstream_header_time":0.0,"upstream_response_time":0.0,"upstream_response_length":0,"upstream_cache_status":"","ssl_protocol":"","ssl_cipher":"","scheme":"https","request_method":"GET","server_protocol":"HTTP/1.1","pipe":".","gzip_ratio":"","http_cf_ray":"","geoip_country_code":"FR"} diff --git a/collectors/log2journal/tests.d/nginx-json.output b/collectors/log2journal/tests.d/nginx-json.output new file mode 100644 index 00000000000000..e7db9dcbde8cfc --- /dev/null +++ b/collectors/log2journal/tests.d/nginx-json.output @@ -0,0 +1,296 @@ +MESSAGE=GET /index.html HTTP/1.1 +NGINX_BODY_BYTES_SENT=5432 +NGINX_BYTES_SENT=6543 +NGINX_CONNECTION=12345 +NGINX_CONNECTION_REQUESTS=5 +NGINX_GEOIP_COUNTRY_CODE=US +NGINX_GZIP_RATIO=2.1 +NGINX_HOST=example.com +NGINX_HTTP_CF_RAY=abc123def456 +NGINX_HTTP_REFERER=https://example.com +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (Windows NT 10.0; Win64; x64) +NGINX_HTTP_X_FORWARDED_FOR=192.168.1.50, 10.0.0.1 +NGINX_PID=9876 +NGINX_PIPELINED=no +NGINX_QUERY_STRING=param=value +NGINX_REMOTE_ADDR=192.168.1.100 +NGINX_REMOTE_PORT=54321 +NGINX_REMOTE_USER=john_doe +NGINX_REQUEST_ID=8f3ebc1e38fbb92f +NGINX_REQUEST_LENGTH=345 +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_TIME=0.123 +NGINX_REQUEST_URI=/index.html?param=value +NGINX_SCHEME=https +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_SSL_CIPHER=AES256-SHA256 +NGINX_SSL_PROTOCOL=TLSv1.2 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIMESTAMP_SEC=1644997905.123 +NGINX_TIME_LOCAL=19/Feb/2023:14:15:05 +0000 +NGINX_UPSTREAM=10.0.0.2:8080 +NGINX_UPSTREAM_CACHE_STATUS=MISS +NGINX_UPSTREAM_CONNECT_TIME=0.045 +NGINX_UPSTREAM_HEADER_TIME=0.020 +NGINX_UPSTREAM_RESPONSE_LENGTH=7890 +NGINX_UPSTREAM_RESPONSE_TIME=0.058 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=POST /api/update HTTP/1.1 +NGINX_BODY_BYTES_SENT=0 +NGINX_BYTES_SENT=123 +NGINX_CONNECTION=54321 +NGINX_CONNECTION_REQUESTS=10 +NGINX_HOST=api.example.com +NGINX_HTTP_USER_AGENT=curl/7.68.0 +NGINX_PID=5432 +NGINX_PIPELINED=yes +NGINX_REMOTE_ADDR=10.0.0.3 +NGINX_REMOTE_PORT=12345 +NGINX_REQUEST_ID=4a7bca5e19d3f8e7 +NGINX_REQUEST_LENGTH=432 +NGINX_REQUEST_METHOD=POST +NGINX_REQUEST_TIME=0.032 +NGINX_REQUEST_URI=/api/update +NGINX_SCHEME=http +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=204 +NGINX_STATUS_FAMILY=2xx +NGINX_TIMESTAMP_SEC=1644997910.789 +NGINX_TIME_LOCAL=19/Feb/2023:14:15:10 +0000 +NGINX_UPSTREAM=backend-server-1:8080 +NGINX_UPSTREAM_CONNECT_TIME=0.012 +NGINX_UPSTREAM_HEADER_TIME=0.020 +NGINX_UPSTREAM_RESPONSE_LENGTH=0 +NGINX_UPSTREAM_RESPONSE_TIME=0.010 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=GET /page?param1=value1¶m2=value2 HTTP/2.0 +NGINX_BODY_BYTES_SENT=0 +NGINX_BYTES_SENT=0 +NGINX_CONNECTION=98765 +NGINX_CONNECTION_REQUESTS=15 +NGINX_GEOIP_COUNTRY_CODE=GB +NGINX_HOST=example.org +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (Linux; Android 10; Pixel 3) +NGINX_PID=1234 +NGINX_PIPELINED=no +NGINX_QUERY_STRING=param1=value1¶m2=value2 +NGINX_REMOTE_ADDR=2001:0db8:85a3:0000:0000:8a2e:0370:7334 +NGINX_REMOTE_PORT=6789 +NGINX_REMOTE_USER=alice +NGINX_REQUEST_ID=63f8ad2c3e1b4090 +NGINX_REQUEST_LENGTH=567 +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_TIME=0.045 +NGINX_REQUEST_URI=/page?param1=value1¶m2=value2 +NGINX_SCHEME=https +NGINX_SERVER_PROTOCOL=HTTP/2.0 +NGINX_STATUS=404 
+NGINX_STATUS_FAMILY=4xx +NGINX_TIMESTAMP_SEC=1644997920.456 +NGINX_TIME_LOCAL=19/Feb/2023:14:15:20 +0000 +NGINX_UPSTREAM_CONNECT_TIME=0.0 +NGINX_UPSTREAM_HEADER_TIME=0.0 +NGINX_UPSTREAM_RESPONSE_LENGTH=0 +NGINX_UPSTREAM_RESPONSE_TIME=0.0 +PRIORITY=5 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=PUT /api/update HTTP/1.1 +NGINX_BODY_BYTES_SENT=543 +NGINX_BYTES_SENT=876 +NGINX_CONNECTION=123 +NGINX_CONNECTION_REQUESTS=3 +NGINX_GEOIP_COUNTRY_CODE=CA +NGINX_GZIP_RATIO=1.8 +NGINX_HOST=api.example.com +NGINX_HTTP_CF_RAY=xyz789abc123 +NGINX_HTTP_REFERER=https://example.com/page +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (Windows NT 10.0; Win64; x64) +NGINX_PID=5678 +NGINX_PIPELINED=yes +NGINX_REMOTE_ADDR=192.168.0.1 +NGINX_REMOTE_PORT=9876 +NGINX_REMOTE_USER=jane_doe +NGINX_REQUEST_ID=9e632a5b24c18f76 +NGINX_REQUEST_LENGTH=234 +NGINX_REQUEST_METHOD=PUT +NGINX_REQUEST_TIME=0.123 +NGINX_REQUEST_URI=/api/update +NGINX_SCHEME=https +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_SSL_CIPHER=AES256-SHA256 +NGINX_SSL_PROTOCOL=TLSv1.2 +NGINX_STATUS=500 +NGINX_STATUS_FAMILY=5xx +NGINX_TIMESTAMP_SEC=1644997930.987 +NGINX_TIME_LOCAL=19/Feb/2023:14:15:30 +0000 +NGINX_UPSTREAM=backend-server-2:8080 +NGINX_UPSTREAM_CACHE_STATUS=HIT +NGINX_UPSTREAM_CONNECT_TIME=0.045 +NGINX_UPSTREAM_HEADER_TIME=0.020 +NGINX_UPSTREAM_RESPONSE_LENGTH=7890 +NGINX_UPSTREAM_RESPONSE_TIME=0.058 +PRIORITY=3 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=DELETE /api/resource HTTP/2.0 +NGINX_BODY_BYTES_SENT=0 +NGINX_BYTES_SENT=123 +NGINX_CONNECTION=9876 +NGINX_CONNECTION_REQUESTS=8 +NGINX_HOST=api.example.com +NGINX_HTTP_USER_AGENT=curl/7.68.0 +NGINX_PID=4321 +NGINX_PIPELINED=no +NGINX_REMOTE_ADDR=203.0.113.1 +NGINX_REMOTE_PORT=5432 +NGINX_REQUEST_ID=1b6c59c8aef7d24a +NGINX_REQUEST_LENGTH=456 +NGINX_REQUEST_METHOD=DELETE +NGINX_REQUEST_TIME=0.032 +NGINX_REQUEST_URI=/api/resource +NGINX_SCHEME=http +NGINX_SERVER_PROTOCOL=HTTP/2.0 +NGINX_STATUS=204 +NGINX_STATUS_FAMILY=2xx +NGINX_TIMESTAMP_SEC=1644997940.234 +NGINX_TIME_LOCAL=19/Feb/2023:14:15:40 +0000 +NGINX_UPSTREAM=backend-server-1:8080 +NGINX_UPSTREAM_CONNECT_TIME=0.012 +NGINX_UPSTREAM_HEADER_TIME=0.020 +NGINX_UPSTREAM_RESPONSE_LENGTH=0 +NGINX_UPSTREAM_RESPONSE_TIME=0.010 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=GET /profile?user=bob HTTP/1.1 +NGINX_BODY_BYTES_SENT=1234 +NGINX_BYTES_SENT=2345 +NGINX_CONNECTION=5432 +NGINX_CONNECTION_REQUESTS=12 +NGINX_GEOIP_COUNTRY_CODE=US +NGINX_HOST=example.com +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (Windows NT 10.0; Win64; x64) +NGINX_PID=6543 +NGINX_PIPELINED=yes +NGINX_QUERY_STRING=user=bob +NGINX_REMOTE_ADDR=198.51.100.2 +NGINX_REMOTE_PORT=8765 +NGINX_REMOTE_USER=bob +NGINX_REQUEST_ID=72692d781d0b8a4f +NGINX_REQUEST_LENGTH=789 +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_TIME=0.065 +NGINX_REQUEST_URI=/profile?user=bob +NGINX_SCHEME=https +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_SSL_CIPHER=AES128-GCM-SHA256 +NGINX_SSL_PROTOCOL=TLSv1.3 +NGINX_STATUS=200 +NGINX_STATUS_FAMILY=2xx +NGINX_TIMESTAMP_SEC=1644997950.789 +NGINX_TIME_LOCAL=19/Feb/2023:14:15:50 +0000 +NGINX_UPSTREAM=10.0.0.2:8080 +NGINX_UPSTREAM_CACHE_STATUS=MISS +NGINX_UPSTREAM_CONNECT_TIME=0.045 +NGINX_UPSTREAM_HEADER_TIME=0.020 +NGINX_UPSTREAM_RESPONSE_LENGTH=7890 +NGINX_UPSTREAM_RESPONSE_TIME=0.058 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=GET /dashboard HTTP/2.0 +NGINX_BODY_BYTES_SENT=0 +NGINX_BYTES_SENT=123 +NGINX_CONNECTION=65432 +NGINX_CONNECTION_REQUESTS=7 +NGINX_HOST=dashboard.example.org +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (Linux; Android 10; Pixel 3) +NGINX_PID=7890 +NGINX_PIPELINED=no 
+NGINX_REMOTE_ADDR=203.0.113.2 +NGINX_REMOTE_PORT=9876 +NGINX_REQUEST_ID=c3e158d41e75a9d7 +NGINX_REQUEST_LENGTH=321 +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_TIME=0.032 +NGINX_REQUEST_URI=/dashboard +NGINX_SCHEME=https +NGINX_SERVER_PROTOCOL=HTTP/2.0 +NGINX_STATUS=301 +NGINX_STATUS_FAMILY=3xx +NGINX_TIMESTAMP_SEC=1644997960.321 +NGINX_TIME_LOCAL=19/Feb/2023:14:15:60 +0000 +NGINX_UPSTREAM_CONNECT_TIME=0.0 +NGINX_UPSTREAM_HEADER_TIME=0.0 +NGINX_UPSTREAM_RESPONSE_LENGTH=0 +NGINX_UPSTREAM_RESPONSE_TIME=0.0 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=POST /submit-form HTTP/1.1 +NGINX_BODY_BYTES_SENT=876 +NGINX_BYTES_SENT=987 +NGINX_CONNECTION=8765 +NGINX_CONNECTION_REQUESTS=9 +NGINX_HOST=example.com +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (Windows NT 10.0; Win64; x64) +NGINX_PID=8765 +NGINX_PIPELINED=yes +NGINX_REMOTE_ADDR=10.0.0.4 +NGINX_REMOTE_PORT=12345 +NGINX_REQUEST_ID=f9f6e8235de54af4 +NGINX_REQUEST_LENGTH=654 +NGINX_REQUEST_METHOD=POST +NGINX_REQUEST_TIME=0.045 +NGINX_REQUEST_URI=/submit-form +NGINX_SCHEME=http +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=201 +NGINX_STATUS_FAMILY=2xx +NGINX_TIMESTAMP_SEC=1644997970.555 +NGINX_TIME_LOCAL=19/Feb/2023:14:15:70 +0000 +NGINX_UPSTREAM=backend-server-3:8080 +NGINX_UPSTREAM_CONNECT_TIME=0.012 +NGINX_UPSTREAM_HEADER_TIME=0.020 +NGINX_UPSTREAM_RESPONSE_LENGTH=0 +NGINX_UPSTREAM_RESPONSE_TIME=0.010 +PRIORITY=6 +SYSLOG_IDENTIFIER=nginx-log + +MESSAGE=GET /contact HTTP/1.1 +NGINX_BODY_BYTES_SENT=0 +NGINX_BYTES_SENT=0 +NGINX_CONNECTION=23456 +NGINX_CONNECTION_REQUESTS=6 +NGINX_GEOIP_COUNTRY_CODE=FR +NGINX_HOST=example.org +NGINX_HTTP_USER_AGENT=Mozilla/5.0 (Linux; Android 10; Pixel 3) +NGINX_PID=3456 +NGINX_PIPELINED=no +NGINX_REMOTE_ADDR=198.51.100.3 +NGINX_REMOTE_PORT=5678 +NGINX_REMOTE_USER=mary +NGINX_REQUEST_ID=2ec3e8859e7a406c +NGINX_REQUEST_LENGTH=432 +NGINX_REQUEST_METHOD=GET +NGINX_REQUEST_TIME=0.032 +NGINX_REQUEST_URI=/contact +NGINX_SCHEME=https +NGINX_SERVER_PROTOCOL=HTTP/1.1 +NGINX_STATUS=404 +NGINX_STATUS_FAMILY=4xx +NGINX_TIMESTAMP_SEC=1644997980.987 +NGINX_TIME_LOCAL=19/Feb/2023:14:15:80 +0000 +NGINX_UPSTREAM_CONNECT_TIME=0.0 +NGINX_UPSTREAM_HEADER_TIME=0.0 +NGINX_UPSTREAM_RESPONSE_LENGTH=0 +NGINX_UPSTREAM_RESPONSE_TIME=0.0 +PRIORITY=5 +SYSLOG_IDENTIFIER=nginx-log + diff --git a/collectors/log2journal/tests.sh b/collectors/log2journal/tests.sh new file mode 100755 index 00000000000000..40243886691769 --- /dev/null +++ b/collectors/log2journal/tests.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash + +if [ -f "${PWD}/log2journal" ]; then + log2journal_bin="${PWD}/log2journal" +else + log2journal_bin="$(which log2journal)" +fi + +[ -z "${log2journal_bin}" ] && echo >&2 "Cannot find log2journal binary" && exit 1 +echo >&2 "Using: ${log2journal_bin}" + +script_dir=$(dirname "$(readlink -f "$0")") +tests="${script_dir}/tests.d" + +if [ ! -d "${tests}" ]; then + echo >&2 "tests directory '${tests}' is not found." + exit 1 +fi + +# Create a random directory name in /tmp +tmp=$(mktemp -d /tmp/script_temp.XXXXXXXXXX) + +# Function to clean up the temporary directory on exit +cleanup() { + echo "Cleaning up..." 
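+    # Delete the temporary working directory created by mktemp above, together with anything the tests wrote into it.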
+ rm -rf "$tmp" +} + +# Register the cleanup function to run on script exit +trap cleanup EXIT + +# Change to the temporary directory +cd "$tmp" || exit 1 + +# ----------------------------------------------------------------------------- + +test_log2journal_config() { + local in="${1}" + local out="${2}" + shift 2 + + [ -f output ] && rm output + + printf >&2 "running: " + printf >&2 "%q " "${log2journal_bin}" "${@}" + printf >&2 "\n" + + "${log2journal_bin}" <"${in}" "${@}" >output 2>&1 + ret=$? + + [ $ret -ne 0 ] && echo >&2 "${log2journal_bin} exited with code: $ret" && cat output && exit 1 + + diff --ignore-all-space "${out}" output + [ $? -ne -0 ] && echo >&2 "${log2journal_bin} output does not match!" && exit 1 + + echo >&2 "OK" + echo >&2 + + return 0 +} + +# test yaml parsing +echo >&2 +echo >&2 "Testing full yaml config parsing..." +test_log2journal_config /dev/null "${tests}/full.output" -f "${tests}/full.yaml" --show-config || exit 1 + +echo >&2 "Testing command line parsing..." +test_log2journal_config /dev/null "${tests}/full.output" --show-config \ + --prefix=NGINX_ \ + --filename-key NGINX_LOG_FILENAME \ + --inject SYSLOG_IDENTIFIER=nginx-log \ + --inject=SYSLOG_IDENTIFIER2=nginx-log2 \ + --inject 'PRIORITY=${NGINX_STATUS}' \ + --inject='NGINX_STATUS_FAMILY=${NGINX_STATUS}${NGINX_METHOD}' \ + --rewrite 'PRIORITY=//${NGINX_STATUS}/inject,dont-stop' \ + --rewrite "PRIORITY=/^[123]/6" \ + --rewrite='PRIORITY=|^4|5' \ + '--rewrite=PRIORITY=-^5-3' \ + --rewrite "PRIORITY=;.*;4" \ + --rewrite 'NGINX_STATUS_FAMILY=|^(?[1-5])|${first_digit}xx' \ + --rewrite 'NGINX_STATUS_FAMILY=|.*|UNKNOWN' \ + --rename TEST1=TEST2 \ + --rename=TEST3=TEST4 \ + --unmatched-key MESSAGE \ + --inject-unmatched PRIORITY=1 \ + --inject-unmatched=PRIORITY2=2 \ + --include=".*" \ + --exclude ".*HELLO.*WORLD.*" \ + '(?x) # Enable PCRE2 extended mode + ^ + (?[^ ]+) \s - \s # NGINX_REMOTE_ADDR + (?[^ ]+) \s # NGINX_REMOTE_USER + \[ + (?[^\]]+) # NGINX_TIME_LOCAL + \] + \s+ " + (? + (?[A-Z]+) \s+ # NGINX_METHOD + (?[^ ]+) \s+ + HTTP/(?[^"]+) + ) + " \s+ + (?\d+) \s+ # NGINX_STATUS + (?\d+) \s+ # NGINX_BODY_BYTES_SENT + "(?[^"]*)" \s+ # NGINX_HTTP_REFERER + "(?[^"]*)" # NGINX_HTTP_USER_AGENT' \ + || exit 1 + +# ----------------------------------------------------------------------------- + +test_log2journal() { + local n="${1}" + local in="${2}" + local out="${3}" + shift 3 + + printf >&2 "running test No ${n}: " + printf >&2 "%q " "${log2journal_bin}" "${@}" + printf >&2 "\n" + echo >&2 "using as input : ${in}" + echo >&2 "expecting output: ${out}" + + [ -f output ] && rm output + + "${log2journal_bin}" <"${in}" "${@}" >output 2>&1 + ret=$? + + [ $ret -ne 0 ] && echo >&2 "${log2journal_bin} exited with code: $ret" && cat output && exit 1 + + diff "${out}" output + [ $? -ne -0 ] && echo >&2 "${log2journal_bin} output does not match! - here is what we got:" && cat output && exit 1 + + echo >&2 "OK" + echo >&2 + + return 0 +} + +echo >&2 +echo >&2 "Testing parsing and output..." 
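+# Each test below feeds the given sample log through log2journal and diffs the generated output against the matching .output fixture (see test_log2journal above).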
+ +test_log2journal 1 "${tests}/json.log" "${tests}/json.output" json +test_log2journal 2 "${tests}/json.log" "${tests}/json-include.output" json --include "OBJECT" +test_log2journal 3 "${tests}/json.log" "${tests}/json-exclude.output" json --exclude "ARRAY[^2]" +test_log2journal 4 "${tests}/nginx-json.log" "${tests}/nginx-json.output" -f "${script_dir}/log2journal.d/nginx-json.yaml" +test_log2journal 5 "${tests}/nginx-combined.log" "${tests}/nginx-combined.output" -f "${script_dir}/log2journal.d/nginx-combined.yaml" +test_log2journal 6 "${tests}/logfmt.log" "${tests}/logfmt.output" -f "${tests}/logfmt.yaml" +test_log2journal 7 "${tests}/logfmt.log" "${tests}/default.output" -f "${script_dir}/log2journal.d/default.yaml" diff --git a/collectors/macos.plugin/README.md b/collectors/macos.plugin/README.md deleted file mode 100644 index 509e22edce4753..00000000000000 --- a/collectors/macos.plugin/README.md +++ /dev/null @@ -1,16 +0,0 @@ - - -# macos.plugin - -Collects resource usage and performance data on macOS systems - -By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after Netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins. - - diff --git a/collectors/macos.plugin/README.md b/collectors/macos.plugin/README.md new file mode 120000 index 00000000000000..2ea6842e42c77c --- /dev/null +++ b/collectors/macos.plugin/README.md @@ -0,0 +1 @@ +integrations/macos.md \ No newline at end of file diff --git a/collectors/macos.plugin/integrations/macos.md b/collectors/macos.plugin/integrations/macos.md new file mode 100644 index 00000000000000..5128a5a77c2f76 --- /dev/null +++ b/collectors/macos.plugin/integrations/macos.md @@ -0,0 +1,286 @@ + + +# macOS + + + + + +Plugin: macos.plugin +Module: mach_smi + + + +## Overview + +Monitor macOS metrics for efficient operating system performance. + +The plugin uses three different methods to collect data: + - The function `sysctlbyname` is called to collect network, swap, loadavg, and boot time. + - The functtion `host_statistic` is called to collect CPU and Virtual memory data; + - The function `IOServiceGetMatchingServices` to collect storage information. + + +This collector is only supported on the following platforms: + +- macOS + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per macOS instance + +These metrics refer to hardware and network monitoring. + + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.cpu | user, nice, system, idle | percentage | +| system.ram | active, wired, throttled, compressor, inactive, purgeable, speculative, free | MiB | +| mem.swapio | io, out | KiB/s | +| mem.pgfaults | memory, cow, pagein, pageout, compress, decompress, zero_fill, reactivate, purge | faults/s | +| system.load | load1, load5, load15 | load | +| mem.swap | free, used | MiB | +| system.ipv4 | received, sent | kilobits/s | +| ipv4.tcppackets | received, sent | packets/s | +| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s | +| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s | +| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout | connections/s | +| ipv4.tcpofo | inqueue | packets/s | +| ipv4.tcpsyncookies | received, sent, failed | packets/s | +| ipv4.ecnpkts | CEP, NoECTP | packets/s | +| ipv4.udppackets | received, sent | packets/s | +| ipv4.udperrors | RcvbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s | +| ipv4.icmp | received, sent | packets/s | +| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s | +| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s | +| ipv4.packets | received, sent, forwarded, delivered | packets/s | +| ipv4.fragsout | ok, failed, created | packets/s | +| ipv4.fragsin | ok, failed, all | packets/s | +| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s | +| ipv6.packets | received, sent, forwarded, delivers | packets/s | +| ipv6.fragsout | ok, failed, all | packets/s | +| ipv6.fragsin | ok, failed, timeout, all | packets/s | +| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s | +| ipv6.icmp | received, sent | messages/s | +| ipv6.icmpredir | received, sent | redirects/s | +| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s | +| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s | +| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s | +| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s | +| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s | +| system.uptime | uptime | seconds | +| system.io | in, out | KiB/s | + +### Per disk + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| disk.io | read, writes | KiB/s | +| disk.ops | read, writes | operations/s | +| disk.util | utilization | % of time working | +| disk.iotime | reads, writes | milliseconds/s | +| disk.await | reads, writes | milliseconds/operation | +| disk.avgsz | reads, writes | KiB/operation | +| disk.svctm | svctm | milliseconds/operation | + +### Per mount point + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| disk.space | avail, used, reserved_for_root | GiB | +| disk.inodes | avail, used, reserved_for_root | inodes | + +### Per network device + + + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| net.net | received, sent | kilobits/s | +| net.packets | received, sent, multicast_received, multicast_sent | packets/s | +| net.errors | inbound, outbound | errors/s | +| net.drops | inbound | drops/s | +| net.events | frames, collisions, carrier | events/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ interface_speed ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.net | network interface ${label:device} current speed | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + +There are three sections in the file which you can configure: + +- `[plugin:macos:sysctl]` - Enable or disable monitoring for network, swap, loadavg, and boot time. +- `[plugin:macos:mach_smi]` - Enable or disable monitoring for CPU and Virtual memory. +- `[plugin:macos:iokit]` - Enable or disable monitoring for storage device. + + +
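+As a sketch, these sections appear in `netdata.conf` like this (only options whose section is shown in the examples further below are used; placing `space usage for all disks` under `[plugin:macos:iokit]` is an assumption based on that section handling storage):
+
+```ini
+[plugin:macos:sysctl]
+  system swap = yes
+[plugin:macos:mach_smi]
+  cpu utilization = yes
+  swap i/o = yes
+[plugin:macos:iokit]
+  space usage for all disks = yes
+```
+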
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| enable load average | Enable or disable monitoring of load average metrics (load1, load5, load15). | yes | no | +| system swap | Enable or disable monitoring of system swap metrics (free, used). | yes | no | +| bandwidth | Enable or disable monitoring of network bandwidth metrics (received, sent). | yes | no | +| ipv4 TCP packets | Enable or disable monitoring of IPv4 TCP total packets metrics (received, sent). | yes | no | +| ipv4 TCP errors | Enable or disable monitoring of IPv4 TCP packets metrics (Input Errors, Checksum, Retransmission segments). | yes | no | +| ipv4 TCP handshake issues | Enable or disable monitoring of IPv4 TCP handshake metrics (Established Resets, Active Opens, Passive Opens, Attempt Fails). | yes | no | +| ECN packets | Enable or disable monitoring of ECN statistics metrics (InCEPkts, InNoECTPkts). | auto | no | +| TCP SYN cookies | Enable or disable monitoring of TCP SYN cookies metrics (received, sent, failed). | auto | no | +| TCP out-of-order queue | Enable or disable monitoring of TCP out-of-order queue metrics (inqueue). | auto | no | +| TCP connection aborts | Enable or disable monitoring of TCP connection aborts metrics (Bad Data, User closed, No memory, Timeout). | auto | no | +| ipv4 UDP packets | Enable or disable monitoring of ipv4 UDP packets metrics (sent, received.). | yes | no | +| ipv4 UDP errors | Enable or disable monitoring of ipv4 UDP errors metrics (Recieved Buffer error, Input Errors, No Ports, IN Checksum Errors, Ignore Multi). | yes | no | +| ipv4 icmp packets | Enable or disable monitoring of IPv4 ICMP packets metrics (sent, received, in error, OUT error, IN Checksum error). | yes | no | +| ipv4 icmp messages | Enable or disable monitoring of ipv4 ICMP messages metrics (I/O messages, I/O Errors, In Checksum). | yes | no | +| ipv4 packets | Enable or disable monitoring of ipv4 packets metrics (received, sent, forwarded, delivered). | yes | no | +| ipv4 fragments sent | Enable or disable monitoring of IPv4 fragments sent metrics (ok, fails, creates). | yes | no | +| ipv4 fragments assembly | Enable or disable monitoring of IPv4 fragments assembly metrics (ok, failed, all). | yes | no | +| ipv4 errors | Enable or disable monitoring of IPv4 errors metrics (I/O discard, I/O HDR errors, In Addr errors, In Unknown protos, OUT No Routes). | yes | no | +| ipv6 packets | Enable or disable monitoring of IPv6 packets metrics (received, sent, forwarded, delivered). | auto | no | +| ipv6 fragments sent | Enable or disable monitoring of IPv6 fragments sent metrics (ok, failed, all). | auto | no | +| ipv6 fragments assembly | Enable or disable monitoring of IPv6 fragments assembly metrics (ok, failed, timeout, all). | auto | no | +| ipv6 errors | Enable or disable monitoring of IPv6 errors metrics (I/O Discards, In Hdr Errors, In Addr Errors, In Truncaedd Packets, I/O No Routes). | auto | no | +| icmp | Enable or disable monitoring of ICMP metrics (sent, received). | auto | no | +| icmp redirects | Enable or disable monitoring of ICMP redirects metrics (received, sent). | auto | no | +| icmp errors | Enable or disable monitoring of ICMP metrics (I/O Errors, In Checksums, In Destination Unreachable, In Packet too big, In Time Exceeds, In Parm Problem, Out Dest Unreachable, Out Timee Exceeds, Out Parm Problems.). | auto | no | +| icmp echos | Enable or disable monitoring of ICMP echos metrics (I/O Echos, I/O Echo Reply). 
| auto | no | +| icmp router | Enable or disable monitoring of ICMP router metrics (I/O Solicits, I/O Advertisements). | auto | no | +| icmp neighbor | Enable or disable monitoring of ICMP neighbor metrics (I/O Solicits, I/O Advertisements). | auto | no | +| icmp types | Enable or disable monitoring of ICMP types metrics (I/O Type1, I/O Type128, I/O Type129, Out Type133, Out Type135, In Type136, Out Type145). | auto | no | +| space usage for all disks | Enable or disable monitoring of space usage for all disks metrics (available, used, reserved for root). | yes | no | +| inodes usage for all disks | Enable or disable monitoring of inodes usage for all disks metrics (available, used, reserved for root). | yes | no | +| bandwidth | Enable or disable monitoring of bandwidth metrics (received, sent). | yes | no | +| system uptime | Enable or disable monitoring of system uptime metrics (uptime). | yes | no | +| cpu utilization | Enable or disable monitoring of CPU utilization metrics (user, nice, system, idel). | yes | no | +| system ram | Enable or disable monitoring of system RAM metrics (Active, Wired, throttled, compressor, inactive, purgeable, speculative, free). | yes | no | +| swap i/o | Enable or disable monitoring of SWAP I/O metrics (I/O Swap). | yes | no | +| memory page faults | Enable or disable monitoring of memory page faults metrics (memory, cow, I/O page, compress, decompress, zero fill, reactivate, purge). | yes | no | +| disk i/o | Enable or disable monitoring of disk I/O metrics (In, Out). | yes | no | + +
+
+#### Examples
+
+##### Disable swap monitoring.
+
+A basic example that disables swap monitoring.
+
+
Config + +```yaml +[plugin:macos:sysctl] + system swap = no +[plugin:macos:mach_smi] + swap i/o = no + +``` +
+
+##### Disable complete Machine SMI section.
+
+A basic example that disables the complete Machine SMI (`mach_smi`) section.
+
+
Config + +```yaml +[plugin:macos:mach_smi] + cpu utilization = no + system ram = no + swap i/o = no + memory page faults = no + disk i/o = no + +``` +
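+After changing any of these options, restart the Netdata Agent (for example with `sudo systemctl restart netdata`, or the appropriate method for your system) for the new settings to take effect.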
+ + diff --git a/collectors/macos.plugin/macos_fw.c b/collectors/macos.plugin/macos_fw.c index ca06f428e1cf49..75ef386b9a2d6d 100644 --- a/collectors/macos.plugin/macos_fw.c +++ b/collectors/macos.plugin/macos_fw.c @@ -435,7 +435,7 @@ int do_macos_iokit(int update_every, usec_t dt) { if (likely(do_space)) { st = rrdset_find_active_bytype_localhost("disk_space", mntbuf[i].f_mntonname); if (unlikely(!st)) { - snprintfz(title, 4096, "Disk Space Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); + snprintfz(title, sizeof(title) - 1, "Disk Space Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); st = rrdset_create_localhost( "disk_space" , mntbuf[i].f_mntonname @@ -467,7 +467,7 @@ int do_macos_iokit(int update_every, usec_t dt) { if (likely(do_inodes)) { st = rrdset_find_active_bytype_localhost("disk_inodes", mntbuf[i].f_mntonname); if (unlikely(!st)) { - snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); + snprintfz(title, sizeof(title) - 1, "Disk Files (inodes) Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); st = rrdset_create_localhost( "disk_inodes" , mntbuf[i].f_mntonname diff --git a/collectors/nfacct.plugin/README.md b/collectors/nfacct.plugin/README.md deleted file mode 100644 index ae6597a409f6dc..00000000000000 --- a/collectors/nfacct.plugin/README.md +++ /dev/null @@ -1,63 +0,0 @@ - - -# Monitor Netfilter statistics (nfacct.plugin) - -`nfacct.plugin` collects Netfilter statistics. - -## Prerequisites - -If you are using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), install the -`netdata-plugin-nfacct` package using your system package manager. - -If you built Netdata locally: - -1. install `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system. - -2. re-install Netdata from source. The installer will detect that the required libraries are now available and will also build `netdata.plugin`. - -Keep in mind that NFACCT requires root access, so the plugin is setuid to root. - -## Charts - -The plugin provides Netfilter connection tracker statistics and nfacct packet and bandwidth accounting: - -Connection tracker: - -1. Connections. -2. Changes. -3. Expectations. -4. Errors. -5. Searches. - -Netfilter accounting: - -1. Packets. -2. Bandwidth. - -## Configuration - -If you need to disable NFACCT for Netdata, edit /etc/netdata/netdata.conf and set: - -``` -[plugins] - nfacct = no -``` - -## Debugging - -You can run the plugin by hand: - -``` -sudo /usr/libexec/netdata/plugins.d/nfacct.plugin 1 debug -``` - -You will get verbose output on what the plugin does. - - diff --git a/collectors/nfacct.plugin/README.md b/collectors/nfacct.plugin/README.md new file mode 120000 index 00000000000000..ea320d1399cb30 --- /dev/null +++ b/collectors/nfacct.plugin/README.md @@ -0,0 +1 @@ +integrations/netfilter.md \ No newline at end of file diff --git a/collectors/nfacct.plugin/integrations/netfilter.md b/collectors/nfacct.plugin/integrations/netfilter.md new file mode 100644 index 00000000000000..831b6fb5b920e7 --- /dev/null +++ b/collectors/nfacct.plugin/integrations/netfilter.md @@ -0,0 +1,132 @@ + + +# Netfilter + + + + + +Plugin: nfacct.plugin +Module: nfacct.plugin + + + +## Overview + +Monitor Netfilter metrics for optimal packet filtering and manipulation. Keep tabs on packet counts, dropped packets, and error rates to secure network operations. 
+ +Netdata uses libmnl (https://www.netfilter.org/projects/libmnl/index.html) to collect information. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +This plugin needs setuid. + +### Default Behavior + +#### Auto-Detection + +This plugin uses socket to connect with netfilter to collect data + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Netfilter instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| netfilter.netlink_new | new, ignore, invalid | connections/s | +| netfilter.netlink_changes | insert, delete, delete_list | changes/s | +| netfilter.netlink_search | searched, search_restart, found | searches/s | +| netfilter.netlink_errors | icmp_error, insert_failed, drop, early_drop | events/s | +| netfilter.netlink_expect | created, deleted, new | expectations/s | +| netfilter.nfacct_packets | a dimension per nfacct object | packets/s | +| netfilter.nfacct_bytes | a dimension per nfacct object | kilobytes/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install required packages + +Install `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system. + + + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:nfacct]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
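+The plugin's options live in the `[plugin:nfacct]` section of `netdata.conf`. A minimal sketch, using only the options from the table that follows:
+
+```ini
+[plugin:nfacct]
+  update every = 1
+```
+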
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update every | Data collection frequency. | 1 | no |
+| command options | Additional parameters for the collector. | | no |
+
+
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/nfacct.plugin/plugin_nfacct.c b/collectors/nfacct.plugin/plugin_nfacct.c index 430ceab52f069f..2863cd7eb08b02 100644 --- a/collectors/nfacct.plugin/plugin_nfacct.c +++ b/collectors/nfacct.plugin/plugin_nfacct.c @@ -18,6 +18,8 @@ #define NETDATA_CHART_PRIO_NETFILTER_PACKETS 8906 #define NETDATA_CHART_PRIO_NETFILTER_BYTES 8907 +#define NFACCT_RESTART_EVERY_SECONDS 86400 // restart the plugin every this many seconds + static inline size_t mnl_buffer_size() { long s = MNL_SOCKET_BUFFER_SIZE; if(s <= 0) return 8192; @@ -745,20 +747,8 @@ void nfacct_signals() } int main(int argc, char **argv) { - stderror = stderr; clocks_init(); - - // ------------------------------------------------------------------------ - // initialization of netdata plugin - - program_name = "nfacct.plugin"; - - // disable syslog - error_log_syslog = 0; - - // set errors flood protection to 100 logs per hour - error_log_errors_per_period = 100; - error_log_throttle_period = 3600; + nd_log_initialize_for_external_plugins("nfacct.plugin"); // ------------------------------------------------------------------------ // parse command line parameters @@ -852,7 +842,7 @@ int main(int argc, char **argv) { if(unlikely(netdata_exit)) break; if(debug && iteration) - fprintf(stderr, "nfacct.plugin: iteration %zu, dt %llu usec\n" + fprintf(stderr, "nfacct.plugin: iteration %zu, dt %"PRIu64" usec\n" , iteration , dt ); @@ -879,9 +869,11 @@ int main(int argc, char **argv) { fflush(stdout); - // restart check (14400 seconds) - if(now_monotonic_sec() - started_t > 14400) break; + if (now_monotonic_sec() - started_t > NFACCT_RESTART_EVERY_SECONDS) { + collector_info("NFACCT reached my lifetime expectancy. Exiting to restart."); + fprintf(stdout, "EXIT\n"); + fflush(stdout); + exit(0); + } } - - collector_info("NFACCT process exiting"); } diff --git a/collectors/perf.plugin/README.md b/collectors/perf.plugin/README.md deleted file mode 100644 index a8bd4b0e5ee1fd..00000000000000 --- a/collectors/perf.plugin/README.md +++ /dev/null @@ -1,87 +0,0 @@ - - -# Monitor CPU performance statistics (perf.plugin) - -`perf.plugin` collects system-wide CPU performance statistics from Performance Monitoring Units (PMU) using -the `perf_event_open()` system call. - -## Important Notes - -If you are using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), you will need to install -the `netdata-plugin-perf` package using your system package manager. - -Accessing hardware PMUs requires root permissions, so the plugin is setuid to root. - -Keep in mind that the number of PMUs in a system is usually quite limited and every hardware monitoring -event for every CPU core needs a separate file descriptor to be opened. - -## Charts - -The plugin provides statistics for general hardware and software performance monitoring events: - -Hardware events: - -1. CPU cycles -2. Instructions -3. Branch instructions -4. Cache operations -5. BUS cycles -6. Stalled frontend and backend cycles - -Software events: - -1. CPU migrations -2. Alignment faults -3. Emulation faults - -Hardware cache events: - -1. L1D cache operations -2. L1D prefetch cache operations -3. L1I cache operations -4. LL cache operations -5. DTLB cache operations -6. ITLB cache operations -7. 
PBU cache operations - -## Configuration - -The plugin is disabled by default because the number of PMUs is usually quite limited and it is not desired to -allow Netdata to struggle silently for PMUs, interfering with other performance monitoring software. If you need to -enable the perf plugin, edit /etc/netdata/netdata.conf and set: - -```raw -[plugins] - perf = yes -``` - -```raw -[plugin:perf] - update every = 1 - command options = all -``` - -You can use the `command options` parameter to pick what data should be collected and which charts should be -displayed. If `all` is used, all general performance monitoring counters are probed and corresponding charts -are enabled for the available counters. You can also define a particular set of enabled charts using the -following keywords: `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, -`emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`. - -## Debugging - -You can run the plugin by hand: - -```raw -sudo /usr/libexec/netdata/plugins.d/perf.plugin 1 all debug -``` - -You will get verbose output on what the plugin does. - - diff --git a/collectors/perf.plugin/README.md b/collectors/perf.plugin/README.md new file mode 120000 index 00000000000000..fb8a0cd69644fa --- /dev/null +++ b/collectors/perf.plugin/README.md @@ -0,0 +1 @@ +integrations/cpu_performance.md \ No newline at end of file diff --git a/collectors/perf.plugin/integrations/cpu_performance.md b/collectors/perf.plugin/integrations/cpu_performance.md new file mode 100644 index 00000000000000..d3c316d2e942d0 --- /dev/null +++ b/collectors/perf.plugin/integrations/cpu_performance.md @@ -0,0 +1,192 @@ + + +# CPU performance + + + + + +Plugin: perf.plugin +Module: perf.plugin + + + +## Overview + +This collector monitors CPU performance metrics about cycles, instructions, migrations, cache operations and more. + +It uses syscall (2) to open a file descriptior to monitor the perf events. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +It needs setuid to use necessary syscall to collect perf events. Netada sets the permission during installation time. + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per CPU performance instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| perf.cpu_cycles | cpu, ref_cpu | cycles/s | +| perf.instructions | instructions | instructions/s | +| perf.instructions_per_cycle | ipc | instructions/cycle | +| perf.branch_instructions | instructions, misses | instructions/s | +| perf.cache | references, misses | operations/s | +| perf.bus_cycles | bus | cycles/s | +| perf.stalled_cycles | frontend, backend | cycles/s | +| perf.migrations | migrations | migrations | +| perf.alignment_faults | faults | faults | +| perf.emulation_faults | faults | faults | +| perf.l1d_cache | read_access, read_misses, write_access, write_misses | events/s | +| perf.l1d_cache_prefetch | prefetches | prefetches/s | +| perf.l1i_cache | read_access, read_misses | events/s | +| perf.ll_cache | read_access, read_misses, write_access, write_misses | events/s | +| perf.dtlb_cache | read_access, read_misses, write_access, write_misses | events/s | +| perf.itlb_cache | read_access, read_misses | events/s | +| perf.pbu_cache | read_access | events/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install perf plugin + +If you are [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed. + + +#### Enable the pref plugin + +The plugin is disabled by default because the number of PMUs is usually quite limited and it is not desired to allow Netdata to struggle silently for PMUs, interfering with other performance monitoring software. + +To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file. + +```bash +cd /etc/netdata # Replace this path with your Netdata config directory, if different +sudo ./edit-config netdata.conf +``` + +Change the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. + + + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:perf]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + +You can get the available options running: + +```bash +/usr/libexec/netdata/plugins.d/perf.plugin --help +```` + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 1 | no | +| command options | Command options that specify charts shown by plugin. `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, `emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`. | 1 | yes | + +
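+Besides `all` (shown in the first example below), a particular set of charts can be selected by listing keywords from the table above. A sketch (the space-separated form is an assumption):
+
+```ini
+[plugin:perf]
+  command options = cycles instructions branch
+```
+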
+
+#### Examples
+
+##### All metrics
+
+Monitor all available metrics.
+
+```yaml
+[plugin:perf]
+  command options = all
+
+```
+
+##### CPU cycles
+
+Monitor CPU cycles.
+
+
Config + +```yaml +[plugin:perf] + command options = cycles + +``` +
+ + + +## Troubleshooting + +### Debug Mode + + + + diff --git a/collectors/perf.plugin/metadata.yaml b/collectors/perf.plugin/metadata.yaml index d7539b502539c6..eada3351dcd5e1 100644 --- a/collectors/perf.plugin/metadata.yaml +++ b/collectors/perf.plugin/metadata.yaml @@ -40,7 +40,22 @@ modules: description: "" setup: prerequisites: - list: [] + list: + - title: Install perf plugin + description: | + If you are [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed. + - title: Enable the pref plugin + description: | + The plugin is disabled by default because the number of PMUs is usually quite limited and it is not desired to allow Netdata to struggle silently for PMUs, interfering with other performance monitoring software. + + To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file. + + ```bash + cd /etc/netdata # Replace this path with your Netdata config directory, if different + sudo ./edit-config netdata.conf + ``` + + Change the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. configuration: file: name: "netdata.conf" @@ -49,7 +64,7 @@ modules: options: description: | You can get the available options running: - + ```bash /usr/libexec/netdata/plugins.d/perf.plugin --help ```` @@ -62,7 +77,7 @@ modules: default_value: 1 required: false - name: command options - description: Command options that specify charts shown by plugin. + description: Command options that specify charts shown by plugin. `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, `emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`. default_value: 1 required: true examples: @@ -84,7 +99,28 @@ modules: command options = cycles troubleshooting: problems: - list: [] + list: + - name: Debug Mode + description: | + You can run `perf.plugin` with the debug option enabled, to troubleshoot issues with it. The output should give you clues as to why the collector isn't working. + + - Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + + - Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + + - Run the `perf.plugin` in debug mode: + + ```bash + ./perf.plugin 1 all debug + ``` alerts: [] metrics: folding: diff --git a/collectors/perf.plugin/perf_plugin.c b/collectors/perf.plugin/perf_plugin.c index 68c0f917d3682b..fe3b04daa67cbb 100644 --- a/collectors/perf.plugin/perf_plugin.c +++ b/collectors/perf.plugin/perf_plugin.c @@ -1283,20 +1283,8 @@ void parse_command_line(int argc, char **argv) { } int main(int argc, char **argv) { - stderror = stderr; clocks_init(); - - // ------------------------------------------------------------------------ - // initialization of netdata plugin - - program_name = "perf.plugin"; - - // disable syslog - error_log_syslog = 0; - - // set errors flood protection to 100 logs per hour - error_log_errors_per_period = 100; - error_log_throttle_period = 3600; + nd_log_initialize_for_external_plugins("perf.plugin"); parse_command_line(argc, argv); @@ -1328,7 +1316,7 @@ int main(int argc, char **argv) { if(unlikely(netdata_exit)) break; if(unlikely(debug && iteration)) - fprintf(stderr, "perf.plugin: iteration %zu, dt %llu usec\n" + fprintf(stderr, "perf.plugin: iteration %zu, dt %"PRIu64" usec\n" , iteration , dt ); diff --git a/collectors/plugins.d/README.md b/collectors/plugins.d/README.md index 1c3b50cb717ec1..0752d389bd3b38 100644 --- a/collectors/plugins.d/README.md +++ b/collectors/plugins.d/README.md @@ -14,20 +14,20 @@ from external processes, thus allowing Netdata to use **external plugins**. ## Provided External Plugins -|plugin|language|O/S|description| -|:----:|:------:|:-:|:----------| -|[apps.plugin](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md)|`C`|linux, freebsd|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.| -|[charts.d.plugin](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/README.md)|`BASH`|all|a **plugin orchestrator** for data collection modules written in `BASH` v4+.| -|[cups.plugin](https://github.com/netdata/netdata/blob/master/collectors/cups.plugin/README.md)|`C`|all|monitors **CUPS**| -|[ebpf.plugin](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md)|`C`|linux|monitors different metrics on environments using kernel internal functions.| -|[go.d.plugin](https://github.com/netdata/go.d.plugin/blob/master/README.md)|`GO`|all|collects metrics from the system, applications, or third-party APIs.| -|[ioping.plugin](https://github.com/netdata/netdata/blob/master/collectors/ioping.plugin/README.md)|`C`|all|measures disk latency.| -|[freeipmi.plugin](https://github.com/netdata/netdata/blob/master/collectors/freeipmi.plugin/README.md)|`C`|linux|collects metrics from enterprise hardware sensors, on Linux servers.| -|[nfacct.plugin](https://github.com/netdata/netdata/blob/master/collectors/nfacct.plugin/README.md)|`C`|linux|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`.| -|[xenstat.plugin](https://github.com/netdata/netdata/blob/master/collectors/xenstat.plugin/README.md)|`C`|linux|collects XenServer and XCP-ng metrics using `lxenstat`.| -|[perf.plugin](https://github.com/netdata/netdata/blob/master/collectors/perf.plugin/README.md)|`C`|linux|collects CPU performance metrics using performance monitoring units (PMU).| -|[python.d.plugin](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md)|`python`|all|a **plugin orchestrator** for 
data collection modules written in `python` v2 or v3 (both are supported).| -|[slabinfo.plugin](https://github.com/netdata/netdata/blob/master/collectors/slabinfo.plugin/README.md)|`C`|linux|collects kernel internal cache objects (SLAB) metrics.| +| plugin | language | O/S | description | +|:------------------------------------------------------------------------------------------------------:|:--------:|:--------------:|:----------------------------------------------------------------------------------------------------------------------------------------| +| [apps.plugin](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md) | `C` | linux, freebsd | monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**. | +| [charts.d.plugin](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/README.md) | `BASH` | all | a **plugin orchestrator** for data collection modules written in `BASH` v4+. | +| [cups.plugin](https://github.com/netdata/netdata/blob/master/collectors/cups.plugin/README.md) | `C` | all | monitors **CUPS** | +| [ebpf.plugin](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md) | `C` | linux | monitors different metrics on environments using kernel internal functions. | +| [go.d.plugin](https://github.com/netdata/go.d.plugin/blob/master/README.md) | `GO` | all | collects metrics from the system, applications, or third-party APIs. | +| [ioping.plugin](https://github.com/netdata/netdata/blob/master/collectors/ioping.plugin/README.md) | `C` | all | measures disk latency. | +| [freeipmi.plugin](https://github.com/netdata/netdata/blob/master/collectors/freeipmi.plugin/README.md) | `C` | linux | collects metrics from enterprise hardware sensors, on Linux servers. | +| [nfacct.plugin](https://github.com/netdata/netdata/blob/master/collectors/nfacct.plugin/README.md) | `C` | linux | collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`. | +| [xenstat.plugin](https://github.com/netdata/netdata/blob/master/collectors/xenstat.plugin/README.md) | `C` | linux | collects XenServer and XCP-ng metrics using `lxenstat`. | +| [perf.plugin](https://github.com/netdata/netdata/blob/master/collectors/perf.plugin/README.md) | `C` | linux | collects CPU performance metrics using performance monitoring units (PMU). | +| [python.d.plugin](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md) | `python` | all | a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported). | +| [slabinfo.plugin](https://github.com/netdata/netdata/blob/master/collectors/slabinfo.plugin/README.md) | `C` | linux | collects kernel internal cache objects (SLAB) metrics. | Plugin orchestrators may also be described as **modular plugins**. They are modular since they accept custom made modules to be included. Writing modules for these plugins is easier than accessing the native Netdata API directly. You will find modules already available for each orchestrator under the directory of the particular modular plugin (e.g. under python.d.plugin for the python orchestrator). Each of these modular plugins has each own methods for defining modules. Please check the examples and their documentation. @@ -154,18 +154,18 @@ every 5 seconds. There are a few environment variables that are set by `netdata` and are available for the plugin to use. 
-|variable|description| -|:------:|:----------| -|`NETDATA_USER_CONFIG_DIR`|The directory where all Netdata-related user configuration should be stored. If the plugin requires custom user configuration, this is the place the user has saved it (normally under `/etc/netdata`).| -|`NETDATA_STOCK_CONFIG_DIR`|The directory where all Netdata -related stock configuration should be stored. If the plugin is shipped with configuration files, this is the place they can be found (normally under `/usr/lib/netdata/conf.d`).| -|`NETDATA_PLUGINS_DIR`|The directory where all Netdata plugins are stored.| -|`NETDATA_USER_PLUGINS_DIRS`|The list of directories where custom plugins are stored.| -|`NETDATA_WEB_DIR`|The directory where the web files of Netdata are saved.| -|`NETDATA_CACHE_DIR`|The directory where the cache files of Netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory.| -|`NETDATA_LOG_DIR`|The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of Netdata.| -|`NETDATA_HOST_PREFIX`|This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path.| -|`NETDATA_DEBUG_FLAGS`|This is a number (probably in hex starting with `0x`), that enables certain Netdata debugging features. Check **\[[Tracing Options]]** for more information.| -|`NETDATA_UPDATE_EVERY`|The minimum number of seconds between chart refreshes. This is like the **internal clock** of Netdata (it is user configurable, defaulting to `1`). There is no meaning for a plugin to update its values more frequently than this number of seconds.| +| variable | description | +|:---------------------------:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `NETDATA_USER_CONFIG_DIR` | The directory where all Netdata-related user configuration should be stored. If the plugin requires custom user configuration, this is the place the user has saved it (normally under `/etc/netdata`). | +| `NETDATA_STOCK_CONFIG_DIR` | The directory where all Netdata -related stock configuration should be stored. If the plugin is shipped with configuration files, this is the place they can be found (normally under `/usr/lib/netdata/conf.d`). | +| `NETDATA_PLUGINS_DIR` | The directory where all Netdata plugins are stored. | +| `NETDATA_USER_PLUGINS_DIRS` | The list of directories where custom plugins are stored. | +| `NETDATA_WEB_DIR` | The directory where the web files of Netdata are saved. | +| `NETDATA_CACHE_DIR` | The directory where the cache files of Netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory. | +| `NETDATA_LOG_DIR` | The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of Netdata. | +| `NETDATA_HOST_PREFIX` | This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path. | +| `NETDATA_DEBUG_FLAGS` | This is a number (probably in hex starting with `0x`), that enables certain Netdata debugging features. Check **\[[Tracing Options]]** for more information. 
| +| `NETDATA_UPDATE_EVERY` | The minimum number of seconds between chart refreshes. This is like the **internal clock** of Netdata (it is user configurable, defaulting to `1`). There is no meaning for a plugin to update its values more frequently than this number of seconds. | ### The output of the plugin @@ -298,7 +298,7 @@ the template is: the context is giving the template of the chart. For example, if multiple charts present the same information for a different family, they should have the same `context` - this is used for looking up rendering information for the chart (colors, sizes, informational texts) and also apply alarms to it + this is used for looking up rendering information for the chart (colors, sizes, informational texts) and also apply alerts to it - `charttype` @@ -388,12 +388,12 @@ the template is: > VARIABLE [SCOPE] name = value -`VARIABLE` defines a variable that can be used in alarms. This is to used for setting constants (like the max connections a server may accept). +`VARIABLE` defines a variable that can be used in alerts. This is to used for setting constants (like the max connections a server may accept). Variables support 2 scopes: - `GLOBAL` or `HOST` to define the variable at the host level. -- `LOCAL` or `CHART` to define the variable at the chart level. Use chart-local variables when the same variable may exist for different charts (i.e. Netdata monitors 2 mysql servers, and you need to set the `max_connections` each server accepts). Using chart-local variables is the ideal to build alarm templates. +- `LOCAL` or `CHART` to define the variable at the chart level. Use chart-local variables when the same variable may exist for different charts (i.e. Netdata monitors 2 mysql servers, and you need to set the `max_connections` each server accepts). Using chart-local variables is the ideal to build alert templates. The position of the `VARIABLE` line, sets its default scope (in case you do not specify a scope). So, defining a `VARIABLE` before any `CHART`, or between `END` and `BEGIN` (outside any chart), sets `GLOBAL` scope, while defining a `VARIABLE` just after a `CHART` or a `DIMENSION`, or within the `BEGIN` - `END` block of a chart, sets `LOCAL` scope. 
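+
+For example, emitted right after the `DIMENSION` lines of a chart (a sketch; names and values are illustrative):
+
+```
+VARIABLE CHART max_connections = 151
+VARIABLE GLOBAL monitored_servers = 2
+```
+
+The first line attaches `max_connections` to the chart being defined, so an alert template can compare each server against its own limit; the second defines a host-level constant, and because its scope is given explicitly its position does not matter.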
diff --git a/collectors/plugins.d/gperf-config.txt b/collectors/plugins.d/gperf-config.txt index b8140e66c5b482..bad51367ce1abb 100644 --- a/collectors/plugins.d/gperf-config.txt +++ b/collectors/plugins.d/gperf-config.txt @@ -12,44 +12,47 @@ PARSER_KEYWORD; # # Plugins Only Keywords # -FLUSH, 97, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 1 -DISABLE, 98, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 2 -EXIT, 99, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 3 -HOST, 71, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 4 -HOST_DEFINE, 72, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 5 -HOST_DEFINE_END, 73, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 6 -HOST_LABEL, 74, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 7 +FLUSH, 97, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 1 +DISABLE, 98, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 2 +EXIT, 99, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 3 +HOST, 71, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 4 +HOST_DEFINE, 72, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 5 +HOST_DEFINE_END, 73, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 6 +HOST_LABEL, 74, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 7 # # Common keywords # -BEGIN, 12, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 8 -CHART, 32, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 9 -CLABEL, 34, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 10 -CLABEL_COMMIT, 35, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 11 -DIMENSION, 31, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 12 -END, 13, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 13 -FUNCTION, 41, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 14 -FUNCTION_RESULT_BEGIN, 42, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 15 -LABEL, 51, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 16 -OVERWRITE, 52, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 17 -SET, 11, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 18 -VARIABLE, 53, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 19 -DYNCFG_ENABLE, 101, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 20 -DYNCFG_REGISTER_MODULE, 102, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 21 -REPORT_JOB_STATUS, 110, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 22 +BEGIN, 12, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 8 +CHART, 32, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 9 +CLABEL, 34, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 10 +CLABEL_COMMIT, 35, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 11 +DIMENSION, 31, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 12 +END, 13, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 13 +FUNCTION, 41, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 14 +FUNCTION_RESULT_BEGIN, 42, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 15 +LABEL, 51, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB 
+ 16 +OVERWRITE, 52, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 17 +SET, 11, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 18 +VARIABLE, 53, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 19 +DYNCFG_ENABLE, 101, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 20 +DYNCFG_REGISTER_MODULE, 102, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 21 +DYNCFG_REGISTER_JOB, 103, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 22 +DYNCFG_RESET, 104, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23 +REPORT_JOB_STATUS, 110, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24 +DELETE_JOB, 111, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25 # # Streaming only keywords # -CLAIMED_ID, 61, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23 -BEGIN2, 2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24 -SET2, 1, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25 -END2, 3, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 26 +CLAIMED_ID, 61, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 26 +BEGIN2, 2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 27 +SET2, 1, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28 +END2, 3, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29 # # Streaming Replication keywords # -CHART_DEFINITION_END, 33, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 27 -RBEGIN, 22, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28 -RDSTATE, 23, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29 -REND, 25, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 30 -RSET, 21, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 31 -RSSTATE, 24, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 32 +CHART_DEFINITION_END, 33, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 30 +RBEGIN, 22, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 31 +RDSTATE, 23, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 32 +REND, 25, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 33 +RSET, 21, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 34 +RSSTATE, 24, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 35 diff --git a/collectors/plugins.d/gperf-hashtable.h b/collectors/plugins.d/gperf-hashtable.h index e7d20126f6388a..b327d8d6d3404a 100644 --- a/collectors/plugins.d/gperf-hashtable.h +++ b/collectors/plugins.d/gperf-hashtable.h @@ -30,12 +30,12 @@ #endif -#define GPERF_PARSER_TOTAL_KEYWORDS 32 +#define GPERF_PARSER_TOTAL_KEYWORDS 35 #define GPERF_PARSER_MIN_WORD_LENGTH 3 #define GPERF_PARSER_MAX_WORD_LENGTH 22 #define GPERF_PARSER_MIN_HASH_VALUE 3 -#define GPERF_PARSER_MAX_HASH_VALUE 41 -/* maximum key range = 39, duplicates = 0 */ +#define GPERF_PARSER_MAX_HASH_VALUE 47 +/* maximum key range = 45, duplicates = 0 */ #ifdef __GNUC__ __inline @@ -49,32 +49,32 @@ gperf_keyword_hash_function (register const char *str, register size_t len) { static unsigned char asso_values[] = { - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 16, 7, 2, 11, 0, - 8, 42, 3, 9, 42, 42, 9, 42, 0, 2, - 42, 42, 1, 3, 42, 7, 17, 42, 27, 2, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 
42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42 + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 11, 18, 0, 0, 0, + 6, 48, 9, 0, 48, 48, 20, 48, 0, 8, + 48, 48, 1, 12, 48, 20, 18, 48, 2, 0, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48 }; return len + asso_values[(unsigned char)str[1]] + asso_values[(unsigned char)str[0]]; } @@ -83,71 +83,79 @@ static PARSER_KEYWORD gperf_keywords[] = { {(char*)0}, {(char*)0}, {(char*)0}, #line 30 "gperf-config.txt" - {"END", 13, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 13}, -#line 46 "gperf-config.txt" - {"END2", 3, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 26}, -#line 53 "gperf-config.txt" - {"REND", 25, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 30}, + {"END", 13, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 13}, +#line 49 "gperf-config.txt" + {"END2", 3, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29}, +#line 56 "gperf-config.txt" + {"REND", 25, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 33}, +#line 17 "gperf-config.txt" + {"EXIT", 99, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 3}, +#line 16 "gperf-config.txt" + {"DISABLE", 98, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 2}, +#line 55 "gperf-config.txt" + {"RDSTATE", 23, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 32}, +#line 29 "gperf-config.txt" + {"DIMENSION", 31, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 12}, +#line 42 "gperf-config.txt" + {"DELETE_JOB", 111, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25}, + {(char*)0}, +#line 40 "gperf-config.txt" + {"DYNCFG_RESET", 104, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23}, +#line 37 "gperf-config.txt" + {"DYNCFG_ENABLE", 101, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 20}, +#line 26 "gperf-config.txt" + {"CHART", 32, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 9}, #line 35 "gperf-config.txt" - {"SET", 11, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 18}, -#line 45 "gperf-config.txt" - {"SET2", 1, 
PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25}, -#line 54 "gperf-config.txt" - {"RSET", 21, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 31}, + {"SET", 11, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 18}, +#line 48 "gperf-config.txt" + {"SET2", 1, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28}, +#line 57 "gperf-config.txt" + {"RSET", 21, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 34}, +#line 41 "gperf-config.txt" + {"REPORT_JOB_STATUS", 110, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24}, +#line 39 "gperf-config.txt" + {"DYNCFG_REGISTER_JOB", 103, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 22}, +#line 58 "gperf-config.txt" + {"RSSTATE", 24, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 35}, #line 18 "gperf-config.txt" - {"HOST", 71, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 4}, -#line 26 "gperf-config.txt" - {"CHART", 32, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 9}, -#line 55 "gperf-config.txt" - {"RSSTATE", 24, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 32}, + {"HOST", 71, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 4}, +#line 38 "gperf-config.txt" + {"DYNCFG_REGISTER_MODULE", 102, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 21}, #line 25 "gperf-config.txt" - {"BEGIN", 12, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 8}, -#line 44 "gperf-config.txt" - {"BEGIN2", 2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24}, -#line 51 "gperf-config.txt" - {"RBEGIN", 22, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28}, + {"BEGIN", 12, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 8}, +#line 47 "gperf-config.txt" + {"BEGIN2", 2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 27}, +#line 54 "gperf-config.txt" + {"RBEGIN", 22, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 31}, +#line 27 "gperf-config.txt" + {"CLABEL", 34, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 10}, #line 21 "gperf-config.txt" - {"HOST_LABEL", 74, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 7}, + {"HOST_LABEL", 74, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 7}, #line 19 "gperf-config.txt" - {"HOST_DEFINE", 72, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 5}, -#line 27 "gperf-config.txt" - {"CLABEL", 34, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 10}, -#line 39 "gperf-config.txt" - {"REPORT_JOB_STATUS", 110, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 22}, -#line 52 "gperf-config.txt" - {"RDSTATE", 23, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29}, -#line 20 "gperf-config.txt" - {"HOST_DEFINE_END", 73, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 6}, -#line 43 "gperf-config.txt" - {"CLAIMED_ID", 61, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23}, + {"HOST_DEFINE", 72, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 5}, +#line 53 "gperf-config.txt" + {"CHART_DEFINITION_END", 33, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 30}, +#line 46 "gperf-config.txt" + {"CLAIMED_ID", 61, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 26}, #line 15 "gperf-config.txt" - {"FLUSH", 97, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 1}, -#line 31 "gperf-config.txt" - {"FUNCTION", 41, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 14}, + {"FLUSH", 97, 
PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 1}, +#line 20 "gperf-config.txt" + {"HOST_DEFINE_END", 73, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 6}, #line 28 "gperf-config.txt" - {"CLABEL_COMMIT", 35, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 11}, -#line 50 "gperf-config.txt" - {"CHART_DEFINITION_END", 33, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 27}, -#line 37 "gperf-config.txt" - {"DYNCFG_ENABLE", 101, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 20}, -#line 16 "gperf-config.txt" - {"DISABLE", 98, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 2}, + {"CLABEL_COMMIT", 35, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 11}, +#line 31 "gperf-config.txt" + {"FUNCTION", 41, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 14}, #line 34 "gperf-config.txt" - {"OVERWRITE", 52, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 17}, -#line 29 "gperf-config.txt" - {"DIMENSION", 31, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 12}, + {"OVERWRITE", 52, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 17}, #line 33 "gperf-config.txt" - {"LABEL", 51, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 16}, -#line 17 "gperf-config.txt" - {"EXIT", 99, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 3}, - {(char*)0}, {(char*)0}, {(char*)0}, -#line 38 "gperf-config.txt" - {"DYNCFG_REGISTER_MODULE", 102, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 21}, -#line 32 "gperf-config.txt" - {"FUNCTION_RESULT_BEGIN", 42, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 15}, - {(char*)0}, {(char*)0}, {(char*)0}, {(char*)0}, + {"LABEL", 51, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 16}, #line 36 "gperf-config.txt" - {"VARIABLE", 53, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 19} + {"VARIABLE", 53, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 19}, + {(char*)0}, {(char*)0}, {(char*)0}, {(char*)0}, + {(char*)0}, {(char*)0}, {(char*)0}, {(char*)0}, + {(char*)0}, +#line 32 "gperf-config.txt" + {"FUNCTION_RESULT_BEGIN", 42, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 15} }; PARSER_KEYWORD * diff --git a/collectors/plugins.d/local_listeners.c b/collectors/plugins.d/local_listeners.c index a39de79748db6c..f2c5e688bbc6e4 100644 --- a/collectors/plugins.d/local_listeners.c +++ b/collectors/plugins.d/local_listeners.c @@ -338,25 +338,59 @@ bool read_proc_net_x(const char *filename, PROC_NET_PROTOCOLS protocol) { } // ---------------------------------------------------------------------------- - -int main(int argc __maybe_unused, char **argv __maybe_unused) { +typedef struct { + bool read_tcp; + bool read_tcp6; + bool read_udp; + bool read_udp6; +} CommandLineArguments; + +int main(int argc, char **argv) { char path[FILENAME_MAX + 1]; hashTable_key_inode_port_value = createHashTable(); netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX"); if(!netdata_configured_host_prefix) netdata_configured_host_prefix = ""; - snprintfz(path, FILENAME_MAX, "%s/proc/net/tcp", netdata_configured_host_prefix); - read_proc_net_x(path, PROC_NET_PROTOCOL_TCP); + CommandLineArguments args = {.read_tcp = false, .read_tcp6 = false, .read_udp = false, .read_udp6 = false}; + + for 
(int i = 1; i < argc; i++) { + if (strcmp("tcp", argv[i]) == 0) { + args.read_tcp = true; + continue; + } else if (strcmp("tcp6", argv[i]) == 0) { + args.read_tcp6 = true; + continue; + } else if (strcmp("udp", argv[i]) == 0) { + args.read_udp = true; + continue; + } else if (strcmp("udp6", argv[i]) == 0) { + args.read_udp6 = true; + continue; + } + } + + bool read_all_files = (!args.read_tcp && !args.read_tcp6 && !args.read_udp && !args.read_udp6); - snprintfz(path, FILENAME_MAX, "%s/proc/net/udp", netdata_configured_host_prefix); - read_proc_net_x(path, PROC_NET_PROTOCOL_UDP); + if (read_all_files || args.read_tcp) { + snprintfz(path, FILENAME_MAX, "%s/proc/net/tcp", netdata_configured_host_prefix); + read_proc_net_x(path, PROC_NET_PROTOCOL_TCP); + } - snprintfz(path, FILENAME_MAX, "%s/proc/net/tcp6", netdata_configured_host_prefix); - read_proc_net_x(path, PROC_NET_PROTOCOL_TCP6); + if (read_all_files || args.read_udp) { + snprintfz(path, FILENAME_MAX, "%s/proc/net/udp", netdata_configured_host_prefix); + read_proc_net_x(path, PROC_NET_PROTOCOL_UDP); + } - snprintfz(path, FILENAME_MAX, "%s/proc/net/udp6", netdata_configured_host_prefix); - read_proc_net_x(path, PROC_NET_PROTOCOL_UDP6); + if (read_all_files || args.read_tcp6) { + snprintfz(path, FILENAME_MAX, "%s/proc/net/tcp6", netdata_configured_host_prefix); + read_proc_net_x(path, PROC_NET_PROTOCOL_TCP6); + } + + if (read_all_files || args.read_udp6) { + snprintfz(path, FILENAME_MAX, "%s/proc/net/udp6", netdata_configured_host_prefix); + read_proc_net_x(path, PROC_NET_PROTOCOL_UDP6); + } snprintfz(path, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix); find_all_sockets_in_proc(path); diff --git a/collectors/plugins.d/plugins_d.c b/collectors/plugins.d/plugins_d.c index 08c26a198ba30c..20061ad29c49f3 100644 --- a/collectors/plugins.d/plugins_d.c +++ b/collectors/plugins.d/plugins_d.c @@ -47,8 +47,7 @@ static inline bool plugin_is_running(struct plugind *cd) { return ret; } -static void pluginsd_worker_thread_cleanup(void *arg) -{ +static void pluginsd_worker_thread_cleanup(void *arg) { struct plugind *cd = (struct plugind *)arg; worker_unregister(); @@ -143,41 +142,64 @@ static void *pluginsd_worker_thread(void *arg) { netdata_thread_cleanup_push(pluginsd_worker_thread_cleanup, arg); - struct plugind *cd = (struct plugind *)arg; - plugin_set_running(cd); + { + struct plugind *cd = (struct plugind *) arg; + plugin_set_running(cd); - size_t count = 0; + size_t count = 0; - while (service_running(SERVICE_COLLECTORS)) { - FILE *fp_child_input = NULL; - FILE *fp_child_output = netdata_popen(cd->cmd, &cd->unsafe.pid, &fp_child_input); + while(service_running(SERVICE_COLLECTORS)) { + FILE *fp_child_input = NULL; + FILE *fp_child_output = netdata_popen(cd->cmd, &cd->unsafe.pid, &fp_child_input); - if (unlikely(!fp_child_input || !fp_child_output)) { - netdata_log_error("PLUGINSD: 'host:%s', cannot popen(\"%s\", \"r\").", rrdhost_hostname(cd->host), cd->cmd); - break; - } + if(unlikely(!fp_child_input || !fp_child_output)) { + netdata_log_error("PLUGINSD: 'host:%s', cannot popen(\"%s\", \"r\").", + rrdhost_hostname(cd->host), cd->cmd); + break; + } - netdata_log_info("PLUGINSD: 'host:%s' connected to '%s' running on pid %d", - rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "PLUGINSD: 'host:%s' connected to '%s' running on pid %d", + rrdhost_hostname(cd->host), + cd->fullfilename, cd->unsafe.pid); - count = pluginsd_process(cd->host, cd, fp_child_input, fp_child_output, 0); + const 
char *plugin = strrchr(cd->fullfilename, '/'); + if(plugin) + plugin++; + else + plugin = cd->fullfilename; - netdata_log_info("PLUGINSD: 'host:%s', '%s' (pid %d) disconnected after %zu successful data collections (ENDs).", - rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, count); + char module[100]; + snprintfz(module, sizeof(module), "plugins.d[%s]", plugin); + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_TXT(NDF_MODULE, module), + ND_LOG_FIELD_TXT(NDF_NIDL_NODE, rrdhost_hostname(cd->host)), + ND_LOG_FIELD_TXT(NDF_SRC_TRANSPORT, "pluginsd"), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); - killpid(cd->unsafe.pid); + count = pluginsd_process(cd->host, cd, fp_child_input, fp_child_output, 0); - int worker_ret_code = netdata_pclose(fp_child_input, fp_child_output, cd->unsafe.pid); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "PLUGINSD: 'host:%s', '%s' (pid %d) disconnected after %zu successful data collections (ENDs).", + rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, count); - if (likely(worker_ret_code == 0)) - pluginsd_worker_thread_handle_success(cd); - else - pluginsd_worker_thread_handle_error(cd, worker_ret_code); + killpid(cd->unsafe.pid); - cd->unsafe.pid = 0; - if (unlikely(!plugin_is_enabled(cd))) - break; - } + int worker_ret_code = netdata_pclose(fp_child_input, fp_child_output, cd->unsafe.pid); + + if(likely(worker_ret_code == 0)) + pluginsd_worker_thread_handle_success(cd); + else + pluginsd_worker_thread_handle_error(cd, worker_ret_code); + + cd->unsafe.pid = 0; + + if(unlikely(!plugin_is_enabled(cd))) + break; + } + } netdata_thread_cleanup_pop(1); return NULL; @@ -217,6 +239,13 @@ void *pluginsd_main(void *ptr) // disable some plugins by default config_get_boolean(CONFIG_SECTION_PLUGINS, "slabinfo", CONFIG_BOOLEAN_NO); + config_get_boolean(CONFIG_SECTION_PLUGINS, "logs-management", +#if defined(LOGS_MANAGEMENT_DEV_MODE) + CONFIG_BOOLEAN_YES +#else + CONFIG_BOOLEAN_NO +#endif + ); // it crashes (both threads) on Alpine after we made it multi-threaded // works with "--device /dev/ipmi0", but this is not default // see https://github.com/netdata/netdata/pull/15564 for details diff --git a/collectors/plugins.d/plugins_d.h b/collectors/plugins.d/plugins_d.h index 4988b50719e07d..37c70f7e39c0c4 100644 --- a/collectors/plugins.d/plugins_d.h +++ b/collectors/plugins.d/plugins_d.h @@ -10,51 +10,16 @@ #define PLUGINSD_CMD_MAX (FILENAME_MAX*2) #define PLUGINSD_STOCK_PLUGINS_DIRECTORY_PATH 0 -#define PLUGINSD_KEYWORD_CHART "CHART" -#define PLUGINSD_KEYWORD_CHART_DEFINITION_END "CHART_DEFINITION_END" -#define PLUGINSD_KEYWORD_DIMENSION "DIMENSION" -#define PLUGINSD_KEYWORD_BEGIN "BEGIN" -#define PLUGINSD_KEYWORD_SET "SET" -#define PLUGINSD_KEYWORD_END "END" -#define PLUGINSD_KEYWORD_FLUSH "FLUSH" -#define PLUGINSD_KEYWORD_DISABLE "DISABLE" -#define PLUGINSD_KEYWORD_VARIABLE "VARIABLE" -#define PLUGINSD_KEYWORD_LABEL "LABEL" -#define PLUGINSD_KEYWORD_OVERWRITE "OVERWRITE" -#define PLUGINSD_KEYWORD_CLABEL "CLABEL" -#define PLUGINSD_KEYWORD_CLABEL_COMMIT "CLABEL_COMMIT" -#define PLUGINSD_KEYWORD_FUNCTION "FUNCTION" -#define PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN "FUNCTION_RESULT_BEGIN" -#define PLUGINSD_KEYWORD_FUNCTION_RESULT_END "FUNCTION_RESULT_END" - -#define PLUGINSD_KEYWORD_REPLAY_CHART "REPLAY_CHART" -#define PLUGINSD_KEYWORD_REPLAY_BEGIN "RBEGIN" -#define PLUGINSD_KEYWORD_REPLAY_SET "RSET" -#define PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE "RDSTATE" -#define PLUGINSD_KEYWORD_REPLAY_RRDSET_STATE "RSSTATE" -#define PLUGINSD_KEYWORD_REPLAY_END "REND" - -#define 
PLUGINSD_KEYWORD_BEGIN_V2 "BEGIN2" -#define PLUGINSD_KEYWORD_SET_V2 "SET2" -#define PLUGINSD_KEYWORD_END_V2 "END2" - -#define PLUGINSD_KEYWORD_HOST_DEFINE "HOST_DEFINE" -#define PLUGINSD_KEYWORD_HOST_DEFINE_END "HOST_DEFINE_END" -#define PLUGINSD_KEYWORD_HOST_LABEL "HOST_LABEL" -#define PLUGINSD_KEYWORD_HOST "HOST" +#define PLUGINSD_KEYWORD_FUNCTION_PAYLOAD "FUNCTION_PAYLOAD" +#define PLUGINSD_KEYWORD_FUNCTION_PAYLOAD_END "FUNCTION_PAYLOAD_END" #define PLUGINSD_KEYWORD_DYNCFG_ENABLE "DYNCFG_ENABLE" #define PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE "DYNCFG_REGISTER_MODULE" +#define PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB "DYNCFG_REGISTER_JOB" +#define PLUGINSD_KEYWORD_DYNCFG_RESET "DYNCFG_RESET" #define PLUGINSD_KEYWORD_REPORT_JOB_STATUS "REPORT_JOB_STATUS" - -#define PLUGINSD_KEYWORD_EXIT "EXIT" - -#define PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT 10 // seconds - -#define PLUGINSD_LINE_MAX_SSL_READ 512 - -#define PLUGINSD_MAX_WORDS 20 +#define PLUGINSD_KEYWORD_DELETE_JOB "DELETE_JOB" #define PLUGINSD_MAX_DIRECTORIES 20 extern char *plugin_directories[PLUGINSD_MAX_DIRECTORIES]; @@ -99,37 +64,4 @@ void pluginsd_process_thread_cleanup(void *ptr); size_t pluginsd_initialize_plugin_directories(); -#define pluginsd_function_result_begin_to_buffer(wb, transaction, code, content_type, expires) \ - buffer_sprintf(wb \ - , PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN " \"%s\" %d \"%s\" %ld\n" \ - , (transaction) ? (transaction) : "" \ - , (int)(code) \ - , (content_type) ? (content_type) : "" \ - , (long int)(expires) \ - ) - -#define pluginsd_function_result_end_to_buffer(wb) \ - buffer_strcat(wb, "\n" PLUGINSD_KEYWORD_FUNCTION_RESULT_END "\n") - -#define pluginsd_function_result_begin_to_stdout(transaction, code, content_type, expires) \ - fprintf(stdout \ - , PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN " \"%s\" %d \"%s\" %ld\n" \ - , (transaction) ? (transaction) : "" \ - , (int)(code) \ - , (content_type) ? 
(content_type) : "" \ - , (long int)(expires) \ - ) - -#define pluginsd_function_result_end_to_stdout() \ - fprintf(stdout, "\n" PLUGINSD_KEYWORD_FUNCTION_RESULT_END "\n") - -static inline void pluginsd_function_json_error(const char *transaction, int code, const char *msg) { - char buffer[PLUGINSD_LINE_MAX + 1]; - json_escape_string(buffer, msg, PLUGINSD_LINE_MAX); - - pluginsd_function_result_begin_to_stdout(transaction, code, "application/json", now_realtime_sec()); - fprintf(stdout, "{\"status\":%d,\"error_message\":\"%s\"}", code, buffer); - pluginsd_function_result_end_to_stdout(); -} - #endif /* NETDATA_PLUGINS_D_H */ diff --git a/collectors/plugins.d/pluginsd_parser.c b/collectors/plugins.d/pluginsd_parser.c index bc265a3afd37ec..3b47c6c0fe2ef9 100644 --- a/collectors/plugins.d/pluginsd_parser.c +++ b/collectors/plugins.d/pluginsd_parser.c @@ -4,12 +4,20 @@ #define LOG_FUNCTIONS false +#define SERVING_STREAMING(parser) ((parser)->repertoire == PARSER_INIT_STREAMING) +#define SERVING_PLUGINSD(parser) ((parser)->repertoire == PARSER_INIT_PLUGINSD) + static ssize_t send_to_plugin(const char *txt, void *data) { PARSER *parser = data; if(!txt || !*txt) return 0; +#ifdef ENABLE_H2O + if(parser->h2o_ctx) + return h2o_stream_write(parser->h2o_ctx, txt, strlen(txt)); +#endif + errno = 0; spinlock_lock(&parser->writer.spinlock); ssize_t bytes = -1; @@ -107,23 +115,6 @@ static inline bool pluginsd_unlock_rrdset_data_collection(PARSER *parser) { return false; } -void pluginsd_rrdset_cleanup(RRDSET *st) { - spinlock_lock(&st->pluginsd.spinlock); - - for(size_t i = 0; i < st->pluginsd.size ; i++) { - rrddim_acquired_release(st->pluginsd.rda[i]); // can be NULL - st->pluginsd.rda[i] = NULL; - } - - freez(st->pluginsd.rda); - st->pluginsd.collector_tid = 0; - st->pluginsd.rda = NULL; - st->pluginsd.size = 0; - st->pluginsd.pos = 0; - - spinlock_unlock(&st->pluginsd.spinlock); -} - static inline void pluginsd_unlock_previous_scope_chart(PARSER *parser, const char *keyword, bool stale) { if(unlikely(pluginsd_unlock_rrdset_data_collection(parser))) { if(stale) @@ -147,7 +138,12 @@ static inline void pluginsd_unlock_previous_scope_chart(PARSER *parser, const ch static inline void pluginsd_clear_scope_chart(PARSER *parser, const char *keyword) { pluginsd_unlock_previous_scope_chart(parser, keyword, true); + + if(parser->user.cleanup_slots && parser->user.st) + rrdset_pluginsd_receive_unslot(parser->user.st); + parser->user.st = NULL; + parser->user.cleanup_slots = false; } static inline bool pluginsd_set_scope_chart(PARSER *parser, RRDSET *st, const char *keyword) { @@ -157,11 +153,12 @@ static inline bool pluginsd_set_scope_chart(PARSER *parser, RRDSET *st, const ch if(unlikely(old_collector_tid)) { if(old_collector_tid != my_collector_tid) { - error_limit_static_global_var(erl, 1, 0); - error_limit(&erl, "PLUGINSD: keyword %s: 'host:%s/chart:%s' is collected twice (my tid %d, other collector tid %d)", - keyword ? keyword : "UNKNOWN", - rrdhost_hostname(st->rrdhost), rrdset_id(st), - my_collector_tid, old_collector_tid); + nd_log_limit_static_global_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_WARNING, + "PLUGINSD: keyword %s: 'host:%s/chart:%s' is collected twice (my tid %d, other collector tid %d)", + keyword ? 
keyword : "UNKNOWN", + rrdhost_hostname(st->rrdhost), rrdset_id(st), + my_collector_tid, old_collector_tid); return false; } @@ -173,61 +170,141 @@ static inline bool pluginsd_set_scope_chart(PARSER *parser, RRDSET *st, const ch pluginsd_clear_scope_chart(parser, keyword); - size_t dims = dictionary_entries(st->rrddim_root_index); - if(unlikely(st->pluginsd.size < dims)) { - st->pluginsd.rda = reallocz(st->pluginsd.rda, dims * sizeof(RRDDIM_ACQUIRED *)); + st->pluginsd.pos = 0; + parser->user.st = st; + parser->user.cleanup_slots = false; + + return true; +} + +static inline void pluginsd_rrddim_put_to_slot(PARSER *parser, RRDSET *st, RRDDIM *rd, ssize_t slot, bool obsolete) { + size_t wanted_size = st->pluginsd.size; + + if(slot >= 1) { + st->pluginsd.dims_with_slots = true; + wanted_size = slot; + } + else { + st->pluginsd.dims_with_slots = false; + wanted_size = dictionary_entries(st->rrddim_root_index); + } + + if(wanted_size > st->pluginsd.size) { + st->pluginsd.prd_array = reallocz(st->pluginsd.prd_array, wanted_size * sizeof(struct pluginsd_rrddim)); // initialize the empty slots - for(ssize_t i = (ssize_t)dims - 1; i >= (ssize_t)st->pluginsd.size ;i--) - st->pluginsd.rda[i] = NULL; + for(ssize_t i = (ssize_t) wanted_size - 1; i >= (ssize_t) st->pluginsd.size; i--) { + st->pluginsd.prd_array[i].rda = NULL; + st->pluginsd.prd_array[i].rd = NULL; + st->pluginsd.prd_array[i].id = NULL; + } - st->pluginsd.size = dims; + st->pluginsd.size = wanted_size; } - st->pluginsd.pos = 0; - parser->user.st = st; + if(st->pluginsd.dims_with_slots) { + struct pluginsd_rrddim *prd = &st->pluginsd.prd_array[slot - 1]; - return true; + if(prd->rd != rd) { + prd->rda = rrddim_find_and_acquire(st, string2str(rd->id)); + prd->rd = rrddim_acquired_to_rrddim(prd->rda); + prd->id = string2str(prd->rd->id); + } + + if(obsolete) + parser->user.cleanup_slots = true; + } } -static inline RRDDIM *pluginsd_acquire_dimension(RRDHOST *host, RRDSET *st, const char *dimension, const char *cmd) { +static inline RRDDIM *pluginsd_acquire_dimension(RRDHOST *host, RRDSET *st, const char *dimension, ssize_t slot, const char *cmd) { if (unlikely(!dimension || !*dimension)) { netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s, without a dimension.", rrdhost_hostname(host), rrdset_id(st), cmd); return NULL; } - if(unlikely(st->pluginsd.pos >= st->pluginsd.size)) - st->pluginsd.pos = 0; + if (unlikely(!st->pluginsd.size)) { + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s, but the chart has no dimensions.", + rrdhost_hostname(host), rrdset_id(st), cmd); + return NULL; + } + + struct pluginsd_rrddim *prd; + RRDDIM *rd; + + if(likely(st->pluginsd.dims_with_slots)) { + // caching with slots - RRDDIM_ACQUIRED *rda = st->pluginsd.rda[st->pluginsd.pos]; + if(unlikely(slot < 1 || slot > st->pluginsd.size)) { + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s with slot %zd, but slots in the range [1 - %u] are expected.", + rrdhost_hostname(host), rrdset_id(st), cmd, slot, st->pluginsd.size); + return NULL; + } + + prd = &st->pluginsd.prd_array[slot - 1]; - if(likely(rda)) { - RRDDIM *rd = rrddim_acquired_to_rrddim(rda); - if (likely(rd && string_strcmp(rd->id, dimension) == 0)) { - // we found a cached RDA - st->pluginsd.pos++; + rd = prd->rd; + if(likely(rd)) { +#ifdef NETDATA_INTERNAL_CHECKS + if(strcmp(prd->id, dimension) != 0) { + ssize_t t; + for(t = 0; t < st->pluginsd.size ;t++) { + if (strcmp(st->pluginsd.prd_array[t].id, dimension) == 0) + break; + } + if(t >= st->pluginsd.size) + t = -1; + + 
internal_fatal(true, + "PLUGINSD: expected to find dimension '%s' on slot %zd, but found '%s', " + "the right slot is %zd", + dimension, slot, prd->id, t); + } +#endif return rd; } - else { - // the collector is sending dimensions in a different order - // release the previous one, to reuse this slot - rrddim_acquired_release(rda); - st->pluginsd.rda[st->pluginsd.pos] = NULL; + } + else { + // caching without slots + + if(unlikely(st->pluginsd.pos >= st->pluginsd.size)) + st->pluginsd.pos = 0; + + prd = &st->pluginsd.prd_array[st->pluginsd.pos++]; + + rd = prd->rd; + if(likely(rd)) { + const char *id = prd->id; + + if(strcmp(id, dimension) == 0) { + // we found it cached + return rd; + } + else { + // the cached one is not good for us + rrddim_acquired_release(prd->rda); + prd->rda = NULL; + prd->rd = NULL; + prd->id = NULL; + } } } - rda = rrddim_find_and_acquire(st, dimension); + // we need to find the dimension and set it to prd + + RRDDIM_ACQUIRED *rda = rrddim_find_and_acquire(st, dimension); if (unlikely(!rda)) { netdata_log_error("PLUGINSD: 'host:%s/chart:%s/dim:%s' got a %s but dimension does not exist.", - rrdhost_hostname(host), rrdset_id(st), dimension, cmd); + rrdhost_hostname(host), rrdset_id(st), dimension, cmd); return NULL; } - st->pluginsd.rda[st->pluginsd.pos++] = rda; + prd->rda = rda; + prd->rd = rd = rrddim_acquired_to_rrddim(rda); + prd->id = string2str(rd->id); - return rrddim_acquired_to_rrddim(rda); + return rd; } static inline RRDSET *pluginsd_find_chart(RRDHOST *host, const char *chart, const char *cmd) { @@ -245,20 +322,89 @@ static inline RRDSET *pluginsd_find_chart(RRDHOST *host, const char *chart, cons return st; } +static inline ssize_t pluginsd_parse_rrd_slot(char **words, size_t num_words) { + ssize_t slot = -1; + char *id = get_word(words, num_words, 1); + if(id && id[0] == PLUGINSD_KEYWORD_SLOT[0] && id[1] == PLUGINSD_KEYWORD_SLOT[1] && + id[2] == PLUGINSD_KEYWORD_SLOT[2] && id[3] == PLUGINSD_KEYWORD_SLOT[3] && id[4] == ':') { + slot = (ssize_t) str2ull_encoded(&id[5]); + if(slot < 0) slot = 0; // to make the caller increment its idx of the words + } + + return slot; +} + +static inline void pluginsd_rrdset_cache_put_to_slot(PARSER *parser, RRDSET *st, ssize_t slot, bool obsolete) { + // clean possible old cached data + rrdset_pluginsd_receive_unslot(st); + + if(unlikely(slot < 1 || slot >= INT32_MAX)) + return; + + RRDHOST *host = st->rrdhost; + + if(unlikely((size_t)slot > host->rrdpush.receive.pluginsd_chart_slots.size)) { + spinlock_lock(&host->rrdpush.receive.pluginsd_chart_slots.spinlock); + size_t old_slots = host->rrdpush.receive.pluginsd_chart_slots.size; + size_t new_slots = (old_slots < PLUGINSD_MIN_RRDSET_POINTERS_CACHE) ? 
PLUGINSD_MIN_RRDSET_POINTERS_CACHE : old_slots * 2; + + if(new_slots < (size_t)slot) + new_slots = slot; + + host->rrdpush.receive.pluginsd_chart_slots.array = + reallocz(host->rrdpush.receive.pluginsd_chart_slots.array, new_slots * sizeof(RRDSET *)); + + for(size_t i = old_slots; i < new_slots ;i++) + host->rrdpush.receive.pluginsd_chart_slots.array[i] = NULL; + + host->rrdpush.receive.pluginsd_chart_slots.size = new_slots; + spinlock_unlock(&host->rrdpush.receive.pluginsd_chart_slots.spinlock); + } + + host->rrdpush.receive.pluginsd_chart_slots.array[slot - 1] = st; + st->pluginsd.last_slot = (int32_t)slot - 1; + parser->user.cleanup_slots = obsolete; +} + +static inline RRDSET *pluginsd_rrdset_cache_get_from_slot(PARSER *parser, RRDHOST *host, const char *id, ssize_t slot, const char *keyword) { + if(unlikely(slot < 1 || (size_t)slot > host->rrdpush.receive.pluginsd_chart_slots.size)) + return pluginsd_find_chart(host, id, keyword); + + RRDSET *st = host->rrdpush.receive.pluginsd_chart_slots.array[slot - 1]; + + if(!st) { + st = pluginsd_find_chart(host, id, keyword); + if(st) + pluginsd_rrdset_cache_put_to_slot(parser, st, slot, rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)); + } + else { + internal_fatal(string_strcmp(st->id, id) != 0, + "PLUGINSD: wrong chart in slot %zd, expected '%s', found '%s'", + slot - 1, id, string2str(st->id)); + } + + return st; +} + static inline PARSER_RC PLUGINSD_DISABLE_PLUGIN(PARSER *parser, const char *keyword, const char *msg) { parser->user.enabled = 0; if(keyword && msg) { - error_limit_static_global_var(erl, 1, 0); - error_limit(&erl, "PLUGINSD: keyword %s: %s", keyword, msg); + nd_log_limit_static_global_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_INFO, + "PLUGINSD: keyword %s: %s", keyword, msg); } return PARSER_RC_ERROR; } static inline PARSER_RC pluginsd_set(char **words, size_t num_words, PARSER *parser) { - char *dimension = get_word(words, num_words, 1); - char *value = get_word(words, num_words, 2); + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *dimension = get_word(words, num_words, idx++); + char *value = get_word(words, num_words, idx++); RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_SET); if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); @@ -266,7 +412,7 @@ static inline PARSER_RC pluginsd_set(char **words, size_t num_words, PARSER *par RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_SET, PLUGINSD_KEYWORD_CHART); if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, PLUGINSD_KEYWORD_SET); + RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_SET); if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); st->pluginsd.set = true; @@ -282,13 +428,17 @@ static inline PARSER_RC pluginsd_set(char **words, size_t num_words, PARSER *par } static inline PARSER_RC pluginsd_begin(char **words, size_t num_words, PARSER *parser) { - char *id = get_word(words, num_words, 1); - char *microseconds_txt = get_word(words, num_words, 2); + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *id = get_word(words, num_words, idx++); + char *microseconds_txt = get_word(words, num_words, idx++); RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_BEGIN); if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - RRDSET *st = pluginsd_find_chart(host, id, 
PLUGINSD_KEYWORD_BEGIN); + RRDSET *st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_BEGIN); if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_BEGIN)) @@ -329,8 +479,9 @@ static inline PARSER_RC pluginsd_begin(char **words, size_t num_words, PARSER *p } static inline PARSER_RC pluginsd_end(char **words, size_t num_words, PARSER *parser) { - UNUSED(words); - UNUSED(num_words); + char *tv_sec = get_word(words, num_words, 1); + char *tv_usec = get_word(words, num_words, 2); + char *pending_rrdset_next = get_word(words, num_words, 3); RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_END); if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); @@ -344,16 +495,22 @@ static inline PARSER_RC pluginsd_end(char **words, size_t num_words, PARSER *par pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_END); parser->user.data_collections_count++; - struct timeval now; - now_realtime_timeval(&now); - rrdset_timed_done(st, now, /* pending_rrdset_next = */ false); + struct timeval tv = { + .tv_sec = (tv_sec && *tv_sec) ? str2ll(tv_sec, NULL) : 0, + .tv_usec = (tv_usec && *tv_usec) ? str2ll(tv_usec, NULL) : 0 + }; + + if(!tv.tv_sec) + now_realtime_timeval(&tv); + + rrdset_timed_done(st, tv, pending_rrdset_next && *pending_rrdset_next ? true : false); return PARSER_RC_OK; } static void pluginsd_host_define_cleanup(PARSER *parser) { string_freez(parser->user.host_define.hostname); - dictionary_destroy(parser->user.host_define.rrdlabels); + rrdlabels_destroy(parser->user.host_define.rrdlabels); parser->user.host_define.hostname = NULL; parser->user.host_define.rrdlabels = NULL; @@ -390,17 +547,17 @@ static inline PARSER_RC pluginsd_host_define(char **words, size_t num_words, PAR return PARSER_RC_OK; } -static inline PARSER_RC pluginsd_host_dictionary(char **words, size_t num_words, PARSER *parser, DICTIONARY *dict, const char *keyword) { +static inline PARSER_RC pluginsd_host_dictionary(char **words, size_t num_words, PARSER *parser, RRDLABELS *labels, const char *keyword) { char *name = get_word(words, num_words, 1); char *value = get_word(words, num_words, 2); if(!name || !*name || !value) return PLUGINSD_DISABLE_PLUGIN(parser, keyword, "missing parameters"); - if(!parser->user.host_define.parsing_host || !dict) + if(!parser->user.host_define.parsing_host || !labels) return PLUGINSD_DISABLE_PLUGIN(parser, keyword, "host is not defined, send " PLUGINSD_KEYWORD_HOST_DEFINE " before this"); - rrdlabels_add(dict, name, value, RRDLABEL_SRC_CONFIG); + rrdlabels_add(labels, name, value, RRDLABEL_SRC_CONFIG); return PARSER_RC_OK; } @@ -416,30 +573,29 @@ static inline PARSER_RC pluginsd_host_define_end(char **words __maybe_unused, si return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE_END, "missing initialization, send " PLUGINSD_KEYWORD_HOST_DEFINE " before this"); RRDHOST *host = rrdhost_find_or_create( - string2str(parser->user.host_define.hostname), - string2str(parser->user.host_define.hostname), - parser->user.host_define.machine_guid_str, - "Netdata Virtual Host 1.0", - netdata_configured_timezone, - netdata_configured_abbrev_timezone, - netdata_configured_utc_offset, - NULL, - program_name, - program_version, - default_rrd_update_every, - default_rrd_history_entries, - default_rrd_memory_mode, - default_health_enabled, - default_rrdpush_enabled, - default_rrdpush_destination, - default_rrdpush_api_key, - default_rrdpush_send_charts_matching, - 
default_rrdpush_enable_replication, - default_rrdpush_seconds_to_replicate, - default_rrdpush_replication_step, - rrdhost_labels_to_system_info(parser->user.host_define.rrdlabels), - false - ); + string2str(parser->user.host_define.hostname), + string2str(parser->user.host_define.hostname), + parser->user.host_define.machine_guid_str, + "Netdata Virtual Host 1.0", + netdata_configured_timezone, + netdata_configured_abbrev_timezone, + netdata_configured_utc_offset, + NULL, + program_name, + program_version, + default_rrd_update_every, + default_rrd_history_entries, + default_rrd_memory_mode, + default_health_enabled, + default_rrdpush_enabled, + default_rrdpush_destination, + default_rrdpush_api_key, + default_rrdpush_send_charts_matching, + default_rrdpush_enable_replication, + default_rrdpush_seconds_to_replicate, + default_rrdpush_replication_step, + rrdhost_labels_to_system_info(parser->user.host_define.rrdlabels), + false); rrdhost_option_set(host, RRDHOST_OPTION_VIRTUAL_HOST); @@ -489,18 +645,22 @@ static inline PARSER_RC pluginsd_chart(char **words, size_t num_words, PARSER *p RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CHART); if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - char *type = get_word(words, num_words, 1); - char *name = get_word(words, num_words, 2); - char *title = get_word(words, num_words, 3); - char *units = get_word(words, num_words, 4); - char *family = get_word(words, num_words, 5); - char *context = get_word(words, num_words, 6); - char *chart = get_word(words, num_words, 7); - char *priority_s = get_word(words, num_words, 8); - char *update_every_s = get_word(words, num_words, 9); - char *options = get_word(words, num_words, 10); - char *plugin = get_word(words, num_words, 11); - char *module = get_word(words, num_words, 12); + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *type = get_word(words, num_words, idx++); + char *name = get_word(words, num_words, idx++); + char *title = get_word(words, num_words, idx++); + char *units = get_word(words, num_words, idx++); + char *family = get_word(words, num_words, idx++); + char *context = get_word(words, num_words, idx++); + char *chart = get_word(words, num_words, idx++); + char *priority_s = get_word(words, num_words, idx++); + char *update_every_s = get_word(words, num_words, idx++); + char *options = get_word(words, num_words, idx++); + char *plugin = get_word(words, num_words, idx++); + char *module = get_word(words, num_words, idx++); // parse the id from type char *id = NULL; @@ -567,14 +727,15 @@ static inline PARSER_RC pluginsd_chart(char **words, size_t num_words, PARSER *p module, priority, update_every, chart_type); + bool obsolete = false; if (likely(st)) { if (options && *options) { if (strstr(options, "obsolete")) { - pluginsd_rrdset_cleanup(st); - rrdset_is_obsolete(st); + rrdset_is_obsolete___safe_from_collector_thread(st); + obsolete = true; } else - rrdset_isnot_obsolete(st); + rrdset_isnot_obsolete___safe_from_collector_thread(st); if (strstr(options, "detail")) rrdset_flag_set(st, RRDSET_FLAG_DETAIL); @@ -592,13 +753,15 @@ static inline PARSER_RC pluginsd_chart(char **words, size_t num_words, PARSER *p rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST); } else { - rrdset_isnot_obsolete(st); + rrdset_isnot_obsolete___safe_from_collector_thread(st); rrdset_flag_clear(st, RRDSET_FLAG_DETAIL); rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST); } if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_CHART)) 
return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + pluginsd_rrdset_cache_put_to_slot(parser, st, slot, obsolete); } else pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_CHART); @@ -649,12 +812,16 @@ static inline PARSER_RC pluginsd_chart_definition_end(char **words, size_t num_w } static inline PARSER_RC pluginsd_dimension(char **words, size_t num_words, PARSER *parser) { - char *id = get_word(words, num_words, 1); - char *name = get_word(words, num_words, 2); - char *algorithm = get_word(words, num_words, 3); - char *multiplier_s = get_word(words, num_words, 4); - char *divisor_s = get_word(words, num_words, 5); - char *options = get_word(words, num_words, 6); + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *id = get_word(words, num_words, idx++); + char *name = get_word(words, num_words, idx++); + char *algorithm = get_word(words, num_words, idx++); + char *multiplier_s = get_word(words, num_words, idx++); + char *divisor_s = get_word(words, num_words, idx++); + char *options = get_word(words, num_words, idx++); RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_DIMENSION); if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); @@ -693,11 +860,14 @@ static inline PARSER_RC pluginsd_dimension(char **words, size_t num_words, PARSE int unhide_dimension = 1; rrddim_option_clear(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS); + bool obsolete = false; if (options && *options) { - if (strstr(options, "obsolete") != NULL) - rrddim_is_obsolete(st, rd); + if (strstr(options, "obsolete") != NULL) { + obsolete = true; + rrddim_is_obsolete___safe_from_collector_thread(st, rd); + } else - rrddim_isnot_obsolete(st, rd); + rrddim_isnot_obsolete___safe_from_collector_thread(st, rd); unhide_dimension = !strstr(options, "hidden"); @@ -705,8 +875,9 @@ static inline PARSER_RC pluginsd_dimension(char **words, size_t num_words, PARSE rrddim_option_set(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS); if (strstr(options, "nooverflow") != NULL) rrddim_option_set(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS); - } else - rrddim_isnot_obsolete(st, rd); + } + else + rrddim_isnot_obsolete___safe_from_collector_thread(st, rd); bool should_update_dimension = false; @@ -724,6 +895,8 @@ static inline PARSER_RC pluginsd_dimension(char **words, size_t num_words, PARSE rrdhost_flag_set(rd->rrdset->rrdhost, RRDHOST_FLAG_METADATA_UPDATE); } + pluginsd_rrddim_put_to_slot(parser, st, rd, slot, obsolete); + return PARSER_RC_OK; } @@ -733,14 +906,16 @@ static inline PARSER_RC pluginsd_dimension(char **words, size_t num_words, PARSE struct inflight_function { int code; int timeout; - BUFFER *destination_wb; STRING *function; - void (*callback)(BUFFER *wb, int code, void *callback_data); - void *callback_data; + BUFFER *result_body_wb; + rrd_function_result_callback_t result_cb; + void *result_cb_data; usec_t timeout_ut; usec_t started_ut; usec_t sent_ut; const char *payload; + PARSER *parser; + bool virtual; }; static void inflight_functions_insert_callback(const DICTIONARY_ITEM *item, void *func, void *parser_ptr) { @@ -751,42 +926,44 @@ static void inflight_functions_insert_callback(const DICTIONARY_ITEM *item, void // leave this code as default, so that when the dictionary is destroyed this will be sent back to the caller pf->code = HTTP_RESP_GATEWAY_TIMEOUT; + const char *transaction = dictionary_acquired_item_name(item); + char buffer[2048 + 1]; - snprintfz(buffer, 2048, "%s %s %d \"%s\"\n", + snprintfz(buffer, 
sizeof(buffer) - 1, "%s %s %d \"%s\"\n", pf->payload ? "FUNCTION_PAYLOAD" : "FUNCTION", - dictionary_acquired_item_name(item), + transaction, pf->timeout, string2str(pf->function)); // send the command to the plugin - int ret = send_to_plugin(buffer, parser); + ssize_t ret = send_to_plugin(buffer, parser); pf->sent_ut = now_realtime_usec(); if(ret < 0) { - netdata_log_error("FUNCTION: failed to send function to plugin, error %d", ret); - rrd_call_function_error(pf->destination_wb, "Failed to communicate with collector", HTTP_RESP_BACKEND_FETCH_FAILED); + netdata_log_error("FUNCTION '%s': failed to send it to the plugin, error %zd", string2str(pf->function), ret); + rrd_call_function_error(pf->result_body_wb, "Failed to communicate with collector", HTTP_RESP_SERVICE_UNAVAILABLE); } else { internal_error(LOG_FUNCTIONS, - "FUNCTION '%s' with transaction '%s' sent to collector (%d bytes, in %llu usec)", + "FUNCTION '%s' with transaction '%s' sent to collector (%zd bytes, in %"PRIu64" usec)", string2str(pf->function), dictionary_acquired_item_name(item), ret, pf->sent_ut - pf->started_ut); } if (!pf->payload) return; - + // send the payload to the plugin ret = send_to_plugin(pf->payload, parser); if(ret < 0) { - netdata_log_error("FUNCTION_PAYLOAD: failed to send function to plugin, error %d", ret); - rrd_call_function_error(pf->destination_wb, "Failed to communicate with collector", HTTP_RESP_BACKEND_FETCH_FAILED); + netdata_log_error("FUNCTION_PAYLOAD '%s': failed to send function to plugin, error %zd", string2str(pf->function), ret); + rrd_call_function_error(pf->result_body_wb, "Failed to communicate with collector", HTTP_RESP_SERVICE_UNAVAILABLE); } else { internal_error(LOG_FUNCTIONS, - "FUNCTION_PAYLOAD '%s' with transaction '%s' sent to collector (%d bytes, in %llu usec)", + "FUNCTION_PAYLOAD '%s' with transaction '%s' sent to collector (%zd bytes, in %"PRIu64" usec)", string2str(pf->function), dictionary_acquired_item_name(item), ret, pf->sent_ut - pf->started_ut); } @@ -798,23 +975,90 @@ static bool inflight_functions_conflict_callback(const DICTIONARY_ITEM *item __m struct inflight_function *pf = new_func; netdata_log_error("PLUGINSD_PARSER: duplicate UUID on pending function '%s' detected. 
Ignoring the second one.", string2str(pf->function)); - pf->code = rrd_call_function_error(pf->destination_wb, "This request is already in progress", HTTP_RESP_BAD_REQUEST); - pf->callback(pf->destination_wb, pf->code, pf->callback_data); + pf->code = rrd_call_function_error(pf->result_body_wb, "This request is already in progress", HTTP_RESP_BAD_REQUEST); + pf->result_cb(pf->result_body_wb, pf->code, pf->result_cb_data); string_freez(pf->function); return false; } -static void inflight_functions_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func, void *parser_ptr __maybe_unused) { +void delete_job_finalize(struct parser *parser __maybe_unused, struct configurable_plugin *plug, const char *fnc_sig, int code) { + if (code != DYNCFG_VFNC_RET_CFG_ACCEPTED) + return; + + char *params_local = strdupz(fnc_sig); + char *words[DYNCFG_MAX_WORDS]; + size_t words_c = quoted_strings_splitter(params_local, words, DYNCFG_MAX_WORDS, isspace_map_pluginsd); + + if (words_c != 3) { + netdata_log_error("PLUGINSD_PARSER: invalid number of parameters for delete_job"); + freez(params_local); + return; + } + + const char *module = words[1]; + const char *job = words[2]; + + delete_job(plug, module, job); + + unlink_job(plug->name, module, job); + + rrdpush_send_job_deleted(localhost, plug->name, module, job); + + freez(params_local); +} + +void set_job_finalize(struct parser *parser __maybe_unused, struct configurable_plugin *plug __maybe_unused, const char *fnc_sig, int code) { + if (code != DYNCFG_VFNC_RET_CFG_ACCEPTED) + return; + + char *params_local = strdupz(fnc_sig); + char *words[DYNCFG_MAX_WORDS]; + size_t words_c = quoted_strings_splitter(params_local, words, DYNCFG_MAX_WORDS, isspace_map_pluginsd); + + if (words_c != 3) { + netdata_log_error("PLUGINSD_PARSER: invalid number of parameters for set_job_config"); + freez(params_local); + return; + } + + const char *module_name = get_word(words, words_c, 1); + const char *job_name = get_word(words, words_c, 2); + + if (register_job(parser->user.host->configurable_plugins, parser->user.cd->configuration->name, module_name, job_name, JOB_TYPE_USER, JOB_FLG_USER_CREATED, 1)) { + freez(params_local); + return; + } + + // only send this if it is not existing already (register_job cares for that) + rrdpush_send_dyncfg_reg_job(localhost, parser->user.cd->configuration->name, module_name, job_name, JOB_TYPE_USER, JOB_FLG_USER_CREATED); + + freez(params_local); +} + +static void inflight_functions_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func, void *parser_ptr) { struct inflight_function *pf = func; + struct parser *parser = (struct parser *)parser_ptr; internal_error(LOG_FUNCTIONS, - "FUNCTION '%s' result of transaction '%s' received from collector (%zu bytes, request %llu usec, response %llu usec)", + "FUNCTION '%s' result of transaction '%s' received from collector (%zu bytes, request %"PRIu64" usec, response %"PRIu64" usec)", string2str(pf->function), dictionary_acquired_item_name(item), - buffer_strlen(pf->destination_wb), pf->sent_ut - pf->started_ut, now_realtime_usec() - pf->sent_ut); + buffer_strlen(pf->result_body_wb), pf->sent_ut - pf->started_ut, now_realtime_usec() - pf->sent_ut); + + if (pf->virtual && SERVING_PLUGINSD(parser)) { + if (pf->payload) { + if (strncmp(string2str(pf->function), FUNCTION_NAME_SET_JOB_CONFIG, strlen(FUNCTION_NAME_SET_JOB_CONFIG)) == 0) + set_job_finalize(parser, parser->user.cd->configuration, string2str(pf->function), pf->code); + dyn_conf_store_config(string2str(pf->function), 
pf->payload, parser->user.cd->configuration); + } else if (strncmp(string2str(pf->function), FUNCTION_NAME_DELETE_JOB, strlen(FUNCTION_NAME_DELETE_JOB)) == 0) { + delete_job_finalize(parser, parser->user.cd->configuration, string2str(pf->function), pf->code); + } + } + + pf->result_cb(pf->result_body_wb, pf->code, pf->result_cb_data); - pf->callback(pf->destination_wb, pf->code, pf->callback_data); string_freez(pf->function); + freez((void *)pf->payload); } void inflight_functions_init(PARSER *parser) { @@ -830,11 +1074,11 @@ static void inflight_functions_garbage_collect(PARSER *parser, usec_t now) { dfe_start_write(parser->inflight.functions, pf) { if (pf->timeout_ut < now) { internal_error(true, - "FUNCTION '%s' removing expired transaction '%s', after %llu usec.", + "FUNCTION '%s' removing expired transaction '%s', after %"PRIu64" usec.", string2str(pf->function), pf_dfe.name, now - pf->started_ut); - if(!buffer_strlen(pf->destination_wb) || pf->code == HTTP_RESP_OK) - pf->code = rrd_call_function_error(pf->destination_wb, + if(!buffer_strlen(pf->result_body_wb) || pf->code == HTTP_RESP_OK) + pf->code = rrd_call_function_error(pf->result_body_wb, "Timeout waiting for collector response.", HTTP_RESP_GATEWAY_TIMEOUT); @@ -847,35 +1091,74 @@ static void inflight_functions_garbage_collect(PARSER *parser, usec_t now) { dfe_done(pf); } +void pluginsd_function_cancel(void *data) { + struct inflight_function *look_for = data, *t; + + bool sent = false; + dfe_start_read(look_for->parser->inflight.functions, t) { + if(look_for == t) { + const char *transaction = t_dfe.name; + + internal_error(true, "PLUGINSD: sending function cancellation to plugin for transaction '%s'", transaction); + + char buffer[2048 + 1]; + snprintfz(buffer, sizeof(buffer) - 1, "%s %s\n", + PLUGINSD_KEYWORD_FUNCTION_CANCEL, + transaction); + + // send the command to the plugin + ssize_t ret = send_to_plugin(buffer, t->parser); + if(ret < 0) + sent = true; + + break; + } + } + dfe_done(t); + + if(sent <= 0) + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "PLUGINSD: FUNCTION_CANCEL request didn't match any pending function requests in pluginsd.d."); +} + // this is the function that is called from // rrd_call_function_and_wait() and rrd_call_function_async() -static int pluginsd_execute_function_callback(BUFFER *destination_wb, int timeout, const char *function, void *collector_data, void (*callback)(BUFFER *wb, int code, void *callback_data), void *callback_data) { - PARSER *parser = collector_data; +static int pluginsd_function_execute_cb(BUFFER *result_body_wb, int timeout, const char *function, + void *execute_cb_data, + rrd_function_result_callback_t result_cb, void *result_cb_data, + rrd_function_is_cancelled_cb_t is_cancelled_cb __maybe_unused, + void *is_cancelled_cb_data __maybe_unused, + rrd_function_register_canceller_cb_t register_canceller_cb, + void *register_canceller_db_data) { + PARSER *parser = execute_cb_data; usec_t now = now_realtime_usec(); struct inflight_function tmp = { .started_ut = now, - .timeout_ut = now + timeout * USEC_PER_SEC, - .destination_wb = destination_wb, + .timeout_ut = now + timeout * USEC_PER_SEC + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT, + .result_body_wb = result_body_wb, .timeout = timeout, .function = string_strdupz(function), - .callback = callback, - .callback_data = callback_data, - .payload = NULL + .result_cb = result_cb, + .result_cb_data = result_cb_data, + .payload = NULL, + .parser = parser, }; uuid_t uuid; - uuid_generate_time(uuid); + uuid_generate_random(uuid); - char 
key[UUID_STR_LEN]; - uuid_unparse_lower(uuid, key); + char transaction[UUID_STR_LEN]; + uuid_unparse_lower(uuid, transaction); dictionary_write_lock(parser->inflight.functions); // if there is any error, our dictionary callbacks will call the caller callback to notify // the caller about the error - no need for error handling here. - dictionary_set(parser->inflight.functions, key, &tmp, sizeof(struct inflight_function)); + void *t = dictionary_set(parser->inflight.functions, transaction, &tmp, sizeof(struct inflight_function)); + if(register_canceller_cb) + register_canceller_cb(register_canceller_db_data, pluginsd_function_cancel, t); if(!parser->inflight.smaller_timeout || tmp.timeout_ut < parser->inflight.smaller_timeout) parser->inflight.smaller_timeout = tmp.timeout_ut; @@ -890,6 +1173,8 @@ static int pluginsd_execute_function_callback(BUFFER *destination_wb, int timeou } static inline PARSER_RC pluginsd_function(char **words, size_t num_words, PARSER *parser) { + // a plugin or a child is registering a function + bool global = false; size_t i = 1; if(num_words >= 2 && strcmp(get_word(words, num_words, 1), "GLOBAL") == 0) { @@ -926,7 +1211,7 @@ static inline PARSER_RC pluginsd_function(char **words, size_t num_words, PARSER timeout = PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT; } - rrd_collector_add_function(host, st, name, timeout, help, false, pluginsd_execute_function_callback, parser); + rrd_function_add(host, st, name, timeout, help, false, pluginsd_function_execute_cb, parser); parser->user.data_collections_count++; @@ -973,18 +1258,18 @@ static inline PARSER_RC pluginsd_function_result_begin(char **words, size_t num_ } else { if(format && *format) - pf->destination_wb->content_type = functions_format_to_content_type(format); + pf->result_body_wb->content_type = functions_format_to_content_type(format); pf->code = code; - pf->destination_wb->expires = expiration; + pf->result_body_wb->expires = expiration; if(expiration <= now_realtime_sec()) - buffer_no_cacheable(pf->destination_wb); + buffer_no_cacheable(pf->result_body_wb); else - buffer_cacheable(pf->destination_wb); + buffer_cacheable(pf->result_body_wb); } - parser->defer.response = (pf) ? pf->destination_wb : NULL; + parser->defer.response = (pf) ? 
pf->result_body_wb : NULL; parser->defer.end_keyword = PLUGINSD_KEYWORD_FUNCTION_RESULT_END; parser->defer.action = pluginsd_function_result_end; parser->defer.action_data = string_strdupz(key); // it is ok is key is NULL @@ -1133,6 +1418,15 @@ static inline PARSER_RC pluginsd_label(char **words, size_t num_words, PARSER *p if(unlikely(!(parser->user.new_host_labels))) parser->user.new_host_labels = rrdlabels_create(); + if (strcmp(name,HOST_LABEL_IS_EPHEMERAL) == 0) { + int is_ephemeral = appconfig_test_boolean_value((char *) value); + if (is_ephemeral) { + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_LABEL); + if (likely(host)) + rrdhost_option_set(host, RRDHOST_OPTION_EPHEMERAL_HOST); + } + } + rrdlabels_add(parser->user.new_host_labels, name, store, str2l(label_source)); if (allocated_store) @@ -1151,6 +1445,8 @@ static inline PARSER_RC pluginsd_overwrite(char **words __maybe_unused, size_t n host->rrdlabels = rrdlabels_create(); rrdlabels_migrate_to_these(host->rrdlabels, parser->user.new_host_labels); + if (rrdhost_option_check(host, RRDHOST_OPTION_EPHEMERAL_HOST)) + rrdlabels_add(host->rrdlabels, HOST_LABEL_IS_EPHEMERAL, "true", RRDLABEL_SRC_CONFIG); rrdhost_flag_set(host, RRDHOST_FLAG_METADATA_LABELS | RRDHOST_FLAG_METADATA_UPDATE); rrdlabels_destroy(parser->user.new_host_labels); @@ -1163,7 +1459,7 @@ static inline PARSER_RC pluginsd_clabel(char **words, size_t num_words, PARSER * const char *value = get_word(words, num_words, 2); const char *label_source = get_word(words, num_words, 3); - if (!name || !value || !*label_source) { + if (!name || !value || !label_source) { netdata_log_error("Ignoring malformed or empty CHART LABEL command."); return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); } @@ -1197,16 +1493,21 @@ static inline PARSER_RC pluginsd_clabel_commit(char **words __maybe_unused, size rrdset_flag_set(st, RRDSET_FLAG_METADATA_UPDATE); rrdhost_flag_set(st->rrdhost, RRDHOST_FLAG_METADATA_UPDATE); + rrdset_metadata_updated(st); parser->user.chart_rrdlabels_linked_temporarily = NULL; return PARSER_RC_OK; } static inline PARSER_RC pluginsd_replay_begin(char **words, size_t num_words, PARSER *parser) { - char *id = get_word(words, num_words, 1); - char *start_time_str = get_word(words, num_words, 2); - char *end_time_str = get_word(words, num_words, 3); - char *child_now_str = get_word(words, num_words, 4); + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *id = get_word(words, num_words, idx++); + char *start_time_str = get_word(words, num_words, idx++); + char *end_time_str = get_word(words, num_words, idx++); + char *child_now_str = get_word(words, num_words, idx++); RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_BEGIN); if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); @@ -1215,7 +1516,7 @@ static inline PARSER_RC pluginsd_replay_begin(char **words, size_t num_words, PA if (likely(!id || !*id)) st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_BEGIN, PLUGINSD_KEYWORD_REPLAY_BEGIN); else - st = pluginsd_find_chart(host, id, PLUGINSD_KEYWORD_REPLAY_BEGIN); + st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_REPLAY_BEGIN); if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); @@ -1330,9 +1631,13 @@ static inline SN_FLAGS pluginsd_parse_storage_number_flags(const char *flags_str } static inline PARSER_RC pluginsd_replay_set(char **words, size_t num_words, PARSER *parser) { - char *dimension = 
get_word(words, num_words, 1); - char *value_str = get_word(words, num_words, 2); - char *flags_str = get_word(words, num_words, 3); + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *dimension = get_word(words, num_words, idx++); + char *value_str = get_word(words, num_words, idx++); + char *flags_str = get_word(words, num_words, idx++); RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_SET); if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); @@ -1341,15 +1646,16 @@ static inline PARSER_RC pluginsd_replay_set(char **words, size_t num_words, PARS if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); if(!parser->user.replay.rset_enabled) { - error_limit_static_thread_var(erl, 1, 0); - error_limit(&erl, "PLUGINSD: 'host:%s/chart:%s' got a %s but it is disabled by %s errors", - rrdhost_hostname(host), rrdset_id(st), PLUGINSD_KEYWORD_REPLAY_SET, PLUGINSD_KEYWORD_REPLAY_BEGIN); + nd_log_limit_static_thread_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_ERR, + "PLUGINSD: 'host:%s/chart:%s' got a %s but it is disabled by %s errors", + rrdhost_hostname(host), rrdset_id(st), PLUGINSD_KEYWORD_REPLAY_SET, PLUGINSD_KEYWORD_REPLAY_BEGIN); // we have to return OK here return PARSER_RC_OK; } - RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, PLUGINSD_KEYWORD_REPLAY_SET); + RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_REPLAY_SET); if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); st->pluginsd.set = true; @@ -1390,8 +1696,10 @@ static inline PARSER_RC pluginsd_replay_set(char **words, size_t num_words, PARS rd->collector.counter++; } else { - error_limit_static_global_var(erl, 1, 0); - error_limit(&erl, "PLUGINSD: 'host:%s/chart:%s/dim:%s' has the ARCHIVED flag set, but it is replicated. Ignoring data.", + nd_log_limit_static_global_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_WARNING, + "PLUGINSD: 'host:%s/chart:%s/dim:%s' has the ARCHIVED flag set, but it is replicated. 
" + "Ignoring data.", rrdhost_hostname(st->rrdhost), rrdset_id(st), rrddim_name(rd)); } } @@ -1403,11 +1711,15 @@ static inline PARSER_RC pluginsd_replay_rrddim_collection_state(char **words, si if(parser->user.replay.rset_enabled == false) return PARSER_RC_OK; - char *dimension = get_word(words, num_words, 1); - char *last_collected_ut_str = get_word(words, num_words, 2); - char *last_collected_value_str = get_word(words, num_words, 3); - char *last_calculated_value_str = get_word(words, num_words, 4); - char *last_stored_value_str = get_word(words, num_words, 5); + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *dimension = get_word(words, num_words, idx++); + char *last_collected_ut_str = get_word(words, num_words, idx++); + char *last_collected_value_str = get_word(words, num_words, idx++); + char *last_calculated_value_str = get_word(words, num_words, idx++); + char *last_stored_value_str = get_word(words, num_words, idx++); RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE); if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); @@ -1421,7 +1733,7 @@ static inline PARSER_RC pluginsd_replay_rrddim_collection_state(char **words, si st->pluginsd.set = false; } - RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE); + RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE); if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); usec_t dim_last_collected_ut = (usec_t)rd->collector.last_collected_time.tv_sec * USEC_PER_SEC + (usec_t)rd->collector.last_collected_time.tv_usec; @@ -1585,10 +1897,14 @@ static inline PARSER_RC pluginsd_replay_end(char **words, size_t num_words, PARS static inline PARSER_RC pluginsd_begin_v2(char **words, size_t num_words, PARSER *parser) { timing_init(); - char *id = get_word(words, num_words, 1); - char *update_every_str = get_word(words, num_words, 2); - char *end_time_str = get_word(words, num_words, 3); - char *wall_clock_time_str = get_word(words, num_words, 4); + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *id = get_word(words, num_words, idx++); + char *update_every_str = get_word(words, num_words, idx++); + char *end_time_str = get_word(words, num_words, idx++); + char *wall_clock_time_str = get_word(words, num_words, idx++); if(unlikely(!id || !update_every_str || !end_time_str || !wall_clock_time_str)) return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_BEGIN_V2, "missing parameters"); @@ -1598,14 +1914,15 @@ static inline PARSER_RC pluginsd_begin_v2(char **words, size_t num_words, PARSER timing_step(TIMING_STEP_BEGIN2_PREPARE); - RRDSET *st = pluginsd_find_chart(host, id, PLUGINSD_KEYWORD_BEGIN_V2); + RRDSET *st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_BEGIN_V2); + if(unlikely(!st)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_BEGIN_V2)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE | RRDSET_FLAG_ARCHIVED))) - rrdset_isnot_obsolete(st); + if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE))) + rrdset_isnot_obsolete___safe_from_collector_thread(st); timing_step(TIMING_STEP_BEGIN2_FIND_CHART); @@ -1645,9 +1962,12 @@ static inline PARSER_RC pluginsd_begin_v2(char **words, size_t num_words, PARSER 
parser->user.v2.stream_buffer = rrdset_push_metric_initialize(parser->user.st, wall_clock_time); if(parser->user.v2.stream_buffer.v2 && parser->user.v2.stream_buffer.wb) { - // check if receiver and sender have the same number parsing capabilities + // check receiver capabilities bool can_copy = stream_has_capability(&parser->user, STREAM_CAP_IEEE754) == stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754); - NUMBER_ENCODING encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX; + + // check sender capabilities + bool with_slots = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_SLOTS) ? true : false; + NUMBER_ENCODING integer_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX; BUFFER *wb = parser->user.v2.stream_buffer.wb; @@ -1656,28 +1976,35 @@ static inline PARSER_RC pluginsd_begin_v2(char **words, size_t num_words, PARSER if(unlikely(parser->user.v2.stream_buffer.begin_v2_added)) buffer_fast_strcat(wb, PLUGINSD_KEYWORD_END_V2 "\n", sizeof(PLUGINSD_KEYWORD_END_V2) - 1 + 1); - buffer_fast_strcat(wb, PLUGINSD_KEYWORD_BEGIN_V2 " '", sizeof(PLUGINSD_KEYWORD_BEGIN_V2) - 1 + 2); + buffer_fast_strcat(wb, PLUGINSD_KEYWORD_BEGIN_V2, sizeof(PLUGINSD_KEYWORD_BEGIN_V2) - 1); + + if(with_slots) { + buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); + buffer_print_uint64_encoded(wb, integer_encoding, st->rrdpush.sender.chart_slot); + } + + buffer_fast_strcat(wb, " '", 2); buffer_fast_strcat(wb, rrdset_id(st), string_strlen(st->id)); buffer_fast_strcat(wb, "' ", 2); if(can_copy) buffer_strcat(wb, update_every_str); else - buffer_print_uint64_encoded(wb, encoding, update_every); + buffer_print_uint64_encoded(wb, integer_encoding, update_every); buffer_fast_strcat(wb, " ", 1); if(can_copy) buffer_strcat(wb, end_time_str); else - buffer_print_uint64_encoded(wb, encoding, end_time); + buffer_print_uint64_encoded(wb, integer_encoding, end_time); buffer_fast_strcat(wb, " ", 1); if(can_copy) buffer_strcat(wb, wall_clock_time_str); else - buffer_print_uint64_encoded(wb, encoding, wall_clock_time); + buffer_print_uint64_encoded(wb, integer_encoding, wall_clock_time); buffer_fast_strcat(wb, "\n", 1); @@ -1710,10 +2037,14 @@ static inline PARSER_RC pluginsd_begin_v2(char **words, size_t num_words, PARSER static inline PARSER_RC pluginsd_set_v2(char **words, size_t num_words, PARSER *parser) { timing_init(); - char *dimension = get_word(words, num_words, 1); - char *collected_str = get_word(words, num_words, 2); - char *value_str = get_word(words, num_words, 3); - char *flags_str = get_word(words, num_words, 4); + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *dimension = get_word(words, num_words, idx++); + char *collected_str = get_word(words, num_words, idx++); + char *value_str = get_word(words, num_words, idx++); + char *flags_str = get_word(words, num_words, idx++); if(unlikely(!dimension || !collected_str || !value_str || !flags_str)) return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_SET_V2, "missing parameters"); @@ -1726,13 +2057,13 @@ static inline PARSER_RC pluginsd_set_v2(char **words, size_t num_words, PARSER * timing_step(TIMING_STEP_SET2_PREPARE); - RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, PLUGINSD_KEYWORD_SET_V2); + RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, 
slot, PLUGINSD_KEYWORD_SET_V2); if(unlikely(!rd)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); st->pluginsd.set = true; if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE | RRDDIM_FLAG_ARCHIVED))) - rrddim_isnot_obsolete(st, rd); + rrddim_isnot_obsolete___safe_from_collector_thread(st, rd); timing_step(TIMING_STEP_SET2_LOOKUP_DIMENSION); @@ -1778,12 +2109,22 @@ static inline PARSER_RC pluginsd_set_v2(char **words, size_t num_words, PARSER * if(parser->user.v2.stream_buffer.v2 && parser->user.v2.stream_buffer.begin_v2_added && parser->user.v2.stream_buffer.wb) { // check if receiver and sender have the same number parsing capabilities bool can_copy = stream_has_capability(&parser->user, STREAM_CAP_IEEE754) == stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754); + + // check the sender capabilities + bool with_slots = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_SLOTS) ? true : false; NUMBER_ENCODING integer_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX; NUMBER_ENCODING doubles_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_DECIMAL; BUFFER *wb = parser->user.v2.stream_buffer.wb; buffer_need_bytes(wb, 1024); - buffer_fast_strcat(wb, PLUGINSD_KEYWORD_SET_V2 " '", sizeof(PLUGINSD_KEYWORD_SET_V2) - 1 + 2); + buffer_fast_strcat(wb, PLUGINSD_KEYWORD_SET_V2, sizeof(PLUGINSD_KEYWORD_SET_V2) - 1); + + if(with_slots) { + buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); + buffer_print_uint64_encoded(wb, integer_encoding, rd->rrdpush.sender.dim_slot); + } + + buffer_fast_strcat(wb, " '", 2); buffer_fast_strcat(wb, rrddim_id(rd), string_strlen(rd->id)); buffer_fast_strcat(wb, "' ", 2); if(can_copy) @@ -1864,13 +2205,27 @@ static inline PARSER_RC pluginsd_end_v2(char **words __maybe_unused, size_t num_ // ------------------------------------------------------------------------ // cleanup RRDSET / RRDDIM - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - rd->collector.calculated_value = 0; - rd->collector.collected_value = 0; - rrddim_clear_updated(rd); + if(likely(st->pluginsd.dims_with_slots)) { + for(size_t i = 0; i < st->pluginsd.size ;i++) { + RRDDIM *rd = st->pluginsd.prd_array[i].rd; + + if(!rd) + continue; + + rd->collector.calculated_value = 0; + rd->collector.collected_value = 0; + rrddim_clear_updated(rd); + } + } + else { + RRDDIM *rd; + rrddim_foreach_read(rd, st){ + rd->collector.calculated_value = 0; + rd->collector.collected_value = 0; + rrddim_clear_updated(rd); + } + rrddim_foreach_done(rd); } - rrddim_foreach_done(rd); // ------------------------------------------------------------------------ // reset state @@ -1894,7 +2249,7 @@ struct mutex_cond { int rc; }; -static void virt_fnc_got_data_cb(BUFFER *wb, int code, void *callback_data) +static void virt_fnc_got_data_cb(BUFFER *wb __maybe_unused, int code, void *callback_data) { struct mutex_cond *ctx = callback_data; pthread_mutex_lock(&ctx->lock); @@ -1904,9 +2259,81 @@ static void virt_fnc_got_data_cb(BUFFER *wb, int code, void *callback_data) } #define VIRT_FNC_TIMEOUT 1 +#define VIRT_FNC_BUF_SIZE (4096) +void call_virtual_function_async(BUFFER *wb, RRDHOST *host, const char *name, const char *payload, rrd_function_result_callback_t callback, void *callback_data) { + PARSER *parser = NULL; + + //TODO simplify (as we really need only first parameter to get plugin name maybe we can 
avoid parsing all) + char *words[PLUGINSD_MAX_WORDS]; + char *function_with_params = strdupz(name); + size_t num_words = quoted_strings_splitter(function_with_params, words, PLUGINSD_MAX_WORDS, isspace_map_pluginsd); + + if (num_words < 2) { + netdata_log_error("PLUGINSD: virtual function name is empty."); + freez(function_with_params); + return; + } + + const DICTIONARY_ITEM *cpi = dictionary_get_and_acquire_item(host->configurable_plugins, get_word(words, num_words, 1)); + if (unlikely(cpi == NULL)) { + netdata_log_error("PLUGINSD: virtual function plugin '%s' not found.", name); + freez(function_with_params); + return; + } + struct configurable_plugin *cp = dictionary_acquired_item_value(cpi); + parser = (PARSER *)cp->cb_usr_ctx; + + BUFFER *function_out = buffer_create(VIRT_FNC_BUF_SIZE, NULL); + // if we are forwarding this to a plugin (as opposed to streaming/child) we have to remove the first parameter (plugin_name) + buffer_strcat(function_out, get_word(words, num_words, 0)); + for (size_t i = 1; i < num_words; i++) { + if (i == 1 && SERVING_PLUGINSD(parser)) + continue; + buffer_sprintf(function_out, " %s", get_word(words, num_words, i)); + } + freez(function_with_params); + + usec_t now = now_realtime_usec(); + + struct inflight_function tmp = { + .started_ut = now, + .timeout_ut = now + VIRT_FNC_TIMEOUT + USEC_PER_SEC, + .result_body_wb = wb, + .timeout = VIRT_FNC_TIMEOUT * 10, + .function = string_strdupz(buffer_tostring(function_out)), + .result_cb = callback, + .result_cb_data = callback_data, + .payload = payload != NULL ? strdupz(payload) : NULL, + .virtual = true, + }; + buffer_free(function_out); + + uuid_t uuid; + uuid_generate_time(uuid); + + char key[UUID_STR_LEN]; + uuid_unparse_lower(uuid, key); + + dictionary_write_lock(parser->inflight.functions); + + // if there is any error, our dictionary callbacks will call the caller callback to notify + // the caller about the error - no need for error handling here. + dictionary_set(parser->inflight.functions, key, &tmp, sizeof(struct inflight_function)); + + if(!parser->inflight.smaller_timeout || tmp.timeout_ut < parser->inflight.smaller_timeout) + parser->inflight.smaller_timeout = tmp.timeout_ut; + + // garbage collect stale inflight functions + if(parser->inflight.smaller_timeout < now) + inflight_functions_garbage_collect(parser, now); + + dictionary_write_unlock(parser->inflight.functions); +} + + dyncfg_config_t call_virtual_function_blocking(PARSER *parser, const char *name, int *rc, const char *payload) { usec_t now = now_realtime_usec(); - BUFFER *wb = buffer_create(4096, NULL); + BUFFER *wb = buffer_create(VIRT_FNC_BUF_SIZE, NULL); struct mutex_cond cond = { .lock = PTHREAD_MUTEX_INITIALIZER, @@ -1916,12 +2343,13 @@ dyncfg_config_t call_virtual_function_blocking(PARSER *parser, const char *name, struct inflight_function tmp = { .started_ut = now, .timeout_ut = now + VIRT_FNC_TIMEOUT + USEC_PER_SEC, - .destination_wb = wb, + .result_body_wb = wb, .timeout = VIRT_FNC_TIMEOUT, .function = string_strdupz(name), - .callback = virt_fnc_got_data_cb, - .callback_data = &cond, - .payload = payload, + .result_cb = virt_fnc_got_data_cb, + .result_cb_data = &cond, + .payload = payload != NULL ? 
strdupz(payload) : NULL, + .virtual = true, }; uuid_t uuid; @@ -1968,98 +2396,188 @@ dyncfg_config_t call_virtual_function_blocking(PARSER *parser, const char *name, return cfg; } -static dyncfg_config_t get_plugin_config_cb(void *usr_ctx) +#define CVF_MAX_LEN (1024) +static dyncfg_config_t get_plugin_config_cb(void *usr_ctx, const char *plugin_name) { PARSER *parser = usr_ctx; - return call_virtual_function_blocking(parser, "get_plugin_config", NULL, NULL); + + if (SERVING_STREAMING(parser)) { + char buf[CVF_MAX_LEN + 1]; + snprintfz(buf, CVF_MAX_LEN, FUNCTION_NAME_GET_PLUGIN_CONFIG " %s", plugin_name); + return call_virtual_function_blocking(parser, buf, NULL, NULL); + } + + return call_virtual_function_blocking(parser, FUNCTION_NAME_GET_PLUGIN_CONFIG, NULL, NULL); } -static dyncfg_config_t get_plugin_config_schema_cb(void *usr_ctx) +static dyncfg_config_t get_plugin_config_schema_cb(void *usr_ctx, const char *plugin_name) { PARSER *parser = usr_ctx; + + if (SERVING_STREAMING(parser)) { + char buf[CVF_MAX_LEN + 1]; + snprintfz(buf, CVF_MAX_LEN, FUNCTION_NAME_GET_PLUGIN_CONFIG_SCHEMA " %s", plugin_name); + return call_virtual_function_blocking(parser, buf, NULL, NULL); + } + return call_virtual_function_blocking(parser, "get_plugin_config_schema", NULL, NULL); } -static dyncfg_config_t get_module_config_cb(void *usr_ctx, const char *module_name) +static dyncfg_config_t get_module_config_cb(void *usr_ctx, const char *plugin_name, const char *module_name) { PARSER *parser = usr_ctx; - char buf[1024]; - snprintfz(buf, sizeof(buf), "get_module_config %s", module_name); - return call_virtual_function_blocking(parser, buf, NULL, NULL); + BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL); + + buffer_strcat(wb, FUNCTION_NAME_GET_MODULE_CONFIG); + if (SERVING_STREAMING(parser)) + buffer_sprintf(wb, " %s", plugin_name); + + buffer_sprintf(wb, " %s", module_name); + + dyncfg_config_t ret = call_virtual_function_blocking(parser, buffer_tostring(wb), NULL, NULL); + + buffer_free(wb); + + return ret; } -static dyncfg_config_t get_module_config_schema_cb(void *usr_ctx, const char *module_name) +static dyncfg_config_t get_module_config_schema_cb(void *usr_ctx, const char *plugin_name, const char *module_name) { PARSER *parser = usr_ctx; - char buf[1024]; - snprintfz(buf, sizeof(buf), "get_module_config_schema %s", module_name); - return call_virtual_function_blocking(parser, buf, NULL, NULL); + BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL); + + buffer_strcat(wb, FUNCTION_NAME_GET_MODULE_CONFIG_SCHEMA); + if (SERVING_STREAMING(parser)) + buffer_sprintf(wb, " %s", plugin_name); + + buffer_sprintf(wb, " %s", module_name); + + dyncfg_config_t ret = call_virtual_function_blocking(parser, buffer_tostring(wb), NULL, NULL); + + buffer_free(wb); + + return ret; } -static dyncfg_config_t get_job_config_schema_cb(void *usr_ctx, const char *module_name) +static dyncfg_config_t get_job_config_schema_cb(void *usr_ctx, const char *plugin_name, const char *module_name) { PARSER *parser = usr_ctx; - char buf[1024]; - snprintfz(buf, sizeof(buf), "get_job_config_schema %s", module_name); - return call_virtual_function_blocking(parser, buf, NULL, NULL); + BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL); + + buffer_strcat(wb, FUNCTION_NAME_GET_JOB_CONFIG_SCHEMA); + + if (SERVING_STREAMING(parser)) + buffer_sprintf(wb, " %s", plugin_name); + + buffer_sprintf(wb, " %s", module_name); + + dyncfg_config_t ret = call_virtual_function_blocking(parser, buffer_tostring(wb), NULL, NULL); + + buffer_free(wb); + + return ret; } -static 
dyncfg_config_t get_job_config_cb(void *usr_ctx, const char *module_name, const char* job_name) +static dyncfg_config_t get_job_config_cb(void *usr_ctx, const char *plugin_name, const char *module_name, const char* job_name) { PARSER *parser = usr_ctx; - char buf[1024]; - snprintfz(buf, sizeof(buf), "get_job_config %s %s", module_name, job_name); - return call_virtual_function_blocking(parser, buf, NULL, NULL); + BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL); + + buffer_strcat(wb, FUNCTION_NAME_GET_JOB_CONFIG); + + if (SERVING_STREAMING(parser)) + buffer_sprintf(wb, " %s", plugin_name); + + buffer_sprintf(wb, " %s %s", module_name, job_name); + + dyncfg_config_t ret = call_virtual_function_blocking(parser, buffer_tostring(wb), NULL, NULL); + + buffer_free(wb); + + return ret; } -enum set_config_result set_plugin_config_cb(void *usr_ctx, dyncfg_config_t *cfg) +enum set_config_result set_plugin_config_cb(void *usr_ctx, const char *plugin_name, dyncfg_config_t *cfg) { PARSER *parser = usr_ctx; + BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL); + + buffer_strcat(wb, FUNCTION_NAME_SET_PLUGIN_CONFIG); + + if (SERVING_STREAMING(parser)) + buffer_sprintf(wb, " %s", plugin_name); + int rc; - call_virtual_function_blocking(parser, "set_plugin_config", &rc, cfg->data); - if(rc != 1) + call_virtual_function_blocking(parser, buffer_tostring(wb), &rc, cfg->data); + + buffer_free(wb); + if(rc != DYNCFG_VFNC_RET_CFG_ACCEPTED) return SET_CONFIG_REJECTED; return SET_CONFIG_ACCEPTED; } -enum set_config_result set_module_config_cb(void *usr_ctx, const char *module_name, dyncfg_config_t *cfg) +enum set_config_result set_module_config_cb(void *usr_ctx, const char *plugin_name, const char *module_name, dyncfg_config_t *cfg) { PARSER *parser = usr_ctx; + BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL); + + buffer_strcat(wb, FUNCTION_NAME_SET_MODULE_CONFIG); + + if (SERVING_STREAMING(parser)) + buffer_sprintf(wb, " %s", plugin_name); + + buffer_sprintf(wb, " %s", module_name); + int rc; + call_virtual_function_blocking(parser, buffer_tostring(wb), &rc, cfg->data); - char buf[1024]; - snprintfz(buf, sizeof(buf), "set_module_config %s", module_name); - call_virtual_function_blocking(parser, buf, &rc, cfg->data); + buffer_free(wb); - if(rc != 1) + if(rc != DYNCFG_VFNC_RET_CFG_ACCEPTED) return SET_CONFIG_REJECTED; return SET_CONFIG_ACCEPTED; } -enum set_config_result set_job_config_cb(void *usr_ctx, const char *module_name, const char *job_name, dyncfg_config_t *cfg) +enum set_config_result set_job_config_cb(void *usr_ctx, const char *plugin_name, const char *module_name, const char *job_name, dyncfg_config_t *cfg) { PARSER *parser = usr_ctx; + BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL); + + buffer_strcat(wb, FUNCTION_NAME_SET_JOB_CONFIG); + + if (SERVING_STREAMING(parser)) + buffer_sprintf(wb, " %s", plugin_name); + + buffer_sprintf(wb, " %s %s", module_name, job_name); + int rc; + call_virtual_function_blocking(parser, buffer_tostring(wb), &rc, cfg->data); - char buf[1024]; - snprintfz(buf, sizeof(buf), "set_job_config %s %s", module_name, job_name); - call_virtual_function_blocking(parser, buf, &rc, cfg->data); + buffer_free(wb); - if(rc != 1) + if(rc != DYNCFG_VFNC_RET_CFG_ACCEPTED) return SET_CONFIG_REJECTED; return SET_CONFIG_ACCEPTED; } -enum set_config_result delete_job_cb(void *usr_ctx, const char *module_name, const char *job_name) +enum set_config_result delete_job_cb(void *usr_ctx, const char *plugin_name ,const char *module_name, const char *job_name) { PARSER *parser = usr_ctx; + BUFFER *wb = 
buffer_create(CVF_MAX_LEN, NULL); + + buffer_strcat(wb, FUNCTION_NAME_DELETE_JOB); + + if (SERVING_STREAMING(parser)) + buffer_sprintf(wb, " %s", plugin_name); + + buffer_sprintf(wb, " %s %s", module_name, job_name); + int rc; + call_virtual_function_blocking(parser, buffer_tostring(wb), &rc, NULL); - char buf[1024]; - snprintfz(buf, sizeof(buf), "delete_job %s %s", module_name, job_name); - call_virtual_function_blocking(parser, buf, &rc, NULL); + buffer_free(wb); - if(rc != 1) + if(rc != DYNCFG_VFNC_RET_CFG_ACCEPTED) return SET_CONFIG_REJECTED; return SET_CONFIG_ACCEPTED; } @@ -2079,37 +2597,65 @@ static inline PARSER_RC pluginsd_register_plugin(char **words __maybe_unused, si cfg->get_config_schema_cb = get_plugin_config_schema_cb; cfg->cb_usr_ctx = parser; - parser->user.cd->cfg_dict_item = register_plugin(cfg); - - if (unlikely(parser->user.cd->cfg_dict_item == NULL)) { + const DICTIONARY_ITEM *di = register_plugin(parser->user.host->configurable_plugins, cfg, SERVING_PLUGINSD(parser)); + if (unlikely(di == NULL)) { freez(cfg->name); freez(cfg); return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_ENABLE, "error registering plugin"); } - parser->user.cd->configuration = cfg; + if (SERVING_PLUGINSD(parser)) { + // this is an optimization for pluginsd to avoid an extra dictionary lookup, + // as we know which plugin is communicating with us + parser->user.cd->cfg_dict_item = di; + parser->user.cd->configuration = cfg; + } else { + // register_plugin keeps the item acquired, so we need to release it + dictionary_acquired_item_release(parser->user.host->configurable_plugins, di); + } + + rrdpush_send_dyncfg_enable(parser->user.host, cfg->name); + return PARSER_RC_OK; } +#define LOG_MSG_SIZE (1024) +#define MODULE_NAME_IDX (SERVING_PLUGINSD(parser) ? 1 : 2) +#define MODULE_TYPE_IDX (SERVING_PLUGINSD(parser) ? 2 : 3) static inline PARSER_RC pluginsd_register_module(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) { netdata_log_info("PLUGINSD: DYNCFG_REG_MODULE"); - struct configurable_plugin *plug_cfg = parser->user.cd->configuration; - if (unlikely(plug_cfg == NULL)) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE, "you have to enable dynamic configuration first using " PLUGINSD_KEYWORD_DYNCFG_ENABLE); - - if (unlikely(num_words != 3)) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE, "expected 2 parameters module_name followed by module_type"); + size_t expected_num_words = SERVING_PLUGINSD(parser) ? 3 : 4; + + if (unlikely(num_words != expected_num_words)) { + char log[LOG_MSG_SIZE + 1]; + snprintfz(log, LOG_MSG_SIZE, "expected %zu (got %zu) parameters: %smodule_name module_type", expected_num_words - 1, num_words - 1, SERVING_PLUGINSD(parser) ? 
"" : "plugin_name "); + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE, log); + } + + struct configurable_plugin *plug_cfg; + const DICTIONARY_ITEM *di = NULL; + if (SERVING_PLUGINSD(parser)) { + plug_cfg = parser->user.cd->configuration; + if (unlikely(plug_cfg == NULL)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE, "you have to enable dynamic configuration first using " PLUGINSD_KEYWORD_DYNCFG_ENABLE); + } else { + di = dictionary_get_and_acquire_item(parser->user.host->configurable_plugins, words[1]); + if (unlikely(di == NULL)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE, "plugin not found"); + + plug_cfg = (struct configurable_plugin *)dictionary_acquired_item_value(di); + } struct module *mod = callocz(1, sizeof(struct module)); - mod->type = str2_module_type(words[2]); + mod->type = str2_module_type(words[MODULE_TYPE_IDX]); if (unlikely(mod->type == MOD_TYPE_UNKNOWN)) { freez(mod); return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE, "unknown module type (allowed: job_array, single)"); } - mod->name = strdupz(words[1]); + mod->name = strdupz(words[MODULE_NAME_IDX]); mod->set_config_cb = set_module_config_cb; mod->get_config_cb = get_module_config_cb; @@ -2122,27 +2668,141 @@ static inline PARSER_RC pluginsd_register_module(char **words __maybe_unused, si mod->delete_job_cb = delete_job_cb; mod->job_config_cb_usr_ctx = parser; - register_module(plug_cfg, mod); + register_module(parser->user.host->configurable_plugins, plug_cfg, mod, SERVING_PLUGINSD(parser)); + + if (di != NULL) + dictionary_acquired_item_release(parser->user.host->configurable_plugins, di); + + rrdpush_send_dyncfg_reg_module(parser->user.host, plug_cfg->name, mod->name, mod->type); + return PARSER_RC_OK; } -// job_status -static inline PARSER_RC pluginsd_job_status(char **words, size_t num_words, PARSER *parser) -{ - if (unlikely(num_words != 6 && num_words != 5)) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_REPORT_JOB_STATUS, "expected 4 or 5 parameters: module_name, job_name, status_code, state, [optional: message]"); +static inline PARSER_RC pluginsd_register_job_common(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused, const char *plugin_name) { + const char *module_name = words[0]; + const char *job_name = words[1]; + const char *job_type_str = words[2]; + const char *flags_str = words[3]; + + long f = str2l(flags_str); + + if (f < 0) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, "invalid flags received"); + + dyncfg_job_flg_t flags = f; + + if (SERVING_PLUGINSD(parser)) + flags |= JOB_FLG_PLUGIN_PUSHED; + else + flags |= JOB_FLG_STREAMING_PUSHED; + + enum job_type job_type = dyncfg_str2job_type(job_type_str); + if (job_type == JOB_TYPE_UNKNOWN) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, "unknown job type"); + + if (SERVING_PLUGINSD(parser) && job_type == JOB_TYPE_USER) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, "plugins cannot push jobs of type \"user\" (this is allowed only in streaming)"); + + if (register_job(parser->user.host->configurable_plugins, plugin_name, module_name, job_name, job_type, flags, 0)) // ignore existing is off as this is explicitly called register job + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, "error registering job"); + + 
rrdpush_send_dyncfg_reg_job(parser->user.host, plugin_name, module_name, job_name, job_type, flags); + + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_register_job(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) { + size_t expected_num_words = SERVING_PLUGINSD(parser) ? 5 : 6; + + if (unlikely(num_words != expected_num_words)) { + char log[LOG_MSG_SIZE + 1]; + snprintfz(log, LOG_MSG_SIZE, "expected %zu (got %zu) parameters: %smodule_name job_name job_type", expected_num_words - 1, num_words - 1, SERVING_PLUGINSD(parser) ? "" : "plugin_name "); + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, log); + } + + if (SERVING_PLUGINSD(parser)) { + return pluginsd_register_job_common(&words[1], num_words - 1, parser, parser->user.cd->configuration->name); + } + return pluginsd_register_job_common(&words[2], num_words - 2, parser, words[1]); +} + +static inline PARSER_RC pluginsd_dyncfg_reset(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) { + if (unlikely(num_words != (SERVING_PLUGINSD(parser) ? 1 : 2))) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_RESET, SERVING_PLUGINSD(parser) ? "expected 0 parameters" : "expected 1 parameter: plugin_name"); + + if (SERVING_PLUGINSD(parser)) { + unregister_plugin(parser->user.host->configurable_plugins, parser->user.cd->cfg_dict_item); + rrdpush_send_dyncfg_reset(parser->user.host, parser->user.cd->configuration->name); + parser->user.cd->configuration = NULL; + } else { + const DICTIONARY_ITEM *di = dictionary_get_and_acquire_item(parser->user.host->configurable_plugins, words[1]); + if (unlikely(di == NULL)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_RESET, "plugin not found"); + unregister_plugin(parser->user.host->configurable_plugins, di); + rrdpush_send_dyncfg_reset(parser->user.host, words[1]); + } - int state = atoi(words[4]); + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_job_status_common(char **words, size_t num_words, PARSER *parser, const char *plugin_name) { + int state = str2i(words[3]); - enum job_status job_status = str2job_state(words[3]); - if (unlikely(job_status == JOB_STATUS_UNKNOWN)) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_REPORT_JOB_STATUS, "unknown job state"); + enum job_status status = str2job_state(words[2]); + if (unlikely(SERVING_PLUGINSD(parser) && status == JOB_STATUS_UNKNOWN)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_REPORT_JOB_STATUS, "unknown job status"); char *message = NULL; - if (num_words == 6) - message = strdupz(words[5]); + if (num_words == 5 && strlen(words[4]) > 0) + message = words[4]; + + const DICTIONARY_ITEM *plugin_item; + DICTIONARY *job_dict; + const DICTIONARY_ITEM *job_item = report_job_status_acq_lock(parser->user.host->configurable_plugins, &plugin_item, &job_dict, plugin_name, words[0], words[1], status, state, message); + + if (job_item != NULL) { + struct job *job = dictionary_acquired_item_value(job_item); + rrdpush_send_job_status_update(parser->user.host, plugin_name, words[0], job); + + pthread_mutex_unlock(&job->lock); + dictionary_acquired_item_release(job_dict, job_item); + dictionary_acquired_item_release(parser->user.host->configurable_plugins, plugin_item); + } + + return PARSER_RC_OK; +} + +// job_status [plugin_name if streaming] [message] +static PARSER_RC pluginsd_job_status(char **words, size_t num_words, PARSER *parser) { + if (SERVING_PLUGINSD(parser)) { + if 
(unlikely(num_words != 5 && num_words != 6)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_REPORT_JOB_STATUS, "expected 4 or 5 parameters: module_name, job_name, status_code, state, [optional: message]"); + } else { + if (unlikely(num_words != 6 && num_words != 7)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_REPORT_JOB_STATUS, "expected 5 or 6 parameters: plugin_name, module_name, job_name, status_code, state, [optional: message]"); + } + + if (SERVING_PLUGINSD(parser)) { + return pluginsd_job_status_common(&words[1], num_words - 1, parser, parser->user.cd->configuration->name); + } + return pluginsd_job_status_common(&words[2], num_words - 2, parser, words[1]); +} + +static PARSER_RC pluginsd_delete_job(char **words, size_t num_words, PARSER *parser) { + // this can be a bit confusing, but there is a difference between KEYWORD_DELETE_JOB and the actual delete_job function + // they go in opposite directions + if (num_words != 4) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DELETE_JOB, "expected 3 parameters: plugin_name, module_name, job_name"); + + const char *plugin_name = get_word(words, num_words, 1); + const char *module_name = get_word(words, num_words, 2); + const char *job_name = get_word(words, num_words, 3); - report_job_status(parser->user.cd->configuration, words[1], words[2], job_status, state, message); + if (SERVING_STREAMING(parser)) + delete_job_pname(parser->user.host->configurable_plugins, plugin_name, module_name, job_name); + + // forward to parent if any + rrdpush_send_job_deleted(parser->user.host, plugin_name, module_name, job_name); return PARSER_RC_OK; } @@ -2195,70 +2855,49 @@ static inline PARSER_RC streaming_claimed_id(char **words, size_t num_words, PAR // ---------------------------------------------------------------------------- -static inline bool buffered_reader_read(struct buffered_reader *reader, int fd) { -#ifdef NETDATA_INTERNAL_CHECKS - if(reader->read_buffer[reader->read_len] != '\0') - fatal("%s(): read_buffer does not start with zero", __FUNCTION__ ); -#endif - - ssize_t bytes_read = read(fd, reader->read_buffer + reader->read_len, sizeof(reader->read_buffer) - reader->read_len - 1); - if(unlikely(bytes_read <= 0)) - return false; - - reader->read_len += bytes_read; - reader->read_buffer[reader->read_len] = '\0'; - - return true; -} - -static inline bool buffered_reader_read_timeout(struct buffered_reader *reader, int fd, int timeout_ms) { - errno = 0; - struct pollfd fds[1]; +void pluginsd_process_thread_cleanup(void *ptr) { + PARSER *parser = (PARSER *)ptr; - fds[0].fd = fd; - fds[0].events = POLLIN; + pluginsd_cleanup_v2(parser); + pluginsd_host_define_cleanup(parser); - int ret = poll(fds, 1, timeout_ms); + rrd_collector_finished(); - if (ret > 0) { - /* There is data to read */ - if (fds[0].revents & POLLIN) - return buffered_reader_read(reader, fd); +#ifdef NETDATA_LOG_STREAM_RECEIVE + if(parser->user.stream_log_fp) { + fclose(parser->user.stream_log_fp); + parser->user.stream_log_fp = NULL; + } +#endif - else if(fds[0].revents & POLLERR) { - netdata_log_error("PARSER: read failed: POLLERR."); - return false; - } - else if(fds[0].revents & POLLHUP) { - netdata_log_error("PARSER: read failed: POLLHUP."); - return false; - } - else if(fds[0].revents & POLLNVAL) { - netdata_log_error("PARSER: read failed: POLLNVAL."); - return false; - } + parser_destroy(parser); +} - netdata_log_error("PARSER: poll() returned positive number, but POLLIN|POLLERR|POLLHUP|POLLNVAL are not set."); - return false; - } - else if (ret == 
0) { - netdata_log_error("PARSER: timeout while waiting for data."); +bool parser_reconstruct_node(BUFFER *wb, void *ptr) { + PARSER *parser = ptr; + if(!parser || !parser->user.host) return false; - } - netdata_log_error("PARSER: poll() failed with code %d.", ret); - return false; + buffer_strcat(wb, rrdhost_hostname(parser->user.host)); + return true; } -void pluginsd_process_thread_cleanup(void *ptr) { - PARSER *parser = (PARSER *)ptr; +bool parser_reconstruct_instance(BUFFER *wb, void *ptr) { + PARSER *parser = ptr; + if(!parser || !parser->user.st) + return false; - pluginsd_cleanup_v2(parser); - pluginsd_host_define_cleanup(parser); + buffer_strcat(wb, rrdset_name(parser->user.st)); + return true; +} - rrd_collector_finished(); +bool parser_reconstruct_context(BUFFER *wb, void *ptr) { + PARSER *parser = ptr; + if(!parser || !parser->user.st) + return false; - parser_destroy(parser); + buffer_strcat(wb, string2str(parser->user.st->context)); + return true; } inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp_plugin_input, FILE *fp_plugin_output, int trust_durations) @@ -2308,26 +2947,51 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp_plugi // so, parser needs to be allocated before pushing it netdata_thread_cleanup_push(pluginsd_process_thread_cleanup, parser); - buffered_reader_init(&parser->reader); - char buffer[PLUGINSD_LINE_MAX + 2]; - while(likely(service_running(SERVICE_COLLECTORS))) { - if (unlikely(!buffered_reader_next_line(&parser->reader, buffer, PLUGINSD_LINE_MAX + 2))) { - if(unlikely(!buffered_reader_read_timeout(&parser->reader, fileno((FILE *)parser->fp_input), 2 * 60 * MSEC_PER_SEC))) - break; - } - else if(unlikely(parser_action(parser, buffer))) - break; - } - - cd->unsafe.enabled = parser->user.enabled; - count = parser->user.data_collections_count; - - if (likely(count)) { - cd->successful_collections += count; - cd->serial_failures = 0; - } - else - cd->serial_failures++; + { + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_CB(NDF_REQUEST, line_splitter_reconstruct_line, &parser->line), + ND_LOG_FIELD_CB(NDF_NIDL_NODE, parser_reconstruct_node, parser), + ND_LOG_FIELD_CB(NDF_NIDL_INSTANCE, parser_reconstruct_instance, parser), + ND_LOG_FIELD_CB(NDF_NIDL_CONTEXT, parser_reconstruct_context, parser), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + + buffered_reader_init(&parser->reader); + BUFFER *buffer = buffer_create(sizeof(parser->reader.read_buffer) + 2, NULL); + while(likely(service_running(SERVICE_COLLECTORS))) { + + if(unlikely(!buffered_reader_next_line(&parser->reader, buffer))) { + buffered_reader_ret_t ret = buffered_reader_read_timeout( + &parser->reader, + fileno((FILE *) parser->fp_input), + 2 * 60 * MSEC_PER_SEC, true + ); + + if(unlikely(ret != BUFFERED_READER_READ_OK)) + break; + + continue; + } + + if(unlikely(parser_action(parser, buffer->buffer))) + break; + + buffer->len = 0; + buffer->buffer[0] = '\0'; + } + buffer_free(buffer); + + cd->unsafe.enabled = parser->user.enabled; + count = parser->user.data_collections_count; + + if(likely(count)) { + cd->successful_collections += count; + cd->serial_failures = 0; + } + else + cd->serial_failures++; + } // free parser with the pop function netdata_thread_cleanup_pop(1); @@ -2452,10 +3116,22 @@ PARSER_RC parser_execute(PARSER *parser, PARSER_KEYWORD *keyword, char **words, case 101: return pluginsd_register_plugin(words, num_words, parser); - + case 102: return pluginsd_register_module(words, num_words, parser); + case 103: + return 
pluginsd_register_job(words, num_words, parser); + + case 104: + return pluginsd_dyncfg_reset(words, num_words, parser); + + case 110: + return pluginsd_job_status(words, num_words, parser); + + case 111: + return pluginsd_delete_job(words, num_words, parser); + default: fatal("Unknown keyword '%s' with id %zu", keyword->keyword, keyword->id); } @@ -2472,14 +3148,20 @@ void parser_init_repertoire(PARSER *parser, PARSER_REPERTOIRE repertoire) { } } +static void parser_destroy_dyncfg(PARSER *parser) { + if (parser->user.cd != NULL && parser->user.cd->configuration != NULL) { + unregister_plugin(parser->user.host->configurable_plugins, parser->user.cd->cfg_dict_item); + parser->user.cd->configuration = NULL; + } else if (parser->user.host != NULL && SERVING_STREAMING(parser) && parser->user.host != localhost){ + dictionary_flush(parser->user.host->configurable_plugins); + } +} + void parser_destroy(PARSER *parser) { if (unlikely(!parser)) return; - if (parser->user.cd != NULL && parser->user.cd->configuration != NULL) { - unregister_plugin(parser->user.cd->cfg_dict_item); - parser->user.cd->configuration = NULL; - } + parser_destroy_dyncfg(parser); dictionary_destroy(parser->inflight.functions); freez(parser); diff --git a/collectors/plugins.d/pluginsd_parser.h b/collectors/plugins.d/pluginsd_parser.h index 5e1ea124241dd3..35474642935748 100644 --- a/collectors/plugins.d/pluginsd_parser.h +++ b/collectors/plugins.d/pluginsd_parser.h @@ -10,6 +10,12 @@ // this has to be in-sync with the same at receiver.c #define WORKER_RECEIVER_JOB_REPLICATION_COMPLETION (WORKER_PARSER_FIRST_JOB - 3) +// this controls the max response size of a function +#define PLUGINSD_MAX_DEFERRED_SIZE (20 * 1024 * 1024) + +#define PLUGINSD_MIN_RRDSET_POINTERS_CACHE 1024 + +#define HOST_LABEL_IS_EPHEMERAL "_is_ephemeral" // PARSER return codes typedef enum __attribute__ ((__packed__)) parser_rc { PARSER_RC_OK, // Callback was successful, go on @@ -25,6 +31,7 @@ typedef enum __attribute__ ((__packed__)) parser_input_type { typedef enum __attribute__ ((__packed__)) { PARSER_INIT_PLUGINSD = (1 << 1), PARSER_INIT_STREAMING = (1 << 2), + PARSER_REP_METADATA = (1 << 3), } PARSER_REPERTOIRE; struct parser; @@ -38,16 +45,22 @@ typedef struct parser_keyword { } PARSER_KEYWORD; typedef struct parser_user_object { + bool cleanup_slots; RRDSET *st; RRDHOST *host; void *opaque; struct plugind *cd; int trust_durations; - DICTIONARY *new_host_labels; - DICTIONARY *chart_rrdlabels_linked_temporarily; + RRDLABELS *new_host_labels; + RRDLABELS *chart_rrdlabels_linked_temporarily; size_t data_collections_count; int enabled; +#ifdef NETDATA_LOG_STREAM_RECEIVE + FILE *stream_log_fp; + PARSER_REPERTOIRE stream_log_repertoire; +#endif + STREAM_CAPABILITIES capabilities; // receiver capabilities struct { @@ -55,7 +68,7 @@ typedef struct parser_user_object { uuid_t machine_guid; char machine_guid_str[UUID_STR_LEN]; STRING *hostname; - DICTIONARY *rrdlabels; + RRDLABELS *rrdlabels; } host_define; struct parser_user_object_replay { @@ -85,17 +98,21 @@ typedef struct parser { PARSER_REPERTOIRE repertoire; uint32_t flags; int fd; // Socket - size_t line; FILE *fp_input; // Input source e.g. 
stream FILE *fp_output; // Stream to send commands to plugin #ifdef ENABLE_HTTPS NETDATA_SSL *ssl_output; #endif +#ifdef ENABLE_H2O + void *h2o_ctx; // if set we use h2o_stream functions to send data +#endif PARSER_USER_OBJECT user; // User defined structure to hold extra state between calls struct buffered_reader reader; + struct line_splitter line; + PARSER_KEYWORD *keyword; struct { const char *end_keyword; @@ -147,19 +164,28 @@ static inline PARSER_KEYWORD *parser_find_keyword(PARSER *parser, const char *co return NULL; } +bool parser_reconstruct_node(BUFFER *wb, void *ptr); +bool parser_reconstruct_instance(BUFFER *wb, void *ptr); +bool parser_reconstruct_context(BUFFER *wb, void *ptr); + static inline int parser_action(PARSER *parser, char *input) { - parser->line++; +#ifdef NETDATA_LOG_STREAM_RECEIVE + static __thread char line[PLUGINSD_LINE_MAX + 1]; + strncpyz(line, input, sizeof(line) - 1); +#endif + + parser->line.count++; if(unlikely(parser->flags & PARSER_DEFER_UNTIL_KEYWORD)) { - char command[PLUGINSD_LINE_MAX + 1]; - bool has_keyword = find_first_keyword(input, command, PLUGINSD_LINE_MAX, isspace_map_pluginsd); + char command[100 + 1]; + bool has_keyword = find_first_keyword(input, command, 100, isspace_map_pluginsd); if(!has_keyword || strcmp(command, parser->defer.end_keyword) != 0) { if(parser->defer.response) { buffer_strcat(parser->defer.response, input); - if(buffer_strlen(parser->defer.response) > 10 * 1024 * 1024) { - // more than 10MB of data - // a bad plugin that did not send the end_keyword + if(buffer_strlen(parser->defer.response) > PLUGINSD_MAX_DEFERRED_SIZE) { + // more than PLUGINSD_MAX_DEFERRED_SIZE of data, + // or a bad plugin that did not send the end_keyword internal_error(true, "PLUGINSD: deferred response is too big (%zu bytes). 
Stopping this plugin.", buffer_strlen(parser->defer.response)); return 1; } @@ -180,18 +206,25 @@ static inline int parser_action(PARSER *parser, char *input) { return 0; } - char *words[PLUGINSD_MAX_WORDS]; - size_t num_words = quoted_strings_splitter_pluginsd(input, words, PLUGINSD_MAX_WORDS); - const char *command = get_word(words, num_words, 0); + parser->line.num_words = quoted_strings_splitter_pluginsd(input, parser->line.words, PLUGINSD_MAX_WORDS); + const char *command = get_word(parser->line.words, parser->line.num_words, 0); - if(unlikely(!command)) + if(unlikely(!command)) { + line_splitter_reset(&parser->line); return 0; + } PARSER_RC rc; - PARSER_KEYWORD *t = parser_find_keyword(parser, command); - if(likely(t)) { - worker_is_busy(t->worker_job_id); - rc = parser_execute(parser, t, words, num_words); + parser->keyword = parser_find_keyword(parser, command); + if(likely(parser->keyword)) { + worker_is_busy(parser->keyword->worker_job_id); + +#ifdef NETDATA_LOG_STREAM_RECEIVE + if(parser->user.stream_log_fp && parser->keyword->repertoire & parser->user.stream_log_repertoire) + fprintf(parser->user.stream_log_fp, "%s", line); +#endif + + rc = parser_execute(parser, parser->keyword, parser->line.words, parser->line.num_words); // rc = (*t->func)(words, num_words, parser); worker_is_idle(); } @@ -199,22 +232,13 @@ static inline int parser_action(PARSER *parser, char *input) { rc = PARSER_RC_ERROR; if(rc == PARSER_RC_ERROR) { - BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL); - for(size_t i = 0; i < num_words ;i++) { - if(i) buffer_fast_strcat(wb, " ", 1); - - buffer_fast_strcat(wb, "\"", 1); - const char *s = get_word(words, num_words, i); - buffer_strcat(wb, s?s:""); - buffer_fast_strcat(wb, "\"", 1); - } - + CLEAN_BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL); + line_splitter_reconstruct_line(wb, &parser->line); netdata_log_error("PLUGINSD: parser_action('%s') failed on line %zu: { %s } (quotes added to show parsing)", - command, parser->line, buffer_tostring(wb)); - - buffer_free(wb); + command, parser->line.count, buffer_tostring(wb)); } + line_splitter_reset(&parser->line); return (rc == PARSER_RC_ERROR || rc == PARSER_RC_STOP); } diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md index 16ae6f412842ae..62e46569f4837e 100644 --- a/collectors/proc.plugin/README.md +++ b/collectors/proc.plugin/README.md @@ -398,11 +398,11 @@ You can set the following values for each configuration option: #### Wireless configuration -#### alarms +#### alerts -There are several alarms defined in `health.d/net.conf`. +There are several alerts defined in `health.d/net.conf`. -The tricky ones are `inbound packets dropped` and `inbound packets dropped ratio`. They have quite a strict policy so that they warn users about possible issues. These alarms can be annoying for some network configurations. It is especially true for some bonding configurations if an interface is a child or a bonding interface itself. If it is expected to have a certain number of drops on an interface for a certain network configuration, a separate alarm with different triggering thresholds can be created or the existing one can be disabled for this specific interface. It can be done with the help of the [families](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alarm-line-families) line in the alarm configuration. 
For example, if you want to disable the `inbound packets dropped` alarm for `eth0`, set `families: !eth0 *` in the alarm definition for `template: inbound_packets_dropped`. +The tricky ones are `inbound packets dropped` and `inbound packets dropped ratio`. They have quite a strict policy so that they warn users about possible issues. These alerts can be annoying for some network configurations. It is especially true for some bonding configurations if an interface is a child or a bonding interface itself. If it is expected to have a certain number of drops on an interface for a certain network configuration, a separate alert with different triggering thresholds can be created or the existing one can be disabled for this specific interface. It can be done with the help of the [families](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alert-line-families) line in the alert configuration. For example, if you want to disable the `inbound packets dropped` alert for `eth0`, set `families: !eth0 *` in the alert definition for `template: inbound_packets_dropped`. #### configuration diff --git a/collectors/proc.plugin/integrations/amd_gpu.md b/collectors/proc.plugin/integrations/amd_gpu.md new file mode 100644 index 00000000000000..e85cce221b84f9 --- /dev/null +++ b/collectors/proc.plugin/integrations/amd_gpu.md @@ -0,0 +1,110 @@ + + +# AMD GPU + + + + + +Plugin: proc.plugin +Module: /sys/class/drm + + + +## Overview + +This integration monitors AMD GPU metrics, such as utilization, clock frequency and memory usage. + +It reads `/sys/class/drm` to collect metrics for every AMD GPU card instance it encounters. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per gpu + +These metrics refer to the GPU. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| product_name | GPU product name (e.g. AMD RX 6600) | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| amdgpu.gpu_utilization | utilization | percentage | +| amdgpu.gpu_mem_utilization | utilization | percentage | +| amdgpu.gpu_clk_frequency | frequency | MHz | +| amdgpu.gpu_mem_clk_frequency | frequency | MHz | +| amdgpu.gpu_mem_vram_usage_perc | usage | percentage | +| amdgpu.gpu_mem_vram_usage | free, used | bytes | +| amdgpu.gpu_mem_vis_vram_usage_perc | usage | percentage | +| amdgpu.gpu_mem_vis_vram_usage | free, used | bytes | +| amdgpu.gpu_mem_gtt_usage_perc | usage | percentage | +| amdgpu.gpu_mem_gtt_usage | free, used | bytes | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. 
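For a quick, standalone look at the same `/sys/class/drm` attributes this collector reads, a minimal C reader could look like the sketch below. It assumes an `amdgpu` card exposed as `card0` and the `gpu_busy_percent` / `mem_info_vram_*` files; the card index and the exact set of available attributes depend on the driver and GPU model, so treat the paths as assumptions rather than a guaranteed interface.

```c
/* Minimal sketch: read a couple of the amdgpu sysfs files this collector
 * watches. The card index and the available files vary per system; the
 * paths below are examples, not a guaranteed interface. */
#include <stdio.h>

static int read_ull(const char *path, unsigned long long *out) {
    FILE *fp = fopen(path, "r");
    if (!fp) return -1;
    int rc = (fscanf(fp, "%llu", out) == 1) ? 0 : -1;
    fclose(fp);
    return rc;
}

int main(void) {
    unsigned long long busy = 0, vram_used = 0, vram_total = 0;

    /* feeds amdgpu.gpu_utilization */
    if (read_ull("/sys/class/drm/card0/device/gpu_busy_percent", &busy) == 0)
        printf("gpu utilization: %llu%%\n", busy);

    /* feeds amdgpu.gpu_mem_vram_usage */
    if (read_ull("/sys/class/drm/card0/device/mem_info_vram_used", &vram_used) == 0 &&
        read_ull("/sys/class/drm/card0/device/mem_info_vram_total", &vram_total) == 0 &&
        vram_total)
        printf("vram used: %llu / %llu bytes\n", vram_used, vram_total);

    return 0;
}
```

Compile it with something like `cc -O2 -o amdgpu_peek amdgpu_peek.c` and run it on a host with an AMD GPU to compare against the charted values.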
+ + diff --git a/collectors/proc.plugin/integrations/btrfs.md b/collectors/proc.plugin/integrations/btrfs.md new file mode 100644 index 00000000000000..5f994c8419058a --- /dev/null +++ b/collectors/proc.plugin/integrations/btrfs.md @@ -0,0 +1,137 @@ + + +# BTRFS + + + + + +Plugin: proc.plugin +Module: /sys/fs/btrfs + + + +## Overview + +This integration provides usage and error statistics from the BTRFS filesystem. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per btrfs filesystem + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| filesystem_uuid | TBD | +| filesystem_label | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| btrfs.disk | unallocated, data_free, data_used, meta_free, meta_used, sys_free, sys_used | MiB | +| btrfs.data | free, used | MiB | +| btrfs.metadata | free, used, reserved | MiB | +| btrfs.system | free, used | MiB | +| btrfs.commits | commits | commits | +| btrfs.commits_perc_time | commits | percentage | +| btrfs.commit_timings | last, max | ms | + +### Per btrfs device + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| device_id | TBD | +| filesystem_uuid | TBD | +| filesystem_label | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| btrfs.device_errors | write_errs, read_errs, flush_errs, corruption_errs, generation_errs | errors | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ btrfs_allocated ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.disk | percentage of allocated BTRFS physical disk space | +| [ btrfs_data ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.data | utilization of BTRFS data space | +| [ btrfs_metadata ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.metadata | utilization of BTRFS metadata space | +| [ btrfs_system ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.system | utilization of BTRFS system space | +| [ btrfs_device_read_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS read errors | +| [ btrfs_device_write_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS write errors | +| [ btrfs_device_flush_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS flush errors | +| [ btrfs_device_corruption_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS corruption errors | +| [ 
btrfs_device_generation_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS generation errors | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/conntrack.md b/collectors/proc.plugin/integrations/conntrack.md new file mode 100644 index 00000000000000..b38f6b5080f20b --- /dev/null +++ b/collectors/proc.plugin/integrations/conntrack.md @@ -0,0 +1,105 @@ + + +# Conntrack + + + + + +Plugin: proc.plugin +Module: /proc/net/stat/nf_conntrack + + + +## Overview + +This integration monitors the connection tracking mechanism of Netfilter in the Linux Kernel. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Conntrack instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| netfilter.conntrack_sockets | connections | active connections | +| netfilter.conntrack_new | new, ignore, invalid | connections/s | +| netfilter.conntrack_changes | inserted, deleted, delete_list | changes/s | +| netfilter.conntrack_expect | created, deleted, new | expectations/s | +| netfilter.conntrack_search | searched, restarted, found | searches/s | +| netfilter.conntrack_errors | icmp_error, error_failed, drop, early_drop | events/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ netfilter_conntrack_full ](https://github.com/netdata/netdata/blob/master/health/health.d/netfilter.conf) | netfilter.conntrack_sockets | netfilter connection tracker table size utilization | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/disk_statistics.md b/collectors/proc.plugin/integrations/disk_statistics.md new file mode 100644 index 00000000000000..8f7448c399a834 --- /dev/null +++ b/collectors/proc.plugin/integrations/disk_statistics.md @@ -0,0 +1,149 @@ + + +# Disk Statistics + + + + + +Plugin: proc.plugin +Module: /proc/diskstats + + + +## Overview + +Detailed statistics for each of your system's disk devices and partitions. +The data is reported by the kernel and can be used to monitor disk activity on a Linux system. + +Get valuable insight into how your disks are performing and where potential bottlenecks might be. + + + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Disk Statistics instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.io | in, out | KiB/s | + +### Per disk + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| device | TBD | +| mount_point | TBD | +| device_type | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| disk.io | reads, writes | KiB/s | +| disk_ext.io | discards | KiB/s | +| disk.ops | reads, writes | operations/s | +| disk_ext.ops | discards, flushes | operations/s | +| disk.qops | operations | operations | +| disk.backlog | backlog | milliseconds | +| disk.busy | busy | milliseconds | +| disk.util | utilization | % of time working | +| disk.mops | reads, writes | merged operations/s | +| disk_ext.mops | discards | merged operations/s | +| disk.iotime | reads, writes | milliseconds/s | +| disk_ext.iotime | discards, flushes | milliseconds/s | +| disk.await | reads, writes | milliseconds/operation | +| disk_ext.await | discards, flushes | milliseconds/operation | +| disk.avgsz | reads, writes | KiB/operation | +| disk_ext.avgsz | discards | KiB/operation | +| disk.svctm | svctm | milliseconds/operation | +| disk.bcache_cache_alloc | ununsed, dirty, clean, metadata, undefined | percentage | +| disk.bcache_hit_ratio | 5min, 1hour, 1day, ever | percentage | +| disk.bcache_rates | congested, writeback | KiB/s | +| disk.bcache_size | dirty | MiB | +| disk.bcache_usage | avail | percentage | +| disk.bcache_cache_read_races | races, errors | operations/s | +| disk.bcache | hits, misses, collisions, readaheads | operations/s | +| disk.bcache_bypass | hits, misses | operations/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ 10min_disk_backlog ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.backlog | average backlog size of the ${label:device} disk over the last 10 minutes | +| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes | +| [ bcache_cache_dirty ](https://github.com/netdata/netdata/blob/master/health/health.d/bcache.conf) | disk.bcache_cache_alloc | percentage of cache space used for dirty data and metadata (this usually means your SSD cache is too small) | +| [ bcache_cache_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/bcache.conf) | disk.bcache_cache_read_races | number of times data was read from the cache, the bucket was reused and invalidated in the last 10 minutes (when this occurs the data is reread from the backing device) | + + +## Setup + +### Prerequisites + +No action required. 
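To see where these numbers come from, the sketch below parses the first eleven fields of `/proc/diskstats` (device identifiers plus read/write I/Os, merges, sectors and milliseconds). It is a simplified reader, not the collector itself: newer kernels append discard and flush fields that it ignores, and turning sector counts into KiB/s requires two samples and a time delta, which is left out here.

```c
/* Sketch: derive disk.io style numbers from /proc/diskstats.
 * Only the first eleven fields are parsed; sectors are 512 bytes. */
#include <stdio.h>

int main(void) {
    FILE *fp = fopen("/proc/diskstats", "r");
    if (!fp) { perror("/proc/diskstats"); return 1; }

    char line[512], dev[64];
    unsigned major, minor;
    unsigned long long rd_ios, rd_merges, rd_sectors, rd_ms;
    unsigned long long wr_ios, wr_merges, wr_sectors, wr_ms;

    while (fgets(line, sizeof(line), fp)) {
        if (sscanf(line, "%u %u %63s %llu %llu %llu %llu %llu %llu %llu %llu",
                   &major, &minor, dev,
                   &rd_ios, &rd_merges, &rd_sectors, &rd_ms,
                   &wr_ios, &wr_merges, &wr_sectors, &wr_ms) != 11)
            continue;

        /* totals since boot; rates need two samples and a time delta */
        printf("%-10s reads=%llu (%llu KiB)  writes=%llu (%llu KiB)\n",
               dev, rd_ios, rd_sectors / 2, wr_ios, wr_sectors / 2);
    }

    fclose(fp);
    return 0;
}
```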
+ +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/entropy.md b/collectors/proc.plugin/integrations/entropy.md new file mode 100644 index 00000000000000..8432a1f960943a --- /dev/null +++ b/collectors/proc.plugin/integrations/entropy.md @@ -0,0 +1,133 @@ + + +# Entropy + + + + + +Plugin: proc.plugin +Module: /proc/sys/kernel/random/entropy_avail + + + +## Overview + +Entropy, a measure of the randomness or unpredictability of data. + +In the context of cryptography, entropy is used to generate random numbers or keys that are essential for +secure communication and encryption. Without a good source of entropy, cryptographic protocols can become +vulnerable to attacks that exploit the predictability of the generated keys. + +In most operating systems, entropy is generated by collecting random events from various sources, such as +hardware interrupts, mouse movements, keyboard presses, and disk activity. These events are fed into a pool +of entropy, which is then used to generate random numbers when needed. + +The `/dev/random` device in Linux is one such source of entropy, and it provides an interface for programs +to access the pool of entropy. When a program requests random numbers, it reads from the `/dev/random` device, +which blocks until enough entropy is available to generate the requested numbers. This ensures that the +generated numbers are truly random and not predictable. + +However, if the pool of entropy gets depleted, the `/dev/random` device may block indefinitely, causing +programs that rely on random numbers to slow down or even freeze. This is especially problematic for +cryptographic protocols that require a continuous stream of random numbers, such as SSL/TLS and SSH. + +To avoid this issue, some systems use a hardware random number generator (RNG) to generate high-quality +entropy. A hardware RNG generates random numbers by measuring physical phenomena, such as thermal noise or +radioactive decay. These sources of randomness are considered to be more reliable and unpredictable than +software-based sources. + +One such hardware RNG is the Trusted Platform Module (TPM), which is a dedicated hardware chip that is used +for cryptographic operations and secure boot. The TPM contains a built-in hardware RNG that generates +high-quality entropy, which can be used to seed the pool of entropy in the operating system. + +Alternatively, software-based solutions such as `Haveged` can be used to generate additional entropy by +exploiting sources of randomness in the system, such as CPU utilization and network traffic. These solutions +can help to mitigate the risk of entropy depletion, but they may not be as reliable as hardware-based solutions. + + + + +This collector is only supported on the following platforms: + +- linux + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.
+
+
+
+### Per Entropy instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.entropy | entropy | entropy |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ lowest_entropy ](https://github.com/netdata/netdata/blob/master/health/health.d/entropy.conf) | system.entropy | minimum number of bits of entropy available for the kernel’s random number generator |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/infiniband.md b/collectors/proc.plugin/integrations/infiniband.md
new file mode 100644
index 00000000000000..6cb5fdc5352efd
--- /dev/null
+++ b/collectors/proc.plugin/integrations/infiniband.md
@@ -0,0 +1,99 @@
+
+
+# InfiniBand
+
+
+
+
+
+Plugin: proc.plugin
+Module: /sys/class/infiniband
+
+
+
+## Overview
+
+This integration monitors InfiniBand network interface statistics.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per infiniband port
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ib.bytes | Received, Sent | kilobits/s |
+| ib.packets | Received, Sent, Mcast_rcvd, Mcast_sent, Ucast_rcvd, Ucast_sent | packets/s |
+| ib.errors | Pkts_malformated, Pkts_rcvd_discarded, Pkts_sent_discarded, Tick_Wait_to_send, Pkts_missed_resource, Buffer_overrun, Link_Downed, Link_recovered, Link_integrity_err, Link_minor_errors, Pkts_rcvd_with_EBP, Pkts_rcvd_discarded_by_switch, Pkts_sent_discarded_by_switch | errors/s |
+| ib.hwerrors | Duplicated_packets, Pkt_Seq_Num_gap, Ack_timer_expired, Drop_missing_buffer, Drop_out_of_sequence, NAK_sequence_rcvd, CQE_err_Req, CQE_err_Resp, CQE_Flushed_err_Req, CQE_Flushed_err_Resp, Remote_access_err_Req, Remote_access_err_Resp, Remote_invalid_req, Local_length_err_Resp, RNR_NAK_Packets, CNP_Pkts_ignored, RoCE_ICRC_Errors | errors/s |
+| ib.hwpackets | RoCEv2_Congestion_sent, RoCEv2_Congestion_rcvd, IB_Congestion_handled, ATOMIC_req_rcvd, Connection_req_rcvd, Read_req_rcvd, Write_req_rcvd, RoCE_retrans_adaptive, RoCE_retrans_timeout, RoCE_slow_restart, RoCE_slow_restart_congestion, RoCE_slow_restart_count | packets/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
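+
+Although no configuration is needed, it can help to know where the numbers come from. The sketch below
+(illustrative only, not part of the collector) walks `/sys/class/infiniband` and prints every per-port counter
+it finds; actual device and port names depend on your HCA and driver.
+
+```python
+import os
+
+
+def dump_port_counters(root="/sys/class/infiniband"):
+    """Print every counter exposed under <device>/ports/<port>/counters."""
+    if not os.path.isdir(root):
+        print("no InfiniBand devices found")
+        return
+    for device in sorted(os.listdir(root)):
+        ports_dir = os.path.join(root, device, "ports")
+        if not os.path.isdir(ports_dir):
+            continue
+        for port in sorted(os.listdir(ports_dir)):
+            counters_dir = os.path.join(ports_dir, port, "counters")
+            if not os.path.isdir(counters_dir):
+                continue
+            for counter in sorted(os.listdir(counters_dir)):
+                try:
+                    with open(os.path.join(counters_dir, counter)) as f:
+                        value = f.read().strip()
+                except OSError:
+                    continue  # some counters are not readable on all hardware
+                print(f"{device} port {port} {counter} = {value}")
+
+
+if __name__ == "__main__":
+    dump_port_counters()
+```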
+ + diff --git a/collectors/proc.plugin/integrations/inter_process_communication.md b/collectors/proc.plugin/integrations/inter_process_communication.md new file mode 100644 index 00000000000000..55708a4b0ff82c --- /dev/null +++ b/collectors/proc.plugin/integrations/inter_process_communication.md @@ -0,0 +1,120 @@ + + +# Inter Process Communication + + + + + +Plugin: proc.plugin +Module: ipc + + + +## Overview + +IPC stands for Inter-Process Communication. It is a mechanism which allows processes to communicate with each +other and synchronize their actions. + +This collector exposes information about: + +- Message Queues: This allows messages to be exchanged between processes. It's a more flexible method that + allows messages to be placed onto a queue and read at a later time. + +- Shared Memory: This method allows for the fastest form of IPC because processes can exchange data by + reading/writing into shared memory segments. + +- Semaphores: They are used to synchronize the operations performed by independent processes. So, if multiple + processes are trying to access a single shared resource, semaphores can ensure that only one process + accesses the resource at a given time. + + + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Inter Process Communication instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.ipc_semaphores | semaphores | semaphores | +| system.ipc_semaphore_arrays | arrays | arrays | +| system.message_queue_message | a dimension per queue | messages | +| system.message_queue_bytes | a dimension per queue | bytes | +| system.shared_memory_segments | segments | segments | +| system.shared_memory_bytes | bytes | bytes | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization | +| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/interrupts.md b/collectors/proc.plugin/integrations/interrupts.md new file mode 100644 index 00000000000000..1b85fb767386cc --- /dev/null +++ b/collectors/proc.plugin/integrations/interrupts.md @@ -0,0 +1,141 @@ + + +# Interrupts + + + + + +Plugin: proc.plugin +Module: /proc/interrupts + + + +## Overview + +Monitors `/proc/interrupts`, a file organized by CPU and then by the type of interrupt. 
+The numbers reported are the counts of the interrupts that have occurred of each type. + +An interrupt is a signal to the processor emitted by hardware or software indicating an event that needs +immediate attention. The processor then interrupts its current activities and executes the interrupt handler +to deal with the event. This is part of the way a computer multitasks and handles concurrent processing. + +The types of interrupts include: + +- **I/O interrupts**: These are caused by I/O devices like the keyboard, mouse, printer, etc. For example, when + you type something on the keyboard, an interrupt is triggered so the processor can handle the new input. + +- **Timer interrupts**: These are generated at regular intervals by the system's timer circuit. It's primarily + used to switch the CPU among different tasks. + +- **Software interrupts**: These are generated by a program requiring disk I/O operations, or other system resources. + +- **Hardware interrupts**: These are caused by hardware conditions such as power failure, overheating, etc. + +Monitoring `/proc/interrupts` can be used for: + +- **Performance tuning**: If an interrupt is happening very frequently, it could be a sign that a device is not + configured correctly, or there is a software bug causing unnecessary interrupts. This could lead to system + performance degradation. + +- **System troubleshooting**: If you're seeing a lot of unexpected interrupts, it could be a sign of a hardware problem. + +- **Understanding system behavior**: More generally, keeping an eye on what interrupts are occurring can help you + understand what your system is doing. It can provide insights into the system's interaction with hardware, + drivers, and other parts of the kernel. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Interrupts instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.interrupts | a dimension per device | interrupts/s | + +### Per cpu core + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cpu | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cpu.interrupts | a dimension per device | interrupts/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. 
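+
+As a companion to the charts above, the sketch below (illustrative only, not the collector's implementation)
+parses `/proc/interrupts` and prints the interrupt sources with the highest totals. It assumes the usual layout:
+a header row with one column per CPU, then one row per interrupt source.
+
+```python
+def top_interrupts(path="/proc/interrupts", count=10):
+    """Return the `count` interrupt sources with the highest summed per-CPU counters."""
+    with open(path) as f:
+        cpus = len(f.readline().split())  # header row: one column per CPU
+        totals = []
+        for line in f:
+            label, _, rest = line.partition(":")
+            fields = rest.split()
+            counts = [int(x) for x in fields[:cpus] if x.isdigit()]
+            description = " ".join(fields[len(counts):])
+            totals.append((sum(counts), label.strip(), description))
+    return sorted(totals, reverse=True)[:count]
+
+
+if __name__ == "__main__":
+    for total, irq, description in top_interrupts():
+        print(f"{irq:>8} {total:>12} {description}")
+```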
+ + diff --git a/collectors/proc.plugin/integrations/ip_virtual_server.md b/collectors/proc.plugin/integrations/ip_virtual_server.md new file mode 100644 index 00000000000000..5c7afd2ebde747 --- /dev/null +++ b/collectors/proc.plugin/integrations/ip_virtual_server.md @@ -0,0 +1,97 @@ + + +# IP Virtual Server + + + + + +Plugin: proc.plugin +Module: /proc/net/ip_vs_stats + + + +## Overview + +This integration monitors IP Virtual Server statistics + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per IP Virtual Server instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ipvs.sockets | connections | connections/s | +| ipvs.packets | received, sent | packets/s | +| ipvs.net | received, sent | kilobits/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/ipv6_socket_statistics.md b/collectors/proc.plugin/integrations/ipv6_socket_statistics.md new file mode 100644 index 00000000000000..2c1ee2721a29f0 --- /dev/null +++ b/collectors/proc.plugin/integrations/ipv6_socket_statistics.md @@ -0,0 +1,99 @@ + + +# IPv6 Socket Statistics + + + + + +Plugin: proc.plugin +Module: /proc/net/sockstat6 + + + +## Overview + +This integration provides IPv6 socket statistics. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per IPv6 Socket Statistics instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ipv6.sockstat6_tcp_sockets | inuse | sockets | +| ipv6.sockstat6_udp_sockets | inuse | sockets | +| ipv6.sockstat6_udplite_sockets | inuse | sockets | +| ipv6.sockstat6_raw_sockets | inuse | sockets | +| ipv6.sockstat6_frag_sockets | inuse | fragments | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. 
+#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/kernel_same-page_merging.md b/collectors/proc.plugin/integrations/kernel_same-page_merging.md new file mode 100644 index 00000000000000..336f0feaf62c4e --- /dev/null +++ b/collectors/proc.plugin/integrations/kernel_same-page_merging.md @@ -0,0 +1,103 @@ + + +# Kernel Same-Page Merging + + + + + +Plugin: proc.plugin +Module: /sys/kernel/mm/ksm + + + +## Overview + +Kernel Samepage Merging (KSM) is a memory-saving feature in Linux that enables the kernel to examine the +memory of different processes and identify identical pages. It then merges these identical pages into a +single page that the processes share. This is particularly useful for virtualization, where multiple virtual +machines might be running the same operating system or applications and have many identical pages. + +The collector provides information about the operation and effectiveness of KSM on your system. + + + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Kernel Same-Page Merging instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.ksm | shared, unshared, sharing, volatile | MiB | +| mem.ksm_savings | savings, offered | MiB | +| mem.ksm_ratios | savings | percentage | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/md_raid.md b/collectors/proc.plugin/integrations/md_raid.md new file mode 100644 index 00000000000000..34a4840bb0d0a3 --- /dev/null +++ b/collectors/proc.plugin/integrations/md_raid.md @@ -0,0 +1,125 @@ + + +# MD RAID + + + + + +Plugin: proc.plugin +Module: /proc/mdstat + + + +## Overview + +This integration monitors the status of MD RAID devices. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per MD RAID instance + + + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| md.health | a dimension per md array | failed disks | + +### Per md array + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| device | TBD | +| raid_level | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| md.disks | inuse, down | disks | +| md.mismatch_cnt | count | unsynchronized blocks | +| md.status | check, resync, recovery, reshape | percent | +| md.expected_time_until_operation_finish | finish_in | seconds | +| md.operation_speed | speed | KiB/s | +| md.nonredundant | available | boolean | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ mdstat_last_collected ](https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf) | md.disks | number of seconds since the last successful data collection | +| [ mdstat_disks ](https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf) | md.disks | number of devices in the down state for the ${label:device} ${label:raid_level} array. Any number > 0 indicates that the array is degraded. | +| [ mdstat_mismatch_cnt ](https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf) | md.mismatch_cnt | number of unsynchronized blocks for the ${label:device} ${label:raid_level} array | +| [ mdstat_nonredundant_last_collected ](https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf) | md.nonredundant | number of seconds since the last successful data collection | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/memory_modules_dimms.md b/collectors/proc.plugin/integrations/memory_modules_dimms.md new file mode 100644 index 00000000000000..351c6fcd72234c --- /dev/null +++ b/collectors/proc.plugin/integrations/memory_modules_dimms.md @@ -0,0 +1,146 @@ + + +# Memory modules (DIMMs) + + + + + +Plugin: proc.plugin +Module: /sys/devices/system/edac/mc + + + +## Overview + +The Error Detection and Correction (EDAC) subsystem is detecting and reporting errors in the system's memory, +primarily ECC (Error-Correcting Code) memory errors. + +The collector provides data for: + +- Per memory controller (MC): correctable and uncorrectable errors. These can be of 2 kinds: + - errors related to a DIMM + - errors that cannot be associated with a DIMM + +- Per memory DIMM: correctable and uncorrectable errors. There are 2 kinds: + - memory controllers that can identify the physical DIMMS and report errors directly for them, + - memory controllers that report errors for memory address ranges that can be linked to dimms. + In this case the DIMMS reported may be more than the physical DIMMS installed. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. 
+ + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per memory controller + +These metrics refer to the memory controller. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. | +| mc_name | Memory controller type. | +| size_mb | The amount of memory in megabytes that this memory controller manages. | +| max_location | Last available memory slot in this memory controller. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.edac_mc | correctable, uncorrectable, correctable_noinfo, uncorrectable_noinfo | errors/s | + +### Per memory module + +These metrics refer to the memory module (or rank, [depends on the memory controller](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#f5)). + +Labels: + +| Label | Description | +|:-----------|:----------------| +| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. | +| dimm | [dimmX or rankX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#dimmx-or-rankx-directories) directory name of this memory module. | +| dimm_dev_type | Type of DRAM device used in this memory module. For example, x1, x2, x4, x8. | +| dimm_edac_mode | Used type of error detection and correction. For example, S4ECD4ED would mean a Chipkill with x4 DRAM. | +| dimm_label | Label assigned to this memory module. | +| dimm_location | Location of the memory module. | +| dimm_mem_type | Type of the memory module. | +| size | The amount of memory in megabytes that this memory module manages. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.edac_mc | correctable, uncorrectable | errors/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ ecc_memory_mc_noinfo_correctable ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.edac_mc | memory controller ${label:controller} ECC correctable errors (unknown DIMM slot) in the last 10 minutes | +| [ ecc_memory_mc_noinfo_uncorrectable ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.edac_mc | memory controller ${label:controller} ECC uncorrectable errors (unknown DIMM slot) in the last 10 minutes | +| [ ecc_memory_dimm_correctable ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.edac_mc_dimm | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC correctable errors in the last 10 minutes | +| [ ecc_memory_dimm_uncorrectable ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.edac_mc_dimm | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC uncorrectable errors in the last 10 minutes | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. 
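+
+To see the raw counters these charts are built from, you can walk the EDAC sysfs tree directly. The sketch below
+is only an illustration (not the collector itself) and assumes the EDAC driver for your memory controller is
+loaded; otherwise `/sys/devices/system/edac/mc` is empty or absent.
+
+```python
+import glob
+import os
+
+
+def read_value(path):
+    try:
+        with open(path) as f:
+            return f.read().strip()
+    except OSError:
+        return "n/a"
+
+
+def dump_edac(root="/sys/devices/system/edac/mc"):
+    for mc in sorted(glob.glob(os.path.join(root, "mc[0-9]*"))):
+        # Per-controller correctable/uncorrectable error counters.
+        print(f"{os.path.basename(mc)}: "
+              f"ce={read_value(os.path.join(mc, 'ce_count'))} "
+              f"ue={read_value(os.path.join(mc, 'ue_count'))} "
+              f"ce_noinfo={read_value(os.path.join(mc, 'ce_noinfo_count'))} "
+              f"ue_noinfo={read_value(os.path.join(mc, 'ue_noinfo_count'))}")
+        # Per-DIMM (or per-rank) counters, depending on what the controller reports.
+        for dimm in sorted(glob.glob(os.path.join(mc, "dimm[0-9]*")) +
+                           glob.glob(os.path.join(mc, "rank[0-9]*"))):
+            print(f"  {os.path.basename(dimm)} ({read_value(os.path.join(dimm, 'dimm_label'))}): "
+                  f"ce={read_value(os.path.join(dimm, 'dimm_ce_count'))} "
+                  f"ue={read_value(os.path.join(dimm, 'dimm_ue_count'))}")
+
+
+if __name__ == "__main__":
+    dump_edac()
+```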
+ + diff --git a/collectors/proc.plugin/integrations/memory_statistics.md b/collectors/proc.plugin/integrations/memory_statistics.md new file mode 100644 index 00000000000000..52f1bf530c5a78 --- /dev/null +++ b/collectors/proc.plugin/integrations/memory_statistics.md @@ -0,0 +1,138 @@ + + +# Memory Statistics + + + + + +Plugin: proc.plugin +Module: /proc/vmstat + + + +## Overview + +Linux Virtual memory subsystem. + +Information about memory management, indicating how effectively the kernel allocates and frees +memory resources in response to system demands. + +Monitors page faults, which occur when a process requests a portion of its memory that isn't +immediately available. Monitoring these events can help diagnose inefficiencies in memory management and +provide insights into application behavior. + +Tracks swapping activity — a vital aspect of memory management where the kernel moves data from RAM to +swap space, and vice versa, based on memory demand and usage. It also monitors the utilization of zswap, +a compressed cache for swap pages, and provides insights into its usage and performance implications. + +In the context of virtualized environments, it tracks the ballooning mechanism which is used to balance +memory resources between host and guest systems. + +For systems using NUMA architecture, it provides insights into the local and remote memory accesses, which +can impact the performance based on the memory access times. + +The collector also watches for 'Out of Memory' kills, a drastic measure taken by the system when it runs out +of memory resources. + + + + +This collector is only supported on the following platforms: + +- linux + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Memory Statistics instance + + + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.swapio | in, out | KiB/s | +| system.pgpgio | in, out | KiB/s | +| system.pgfaults | minor, major | faults/s | +| mem.balloon | inflate, deflate, migrate | KiB/s | +| mem.zswapio | in, out | KiB/s | +| mem.ksm_cow | swapin, write | KiB/s | +| mem.thp_faults | alloc, fallback, fallback_charge | events/s | +| mem.thp_file | alloc, fallback, mapped, fallback_charge | events/s | +| mem.thp_zero | alloc, failed | events/s | +| mem.thp_collapse | alloc, failed | events/s | +| mem.thp_split | split, failed, split_pmd, split_deferred | events/s | +| mem.thp_swapout | swapout, fallback | events/s | +| mem.thp_compact | success, fail, stall | events/s | +| mem.oom_kill | kills | kills/s | +| mem.numa | local, foreign, interleave, other, pte_updates, huge_pte_updates, hint_faults, hint_faults_local, pages_migrated | events/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes | +| [ oom_kill ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | mem.oom_kill | number of out of memory kills in the last 30 minutes | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/memory_usage.md b/collectors/proc.plugin/integrations/memory_usage.md new file mode 100644 index 00000000000000..141bd29ad637da --- /dev/null +++ b/collectors/proc.plugin/integrations/memory_usage.md @@ -0,0 +1,135 @@ + + +# Memory Usage + + + + + +Plugin: proc.plugin +Module: /proc/meminfo + + + +## Overview + +`/proc/meminfo` provides detailed information about the system's current memory usage. It includes information +about different types of memory, RAM, Swap, ZSwap, HugePages, Transparent HugePages (THP), Kernel memory, +SLAB memory, memory mappings, and more. + +Monitoring /proc/meminfo can be useful for: + +- **Performance Tuning**: Understanding your system's memory usage can help you make decisions about system + tuning and optimization. For example, if your system is frequently low on free memory, it might benefit + from more RAM. + +- **Troubleshooting**: If your system is experiencing problems, `/proc/meminfo` can provide clues about + whether memory usage is a factor. For example, if your system is slow and cached swap is high, it could + mean that your system is swapping out a lot of memory to disk, which can degrade performance. + +- **Capacity Planning**: By monitoring memory usage over time, you can understand trends and make informed + decisions about future capacity needs. + + + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. 
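+
+The charts below are derived from the fields of `/proc/meminfo`. As a minimal, illustrative sketch (not the
+collector's implementation), the file can be turned into a dictionary like this; field names vary slightly
+between kernel versions, so missing keys should be treated as optional.
+
+```python
+def read_meminfo(path="/proc/meminfo"):
+    """Return /proc/meminfo as {field: value}; most values are in kB, a few are plain counts."""
+    info = {}
+    with open(path) as f:
+        for line in f:
+            key, _, rest = line.partition(":")
+            parts = rest.split()
+            if parts:
+                info[key] = int(parts[0])  # ignore the optional "kB" unit suffix
+    return info
+
+
+if __name__ == "__main__":
+    mem = read_meminfo()
+    print(f"total={mem['MemTotal'] / 1024:.1f} MiB "
+          f"free={mem['MemFree'] / 1024:.1f} MiB "
+          f"available={mem.get('MemAvailable', 0) / 1024:.1f} MiB")
+```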
+ + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Memory Usage instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.ram | free, used, cached, buffers | MiB | +| mem.available | avail | MiB | +| mem.swap | free, used | MiB | +| mem.swap_cached | cached | MiB | +| mem.zswap | in-ram, on-disk | MiB | +| mem.hwcorrupt | HardwareCorrupted | MiB | +| mem.commited | Commited_AS | MiB | +| mem.writeback | Dirty, Writeback, FuseWriteback, NfsWriteback, Bounce | MiB | +| mem.kernel | Slab, KernelStack, PageTables, VmallocUsed, Percpu | MiB | +| mem.slab | reclaimable, unreclaimable | MiB | +| mem.hugepages | free, used, surplus, reserved | MiB | +| mem.thp | anonymous, shmem | MiB | +| mem.thp_details | ShmemPmdMapped, FileHugePages, FilePmdMapped | MiB | +| mem.reclaiming | Active, Inactive, Active(anon), Inactive(anon), Active(file), Inactive(file), Unevictable, Mlocked | MiB | +| mem.high_low | high_used, low_used, high_free, low_free | MiB | +| mem.cma | used, free | MiB | +| mem.directmaps | 4k, 2m, 4m, 1g | MiB | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | system.ram | system memory utilization | +| [ ram_available ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping | +| [ used_swap ](https://github.com/netdata/netdata/blob/master/health/health.d/swap.conf) | mem.swap | swap memory utilization | +| [ 1hour_memory_hw_corrupted ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.hwcorrupt | amount of memory corrupted due to a hardware failure | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/network_interfaces.md b/collectors/proc.plugin/integrations/network_interfaces.md new file mode 100644 index 00000000000000..0cfd56faeeb2f7 --- /dev/null +++ b/collectors/proc.plugin/integrations/network_interfaces.md @@ -0,0 +1,137 @@ + + +# Network interfaces + + + + + +Plugin: proc.plugin +Module: /proc/net/dev + + + +## Overview + +Monitor network interface metrics about bandwidth, state, errors and more. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Network interfaces instance + + + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.net | received, sent | kilobits/s | + +### Per network device + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| interface_type | TBD | +| device | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| net.net | received, sent | kilobits/s | +| net.speed | speed | kilobits/s | +| net.duplex | full, half, unknown | state | +| net.operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state | +| net.carrier | up, down | state | +| net.mtu | mtu | octets | +| net.packets | received, sent, multicast | packets/s | +| net.errors | inbound, outbound | errors/s | +| net.drops | inbound, outbound | drops/s | +| net.fifo | receive, transmit | errors | +| net.compressed | received, sent | packets/s | +| net.events | frames, collisions, carrier | events/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ interface_speed ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.net | network interface ${label:device} current speed | +| [ 1m_received_traffic_overflow ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.net | average inbound utilization for the network interface ${label:device} over the last minute | +| [ 1m_sent_traffic_overflow ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.net | average outbound utilization for the network interface ${label:device} over the last minute | +| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes | +| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes | +| [ wifi_inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes | +| [ wifi_outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes | +| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute | +| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute | +| [ 10min_fifo_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.fifo | number of FIFO errors for the network interface ${label:device} in the last 10 minutes | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. 
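+
+The per-interface counters behind these charts come from `/proc/net/dev`; largely the same counters are also
+exposed per interface under `/sys/class/net/<iface>/statistics`, which is convenient for spot checks. The sketch
+below is illustrative only and assumes a Linux host.
+
+```python
+import os
+
+
+def read_interface_stats(iface):
+    """Return all counters from /sys/class/net/<iface>/statistics as a dict of ints."""
+    stats_dir = f"/sys/class/net/{iface}/statistics"
+    stats = {}
+    for counter in os.listdir(stats_dir):
+        with open(os.path.join(stats_dir, counter)) as f:
+            stats[counter] = int(f.read())
+    return stats
+
+
+if __name__ == "__main__":
+    for iface in sorted(os.listdir("/sys/class/net")):
+        s = read_interface_stats(iface)
+        print(f"{iface}: rx_bytes={s['rx_bytes']} tx_bytes={s['tx_bytes']} "
+              f"rx_dropped={s['rx_dropped']} tx_dropped={s['tx_dropped']}")
+```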
+ + diff --git a/collectors/proc.plugin/integrations/network_statistics.md b/collectors/proc.plugin/integrations/network_statistics.md new file mode 100644 index 00000000000000..726fd9d6145c54 --- /dev/null +++ b/collectors/proc.plugin/integrations/network_statistics.md @@ -0,0 +1,161 @@ + + +# Network statistics + + + + + +Plugin: proc.plugin +Module: /proc/net/netstat + + + +## Overview + +This integration provides metrics from the `netstat`, `snmp` and `snmp6` modules. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Network statistics instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.ip | received, sent | kilobits/s | +| ip.tcpmemorypressures | pressures | events/s | +| ip.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger, failed | connections/s | +| ip.tcpreorders | timestamp, sack, fack, reno | packets/s | +| ip.tcpofo | inqueue, dropped, merged, pruned | packets/s | +| ip.tcpsyncookies | received, sent, failed | packets/s | +| ip.tcp_syn_queue | drops, cookies | packets/s | +| ip.tcp_accept_queue | overflows, drops | packets/s | +| ip.tcpsock | connections | active connections | +| ip.tcppackets | received, sent | packets/s | +| ip.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s | +| ip.tcpopens | active, passive | connections/s | +| ip.tcphandshake | EstabResets, OutRsts, AttemptFails, SynRetrans | events/s | +| ipv4.packets | received, sent, forwarded, delivered | packets/s | +| ipv4.errors | InDiscards, OutDiscards, InNoRoutes, OutNoRoutes, InHdrErrors, InAddrErrors, InTruncatedPkts, InCsumErrors | packets/s | +| ipc4.bcast | received, sent | kilobits/s | +| ipv4.bcastpkts | received, sent | packets/s | +| ipv4.mcast | received, sent | kilobits/s | +| ipv4.mcastpkts | received, sent | packets/s | +| ipv4.icmp | received, sent | packets/s | +| ipv4.icmpmsg | InEchoReps, OutEchoReps, InDestUnreachs, OutDestUnreachs, InRedirects, OutRedirects, InEchos, OutEchos, InRouterAdvert, OutRouterAdvert, InRouterSelect, OutRouterSelect, InTimeExcds, OutTimeExcds, InParmProbs, OutParmProbs, InTimestamps, OutTimestamps, InTimestampReps, OutTimestampReps | packets/s | +| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s | +| ipv4.udppackets | received, sent | packets/s | +| ipv4.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s | +| ipv4.udplite | received, sent | packets/s | +| ipv4.udplite_errors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | packets/s | +| ipv4.ecnpkts | CEP, NoECTP, ECTP0, ECTP1 | packets/s | +| ipv4.fragsin | ok, failed, all | packets/s | +| ipv4.fragsout | ok, failed, created | packets/s | +| system.ipv6 | received, sent | kilobits/s | +| ipv6.packets | received, sent, forwarded, delivers | packets/s | +| ipv6.errors | InDiscards, 
OutDiscards, InHdrErrors, InAddrErrors, InUnknownProtos, InTooBigErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s | +| ipv6.bcast | received, sent | kilobits/s | +| ipv6.mcast | received, sent | kilobits/s | +| ipv6.mcastpkts | received, sent | packets/s | +| ipv6.udppackets | received, sent | packets/s | +| ipv6.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s | +| ipv6.udplitepackets | received, sent | packets/s | +| ipv6.udpliteerrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors | events/s | +| ipv6.icmp | received, sent | messages/s | +| ipv6.icmpredir | received, sent | redirects/s | +| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutPktTooBigs, OutTimeExcds, OutParmProblems | errors/s | +| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s | +| ipv6.groupmemb | InQueries, OutQueries, InResponses, OutResponses, InReductions, OutReductions | messages/s | +| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s | +| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s | +| ipv6.icmpmldv2 | received, sent | reports/s | +| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s | +| ipv6.ect | InNoECTPkts, InECT1Pkts, InECT0Pkts, InCEPkts | packets/s | +| ipv6.ect | InNoECTPkts, InECT1Pkts, InECT0Pkts, InCEPkts | packets/s | +| ipv6.fragsin | ok, failed, timeout, all | packets/s | +| ipv6.fragsout | ok, failed, all | packets/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ 1m_tcp_syn_queue_drops ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of SYN requests was dropped due to the full TCP SYN queue over the last minute (SYN cookies were not enabled) | +| [ 1m_tcp_syn_queue_cookies ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of sent SYN cookies due to the full TCP SYN queue over the last minute | +| [ 1m_tcp_accept_queue_overflows ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of overflows in the TCP accept queue over the last minute | +| [ 1m_tcp_accept_queue_drops ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of dropped packets in the TCP accept queue over the last minute | +| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_conn.conf) | ip.tcpsock | TCP connections utilization | +| [ 1m_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last minute | +| [ 10s_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. 
| +| [ 1m_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last minute | +| [ 10s_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. | +| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute | +| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/nfs_client.md b/collectors/proc.plugin/integrations/nfs_client.md new file mode 100644 index 00000000000000..db584771483bd8 --- /dev/null +++ b/collectors/proc.plugin/integrations/nfs_client.md @@ -0,0 +1,99 @@ + + +# NFS Client + + + + + +Plugin: proc.plugin +Module: /proc/net/rpc/nfs + + + +## Overview + +This integration provides statistics from the Linux kernel's NFS Client. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per NFS Client instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| nfs.net | udp, tcp | operations/s | +| nfs.rpc | calls, retransmits, auth_refresh | calls/s | +| nfs.proc2 | a dimension per proc2 call | calls/s | +| nfs.proc3 | a dimension per proc3 call | calls/s | +| nfs.proc4 | a dimension per proc4 call | calls/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/nfs_server.md b/collectors/proc.plugin/integrations/nfs_server.md new file mode 100644 index 00000000000000..0c906b4d8610de --- /dev/null +++ b/collectors/proc.plugin/integrations/nfs_server.md @@ -0,0 +1,104 @@ + + +# NFS Server + + + + + +Plugin: proc.plugin +Module: /proc/net/rpc/nfsd + + + +## Overview + +This integration provides statistics from the Linux kernel's NFS Server. + + + +This collector is supported on all platforms. 
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per NFS Server instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nfsd.readcache | hits, misses, nocache | reads/s |
+| nfsd.filehandles | stale | handles/s |
+| nfsd.io | read, write | kilobytes/s |
+| nfsd.threads | threads | threads |
+| nfsd.net | udp, tcp | packets/s |
+| nfsd.rpc | calls, bad_format, bad_auth | calls/s |
+| nfsd.proc2 | a dimension per proc2 call | calls/s |
+| nfsd.proc3 | a dimension per proc3 call | calls/s |
+| nfsd.proc4 | a dimension per proc4 call | calls/s |
+| nfsd.proc4ops | a dimension per proc4 operation | operations/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/non-uniform_memory_access.md b/collectors/proc.plugin/integrations/non-uniform_memory_access.md
new file mode 100644
index 00000000000000..6f495fb7905392
--- /dev/null
+++ b/collectors/proc.plugin/integrations/non-uniform_memory_access.md
@@ -0,0 +1,111 @@
+
+
+# Non-Uniform Memory Access
+
+
+
+
+
+Plugin: proc.plugin
+Module: /sys/devices/system/node
+
+
+
+## Overview
+
+Information about NUMA (Non-Uniform Memory Access) nodes on the system.
+
+NUMA is a method of configuring a cluster of microprocessors in a multiprocessing system so that they can
+share memory locally, improving performance and the ability of the system to be expanded. NUMA is used in a
+symmetric multiprocessing (SMP) system.
+
+In a NUMA system, processors, memory, and I/O devices are grouped together into cells, also known as nodes.
+Each node has its own memory and set of I/O devices, and one or more processors. While a processor can access
+memory in any of the nodes, it does so faster when accessing memory within its own node.
+
+The collector provides statistics on memory allocations for processes running on the NUMA nodes, revealing the
+efficiency of memory allocations in multi-node systems.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to.
An instance is uniquely identified by a set of labels. + + + +### Per numa node + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| numa_node | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.numa_nodes | hit, miss, local, foreign, interleave, other | events/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/page_types.md b/collectors/proc.plugin/integrations/page_types.md new file mode 100644 index 00000000000000..b228629b6e3e7c --- /dev/null +++ b/collectors/proc.plugin/integrations/page_types.md @@ -0,0 +1,113 @@ + + +# Page types + + + + + +Plugin: proc.plugin +Module: /proc/pagetypeinfo + + + +## Overview + +This integration provides metrics about the system's memory page types + + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Page types instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.pagetype_global | a dimension per pagesize | B | + +### Per node, zone, type + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| node_id | TBD | +| node_zone | TBD | +| node_type | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.pagetype | a dimension per pagesize | B | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/power_supply.md b/collectors/proc.plugin/integrations/power_supply.md new file mode 100644 index 00000000000000..9a474e82a39354 --- /dev/null +++ b/collectors/proc.plugin/integrations/power_supply.md @@ -0,0 +1,107 @@ + + +# Power Supply + + + + + +Plugin: proc.plugin +Module: /sys/class/power_supply + + + +## Overview + +This integration monitors Power supply metrics, such as battery status, AC power status and more. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. 
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per power device
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| device | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| powersupply.capacity | capacity | percentage |
+| powersupply.charge | empty_design, empty, now, full, full_design | Ah |
+| powersupply.energy | empty_design, empty, now, full, full_design | Wh |
+| powersupply.voltage | min_design, min, now, max, max_design | V |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ linux_power_supply_capacity ](https://github.com/netdata/netdata/blob/master/health/health.d/linux_power_supply.conf) | powersupply.capacity | percentage of remaining power supply capacity |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/pressure_stall_information.md b/collectors/proc.plugin/integrations/pressure_stall_information.md
new file mode 100644
index 00000000000000..53f4aa0508e491
--- /dev/null
+++ b/collectors/proc.plugin/integrations/pressure_stall_information.md
@@ -0,0 +1,129 @@
+
+
+# Pressure Stall Information
+
+
+
+
+
+Plugin: proc.plugin
+Module: /proc/pressure
+
+
+
+## Overview
+
+Introduced in Linux kernel 4.20, `/proc/pressure` provides Pressure Stall Information (PSI). PSI is a feature
+that allows the system to track the amount of time tasks are stalled due to resource contention, such as CPU,
+memory, or I/O.
+
+The collector monitors the following files:
+
+- **cpu**: Tracks the amount of time tasks are stalled due to CPU contention.
+- **memory**: Tracks the amount of time tasks are stalled due to memory contention.
+- **io**: Tracks the amount of time tasks are stalled due to I/O contention.
+- **irq**: Tracks the amount of time tasks are stalled due to IRQ contention.
+
+Each of them provides metrics for stall time averaged over the last 10 seconds, 1 minute, and 5 minutes.
+
+Monitoring the /proc/pressure files can provide important insights into system performance and capacity planning:
+
+- **Identifying resource contention**: If these metrics are consistently high, it indicates that tasks are
+  frequently being stalled due to lack of resources, which can significantly degrade system performance.
+
+- **Troubleshooting performance issues**: If a system is experiencing performance issues, these metrics can
+  help identify whether resource contention is the cause.
+
+- **Capacity planning**: By monitoring these metrics over time, you can understand trends in resource
+  utilization and make informed decisions about when to add more resources to your system.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
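+
+This collector reads the pressure files directly when the kernel provides them. As an illustration of the format
+it parses (a minimal sketch, not the collector's implementation), the snippet below reads one pressure file per
+resource; it assumes a kernel built with PSI support, otherwise `/proc/pressure` does not exist.
+
+```python
+def read_pressure(resource="cpu"):
+    """Parse /proc/pressure/<resource> into {'some'|'full': {'avg10', 'avg60', 'avg300', 'total'}}."""
+    result = {}
+    with open(f"/proc/pressure/{resource}") as f:
+        for line in f:
+            kind, *pairs = line.split()
+            result[kind] = {k: float(v) for k, v in (p.split("=") for p in pairs)}
+    return result
+
+
+if __name__ == "__main__":
+    for resource in ("cpu", "memory", "io"):
+        try:
+            psi = read_pressure(resource)
+        except FileNotFoundError:
+            continue  # this kernel does not expose PSI for the resource
+        some = psi.get("some", {})
+        print(f"{resource}: some avg10={some.get('avg10')} avg60={some.get('avg60')} avg300={some.get('avg300')}")
+```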
+ +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Pressure Stall Information instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.cpu_some_pressure | some10, some60, some300 | percentage | +| system.cpu_some_pressure_stall_time | time | ms | +| system.cpu_full_pressure | some10, some60, some300 | percentage | +| system.cpu_full_pressure_stall_time | time | ms | +| system.memory_some_pressure | some10, some60, some300 | percentage | +| system.memory_some_pressure_stall_time | time | ms | +| system.memory_full_pressure | some10, some60, some300 | percentage | +| system.memory_full_pressure_stall_time | time | ms | +| system.io_some_pressure | some10, some60, some300 | percentage | +| system.io_some_pressure_stall_time | time | ms | +| system.io_full_pressure | some10, some60, some300 | percentage | +| system.io_full_pressure_stall_time | time | ms | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/sctp_statistics.md b/collectors/proc.plugin/integrations/sctp_statistics.md new file mode 100644 index 00000000000000..15c0d424d0b249 --- /dev/null +++ b/collectors/proc.plugin/integrations/sctp_statistics.md @@ -0,0 +1,99 @@ + + +# SCTP Statistics + + + + + +Plugin: proc.plugin +Module: /proc/net/sctp/snmp + + + +## Overview + +This integration provides statistics about the Stream Control Transmission Protocol (SCTP). + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per SCTP Statistics instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| sctp.established | established | associations | +| sctp.transitions | active, passive, aborted, shutdown | transitions/s | +| sctp.packets | received, sent | packets/s | +| sctp.packet_errors | invalid, checksum | packets/s | +| sctp.fragmentation | reassembled, fragmented | packets/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. 
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/socket_statistics.md b/collectors/proc.plugin/integrations/socket_statistics.md new file mode 100644 index 00000000000000..d8ef26647c7aac --- /dev/null +++ b/collectors/proc.plugin/integrations/socket_statistics.md @@ -0,0 +1,109 @@ + + +# Socket statistics + + + + + +Plugin: proc.plugin +Module: /proc/net/sockstat + + + +## Overview + +This integration provides socket statistics. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Socket statistics instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ip.sockstat_sockets | used | sockets | +| ipv4.sockstat_tcp_sockets | alloc, orphan, inuse, timewait | sockets | +| ipv4.sockstat_tcp_mem | mem | KiB | +| ipv4.sockstat_udp_sockets | inuse | sockets | +| ipv4.sockstat_udp_mem | mem | sockets | +| ipv4.sockstat_udplite_sockets | inuse | sockets | +| ipv4.sockstat_raw_sockets | inuse | sockets | +| ipv4.sockstat_frag_sockets | inuse | fragments | +| ipv4.sockstat_frag_mem | mem | KiB | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ tcp_orphans ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_orphans.conf) | ipv4.sockstat_tcp_sockets | orphan IPv4 TCP sockets utilization | +| [ tcp_memory ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_mem.conf) | ipv4.sockstat_tcp_mem | TCP memory utilization | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/softirq_statistics.md b/collectors/proc.plugin/integrations/softirq_statistics.md new file mode 100644 index 00000000000000..f966cf9714d993 --- /dev/null +++ b/collectors/proc.plugin/integrations/softirq_statistics.md @@ -0,0 +1,133 @@ + + +# SoftIRQ statistics + + + + + +Plugin: proc.plugin +Module: /proc/softirqs + + + +## Overview + +In the Linux kernel, handling of hardware interrupts is split into two halves: the top half and the bottom half. +The top half is the routine that responds immediately to an interrupt, while the bottom half is deferred to be processed later. + +Softirqs are a mechanism in the Linux kernel used to handle the bottom halves of interrupts, which can be +deferred and processed later in a context where it's safe to enable interrupts. + +The actual work of handling the interrupt is offloaded to a softirq and executed later when the system +decides it's a good time to process them. 
This helps to keep the system responsive by not blocking the top +half for too long, which could lead to missed interrupts. + +Monitoring `/proc/softirqs` is useful for: + +- **Performance tuning**: A high rate of softirqs could indicate a performance issue. For instance, a high + rate of network softirqs (`NET_RX` and `NET_TX`) could indicate a network performance issue. + +- **Troubleshooting**: If a system is behaving unexpectedly, checking the softirqs could provide clues about + what is going on. For example, a sudden increase in block device softirqs (BLOCK) might indicate a problem + with a disk. + +- **Understanding system behavior**: Knowing what types of softirqs are happening can help you understand what + your system is doing, particularly in terms of how it's interacting with hardware and how it's handling + interrupts. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per SoftIRQ statistics instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.softirqs | a dimension per softirq | softirqs/s | + +### Per cpu core + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cpu | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cpu.softirqs | a dimension per softirq | softirqs/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/softnet_statistics.md b/collectors/proc.plugin/integrations/softnet_statistics.md new file mode 100644 index 00000000000000..58e6cf6e558568 --- /dev/null +++ b/collectors/proc.plugin/integrations/softnet_statistics.md @@ -0,0 +1,135 @@ + + +# Softnet Statistics + + + + + +Plugin: proc.plugin +Module: /proc/net/softnet_stat + + + +## Overview + +`/proc/net/softnet_stat` provides statistics that relate to the handling of network packets by softirq. + +It provides information about: + +- Total number of processed packets (`processed`). +- Times ksoftirq ran out of quota (`dropped`). +- Times net_rx_action was rescheduled. +- Number of times processed all lists before quota. +- Number of times did not process all lists due to quota. +- Number of times net_rx_action was rescheduled for GRO (Generic Receive Offload) cells. +- Number of times GRO cells were processed. + +Monitoring the /proc/net/softnet_stat file can be useful for: + +- **Network performance monitoring**: By tracking the total number of processed packets and how many packets + were dropped, you can gain insights into your system's network performance. 
+ +- **Troubleshooting**: If you're experiencing network-related issues, this collector can provide valuable clues. + For instance, a high number of dropped packets may indicate a network problem. + +- **Capacity planning**: If your system is consistently processing near its maximum capacity of network + packets, it might be time to consider upgrading your network infrastructure. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Softnet Statistics instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s | + +### Per cpu core + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cpu.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog | +| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/synproxy.md b/collectors/proc.plugin/integrations/synproxy.md new file mode 100644 index 00000000000000..2db17ef6fec1a8 --- /dev/null +++ b/collectors/proc.plugin/integrations/synproxy.md @@ -0,0 +1,97 @@ + + +# Synproxy + + + + + +Plugin: proc.plugin +Module: /proc/net/stat/synproxy + + + +## Overview + +This integration provides statistics about the Synproxy netfilter module. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. 
+ +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Synproxy instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| netfilter.synproxy_syn_received | received | packets/s | +| netfilter.synproxy_conn_reopened | reopened | connections/s | +| netfilter.synproxy_cookies | valid, invalid, retransmits | cookies/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/system_load_average.md b/collectors/proc.plugin/integrations/system_load_average.md new file mode 100644 index 00000000000000..6e986d90c4a2da --- /dev/null +++ b/collectors/proc.plugin/integrations/system_load_average.md @@ -0,0 +1,128 @@ + + +# System Load Average + + + + + +Plugin: proc.plugin +Module: /proc/loadavg + + + +## Overview + +The `/proc/loadavg` file provides information about the system load average. + +The load average is a measure of the amount of computational work that a system performs. It is a +representation of the average system load over a period of time. + +This file contains three numbers representing the system load averages for the last 1, 5, and 15 minutes, +respectively. It also includes the currently running processes and the total number of processes. + +Monitoring the load average can be used for: + +- **System performance**: If the load average is too high, it may indicate that your system is overloaded. + On a system with a single CPU, if the load average is 1, it means the single CPU is fully utilized. If the + load averages are consistently higher than the number of CPUs/cores, it may indicate that your system is + overloaded and tasks are waiting for CPU time. + +- **Troubleshooting**: If the load average is unexpectedly high, it can be a sign of a problem. This could be + due to a runaway process, a software bug, or a hardware issue. + +- **Capacity planning**: By monitoring the load average over time, you can understand the trends in your + system's workload. This can help with capacity planning and scaling decisions. + +Remember that load average not only considers CPU usage, but also includes processes waiting for disk I/O. +Therefore, high load averages could be due to I/O contention as well as CPU contention. + + + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per System Load Average instance + + + +This scope has no labels. 
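+
+For reference, the single line this collector parses can be viewed directly (illustrative output; your values will differ):
+
+```bash
+# Fields: 1-, 5- and 15-minute load averages, currently runnable/total
+# scheduling entities, and the PID of the most recently created process.
+cat /proc/loadavg
+# e.g. 0.42 0.35 0.30 2/531 14029
+```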
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.load | load1, load5, load15 | load | +| system.active_processes | active | processes | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | number of active CPU cores in the system | +| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system fifteen-minute load average | +| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system five-minute load average | +| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system one-minute load average | +| [ active_processes ](https://github.com/netdata/netdata/blob/master/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/system_statistics.md b/collectors/proc.plugin/integrations/system_statistics.md new file mode 100644 index 00000000000000..f3df1a19aa9c76 --- /dev/null +++ b/collectors/proc.plugin/integrations/system_statistics.md @@ -0,0 +1,169 @@ + + +# System statistics + + + + + +Plugin: proc.plugin +Module: /proc/stat + + + +## Overview + +CPU utilization, states and frequencies and key Linux system performance metrics. + +The `/proc/stat` file provides various types of system statistics: + +- The overall system CPU usage statistics +- Per CPU core statistics +- The total context switching of the system +- The total number of processes running +- The total CPU interrupts +- The total CPU softirqs + +The collector also reads: + +- `/proc/schedstat` for statistics about the process scheduler in the Linux kernel. +- `/sys/devices/system/cpu/[X]/thermal_throttle/core_throttle_count` to get the count of thermal throttling events for a specific CPU core on Linux systems. +- `/sys/devices/system/cpu/[X]/thermal_throttle/package_throttle_count` to get the count of thermal throttling events for a specific CPU package on a Linux system. +- `/sys/devices/system/cpu/[X]/cpufreq/scaling_cur_freq` to get the current operating frequency of a specific CPU core. +- `/sys/devices/system/cpu/[X]/cpufreq/stats/time_in_state` to get the amount of time the CPU has spent in each of its available frequency states. +- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/name` to get the names of the idle states for each CPU core in a Linux system. +- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/time` to get the total time each specific CPU core has spent in each idle state since the system was started. + + + + +This collector is only supported on the following platforms: + +- linux + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +The collector auto-detects all metrics. No configuration is needed. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. 
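+
+All of the sources listed in the Overview are plain-text procfs/sysfs files. A quick, illustrative way to look at the raw counters (the cpufreq/cpuidle paths exist only when the corresponding drivers are loaded on the system):
+
+```bash
+# Aggregate and per-core CPU time counters, in USER_HZ ticks:
+# user nice system idle iowait irq softirq steal guest guest_nice
+grep '^cpu' /proc/stat
+
+# Current frequency (kHz) and available idle states of the first core.
+cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq
+ls /sys/devices/system/cpu/cpu0/cpuidle/
+```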
+ +#### Performance Impact + +The collector disables cpu frequency and idle state monitoring when there are more than 128 CPU cores available. + + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per System statistics instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage | +| system.intr | interrupts | interrupts/s | +| system.ctxt | switches | context switches/s | +| system.forks | started | processes/s | +| system.processes | running, blocked | processes | +| cpu.core_throttling | a dimension per cpu core | events/s | +| cpu.package_throttling | a dimension per package | events/s | +| cpu.cpufreq | a dimension per cpu core | MHz | + +### Per cpu core + + + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cpu | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cpu.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage | +| cpuidle.cpu_cstate_residency_time | a dimension per c-state | percentage | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) | +| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes | +| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `plugin:proc:/proc/stat` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/system_uptime.md b/collectors/proc.plugin/integrations/system_uptime.md new file mode 100644 index 00000000000000..0954c0642ed658 --- /dev/null +++ b/collectors/proc.plugin/integrations/system_uptime.md @@ -0,0 +1,108 @@ + + +# System Uptime + + + + + +Plugin: proc.plugin +Module: /proc/uptime + + + +## Overview + +The amount of time the system has been up (running). + +Uptime is a critical aspect of overall system performance: + +- **Availability**: Uptime monitoring can show whether a server is consistently available or experiences frequent downtimes. 
+- **Performance Monitoring**: While server uptime alone doesn't provide detailed performance data, analyzing the duration and frequency of downtimes can help identify patterns or trends. +- **Proactive problem detection**: If server uptime monitoring reveals unexpected downtimes or a decreasing uptime trend, it can serve as an early warning sign of potential problems. +- **Root cause analysis**: When investigating server downtime, the uptime metric alone may not provide enough information to pinpoint the exact cause. +- **Load balancing**: Uptime data can indirectly indicate load balancing issues if certain servers have significantly lower uptimes than others. +- **Optimize maintenance efforts**: Servers with consistently low uptimes or frequent downtimes may require more attention. +- **Compliance requirements**: Server uptime data can be used to demonstrate compliance with regulatory requirements or SLAs that mandate a minimum level of server availability. + + + + +This collector is only supported on the following platforms: + +- linux + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per System Uptime instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.uptime | uptime | seconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/wireless_network_interfaces.md b/collectors/proc.plugin/integrations/wireless_network_interfaces.md new file mode 100644 index 00000000000000..a8d2406ee7b5a7 --- /dev/null +++ b/collectors/proc.plugin/integrations/wireless_network_interfaces.md @@ -0,0 +1,100 @@ + + +# Wireless network interfaces + + + + + +Plugin: proc.plugin +Module: /proc/net/wireless + + + +## Overview + +Monitor wireless devices with metrics about status, link quality, signal level, noise level and more. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per wireless device + + + +This scope has no labels. 
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| wireless.status | status | status |
+| wireless.link_quality | link_quality | value |
+| wireless.signal_level | signal_level | dBm |
+| wireless.noise_level | noise_level | dBm |
+| wireless.discarded_packets | nwid, crypt, frag, retry, misc | packets/s |
+| wireless.missed_beacons | missed_beacons | frames/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md b/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md
new file mode 100644
index 00000000000000..c200ba673bb0df
--- /dev/null
+++ b/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md
@@ -0,0 +1,125 @@
+
+
+# ZFS Adaptive Replacement Cache
+
+
+
+
+
+Plugin: proc.plugin
+Module: /proc/spl/kstat/zfs/arcstats
+
+
+
+## Overview
+
+This integration monitors ZFS Adaptive Replacement Cache (ARC) statistics.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per ZFS Adaptive Replacement Cache instance
+
+
+
+This scope has no labels.
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| zfs.arc_size | arcsz, target, min, max | MiB | +| zfs.l2_size | actual, size | MiB | +| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s | +| zfs.bytes | read, write | KiB/s | +| zfs.hits | hits, misses | percentage | +| zfs.hits_rate | hits, misses | events/s | +| zfs.dhits | hits, misses | percentage | +| zfs.dhits_rate | hits, misses | events/s | +| zfs.phits | hits, misses | percentage | +| zfs.phits_rate | hits, misses | events/s | +| zfs.mhits | hits, misses | percentage | +| zfs.mhits_rate | hits, misses | events/s | +| zfs.l2hits | hits, misses | percentage | +| zfs.l2hits_rate | hits, misses | events/s | +| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s | +| zfs.arc_size_breakdown | recent, frequent | percentage | +| zfs.memory_ops | direct, throttled, indirect | operations/s | +| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s | +| zfs.actual_hits | hits, misses | percentage | +| zfs.actual_hits_rate | hits, misses | events/s | +| zfs.demand_data_hits | hits, misses | percentage | +| zfs.demand_data_hits_rate | hits, misses | events/s | +| zfs.prefetch_data_hits | hits, misses | percentage | +| zfs.prefetch_data_hits_rate | hits, misses | events/s | +| zfs.hash_elements | current, max | elements | +| zfs.hash_chains | current, max | chains | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + diff --git a/collectors/proc.plugin/integrations/zfs_pools.md b/collectors/proc.plugin/integrations/zfs_pools.md new file mode 100644 index 00000000000000..2985d39b066f02 --- /dev/null +++ b/collectors/proc.plugin/integrations/zfs_pools.md @@ -0,0 +1,105 @@ + + +# ZFS Pools + + + + + +Plugin: proc.plugin +Module: /proc/spl/kstat/zfs + + + +## Overview + +This integration provides metrics about the state of ZFS pools. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. 
+
+
+
+### Per zfs pool
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| pool | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| zfspool.state | online, degraded, faulted, offline, removed, unavail, suspended | boolean |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ zfs_pool_state_warn ](https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is degraded |
+| [ zfs_pool_state_crit ](https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is faulted or unavail |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/zram.md b/collectors/proc.plugin/integrations/zram.md
new file mode 100644
index 00000000000000..111b17c6283c4f
--- /dev/null
+++ b/collectors/proc.plugin/integrations/zram.md
@@ -0,0 +1,106 @@
+
+
+# ZRAM
+
+
+
+
+
+Plugin: proc.plugin
+Module: /sys/block/zram
+
+
+
+## Overview
+
+zRAM, or compressed RAM, is a block device backed by a portion of your system's RAM.
+The data written to this block device is compressed and stored in memory.
+
+The collector provides information about the operation and effectiveness of zRAM on your system.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per zram device
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| device | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mem.zram_usage | compressed, metadata | MiB |
+| mem.zram_savings | savings, original | MiB |
+| mem.zram_ratio | ratio | ratio |
+| mem.zram_efficiency | percent | percentage |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
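+
+For reference, the per-device counters behind the usage, savings and ratio charts are exposed by the kernel in sysfs. A minimal illustration, assuming a device named `zram0` and a kernel recent enough to provide `mm_stat`:
+
+```bash
+# mm_stat columns (sizes in bytes): orig_data_size compr_data_size
+# mem_used_total mem_limit mem_used_max same_pages pages_compacted [huge_pages]
+cat /sys/block/zram0/mm_stat
+
+# Configured disk size of the device, in bytes.
+cat /sys/block/zram0/disksize
+```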
+ + diff --git a/collectors/proc.plugin/ipc.c b/collectors/proc.plugin/ipc.c index b166deba68515a..204977bdf42c70 100644 --- a/collectors/proc.plugin/ipc.c +++ b/collectors/proc.plugin/ipc.c @@ -451,8 +451,8 @@ int do_ipc(int update_every, usec_t dt) { msq->found = 0; } else { - rrddim_is_obsolete(st_msq_messages, msq->rd_messages); - rrddim_is_obsolete(st_msq_bytes, msq->rd_bytes); + rrddim_is_obsolete___safe_from_collector_thread(st_msq_messages, msq->rd_messages); + rrddim_is_obsolete___safe_from_collector_thread(st_msq_bytes, msq->rd_bytes); // remove message queue from the linked list if(!msq_prev) @@ -480,19 +480,19 @@ int do_ipc(int update_every, usec_t dt) { if(unlikely(dimensions_num > dimensions_limit)) { collector_info("Message queue statistics has been disabled"); collector_info("There are %lld dimensions in memory but limit was set to %lld", dimensions_num, dimensions_limit); - rrdset_is_obsolete(st_msq_messages); - rrdset_is_obsolete(st_msq_bytes); + rrdset_is_obsolete___safe_from_collector_thread(st_msq_messages); + rrdset_is_obsolete___safe_from_collector_thread(st_msq_bytes); st_msq_messages = NULL; st_msq_bytes = NULL; do_msg = CONFIG_BOOLEAN_NO; } else if(unlikely(!message_queue_root)) { collector_info("Making chart %s (%s) obsolete since it does not have any dimensions", rrdset_name(st_msq_messages), rrdset_id(st_msq_messages)); - rrdset_is_obsolete(st_msq_messages); + rrdset_is_obsolete___safe_from_collector_thread(st_msq_messages); st_msq_messages = NULL; collector_info("Making chart %s (%s) obsolete since it does not have any dimensions", rrdset_name(st_msq_bytes), rrdset_id(st_msq_bytes)); - rrdset_is_obsolete(st_msq_bytes); + rrdset_is_obsolete___safe_from_collector_thread(st_msq_bytes); st_msq_bytes = NULL; } } diff --git a/collectors/proc.plugin/metadata.yaml b/collectors/proc.plugin/metadata.yaml index 81d83f50e5a8b9..45351b36f458ba 100644 --- a/collectors/proc.plugin/metadata.yaml +++ b/collectors/proc.plugin/metadata.yaml @@ -2643,22 +2643,22 @@ modules: os: "linux" - name: inbound_packets_dropped_ratio link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf - metric: net.packets + metric: net.drops info: ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes os: "linux" - name: outbound_packets_dropped_ratio link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf - metric: net.packets + metric: net.drops info: ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes os: "linux" - name: wifi_inbound_packets_dropped_ratio link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf - metric: net.packets + metric: net.drops info: ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes os: "linux" - name: wifi_outbound_packets_dropped_ratio link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf - metric: net.packets + metric: net.drops info: ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes os: "linux" - name: 1m_received_packets_rate @@ -2669,20 +2669,8 @@ modules: - name: 10s_received_packets_storm link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf metric: net.packets - info: - ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over - the last minute + info: ratio of average number of 
received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute os: "linux freebsd" - - name: inbound_packets_dropped - link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf - metric: net.drops - info: number of inbound dropped packets for the network interface ${label:device} in the last 10 minutes - os: "linux" - - name: outbound_packets_dropped - link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf - metric: net.drops - info: number of outbound dropped packets for the network interface ${label:device} in the last 10 minutes - os: "linux" - name: 10min_fifo_errors link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf metric: net.fifo @@ -3140,29 +3128,29 @@ modules: os: "linux" - name: tcp_connections link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_conn.conf - metric: ipv4.tcpsock - info: IPv4 TCP connections utilization + metric: ip.tcpsock + info: TCP connections utilization os: "linux" - - name: 1m_ipv4_tcp_resets_sent + - name: 1m_ip_tcp_resets_sent link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf - metric: ipv4.tcphandshake + metric: ip.tcphandshake info: average number of sent TCP RESETS over the last minute os: "linux" - - name: 10s_ipv4_tcp_resets_sent + - name: 10s_ip_tcp_resets_sent link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf - metric: ipv4.tcphandshake + metric: ip.tcphandshake info: average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. os: "linux" - - name: 1m_ipv4_tcp_resets_received + - name: 1m_ip_tcp_resets_received link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf - metric: ipv4.tcphandshake + metric: ip.tcphandshake info: average number of received TCP RESETS over the last minute os: "linux freebsd" - - name: 10s_ipv4_tcp_resets_received + - name: 10s_ip_tcp_resets_received link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf - metric: ipv4.tcphandshake + metric: ip.tcphandshake info: average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. 
@@ -3189,57 +3177,12 @@ modules: labels: [] metrics: - name: system.ip - description: IP Bandwidth + description: IPv4 Bandwidth unit: "kilobits/s" chart_type: area dimensions: - name: received - name: sent - - name: ip.inerrors - description: IP Input Errors - unit: "packets/s" - chart_type: line - dimensions: - - name: noroutes - - name: truncated - - name: checksum - - name: ip.mcast - description: IP Multicast Bandwidth - unit: "kilobits/s" - chart_type: area - dimensions: - - name: received - - name: sent - - name: ip.bcast - description: IP Broadcast Bandwidth - unit: "kilobits/s" - chart_type: area - dimensions: - - name: received - - name: sent - - name: ip.mcastpkts - description: IP Multicast Packets - unit: "packets/s" - chart_type: line - dimensions: - - name: received - - name: sent - - name: ip.bcastpkts - description: IP Broadcast Packets - unit: "packets/s" - chart_type: line - dimensions: - - name: received - - name: sent - - name: ip.ecnpkts - description: IP ECN Statistics - unit: "packets/s" - chart_type: line - dimensions: - - name: CEP - - name: NoECTP - - name: ECTP0 - - name: ECTP1 - name: ip.tcpmemorypressures description: TCP Memory Pressures unit: "events/s" @@ -3297,31 +3240,52 @@ modules: dimensions: - name: overflows - name: drops - - name: ipv4.packets - description: IPv4 Packets + - name: ip.tcpsock + description: IPv4 TCP Connections + unit: "active connections" + chart_type: line + dimensions: + - name: connections + - name: ip.tcppackets + description: IPv4 TCP Packets unit: "packets/s" chart_type: line dimensions: - name: received - name: sent - - name: forwarded - - name: delivered - - name: ipv4.fragsout - description: IPv4 Fragments Sent + - name: ip.tcperrors + description: IPv4 TCP Errors unit: "packets/s" chart_type: line dimensions: - - name: ok - - name: failed - - name: created - - name: ipv4.fragsin - description: IPv4 Fragments Reassembly + - name: InErrs + - name: InCsumErrors + - name: RetransSegs + - name: ip.tcpopens + description: IPv4 TCP Opens + unit: "connections/s" + chart_type: line + dimensions: + - name: active + - name: passive + - name: ip.tcphandshake + description: IPv4 TCP Handshake Issues + unit: "events/s" + chart_type: line + dimensions: + - name: EstabResets + - name: OutRsts + - name: AttemptFails + - name: SynRetrans + - name: ipv4.packets + description: IPv4 Packets unit: "packets/s" chart_type: line dimensions: - - name: ok - - name: failed - - name: all + - name: received + - name: sent + - name: forwarded + - name: delivered - name: ipv4.errors description: IPv4 Errors unit: "packets/s" @@ -3329,25 +3293,47 @@ modules: dimensions: - name: InDiscards - name: OutDiscards - - name: InHdrErrors + - name: InNoRoutes - name: OutNoRoutes + - name: InHdrErrors - name: InAddrErrors - - name: InUnknownProtos - - name: ipv4.icmp - description: IPv4 ICMP Packets + - name: InTruncatedPkts + - name: InCsumErrors + - name: ipc4.bcast + description: IP Broadcast Bandwidth + unit: "kilobits/s" + chart_type: area + dimensions: + - name: received + - name: sent + - name: ipv4.bcastpkts + description: IP Broadcast Packets unit: "packets/s" chart_type: line dimensions: - name: received - name: sent - - name: ipv4.icmp_errors - description: IPv4 ICMP Errors + - name: ipv4.mcast + description: IPv4 Multicast Bandwidth + unit: "kilobits/s" + chart_type: area + dimensions: + - name: received + - name: sent + - name: ipv4.mcastpkts + description: IP Multicast Packets unit: "packets/s" chart_type: line dimensions: - - name: InErrors - - name: 
OutErrors - - name: InCsumErrors + - name: received + - name: sent + - name: ipv4.icmp + description: IPv4 ICMP Packets + unit: "packets/s" + chart_type: line + dimensions: + - name: received + - name: sent - name: ipv4.icmpmsg description: IPv4 ICMP Messages unit: "packets/s" @@ -3373,43 +3359,14 @@ modules: - name: OutTimestamps - name: InTimestampReps - name: OutTimestampReps - - name: ipv4.tcpsock - description: IPv4 TCP Connections - unit: "active connections" - chart_type: line - dimensions: - - name: connections - - name: ipv4.tcppackets - description: IPv4 TCP Packets - unit: "packets/s" - chart_type: line - dimensions: - - name: received - - name: sent - - name: ipv4.tcperrors - description: IPv4 TCP Errors + - name: ipv4.icmp_errors + description: IPv4 ICMP Errors unit: "packets/s" chart_type: line dimensions: - - name: InErrs + - name: InErrors + - name: OutErrors - name: InCsumErrors - - name: RetransSegs - - name: ipv4.tcpopens - description: IPv4 TCP Opens - unit: "connections/s" - chart_type: line - dimensions: - - name: active - - name: passive - - name: ipv4.tcphandshake - description: IPv4 TCP Handshake Issues - unit: "events/s" - chart_type: line - dimensions: - - name: EstabResets - - name: OutRsts - - name: AttemptFails - - name: SynRetrans - name: ipv4.udppackets description: IPv4 UDP Packets unit: "packets/s" @@ -3446,6 +3403,31 @@ modules: - name: NoPorts - name: InCsumErrors - name: IgnoredMulti + - name: ipv4.ecnpkts + description: IP ECN Statistics + unit: "packets/s" + chart_type: line + dimensions: + - name: CEP + - name: NoECTP + - name: ECTP0 + - name: ECTP1 + - name: ipv4.fragsin + description: IPv4 Fragments Reassembly + unit: "packets/s" + chart_type: line + dimensions: + - name: ok + - name: failed + - name: all + - name: ipv4.fragsout + description: IPv4 Fragments Sent + unit: "packets/s" + chart_type: line + dimensions: + - name: ok + - name: failed + - name: created - name: system.ipv6 description: IPv6 Bandwidth unit: "kilobits/s" @@ -3453,7 +3435,7 @@ modules: dimensions: - name: received - name: sent - - name: system.ipv6 + - name: ipv6.packets description: IPv6 Packets unit: "packets/s" chart_type: line @@ -3462,23 +3444,6 @@ modules: - name: sent - name: forwarded - name: delivers - - name: ipv6.fragsout - description: IPv6 Fragments Sent - unit: "packets/s" - chart_type: line - dimensions: - - name: ok - - name: failed - - name: all - - name: ipv6.fragsin - description: IPv6 Fragments Reassembly - unit: "packets/s" - chart_type: line - dimensions: - - name: ok - - name: failed - - name: timeout - - name: all - name: ipv6.errors description: IPv6 Errors unit: "packets/s" @@ -3493,6 +3458,27 @@ modules: - name: InTruncatedPkts - name: InNoRoutes - name: OutNoRoutes + - name: ipv6.bcast + description: IPv6 Broadcast Bandwidth + unit: "kilobits/s" + chart_type: area + dimensions: + - name: received + - name: sent + - name: ipv6.mcast + description: IPv6 Multicast Bandwidth + unit: "kilobits/s" + chart_type: area + dimensions: + - name: received + - name: sent + - name: ipv6.mcastpkts + description: IPv6 Multicast Packets + unit: "packets/s" + chart_type: line + dimensions: + - name: received + - name: sent - name: ipv6.udppackets description: IPv6 UDP Packets unit: "packets/s" @@ -3528,27 +3514,6 @@ modules: - name: InErrors - name: NoPorts - name: InCsumErrors - - name: ipv6.mcast - description: IPv6 Multicast Bandwidth - unit: "kilobits/s" - chart_type: area - dimensions: - - name: received - - name: sent - - name: ipv6.bcast - description: IPv6 
Broadcast Bandwidth - unit: "kilobits/s" - chart_type: area - dimensions: - - name: received - - name: sent - - name: ipv6.mcastpkts - description: IPv6 Multicast Packets - unit: "packets/s" - chart_type: line - dimensions: - - name: received - - name: sent - name: ipv6.icmp description: IPv6 ICMP Messages unit: "messages/s" @@ -3657,6 +3622,23 @@ modules: - name: InECT1Pkts - name: InECT0Pkts - name: InCEPkts + - name: ipv6.fragsin + description: IPv6 Fragments Reassembly + unit: "packets/s" + chart_type: line + dimensions: + - name: ok + - name: failed + - name: timeout + - name: all + - name: ipv6.fragsout + description: IPv6 Fragments Sent + unit: "packets/s" + chart_type: line + dimensions: + - name: ok + - name: failed + - name: all - meta: plugin_name: proc.plugin module_name: /proc/net/sockstat @@ -3734,8 +3716,8 @@ modules: description: "" labels: [] metrics: - - name: ipv4.sockstat_sockets - description: IPv4 Sockets Used + - name: ip.sockstat_sockets + description: Sockets used for all address families unit: "sockets" chart_type: line dimensions: diff --git a/collectors/proc.plugin/plugin_proc.c b/collectors/proc.plugin/plugin_proc.c index fbcaa614a51cd1..3f11aaf6c956ca 100644 --- a/collectors/proc.plugin/plugin_proc.c +++ b/collectors/proc.plugin/plugin_proc.c @@ -138,10 +138,18 @@ static bool is_lxcfs_proc_mounted() { return false; } +static bool log_proc_module(BUFFER *wb, void *data) { + struct proc_module *pm = data; + buffer_sprintf(wb, "proc.plugin[%s]", pm->name); + return true; +} + void *proc_main(void *ptr) { worker_register("PROC"); + rrd_collector_started(); + if (config_get_boolean("plugin:proc", "/proc/net/dev", CONFIG_BOOLEAN_YES)) { netdev_thread = mallocz(sizeof(netdata_thread_t)); netdata_log_debug(D_SYSTEM, "Starting thread %s.", THREAD_NETDEV_NAME); @@ -151,46 +159,56 @@ void *proc_main(void *ptr) netdata_thread_cleanup_push(proc_main_cleanup, ptr); - config_get_boolean("plugin:proc", "/proc/pagetypeinfo", CONFIG_BOOLEAN_NO); + { + config_get_boolean("plugin:proc", "/proc/pagetypeinfo", CONFIG_BOOLEAN_NO); - // check the enabled status for each module - int i; - for (i = 0; proc_modules[i].name; i++) { - struct proc_module *pm = &proc_modules[i]; + // check the enabled status for each module + int i; + for(i = 0; proc_modules[i].name; i++) { + struct proc_module *pm = &proc_modules[i]; - pm->enabled = config_get_boolean("plugin:proc", pm->name, CONFIG_BOOLEAN_YES); - pm->rd = NULL; + pm->enabled = config_get_boolean("plugin:proc", pm->name, CONFIG_BOOLEAN_YES); + pm->rd = NULL; - worker_register_job_name(i, proc_modules[i].dim); - } + worker_register_job_name(i, proc_modules[i].dim); + } - usec_t step = localhost->rrd_update_every * USEC_PER_SEC; - heartbeat_t hb; - heartbeat_init(&hb); + usec_t step = localhost->rrd_update_every * USEC_PER_SEC; + heartbeat_t hb; + heartbeat_init(&hb); - inside_lxc_container = is_lxcfs_proc_mounted(); + inside_lxc_container = is_lxcfs_proc_mounted(); - while (service_running(SERVICE_COLLECTORS)) { - worker_is_idle(); - usec_t hb_dt = heartbeat_next(&hb, step); +#define LGS_MODULE_ID 0 - if (unlikely(!service_running(SERVICE_COLLECTORS))) - break; + ND_LOG_STACK lgs[] = { + [LGS_MODULE_ID] = ND_LOG_FIELD_TXT(NDF_MODULE, "proc.plugin"), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); - for (i = 0; proc_modules[i].name; i++) { - if (unlikely(!service_running(SERVICE_COLLECTORS))) - break; + while(service_running(SERVICE_COLLECTORS)) { + worker_is_idle(); + usec_t hb_dt = heartbeat_next(&hb, step); - struct proc_module *pm 
= &proc_modules[i]; - if (unlikely(!pm->enabled)) - continue; + if(unlikely(!service_running(SERVICE_COLLECTORS))) + break; - netdata_log_debug(D_PROCNETDEV_LOOP, "PROC calling %s.", pm->name); + for(i = 0; proc_modules[i].name; i++) { + if(unlikely(!service_running(SERVICE_COLLECTORS))) + break; - worker_is_busy(i); - pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt); - } - } + struct proc_module *pm = &proc_modules[i]; + if(unlikely(!pm->enabled)) + continue; + + worker_is_busy(i); + lgs[LGS_MODULE_ID] = ND_LOG_FIELD_CB(NDF_MODULE, log_proc_module, pm); + pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt); + lgs[LGS_MODULE_ID] = ND_LOG_FIELD_TXT(NDF_MODULE, "proc.plugin"); + } + } + } netdata_thread_cleanup_pop(1); return NULL; diff --git a/collectors/proc.plugin/plugin_proc.h b/collectors/proc.plugin/plugin_proc.h index a90f4838e93c06..e4fc105bac4949 100644 --- a/collectors/proc.plugin/plugin_proc.h +++ b/collectors/proc.plugin/plugin_proc.h @@ -58,8 +58,9 @@ void netdev_rename_device_add( const char *host_device, const char *container_device, const char *container_name, - DICTIONARY *labels, - const char *ctx_prefix); + RRDLABELS *labels, + const char *ctx_prefix, + const DICTIONARY_ITEM *cgroup_netdev_link); void netdev_rename_device_del(const char *host_device); diff --git a/collectors/proc.plugin/proc_diskstats.c b/collectors/proc.plugin/proc_diskstats.c index 359fa9a810eb42..475d90835f5a02 100644 --- a/collectors/proc.plugin/proc_diskstats.c +++ b/collectors/proc.plugin/proc_diskstats.c @@ -6,6 +6,8 @@ #define PLUGIN_PROC_MODULE_DISKSTATS_NAME "/proc/diskstats" #define CONFIG_SECTION_PLUGIN_PROC_DISKSTATS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_DISKSTATS_NAME +#define RRDFUNCTIONS_DISKSTATS_HELP "View block device statistics" + #define DISK_TYPE_UNKNOWN 0 #define DISK_TYPE_PHYSICAL 1 #define DISK_TYPE_PARTITION 2 @@ -14,15 +16,25 @@ #define DEFAULT_PREFERRED_IDS "*" #define DEFAULT_EXCLUDED_DISKS "loop* ram*" +static netdata_mutex_t diskstats_dev_mutex = NETDATA_MUTEX_INITIALIZER; + static struct disk { char *disk; // the name of the disk (sda, sdb, etc, after being looked up) char *device; // the device of the disk (before being looked up) + char *disk_by_id; + char *model; + char *serial; +// bool rotational; +// bool removable; uint32_t hash; unsigned long major; unsigned long minor; int sector_size; int type; + bool excluded; + bool function_ready; + char *mount_point; char *chart_id; @@ -163,7 +175,7 @@ static struct disk { struct disk *next; } *disk_root = NULL; -#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(st) +#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete___safe_from_collector_thread(st); (st) = NULL; } } while(st) // static char *path_to_get_hw_sector_size = NULL; // static char *path_to_get_hw_sector_size_partitions = NULL; @@ -172,6 +184,8 @@ static char *path_to_sys_block_device = NULL; static char *path_to_sys_block_device_bcache = NULL; static char *path_to_sys_devices_virtual_block_device = NULL; static char *path_to_device_mapper = NULL; +static char *path_to_dev_disk = NULL; +static char *path_to_sys_block = NULL; static char *path_to_device_label = NULL; static char *path_to_device_id = NULL; static char *path_to_veritas_volume_groups = NULL; @@ -352,7 +366,10 @@ static inline int get_disk_name_from_path(const char *path, char *result, size_t DIR *dir = opendir(path); if (!dir) { - collector_error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot 
open directory '%s'.", disk, major, minor, path); + if (errno == ENOENT) + nd_log_collector(NDLP_DEBUG, "DEVICE-MAPPER ('%s', %lu:%lu): Cannot open directory '%s': no such file or directory.", disk, major, minor, path); + else + collector_error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot open directory '%s'.", disk, major, minor, path); goto failed; } @@ -469,22 +486,127 @@ static inline char *get_disk_name(unsigned long major, unsigned long minor, char return strdup(result); } +static inline bool ends_with(const char *str, const char *suffix) { + if (!str || !suffix) + return false; + + size_t len_str = strlen(str); + size_t len_suffix = strlen(suffix); + if (len_suffix > len_str) + return false; + + return strncmp(str + len_str - len_suffix, suffix, len_suffix) == 0; +} + +static inline char *get_disk_by_id(char *device) { + char pathname[256 + 1]; + snprintfz(pathname, sizeof(pathname) - 1, "%s/by-id", path_to_dev_disk); + + struct dirent *entry; + DIR *dp = opendir(pathname); + if (dp == NULL) { + internal_error(true, "Cannot open '%s'", pathname); + return NULL; + } + + while ((entry = readdir(dp))) { + // We ignore the '.' and '..' entries + if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) + continue; + + if(strncmp(entry->d_name, "md-uuid-", 8) == 0 || + strncmp(entry->d_name, "dm-uuid-", 8) == 0 || + strncmp(entry->d_name, "nvme-eui.", 9) == 0 || + strncmp(entry->d_name, "wwn-", 4) == 0 || + strncmp(entry->d_name, "lvm-pv-uuid-", 12) == 0) + continue; + + char link_target[256 + 1]; + char full_path[256 + 1]; + snprintfz(full_path, 256, "%s/%s", pathname, entry->d_name); + + ssize_t len = readlink(full_path, link_target, 256); + if (len == -1) + continue; + + link_target[len] = '\0'; + + if (ends_with(link_target, device)) { + char *s = strdupz(entry->d_name); + closedir(dp); + return s; + } + } + + closedir(dp); + return NULL; +} + +static inline char *get_disk_model(char *device) { + char path[256 + 1]; + char buffer[256 + 1]; + + snprintfz(path, sizeof(path) - 1, "%s/%s/device/model", path_to_sys_block, device); + if(read_file(path, buffer, 256) != 0) { + snprintfz(path, sizeof(path) - 1, "%s/%s/device/name", path_to_sys_block, device); + if(read_file(path, buffer, 256) != 0) + return NULL; + } + + char *clean = trim(buffer); + if (!clean) + return NULL; + + return strdupz(clean); +} + +static inline char *get_disk_serial(char *device) { + char path[256 + 1]; + char buffer[256 + 1]; + + snprintfz(path, sizeof(path) - 1, "%s/%s/device/serial", path_to_sys_block, device); + if(read_file(path, buffer, 256) != 0) + return NULL; + + return strdupz(buffer); +} + +//static inline bool get_disk_rotational(char *device) { +// char path[256 + 1]; +// char buffer[256 + 1]; +// +// snprintfz(path, 256, "%s/%s/queue/rotational", path_to_sys_block, device); +// if(read_file(path, buffer, 256) != 0) +// return false; +// +// return buffer[0] == '1'; +//} +// +//static inline bool get_disk_removable(char *device) { +// char path[256 + 1]; +// char buffer[256 + 1]; +// +// snprintfz(path, 256, "%s/%s/removable", path_to_sys_block, device); +// if(read_file(path, buffer, 256) != 0) +// return false; +// +// return buffer[0] == '1'; +//} + static void get_disk_config(struct disk *d) { int def_enable = global_enable_new_disks_detected_at_runtime; - if(def_enable != CONFIG_BOOLEAN_NO && (simple_pattern_matches(excluded_disks, d->device) || simple_pattern_matches(excluded_disks, d->disk))) + if(def_enable != CONFIG_BOOLEAN_NO && (simple_pattern_matches(excluded_disks, d->device) || 
simple_pattern_matches(excluded_disks, d->disk))) { + d->excluded = true; def_enable = CONFIG_BOOLEAN_NO; -#ifdef NETDATA_SKIP_IF_NOT_COLLECT - if(!def_enable) { - netdata_log_debug(D_COLLECTOR, "DISKSTAT: Skipping device: %s, disk: %s because it is excluded by configuration.", d->device, d->disk); - return; } -#endif char var_name[4096 + 1]; snprintfz(var_name, 4096, CONFIG_SECTION_PLUGIN_PROC_DISKSTATS ":%s", d->disk); - def_enable = config_get_boolean_ondemand(var_name, "enable", def_enable); + if (config_exists(var_name, "enable")) + def_enable = config_get_boolean_ondemand(var_name, "enable", def_enable); + if(unlikely(def_enable == CONFIG_BOOLEAN_NO)) { // the user does not want any metrics for this disk d->do_io = CONFIG_BOOLEAN_NO; @@ -536,7 +658,8 @@ static void get_disk_config(struct disk *d) { // def_performance // check the user configuration (this will also show our 'on demand' decision) - def_performance = config_get_boolean_ondemand(var_name, "enable performance metrics", def_performance); + if (config_exists(var_name, "enable performance metrics")) + def_performance = config_get_boolean_ondemand(var_name, "enable performance metrics", def_performance); int ddo_io = CONFIG_BOOLEAN_NO, ddo_ops = CONFIG_BOOLEAN_NO, @@ -559,21 +682,44 @@ static void get_disk_config(struct disk *d) { ddo_ext = global_do_ext, ddo_backlog = global_do_backlog, ddo_bcache = global_do_bcache; + } else { + d->excluded = true; } - d->do_io = config_get_boolean_ondemand(var_name, "bandwidth", ddo_io); - d->do_ops = config_get_boolean_ondemand(var_name, "operations", ddo_ops); - d->do_mops = config_get_boolean_ondemand(var_name, "merged operations", ddo_mops); - d->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", ddo_iotime); - d->do_qops = config_get_boolean_ondemand(var_name, "queued operations", ddo_qops); - d->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", ddo_util); - d->do_ext = config_get_boolean_ondemand(var_name, "extended operations", ddo_ext); - d->do_backlog = config_get_boolean_ondemand(var_name, "backlog", ddo_backlog); - - if(d->device_is_bcache) - d->do_bcache = config_get_boolean_ondemand(var_name, "bcache", ddo_bcache); - else + d->do_io = ddo_io; + d->do_ops = ddo_ops; + d->do_mops = ddo_mops; + d->do_iotime = ddo_iotime; + d->do_qops = ddo_qops; + d->do_util = ddo_util; + d->do_ext = ddo_ext; + d->do_backlog = ddo_backlog; + + if (config_exists(var_name, "bandwidth")) + d->do_io = config_get_boolean_ondemand(var_name, "bandwidth", ddo_io); + if (config_exists(var_name, "operations")) + d->do_ops = config_get_boolean_ondemand(var_name, "operations", ddo_ops); + if (config_exists(var_name, "merged operations")) + d->do_mops = config_get_boolean_ondemand(var_name, "merged operations", ddo_mops); + if (config_exists(var_name, "i/o time")) + d->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", ddo_iotime); + if (config_exists(var_name, "queued operations")) + d->do_qops = config_get_boolean_ondemand(var_name, "queued operations", ddo_qops); + if (config_exists(var_name, "utilization percentage")) + d->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", ddo_util); + if (config_exists(var_name, "extended operations")) + d->do_ext = config_get_boolean_ondemand(var_name, "extended operations", ddo_ext); + if (config_exists(var_name, "backlog")) + d->do_backlog = config_get_boolean_ondemand(var_name, "backlog", ddo_backlog); + + d->do_bcache = ddo_bcache; + + if (d->device_is_bcache) { + if (config_exists(var_name, 
"bcache")) + d->do_bcache = config_get_boolean_ondemand(var_name, "bcache", ddo_bcache); + } else { d->do_bcache = 0; + } } } @@ -598,8 +744,15 @@ static struct disk *get_disk(unsigned long major, unsigned long minor, char *dis // create a new disk structure d = (struct disk *)callocz(1, sizeof(struct disk)); + d->excluded = false; + d->function_ready = false; d->disk = get_disk_name(major, minor, disk); d->device = strdupz(disk); + d->disk_by_id = get_disk_by_id(disk); + d->model = get_disk_model(disk); + d->serial = get_disk_serial(disk); +// d->rotational = get_disk_rotational(disk); +// d->removable = get_disk_removable(disk); d->hash = simple_hash(d->device); d->major = major; d->minor = minor; @@ -854,30 +1007,399 @@ static struct disk *get_disk(unsigned long major, unsigned long minor, char *dis } get_disk_config(d); + return d; } +static const char *get_disk_type_string(int disk_type) { + switch (disk_type) { + case DISK_TYPE_PHYSICAL: + return "physical"; + case DISK_TYPE_PARTITION: + return "partition"; + case DISK_TYPE_VIRTUAL: + return "virtual"; + default: + return "unknown"; + } +} + static void add_labels_to_disk(struct disk *d, RRDSET *st) { rrdlabels_add(st->rrdlabels, "device", d->disk, RRDLABEL_SRC_AUTO); rrdlabels_add(st->rrdlabels, "mount_point", d->mount_point, RRDLABEL_SRC_AUTO); + rrdlabels_add(st->rrdlabels, "id", d->disk_by_id, RRDLABEL_SRC_AUTO); + rrdlabels_add(st->rrdlabels, "model", d->model, RRDLABEL_SRC_AUTO); + rrdlabels_add(st->rrdlabels, "serial", d->serial, RRDLABEL_SRC_AUTO); + rrdlabels_add(st->rrdlabels, "device_type", get_disk_type_string(d->type), RRDLABEL_SRC_AUTO); +} - switch (d->type) { - default: - case DISK_TYPE_UNKNOWN: - rrdlabels_add(st->rrdlabels, "device_type", "unknown", RRDLABEL_SRC_AUTO); - break; +static int diskstats_function_block_devices(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused, + void *collector_data __maybe_unused, + rrd_function_result_callback_t result_cb, void *result_cb_data, + rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, + rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused, + void *register_canceller_cb_data __maybe_unused) { + + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + + buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost)); + buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_string(wb, "type", "table"); + buffer_json_member_add_time_t(wb, "update_every", 1); + buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_DISKSTATS_HELP); + buffer_json_member_add_array(wb, "data"); + + double max_io_reads = 0.0; + double max_io_writes = 0.0; + double max_io = 0.0; + double max_backlog_time = 0.0; + double max_busy_time = 0.0; + double max_busy_perc = 0.0; + double max_iops_reads = 0.0; + double max_iops_writes = 0.0; + double max_iops_time_reads = 0.0; + double max_iops_time_writes = 0.0; + double max_iops_avg_time_read = 0.0; + double max_iops_avg_time_write = 0.0; + double max_iops_avg_size_read = 0.0; + double max_iops_avg_size_write = 0.0; + + netdata_mutex_lock(&diskstats_dev_mutex); + + for (struct disk *d = disk_root; d; d = d->next) { + if (unlikely(!d->function_ready)) + continue; - case DISK_TYPE_PHYSICAL: - rrdlabels_add(st->rrdlabels, "device_type", "physical", RRDLABEL_SRC_AUTO); - break; + buffer_json_add_array_item_array(wb); + + buffer_json_add_array_item_string(wb, 
d->device); + buffer_json_add_array_item_string(wb, get_disk_type_string(d->type)); + buffer_json_add_array_item_string(wb, d->disk_by_id); + buffer_json_add_array_item_string(wb, d->model); + buffer_json_add_array_item_string(wb, d->serial); + + // IO + double io_reads = rrddim_get_last_stored_value(d->rd_io_reads, &max_io_reads, 1024.0); + double io_writes = rrddim_get_last_stored_value(d->rd_io_writes, &max_io_writes, 1024.0); + double io_total = NAN; + if (!isnan(io_reads) && !isnan(io_writes)) { + io_total = io_reads + io_writes; + max_io = MAX(max_io, io_total); + } + // Backlog and Busy Time + double busy_perc = rrddim_get_last_stored_value(d->rd_util_utilization, &max_busy_perc, 1); + double busy_time = rrddim_get_last_stored_value(d->rd_busy_busy, &max_busy_time, 1); + double backlog_time = rrddim_get_last_stored_value(d->rd_backlog_backlog, &max_backlog_time, 1); + // IOPS + double iops_reads = rrddim_get_last_stored_value(d->rd_ops_reads, &max_iops_reads, 1); + double iops_writes = rrddim_get_last_stored_value(d->rd_ops_writes, &max_iops_writes, 1); + // IO Time + double iops_time_reads = rrddim_get_last_stored_value(d->rd_iotime_reads, &max_iops_time_reads, 1); + double iops_time_writes = rrddim_get_last_stored_value(d->rd_iotime_writes, &max_iops_time_writes, 1); + // Avg IO Time + double iops_avg_time_read = rrddim_get_last_stored_value(d->rd_await_reads, &max_iops_avg_time_read, 1); + double iops_avg_time_write = rrddim_get_last_stored_value(d->rd_await_writes, &max_iops_avg_time_write, 1); + // Avg IO Size + double iops_avg_size_read = rrddim_get_last_stored_value(d->rd_avgsz_reads, &max_iops_avg_size_read, 1); + double iops_avg_size_write = rrddim_get_last_stored_value(d->rd_avgsz_writes, &max_iops_avg_size_write, 1); + + + buffer_json_add_array_item_double(wb, io_reads); + buffer_json_add_array_item_double(wb, io_writes); + buffer_json_add_array_item_double(wb, io_total); + buffer_json_add_array_item_double(wb, busy_perc); + buffer_json_add_array_item_double(wb, busy_time); + buffer_json_add_array_item_double(wb, backlog_time); + buffer_json_add_array_item_double(wb, iops_reads); + buffer_json_add_array_item_double(wb, iops_writes); + buffer_json_add_array_item_double(wb, iops_time_reads); + buffer_json_add_array_item_double(wb, iops_time_writes); + buffer_json_add_array_item_double(wb, iops_avg_time_read); + buffer_json_add_array_item_double(wb, iops_avg_time_write); + buffer_json_add_array_item_double(wb, iops_avg_size_read); + buffer_json_add_array_item_double(wb, iops_avg_size_write); + + // End + buffer_json_array_close(wb); + } - case DISK_TYPE_PARTITION: - rrdlabels_add(st->rrdlabels, "device_type", "partition", RRDLABEL_SRC_AUTO); - break; + netdata_mutex_unlock(&diskstats_dev_mutex); - case DISK_TYPE_VIRTUAL: - rrdlabels_add(st->rrdlabels, "device_type", "virtual", RRDLABEL_SRC_AUTO); - break; + buffer_json_array_close(wb); // data + buffer_json_member_add_object(wb, "columns"); + { + size_t field_id = 0; + + buffer_rrdf_table_add_field(wb, field_id++, "Device", "Device Name", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Type", "Device Type", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + 
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "ID", "Device ID", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Model", "Device Model", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Serial", "Device Serial Number", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "Read", "Data Read from Device", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "MiB", max_io_reads, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Written", "Data Written to Device", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "MiB", max_io_writes, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Total", "Data Transferred to and from Device", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "MiB", max_io, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_NONE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "Busy%", "Disk Busy Percentage", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "%", max_busy_perc, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Busy", "Disk Busy Time", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "milliseconds", max_busy_time, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Backlog", "Disk Backlog", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "milliseconds", max_backlog_time, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "Reads", "Completed Read Operations", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "ops", max_iops_reads, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Writes", "Completed Write Operations", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "ops", max_iops_writes, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + +
buffer_rrdf_table_add_field(wb, field_id++, "ReadsTime", "Read Operations Time", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "milliseconds", max_iops_time_reads, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "WritesTime", "Write Operations Time", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "milliseconds", max_iops_time_writes, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "ReadAvgTime", "Average Read Operation Service Time", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "milliseconds", max_iops_avg_time_read, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "WriteAvgTime", "Average Write Operation Service Time", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "milliseconds", max_iops_avg_time_write, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "ReadAvgSz", "Average Read Operation Size", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "KiB", max_iops_avg_size_read, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + buffer_rrdf_table_add_field(wb, field_id++, "WriteAvgSz", "Average Write Operation Size", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "KiB", max_iops_avg_size_write, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + } + + buffer_json_object_close(wb); // columns + buffer_json_member_add_string(wb, "default_sort_column", "Total"); + + buffer_json_member_add_object(wb, "charts"); + { + buffer_json_member_add_object(wb, "IO"); + { + buffer_json_member_add_string(wb, "name", "IO"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Read"); + buffer_json_add_array_item_string(wb, "Written"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "Busy"); + { + buffer_json_member_add_string(wb, "name", "Busy"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Busy"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); // charts + + buffer_json_member_add_array(wb, "default_charts"); + { + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "IO"); + buffer_json_add_array_item_string(wb, "Device"); + buffer_json_array_close(wb); + + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "Busy"); + buffer_json_add_array_item_string(wb, "Device"); + buffer_json_array_close(wb); + } + buffer_json_array_close(wb); + + buffer_json_member_add_object(wb, "group_by"); + { + buffer_json_member_add_object(wb, "Type"); + { + 
buffer_json_member_add_string(wb, "name", "Type"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Type"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); // group_by + + buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1); + buffer_json_finalize(wb); + + int response = HTTP_RESP_OK; + if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) { + buffer_flush(wb); + response = HTTP_RESP_CLIENT_CLOSED_REQUEST; + } + + if(result_cb) + result_cb(wb, response, result_cb_data); + + return response; +} + +static void diskstats_cleanup_disks() { + struct disk *d = disk_root, *last = NULL; + while (d) { + if (unlikely(global_cleanup_removed_disks && !d->updated)) { + struct disk *t = d; + + rrdset_obsolete_and_pointer_null(d->st_avgsz); + rrdset_obsolete_and_pointer_null(d->st_ext_avgsz); + rrdset_obsolete_and_pointer_null(d->st_await); + rrdset_obsolete_and_pointer_null(d->st_ext_await); + rrdset_obsolete_and_pointer_null(d->st_backlog); + rrdset_obsolete_and_pointer_null(d->st_busy); + rrdset_obsolete_and_pointer_null(d->st_io); + rrdset_obsolete_and_pointer_null(d->st_ext_io); + rrdset_obsolete_and_pointer_null(d->st_iotime); + rrdset_obsolete_and_pointer_null(d->st_ext_iotime); + rrdset_obsolete_and_pointer_null(d->st_mops); + rrdset_obsolete_and_pointer_null(d->st_ext_mops); + rrdset_obsolete_and_pointer_null(d->st_ops); + rrdset_obsolete_and_pointer_null(d->st_ext_ops); + rrdset_obsolete_and_pointer_null(d->st_qops); + rrdset_obsolete_and_pointer_null(d->st_svctm); + rrdset_obsolete_and_pointer_null(d->st_util); + rrdset_obsolete_and_pointer_null(d->st_bcache); + rrdset_obsolete_and_pointer_null(d->st_bcache_bypass); + rrdset_obsolete_and_pointer_null(d->st_bcache_rates); + rrdset_obsolete_and_pointer_null(d->st_bcache_size); + rrdset_obsolete_and_pointer_null(d->st_bcache_usage); + rrdset_obsolete_and_pointer_null(d->st_bcache_hit_ratio); + rrdset_obsolete_and_pointer_null(d->st_bcache_cache_allocations); + rrdset_obsolete_and_pointer_null(d->st_bcache_cache_read_races); + + if (d == disk_root) { + disk_root = d = d->next; + last = NULL; + } else if (last) { + last->next = d = d->next; + } + + freez(t->bcache_filename_dirty_data); + freez(t->bcache_filename_writeback_rate); + freez(t->bcache_filename_cache_congested); + freez(t->bcache_filename_cache_available_percent); + freez(t->bcache_filename_stats_five_minute_cache_hit_ratio); + freez(t->bcache_filename_stats_hour_cache_hit_ratio); + freez(t->bcache_filename_stats_day_cache_hit_ratio); + freez(t->bcache_filename_stats_total_cache_hit_ratio); + freez(t->bcache_filename_stats_total_cache_hits); + freez(t->bcache_filename_stats_total_cache_misses); + freez(t->bcache_filename_stats_total_cache_miss_collisions); + freez(t->bcache_filename_stats_total_cache_bypass_hits); + freez(t->bcache_filename_stats_total_cache_bypass_misses); + freez(t->bcache_filename_stats_total_cache_readaheads); + freez(t->bcache_filename_cache_read_races); + freez(t->bcache_filename_cache_io_errors); + freez(t->bcache_filename_priority_stats); + + freez(t->disk); + freez(t->device); + freez(t->disk_by_id); + freez(t->model); + freez(t->serial); + freez(t->mount_point); + freez(t->chart_id); + freez(t); + } else { + d->updated = 0; + last = d; + d = d->next; + } } } @@ -928,6 +1450,12 @@ int do_proc_diskstats(int update_every, usec_t dt) { snprintfz(buffer, FILENAME_MAX, "%s/dev/mapper", netdata_configured_host_prefix); path_to_device_mapper = 
config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to device mapper", buffer); + snprintfz(buffer, FILENAME_MAX, "%s/dev/disk", netdata_configured_host_prefix); + path_to_dev_disk = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/disk", buffer); + + snprintfz(buffer, FILENAME_MAX, "%s/sys/block", netdata_configured_host_prefix); + path_to_sys_block = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /sys/block", buffer); + snprintfz(buffer, FILENAME_MAX, "%s/dev/disk/by-label", netdata_configured_host_prefix); path_to_device_label = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/disk/by-label", buffer); @@ -960,12 +1488,20 @@ int do_proc_diskstats(int update_every, usec_t dt) { ff = procfile_readall(ff); if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time + static bool add_func = true; + if (add_func) { + rrd_function_add(localhost, NULL, "block-devices", 10, RRDFUNCTIONS_DISKSTATS_HELP, true, diskstats_function_block_devices, NULL); + add_func = false; + } + size_t lines = procfile_lines(ff), l; collected_number system_read_kb = 0, system_write_kb = 0; int do_dc_stats = 0, do_fl_stats = 0; + netdata_mutex_lock(&diskstats_dev_mutex); + for(l = 0; l < lines ;l++) { // -------------------------------------------------------------------------- // Read parameters @@ -1090,7 +1626,6 @@ int do_proc_diskstats(int update_every, usec_t dt) { // -------------------------------------------------------------------------- // Do performance metrics - if(d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && (readsectors || writesectors || discardsectors || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { @@ -1936,8 +2471,13 @@ int do_proc_diskstats(int update_every, usec_t dt) { rrdset_done(d->st_bcache_bypass); } } + + d->function_ready = !d->excluded; } + diskstats_cleanup_disks(); + + netdata_mutex_unlock(&diskstats_dev_mutex); // update the system total I/O if(global_do_io == CONFIG_BOOLEAN_YES || (global_do_io == CONFIG_BOOLEAN_AUTO && @@ -1971,77 +2511,5 @@ int do_proc_diskstats(int update_every, usec_t dt) { rrdset_done(st_io); } - // cleanup removed disks - - struct disk *d = disk_root, *last = NULL; - while(d) { - if(unlikely(global_cleanup_removed_disks && !d->updated)) { - struct disk *t = d; - - rrdset_obsolete_and_pointer_null(d->st_avgsz); - rrdset_obsolete_and_pointer_null(d->st_ext_avgsz); - rrdset_obsolete_and_pointer_null(d->st_await); - rrdset_obsolete_and_pointer_null(d->st_ext_await); - rrdset_obsolete_and_pointer_null(d->st_backlog); - rrdset_obsolete_and_pointer_null(d->st_busy); - rrdset_obsolete_and_pointer_null(d->st_io); - rrdset_obsolete_and_pointer_null(d->st_ext_io); - rrdset_obsolete_and_pointer_null(d->st_iotime); - rrdset_obsolete_and_pointer_null(d->st_ext_iotime); - rrdset_obsolete_and_pointer_null(d->st_mops); - rrdset_obsolete_and_pointer_null(d->st_ext_mops); - rrdset_obsolete_and_pointer_null(d->st_ops); - rrdset_obsolete_and_pointer_null(d->st_ext_ops); - rrdset_obsolete_and_pointer_null(d->st_qops); - rrdset_obsolete_and_pointer_null(d->st_svctm); - rrdset_obsolete_and_pointer_null(d->st_util); - rrdset_obsolete_and_pointer_null(d->st_bcache); - rrdset_obsolete_and_pointer_null(d->st_bcache_bypass); - rrdset_obsolete_and_pointer_null(d->st_bcache_rates); - rrdset_obsolete_and_pointer_null(d->st_bcache_size); - rrdset_obsolete_and_pointer_null(d->st_bcache_usage); - rrdset_obsolete_and_pointer_null(d->st_bcache_hit_ratio); - 
rrdset_obsolete_and_pointer_null(d->st_bcache_cache_allocations); - rrdset_obsolete_and_pointer_null(d->st_bcache_cache_read_races); - - if(d == disk_root) { - disk_root = d = d->next; - last = NULL; - } - else if(last) { - last->next = d = d->next; - } - - freez(t->bcache_filename_dirty_data); - freez(t->bcache_filename_writeback_rate); - freez(t->bcache_filename_cache_congested); - freez(t->bcache_filename_cache_available_percent); - freez(t->bcache_filename_stats_five_minute_cache_hit_ratio); - freez(t->bcache_filename_stats_hour_cache_hit_ratio); - freez(t->bcache_filename_stats_day_cache_hit_ratio); - freez(t->bcache_filename_stats_total_cache_hit_ratio); - freez(t->bcache_filename_stats_total_cache_hits); - freez(t->bcache_filename_stats_total_cache_misses); - freez(t->bcache_filename_stats_total_cache_miss_collisions); - freez(t->bcache_filename_stats_total_cache_bypass_hits); - freez(t->bcache_filename_stats_total_cache_bypass_misses); - freez(t->bcache_filename_stats_total_cache_readaheads); - freez(t->bcache_filename_cache_read_races); - freez(t->bcache_filename_cache_io_errors); - freez(t->bcache_filename_priority_stats); - - freez(t->disk); - freez(t->device); - freez(t->mount_point); - freez(t->chart_id); - freez(t); - } - else { - d->updated = 0; - last = d; - d = d->next; - } - } - return 0; } diff --git a/collectors/proc.plugin/proc_interrupts.c b/collectors/proc.plugin/proc_interrupts.c index 9a20700a3db611..37071b22f842fd 100644 --- a/collectors/proc.plugin/proc_interrupts.c +++ b/collectors/proc.plugin/proc_interrupts.c @@ -201,10 +201,10 @@ int do_proc_interrupts(int update_every, usec_t dt) { for(c = 0; c < cpus ;c++) { if(unlikely(!core_st[c])) { char id[50+1]; - snprintfz(id, 50, "cpu%d_interrupts", c); + snprintfz(id, sizeof(id) - 1, "cpu%d_interrupts", c); char title[100+1]; - snprintfz(title, 100, "CPU Interrupts"); + snprintfz(title, sizeof(title) - 1, "CPU Interrupts"); core_st[c] = rrdset_create_localhost( "cpu" , id @@ -221,7 +221,7 @@ int do_proc_interrupts(int update_every, usec_t dt) { ); char core[50+1]; - snprintfz(core, 50, "cpu%d", c); + snprintfz(core, sizeof(core) - 1, "cpu%d", c); rrdlabels_add(core_st[c]->rrdlabels, "cpu", core, RRDLABEL_SRC_AUTO); } diff --git a/collectors/proc.plugin/proc_mdstat.c b/collectors/proc.plugin/proc_mdstat.c index c3d1793cbeeb1a..3857d9ec4f5102 100644 --- a/collectors/proc.plugin/proc_mdstat.c +++ b/collectors/proc.plugin/proc_mdstat.c @@ -70,10 +70,10 @@ static inline void make_chart_obsolete(char *name, const char *id_modifier) RRDSET *st = NULL; if (likely(name && id_modifier)) { - snprintfz(id, 50, "mdstat.%s_%s", name, id_modifier); + snprintfz(id, sizeof(id) - 1, "mdstat.%s_%s", name, id_modifier); st = rrdset_find_active_byname_localhost(id); if (likely(st)) - rrdset_is_obsolete(st); + rrdset_is_obsolete___safe_from_collector_thread(st); } } @@ -409,7 +409,7 @@ int do_proc_mdstat(int update_every, usec_t dt) update_every, RRDSET_TYPE_LINE); - rrdset_isnot_obsolete(st_mdstat_health); + rrdset_isnot_obsolete___safe_from_collector_thread(st_mdstat_health); } if (!redundant_num) { @@ -438,10 +438,10 @@ int do_proc_mdstat(int update_every, usec_t dt) if (likely(raid->redundant)) { if (likely(do_disks)) { - snprintfz(id, 50, "%s_disks", raid->name); + snprintfz(id, sizeof(id) - 1, "%s_disks", raid->name); if (unlikely(!raid->st_disks && !(raid->st_disks = rrdset_find_active_byname_localhost(id)))) { - snprintfz(family, 50, "%s (%s)", raid->name, raid->level); + snprintfz(family, sizeof(family) - 1, "%s (%s)", 
raid->name, raid->level); raid->st_disks = rrdset_create_localhost( "mdstat", @@ -457,7 +457,7 @@ int do_proc_mdstat(int update_every, usec_t dt) update_every, RRDSET_TYPE_STACKED); - rrdset_isnot_obsolete(raid->st_disks); + rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_disks); add_labels_to_mdstat(raid, raid->st_disks); } @@ -473,10 +473,10 @@ int do_proc_mdstat(int update_every, usec_t dt) } if (likely(do_mismatch)) { - snprintfz(id, 50, "%s_mismatch", raid->name); + snprintfz(id, sizeof(id) - 1, "%s_mismatch", raid->name); if (unlikely(!raid->st_mismatch_cnt && !(raid->st_mismatch_cnt = rrdset_find_active_byname_localhost(id)))) { - snprintfz(family, 50, "%s (%s)", raid->name, raid->level); + snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level); raid->st_mismatch_cnt = rrdset_create_localhost( "mdstat", @@ -492,7 +492,7 @@ int do_proc_mdstat(int update_every, usec_t dt) update_every, RRDSET_TYPE_LINE); - rrdset_isnot_obsolete(raid->st_mismatch_cnt); + rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_mismatch_cnt); add_labels_to_mdstat(raid, raid->st_mismatch_cnt); } @@ -505,10 +505,10 @@ int do_proc_mdstat(int update_every, usec_t dt) } if (likely(do_operations)) { - snprintfz(id, 50, "%s_operation", raid->name); + snprintfz(id, sizeof(id) - 1, "%s_operation", raid->name); if (unlikely(!raid->st_operation && !(raid->st_operation = rrdset_find_active_byname_localhost(id)))) { - snprintfz(family, 50, "%s (%s)", raid->name, raid->level); + snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level); raid->st_operation = rrdset_create_localhost( "mdstat", @@ -524,7 +524,7 @@ int do_proc_mdstat(int update_every, usec_t dt) update_every, RRDSET_TYPE_LINE); - rrdset_isnot_obsolete(raid->st_operation); + rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_operation); add_labels_to_mdstat(raid, raid->st_operation); } @@ -544,9 +544,9 @@ int do_proc_mdstat(int update_every, usec_t dt) rrddim_set_by_pointer(raid->st_operation, raid->rd_reshape, raid->reshape); rrdset_done(raid->st_operation); - snprintfz(id, 50, "%s_finish", raid->name); + snprintfz(id, sizeof(id) - 1, "%s_finish", raid->name); if (unlikely(!raid->st_finish && !(raid->st_finish = rrdset_find_active_byname_localhost(id)))) { - snprintfz(family, 50, "%s (%s)", raid->name, raid->level); + snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level); raid->st_finish = rrdset_create_localhost( "mdstat", @@ -561,7 +561,7 @@ int do_proc_mdstat(int update_every, usec_t dt) NETDATA_CHART_PRIO_MDSTAT_FINISH + raid_idx * 10, update_every, RRDSET_TYPE_LINE); - rrdset_isnot_obsolete(raid->st_finish); + rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_finish); add_labels_to_mdstat(raid, raid->st_finish); } @@ -572,9 +572,9 @@ int do_proc_mdstat(int update_every, usec_t dt) rrddim_set_by_pointer(raid->st_finish, raid->rd_finish_in, raid->finish_in); rrdset_done(raid->st_finish); - snprintfz(id, 50, "%s_speed", raid->name); + snprintfz(id, sizeof(id) - 1, "%s_speed", raid->name); if (unlikely(!raid->st_speed && !(raid->st_speed = rrdset_find_active_byname_localhost(id)))) { - snprintfz(family, 50, "%s (%s)", raid->name, raid->level); + snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level); raid->st_speed = rrdset_create_localhost( "mdstat", @@ -590,7 +590,7 @@ int do_proc_mdstat(int update_every, usec_t dt) update_every, RRDSET_TYPE_LINE); - rrdset_isnot_obsolete(raid->st_speed); + 
rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_speed); add_labels_to_mdstat(raid, raid->st_speed); } @@ -603,10 +603,10 @@ int do_proc_mdstat(int update_every, usec_t dt) } } else { if (likely(do_nonredundant)) { - snprintfz(id, 50, "%s_availability", raid->name); + snprintfz(id, sizeof(id) - 1, "%s_availability", raid->name); if (unlikely(!raid->st_nonredundant && !(raid->st_nonredundant = rrdset_find_active_localhost(id)))) { - snprintfz(family, 50, "%s (%s)", raid->name, raid->level); + snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level); raid->st_nonredundant = rrdset_create_localhost( "mdstat", @@ -622,7 +622,7 @@ int do_proc_mdstat(int update_every, usec_t dt) update_every, RRDSET_TYPE_LINE); - rrdset_isnot_obsolete(raid->st_nonredundant); + rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_nonredundant); add_labels_to_mdstat(raid, raid->st_nonredundant); } diff --git a/collectors/proc.plugin/proc_net_dev.c b/collectors/proc.plugin/proc_net_dev.c index 88863f48ee48ef..b39f396833dcff 100644 --- a/collectors/proc.plugin/proc_net_dev.c +++ b/collectors/proc.plugin/proc_net_dev.c @@ -5,16 +5,35 @@ #define PLUGIN_PROC_MODULE_NETDEV_NAME "/proc/net/dev" #define CONFIG_SECTION_PLUGIN_PROC_NETDEV "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETDEV_NAME +#define RRDFUNCTIONS_NETDEV_HELP "View network interface statistics" + #define STATE_LENGTH_MAX 32 #define READ_RETRY_PERIOD 60 // seconds +void cgroup_netdev_reset_all(void); +void cgroup_netdev_release(const DICTIONARY_ITEM *link); +const void *cgroup_netdev_dup(const DICTIONARY_ITEM *link); +void cgroup_netdev_add_bandwidth(const DICTIONARY_ITEM *link, NETDATA_DOUBLE received, NETDATA_DOUBLE sent); + enum { NETDEV_DUPLEX_UNKNOWN, NETDEV_DUPLEX_HALF, NETDEV_DUPLEX_FULL }; +static const char *get_duplex_string(int duplex) +{ + switch (duplex) { + case NETDEV_DUPLEX_FULL: + return "full"; + case NETDEV_DUPLEX_HALF: + return "half"; + default: + return "unknown"; + } +} + enum { NETDEV_OPERSTATE_UNKNOWN, NETDEV_OPERSTATE_NOTPRESENT, @@ -44,6 +63,26 @@ static inline int get_operstate(char *operstate) return NETDEV_OPERSTATE_UNKNOWN; } +static const char *get_operstate_string(int operstate) +{ + switch (operstate) { + case NETDEV_OPERSTATE_UP: + return "up"; + case NETDEV_OPERSTATE_DOWN: + return "down"; + case NETDEV_OPERSTATE_NOTPRESENT: + return "notpresent"; + case NETDEV_OPERSTATE_LOWERLAYERDOWN: + return "lowerlayerdown"; + case NETDEV_OPERSTATE_TESTING: + return "testing"; + case NETDEV_OPERSTATE_DORMANT: + return "dormant"; + default: + return "unknown"; + } +} + // ---------------------------------------------------------------------------- // netdev list @@ -58,6 +97,8 @@ static struct netdev { int enabled; int updated; + bool function_ready; + time_t discover_time; int carrier_file_exists; @@ -123,7 +164,7 @@ static struct netdev { const char *chart_family; - DICTIONARY *chart_labels; + RRDLABELS *chart_labels; int flipped; unsigned long priority; @@ -208,6 +249,8 @@ static struct netdev { char *filename_carrier; char *filename_mtu; + const DICTIONARY_ITEM *cgroup_netdev_link; + struct netdev *next; } *netdev_root = NULL, *netdev_last_used = NULL; @@ -216,18 +259,18 @@ static size_t netdev_added = 0, netdev_found = 0; // ---------------------------------------------------------------------------- static void netdev_charts_release(struct netdev *d) { - if(d->st_bandwidth) rrdset_is_obsolete(d->st_bandwidth); - if(d->st_packets) rrdset_is_obsolete(d->st_packets); - 
if(d->st_errors) rrdset_is_obsolete(d->st_errors); - if(d->st_drops) rrdset_is_obsolete(d->st_drops); - if(d->st_fifo) rrdset_is_obsolete(d->st_fifo); - if(d->st_compressed) rrdset_is_obsolete(d->st_compressed); - if(d->st_events) rrdset_is_obsolete(d->st_events); - if(d->st_speed) rrdset_is_obsolete(d->st_speed); - if(d->st_duplex) rrdset_is_obsolete(d->st_duplex); - if(d->st_operstate) rrdset_is_obsolete(d->st_operstate); - if(d->st_carrier) rrdset_is_obsolete(d->st_carrier); - if(d->st_mtu) rrdset_is_obsolete(d->st_mtu); + if(d->st_bandwidth) rrdset_is_obsolete___safe_from_collector_thread(d->st_bandwidth); + if(d->st_packets) rrdset_is_obsolete___safe_from_collector_thread(d->st_packets); + if(d->st_errors) rrdset_is_obsolete___safe_from_collector_thread(d->st_errors); + if(d->st_drops) rrdset_is_obsolete___safe_from_collector_thread(d->st_drops); + if(d->st_fifo) rrdset_is_obsolete___safe_from_collector_thread(d->st_fifo); + if(d->st_compressed) rrdset_is_obsolete___safe_from_collector_thread(d->st_compressed); + if(d->st_events) rrdset_is_obsolete___safe_from_collector_thread(d->st_events); + if(d->st_speed) rrdset_is_obsolete___safe_from_collector_thread(d->st_speed); + if(d->st_duplex) rrdset_is_obsolete___safe_from_collector_thread(d->st_duplex); + if(d->st_operstate) rrdset_is_obsolete___safe_from_collector_thread(d->st_operstate); + if(d->st_carrier) rrdset_is_obsolete___safe_from_collector_thread(d->st_carrier); + if(d->st_mtu) rrdset_is_obsolete___safe_from_collector_thread(d->st_mtu); d->st_bandwidth = NULL; d->st_compressed = NULL; @@ -326,6 +369,7 @@ static void netdev_free(struct netdev *d) { netdev_charts_release(d); netdev_free_chart_strings(d); rrdlabels_destroy(d->chart_labels); + cgroup_netdev_release(d->cgroup_netdev_link); freez((void *)d->name); freez((void *)d->filename_speed); @@ -348,15 +392,18 @@ static struct netdev_rename { const char *container_name; const char *ctx_prefix; - DICTIONARY *chart_labels; + RRDLABELS *chart_labels; int processed; + const DICTIONARY_ITEM *cgroup_netdev_link; + struct netdev_rename *next; } *netdev_rename_root = NULL; static int netdev_pending_renames = 0; static netdata_mutex_t netdev_rename_mutex = NETDATA_MUTEX_INITIALIZER; +static netdata_mutex_t netdev_dev_mutex = NETDATA_MUTEX_INITIALIZER; static struct netdev_rename *netdev_rename_find(const char *host_device, uint32_t hash) { struct netdev_rename *r; @@ -373,8 +420,9 @@ void netdev_rename_device_add( const char *host_device, const char *container_device, const char *container_name, - DICTIONARY *labels, - const char *ctx_prefix) + RRDLABELS *labels, + const char *ctx_prefix, + const DICTIONARY_ITEM *cgroup_netdev_link) { netdata_mutex_lock(&netdev_rename_mutex); @@ -391,6 +439,8 @@ void netdev_rename_device_add( r->hash = hash; r->next = netdev_rename_root; r->processed = 0; + r->cgroup_netdev_link = cgroup_netdev_link; + netdev_rename_root = r; netdev_pending_renames++; collector_info("CGROUP: registered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name); @@ -406,6 +456,8 @@ void netdev_rename_device_add( rrdlabels_migrate_to_these(r->chart_labels, labels); r->processed = 0; + r->cgroup_netdev_link = cgroup_netdev_link; + netdev_pending_renames++; collector_info("CGROUP: altered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name); } @@ -438,6 +490,7 @@ void netdev_rename_device_del(const char *host_device) { freez((void *) r->container_device); 
freez((void *) r->ctx_prefix); rrdlabels_destroy(r->chart_labels); + cgroup_netdev_release(r->cgroup_netdev_link); freez((void *) r); break; } @@ -451,6 +504,7 @@ static inline void netdev_rename_cgroup(struct netdev *d, struct netdev_rename * netdev_charts_release(d); netdev_free_chart_strings(d); + d->cgroup_netdev_link = cgroup_netdev_dup(r->cgroup_netdev_link); char buffer[RRD_ID_LENGTH_MAX + 1]; @@ -521,6 +575,7 @@ static inline void netdev_rename_cgroup(struct netdev *d, struct netdev_rename * d->chart_family = strdupz("net"); rrdlabels_copy(d->chart_labels, r->chart_labels); + rrdlabels_add(d->chart_labels, "container_device", r->container_device, RRDLABEL_SRC_AUTO); d->priority = NETDATA_CHART_PRIO_CGROUP_NET_IFACE; d->flipped = 1; @@ -554,6 +609,319 @@ static inline void netdev_rename_all_lock(void) { } // ---------------------------------------------------------------------------- + +int netdev_function_net_interfaces(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused, + void *collector_data __maybe_unused, + rrd_function_result_callback_t result_cb, void *result_cb_data, + rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, + rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused, + void *register_canceller_cb_data __maybe_unused) { + + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + + buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost)); + buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_string(wb, "type", "table"); + buffer_json_member_add_time_t(wb, "update_every", 1); + buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_NETDEV_HELP); + buffer_json_member_add_array(wb, "data"); + + double max_traffic_rx = 0.0; + double max_traffic_tx = 0.0; + double max_traffic = 0.0; + double max_packets_rx = 0.0; + double max_packets_tx = 0.0; + double max_mcast_rx = 0.0; + double max_drops_rx = 0.0; + double max_drops_tx = 0.0; + + netdata_mutex_lock(&netdev_dev_mutex); + + RRDDIM *rd = NULL; + + for (struct netdev *d = netdev_root; d != netdev_last_used; d = d->next) { + if (unlikely(!d->function_ready)) + continue; + + buffer_json_add_array_item_array(wb); + + buffer_json_add_array_item_string(wb, d->name); + + buffer_json_add_array_item_string(wb, d->virtual ? "virtual" : "physical"); + buffer_json_add_array_item_string(wb, d->flipped ? "cgroup" : "host"); + buffer_json_add_array_item_string(wb, d->carrier == 1 ? "up" : "down"); + buffer_json_add_array_item_string(wb, get_operstate_string(d->operstate)); + buffer_json_add_array_item_string(wb, get_duplex_string(d->duplex)); + buffer_json_add_array_item_double(wb, d->speed > 0 ? d->speed : NAN); + buffer_json_add_array_item_double(wb, d->mtu > 0 ? d->mtu : NAN); + + rd = d->flipped ? d->rd_tbytes : d->rd_rbytes; + double traffic_rx = rrddim_get_last_stored_value(rd, &max_traffic_rx, 1000.0); + rd = d->flipped ? d->rd_rbytes : d->rd_tbytes; + double traffic_tx = rrddim_get_last_stored_value(rd, &max_traffic_tx, 1000.0); + + rd = d->flipped ? d->rd_tpackets : d->rd_rpackets; + double packets_rx = rrddim_get_last_stored_value(rd, &max_packets_rx, 1000.0); + rd = d->flipped ? d->rd_rpackets : d->rd_tpackets; + double packets_tx = rrddim_get_last_stored_value(rd, &max_packets_tx, 1000.0); + + double mcast_rx = rrddim_get_last_stored_value(d->rd_rmulticast, &max_mcast_rx, 1000.0); + + rd = d->flipped ? 
d->rd_tdrops : d->rd_rdrops; + double drops_rx = rrddim_get_last_stored_value(rd, &max_drops_rx, 1.0); + rd = d->flipped ? d->rd_rdrops : d->rd_tdrops; + double drops_tx = rrddim_get_last_stored_value(rd, &max_drops_tx, 1.0); + + // FIXME: "traffic" (total) is needed only for default_sorting + // can be removed when default_sorting will accept multiple columns (sum) + double traffic = NAN; + if (!isnan(traffic_rx) && !isnan(traffic_tx)) { + traffic = traffic_rx + traffic_tx; + max_traffic = MAX(max_traffic, traffic); + } + + + buffer_json_add_array_item_double(wb, traffic_rx); + buffer_json_add_array_item_double(wb, traffic_tx); + buffer_json_add_array_item_double(wb, traffic); + buffer_json_add_array_item_double(wb, packets_rx); + buffer_json_add_array_item_double(wb, packets_tx); + buffer_json_add_array_item_double(wb, mcast_rx); + buffer_json_add_array_item_double(wb, drops_rx); + buffer_json_add_array_item_double(wb, drops_tx); + + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_string(wb, "severity", drops_rx + drops_tx > 0 ? "warning" : "normal"); + } + buffer_json_object_close(wb); + + buffer_json_array_close(wb); + } + + netdata_mutex_unlock(&netdev_dev_mutex); + + buffer_json_array_close(wb); // data + buffer_json_member_add_object(wb, "columns"); + { + size_t field_id = 0; + + buffer_rrdf_table_add_field(wb, field_id++, "Interface", "Network Interface Name", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "Type", "Network Interface Type", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "UsedBy", "Indicates whether the network interface is used by a cgroup or by the host system", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "PhState", "Current Physical State", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OpState", "Current Operational State", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "Duplex", "Current Duplex Mode", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "Speed", "Current Link Speed", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "Mbit", 
NAN, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "MTU", "Maximum Transmission Unit", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "Octets", NAN, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_UNIQUE_KEY, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "In", "Traffic Received", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "Mbit", max_traffic_rx, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "Out", "Traffic Sent", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "Mbit", max_traffic_tx, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "Total", "Traffic Received and Sent", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "Mbit", max_traffic, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_NONE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "PktsIn", "Received Packets", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "Kpps", max_packets_rx, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "PktsOut", "Sent Packets", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "Kpps", max_packets_tx, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "McastIn", "Multicast Received Packets", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "Kpps", max_mcast_rx, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_NONE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "DropsIn", "Dropped Inbound Packets", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "Drops", max_drops_rx, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "DropsOut", "Dropped Outbound Packets", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 2, "Drops", max_drops_tx, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE, + NULL); + + buffer_rrdf_table_add_field( + wb, field_id++, + "rowOptions", "rowOptions", + RRDF_FIELD_TYPE_NONE, + RRDR_FIELD_VISUAL_ROW_OPTIONS, + RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, + RRDF_FIELD_SORT_FIXED, + NULL, + RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_DUMMY, + NULL); + } + + buffer_json_object_close(wb); // columns + buffer_json_member_add_string(wb, "default_sort_column", "Total"); + + buffer_json_member_add_object(wb, "charts"); + { + buffer_json_member_add_object(wb, "Traffic"); + { + buffer_json_member_add_string(wb, 
"name", "Traffic"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "In"); + buffer_json_add_array_item_string(wb, "Out"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "Packets"); + { + buffer_json_member_add_string(wb, "name", "Packets"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "PktsIn"); + buffer_json_add_array_item_string(wb, "PktsOut"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); // charts + + buffer_json_member_add_array(wb, "default_charts"); + { + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "Traffic"); + buffer_json_add_array_item_string(wb, "Interface"); + buffer_json_array_close(wb); + + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "Traffic"); + buffer_json_add_array_item_string(wb, "Type"); + buffer_json_array_close(wb); + } + buffer_json_array_close(wb); + + buffer_json_member_add_object(wb, "group_by"); + { + buffer_json_member_add_object(wb, "Type"); + { + buffer_json_member_add_string(wb, "name", "Type"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Type"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "UsedBy"); + { + buffer_json_member_add_string(wb, "name", "UsedBy"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "UsedBy"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); // group_by + + buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1); + buffer_json_finalize(wb); + + int response = HTTP_RESP_OK; + if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) { + buffer_flush(wb); + response = HTTP_RESP_CLIENT_CLOSED_REQUEST; + } + + if(result_cb) + result_cb(wb, response, result_cb_data); + + return response; +} + // netdev data collection static void netdev_cleanup() { @@ -615,6 +983,7 @@ static struct netdev *get_netdev(const char *name) { d->hash = simple_hash(d->name); d->len = strlen(d->name); d->chart_labels = rrdlabels_create(); + d->function_ready = false; d->chart_type_net_bytes = strdupz("net"); d->chart_type_net_compressed = strdupz("net_compressed"); @@ -778,63 +1147,89 @@ int do_proc_net_dev(int update_every, usec_t dt) { if(d->enabled) d->enabled = !simple_pattern_matches(disabled_list, d->name); -#ifdef NETDATA_SKIP_IF_NOT_COLLECT - if(unlikely(!d->enabled)) { - netdata_log_debug(D_COLLECTOR, "PLUGIN: proc_net_dev: Skipping interface '%s' disabled by configuration", d->name); - continue; - } -#endif - char buffer[FILENAME_MAX + 1]; + char buf[FILENAME_MAX + 1]; + snprintfz(buf, FILENAME_MAX, path_to_sys_devices_virtual_net, d->name); - snprintfz(buffer, FILENAME_MAX, path_to_sys_devices_virtual_net, d->name); - if (likely(access(buffer, R_OK) == 0)) { - d->virtual = 1; - rrdlabels_add(d->chart_labels, "interface_type", "virtual", RRDLABEL_SRC_AUTO|RRDLABEL_FLAG_PERMANENT); - } - else { + d->virtual = likely(access(buf, R_OK) == 0) ? 1 : 0; + + // At least on Proxmox inside LXC: eth0 is virtual. 
+ // Virtual interfaces are not taken into account in system.net calculations + if (inside_lxc_container && d->virtual && strncmp(d->name, "eth", 3) == 0) d->virtual = 0; - rrdlabels_add(d->chart_labels, "interface_type", "real", RRDLABEL_SRC_AUTO|RRDLABEL_FLAG_PERMANENT); - } - rrdlabels_add(d->chart_labels, "device", name, RRDLABEL_SRC_AUTO|RRDLABEL_FLAG_PERMANENT); + + if (d->virtual) + rrdlabels_add(d->chart_labels, "interface_type", "virtual", RRDLABEL_SRC_AUTO); + else + rrdlabels_add(d->chart_labels, "interface_type", "real", RRDLABEL_SRC_AUTO); + + rrdlabels_add(d->chart_labels, "device", name, RRDLABEL_SRC_AUTO); if(likely(!d->virtual)) { // set the filename to get the interface speed - snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_speed, d->name); - d->filename_speed = strdupz(buffer); + snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_speed, d->name); + d->filename_speed = strdupz(buf); - snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_duplex, d->name); - d->filename_duplex = strdupz(buffer); + snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_duplex, d->name); + d->filename_duplex = strdupz(buf); } - snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_operstate, d->name); - d->filename_operstate = strdupz(buffer); + snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_operstate, d->name); + d->filename_operstate = strdupz(buf); - snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_carrier, d->name); - d->filename_carrier = strdupz(buffer); + snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_carrier, d->name); + d->filename_carrier = strdupz(buf); - snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_mtu, d->name); - d->filename_mtu = strdupz(buffer); + snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_mtu, d->name); + d->filename_mtu = strdupz(buf); - snprintfz(buffer, FILENAME_MAX, "plugin:proc:/proc/net/dev:%s", d->name); - d->enabled = config_get_boolean_ondemand(buffer, "enabled", d->enabled); - d->virtual = config_get_boolean(buffer, "virtual", d->virtual); + snprintfz(buf, FILENAME_MAX, "plugin:proc:/proc/net/dev:%s", d->name); + + if (config_exists(buf, "enabled")) + d->enabled = config_get_boolean_ondemand(buf, "enabled", d->enabled); + if (config_exists(buf, "virtual")) + d->virtual = config_get_boolean(buf, "virtual", d->virtual); if(d->enabled == CONFIG_BOOLEAN_NO) continue; - d->do_bandwidth = config_get_boolean_ondemand(buffer, "bandwidth", do_bandwidth); - d->do_packets = config_get_boolean_ondemand(buffer, "packets", do_packets); - d->do_errors = config_get_boolean_ondemand(buffer, "errors", do_errors); - d->do_drops = config_get_boolean_ondemand(buffer, "drops", do_drops); - d->do_fifo = config_get_boolean_ondemand(buffer, "fifo", do_fifo); - d->do_compressed = config_get_boolean_ondemand(buffer, "compressed", do_compressed); - d->do_events = config_get_boolean_ondemand(buffer, "events", do_events); - d->do_speed = config_get_boolean_ondemand(buffer, "speed", do_speed); - d->do_duplex = config_get_boolean_ondemand(buffer, "duplex", do_duplex); - d->do_operstate = config_get_boolean_ondemand(buffer, "operstate", do_operstate); - d->do_carrier = config_get_boolean_ondemand(buffer, "carrier", do_carrier); - d->do_mtu = config_get_boolean_ondemand(buffer, "mtu", do_mtu); + d->do_bandwidth = do_bandwidth; + d->do_packets = do_packets; + d->do_errors = do_errors; + d->do_drops = do_drops; + d->do_fifo = do_fifo; + d->do_compressed = do_compressed; + d->do_events = do_events; + d->do_speed = do_speed; + d->do_duplex = do_duplex; + d->do_operstate 
= do_operstate; + d->do_carrier = do_carrier; + d->do_mtu = do_mtu; + + if (config_exists(buf, "bandwidth")) + d->do_bandwidth = config_get_boolean_ondemand(buf, "bandwidth", do_bandwidth); + if (config_exists(buf, "packets")) + d->do_packets = config_get_boolean_ondemand(buf, "packets", do_packets); + if (config_exists(buf, "errors")) + d->do_errors = config_get_boolean_ondemand(buf, "errors", do_errors); + if (config_exists(buf, "drops")) + d->do_drops = config_get_boolean_ondemand(buf, "drops", do_drops); + if (config_exists(buf, "fifo")) + d->do_fifo = config_get_boolean_ondemand(buf, "fifo", do_fifo); + if (config_exists(buf, "compressed")) + d->do_compressed = config_get_boolean_ondemand(buf, "compressed", do_compressed); + if (config_exists(buf, "events")) + d->do_events = config_get_boolean_ondemand(buf, "events", do_events); + if (config_exists(buf, "speed")) + d->do_speed = config_get_boolean_ondemand(buf, "speed", do_speed); + if (config_exists(buf, "duplex")) + d->do_duplex = config_get_boolean_ondemand(buf, "duplex", do_duplex); + if (config_exists(buf, "operstate")) + d->do_operstate = config_get_boolean_ondemand(buf, "operstate", do_operstate); + if (config_exists(buf, "carrier")) + d->do_carrier = config_get_boolean_ondemand(buf, "carrier", do_carrier); + if (config_exists(buf, "mtu")) + d->do_mtu = config_get_boolean_ondemand(buf, "mtu", do_mtu); } if(unlikely(!d->enabled)) @@ -1014,6 +1409,11 @@ int do_proc_net_dev(int update_every, usec_t dt) { rrddim_set_by_pointer(d->st_bandwidth, d->rd_tbytes, (collected_number)d->tbytes); rrdset_done(d->st_bandwidth); + if(d->cgroup_netdev_link) + cgroup_netdev_add_bandwidth(d->cgroup_netdev_link, + d->flipped ? d->rd_tbytes->collector.last_stored_value : -d->rd_rbytes->collector.last_stored_value, + d->flipped ? 
-d->rd_rbytes->collector.last_stored_value : d->rd_tbytes->collector.last_stored_value); + // update the interface speed if(d->filename_speed) { if(unlikely(!d->chart_var_speed)) { @@ -1468,6 +1868,8 @@ int do_proc_net_dev(int update_every, usec_t dt) { rrddim_set_by_pointer(d->st_events, d->rd_tcarrier, (collected_number)d->tcarrier); rrdset_done(d->st_events); } + + d->function_ready = true; } if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && @@ -1524,6 +1926,9 @@ void *netdev_main(void *ptr) netdata_thread_cleanup_push(netdev_main_cleanup, ptr); + rrd_collector_started(); + rrd_function_add(localhost, NULL, "network-interfaces", 10, RRDFUNCTIONS_NETDEV_HELP, true, netdev_function_net_interfaces, NULL); + usec_t step = localhost->rrd_update_every * USEC_PER_SEC; heartbeat_t hb; heartbeat_init(&hb); @@ -1535,11 +1940,17 @@ void *netdev_main(void *ptr) if (unlikely(!service_running(SERVICE_COLLECTORS))) break; + cgroup_netdev_reset_all(); + worker_is_busy(0); + + netdata_mutex_lock(&netdev_dev_mutex); if(do_proc_net_dev(localhost->rrd_update_every, hb_dt)) break; + netdata_mutex_unlock(&netdev_dev_mutex); } netdata_thread_cleanup_pop(1); + return NULL; } diff --git a/collectors/proc.plugin/proc_net_netstat.c b/collectors/proc.plugin/proc_net_netstat.c index ce3068c0e5bc51..170daad5d980cb 100644 --- a/collectors/proc.plugin/proc_net_netstat.c +++ b/collectors/proc.plugin/proc_net_netstat.c @@ -2,9 +2,9 @@ #include "plugin_proc.h" -#define RRD_TYPE_NET_NETSTAT "ip" -#define RRD_TYPE_NET_SNMP "ipv4" -#define RRD_TYPE_NET_SNMP6 "ipv6" +#define RRD_TYPE_NET_IP "ip" +#define RRD_TYPE_NET_IP4 "ipv4" +#define RRD_TYPE_NET_IP6 "ipv6" #define PLUGIN_PROC_MODULE_NETSTAT_NAME "/proc/net/netstat" #define CONFIG_SECTION_PLUGIN_PROC_NETSTAT "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETSTAT_NAME @@ -424,7 +424,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "packets" , NULL , "packets" @@ -464,7 +464,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "fragsout" , NULL , "fragments6" @@ -506,7 +506,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "fragsin" , NULL , "fragments6" @@ -557,7 +557,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "errors" , NULL , "errors" @@ -605,7 +605,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "udppackets" , NULL , "udp6" @@ -647,7 +647,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "udperrors" , NULL , "udp6" @@ -689,7 +689,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "udplitepackets" , NULL , "udplite6" @@ -730,7 +730,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "udpliteerrors" , NULL , "udplite6" @@ -771,7 +771,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + 
RRD_TYPE_NET_IP6 , "mcast" , NULL , "multicast6" @@ -806,7 +806,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "bcast" , NULL , "broadcast6" @@ -841,7 +841,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "mcastpkts" , NULL , "multicast6" @@ -876,7 +876,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "icmp" , NULL , "icmp6" @@ -910,7 +910,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "icmpredir" , NULL , "icmp6" @@ -962,7 +962,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "icmperrors" , NULL , "icmp6" @@ -1018,7 +1018,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "icmpechos" , NULL , "icmp6" @@ -1064,7 +1064,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "groupmemb" , NULL , "icmp6" @@ -1109,7 +1109,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "icmprouter" , NULL , "icmp6" @@ -1151,7 +1151,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "icmpneighbor" , NULL , "icmp6" @@ -1189,7 +1189,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "icmpmldv2" , NULL , "icmp6" @@ -1239,7 +1239,7 @@ static void do_proc_net_snmp6(int update_every) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 + RRD_TYPE_NET_IP6 , "icmptypes" , NULL , "icmp6" @@ -1287,7 +1287,7 @@ static void do_proc_net_snmp6(int update_every) { if (unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6, + RRD_TYPE_NET_IP6, "ect", NULL, "packets", @@ -1852,11 +1852,11 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_system_ip)) { st_system_ip = rrdset_create_localhost( "system" - , RRD_TYPE_NET_NETSTAT + , "ip" // FIXME: this is ipv4. 
Not changing it because it will require to do changes in cloud-frontend too , NULL , "network" , NULL - , "IP Bandwidth" + , "IPv4 Bandwidth" , "kilobits/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME @@ -1874,43 +1874,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) { rrdset_done(st_system_ip); } - if(do_inerrors == CONFIG_BOOLEAN_YES || (do_inerrors == CONFIG_BOOLEAN_AUTO && - (ipext_InNoRoutes || - ipext_InTruncatedPkts || - netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { - do_inerrors = CONFIG_BOOLEAN_YES; - static RRDSET *st_ip_inerrors = NULL; - static RRDDIM *rd_noroutes = NULL, *rd_truncated = NULL, *rd_checksum = NULL; - - if(unlikely(!st_ip_inerrors)) { - st_ip_inerrors = rrdset_create_localhost( - RRD_TYPE_NET_NETSTAT - , "inerrors" - , NULL - , "errors" - , NULL - , "IP Input Errors" - , "packets/s" - , PLUGIN_PROC_NAME - , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IP_ERRORS - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st_ip_inerrors, RRDSET_FLAG_DETAIL); - - rd_noroutes = rrddim_add(st_ip_inerrors, "InNoRoutes", "noroutes", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_truncated = rrddim_add(st_ip_inerrors, "InTruncatedPkts", "truncated", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_checksum = rrddim_add(st_ip_inerrors, "InCsumErrors", "checksum", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - rrddim_set_by_pointer(st_ip_inerrors, rd_noroutes, ipext_InNoRoutes); - rrddim_set_by_pointer(st_ip_inerrors, rd_truncated, ipext_InTruncatedPkts); - rrddim_set_by_pointer(st_ip_inerrors, rd_checksum, ipext_InCsumErrors); - rrdset_done(st_ip_inerrors); - } - if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && (ipext_InMcastOctets || ipext_OutMcastOctets || @@ -1921,7 +1884,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_ip_mcast)) { st_ip_mcast = rrdset_create_localhost( - RRD_TYPE_NET_NETSTAT + RRD_TYPE_NET_IP4 , "mcast" , NULL , "multicast" @@ -1930,7 +1893,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "kilobits/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IP_MCAST + , NETDATA_CHART_PRIO_IPV4_MCAST , update_every , RRDSET_TYPE_AREA ); @@ -1960,16 +1923,16 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_ip_bcast)) { st_ip_bcast = rrdset_create_localhost( - RRD_TYPE_NET_NETSTAT + RRD_TYPE_NET_IP4 , "bcast" , NULL , "broadcast" , NULL - , "IP Broadcast Bandwidth" + , "IPv4 Broadcast Bandwidth" , "kilobits/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IP_BCAST + , NETDATA_CHART_PRIO_IPV4_BCAST , update_every , RRDSET_TYPE_AREA ); @@ -1999,16 +1962,16 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_ip_mcastpkts)) { st_ip_mcastpkts = rrdset_create_localhost( - RRD_TYPE_NET_NETSTAT + RRD_TYPE_NET_IP4 , "mcastpkts" , NULL , "multicast" , NULL - , "IP Multicast Packets" + , "IPv4 Multicast Packets" , "packets/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IP_MCAST_PACKETS + , NETDATA_CHART_PRIO_IPV4_MCAST_PACKETS , update_every , RRDSET_TYPE_LINE ); @@ -2035,16 +1998,16 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_ip_bcastpkts)) { st_ip_bcastpkts = rrdset_create_localhost( - RRD_TYPE_NET_NETSTAT + RRD_TYPE_NET_IP4 , "bcastpkts" , NULL , "broadcast" , NULL - , "IP Broadcast Packets" + , "IPv4 Broadcast Packets" , "packets/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IP_BCAST_PACKETS 
+ , NETDATA_CHART_PRIO_IPV4_BCAST_PACKETS , update_every , RRDSET_TYPE_LINE ); @@ -2073,16 +2036,16 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_ecnpkts)) { st_ecnpkts = rrdset_create_localhost( - RRD_TYPE_NET_NETSTAT + RRD_TYPE_NET_IP4 , "ecnpkts" , NULL , "ecn" , NULL - , "IP ECN Statistics" + , "IPv4 ECN Statistics" , "packets/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IP_ECN + , NETDATA_CHART_PRIO_IPV4_ECN , update_every , RRDSET_TYPE_LINE ); @@ -2114,7 +2077,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_tcpmemorypressures)) { st_tcpmemorypressures = rrdset_create_localhost( - RRD_TYPE_NET_NETSTAT + RRD_TYPE_NET_IP , "tcpmemorypressures" , NULL , "tcp" @@ -2123,7 +2086,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "events/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IP_TCP_MEM + , NETDATA_CHART_PRIO_IP_TCP_MEM_PRESSURE , update_every , RRDSET_TYPE_LINE ); @@ -2150,7 +2113,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_tcpconnaborts)) { st_tcpconnaborts = rrdset_create_localhost( - RRD_TYPE_NET_NETSTAT + RRD_TYPE_NET_IP , "tcpconnaborts" , NULL , "tcp" @@ -2194,7 +2157,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_tcpreorders)) { st_tcpreorders = rrdset_create_localhost( - RRD_TYPE_NET_NETSTAT + RRD_TYPE_NET_IP , "tcpreorders" , NULL , "tcp" @@ -2236,7 +2199,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_ip_tcpofo)) { st_ip_tcpofo = rrdset_create_localhost( - RRD_TYPE_NET_NETSTAT + RRD_TYPE_NET_IP , "tcpofo" , NULL , "tcp" @@ -2276,7 +2239,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_syncookies)) { st_syncookies = rrdset_create_localhost( - RRD_TYPE_NET_NETSTAT + RRD_TYPE_NET_IP , "tcpsyncookies" , NULL , "tcp" @@ -2315,7 +2278,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_syn_queue)) { st_syn_queue = rrdset_create_localhost( - RRD_TYPE_NET_NETSTAT + RRD_TYPE_NET_IP , "tcp_syn_queue" , NULL , "tcp" @@ -2351,7 +2314,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_accept_queue)) { st_accept_queue = rrdset_create_localhost( - RRD_TYPE_NET_NETSTAT + RRD_TYPE_NET_IP , "tcp_accept_queue" , NULL , "tcp" @@ -2392,7 +2355,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP4 , "packets" , NULL , "packets" @@ -2433,7 +2396,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP4 , "fragsout" , NULL , "fragments" @@ -2442,7 +2405,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "packets/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_FRAGMENTS + , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_OUT , update_every , RRDSET_TYPE_LINE ); @@ -2473,7 +2436,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP4 , "fragsin" , NULL , "fragments" @@ -2482,7 +2445,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "packets/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_FRAGMENTS + 1 + , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_IN , update_every , RRDSET_TYPE_LINE ); @@ -2513,13 +2476,16 @@ int do_proc_net_netstat(int update_every, usec_t dt) 
{ static RRDDIM *rd_InDiscards = NULL, *rd_OutDiscards = NULL, *rd_InHdrErrors = NULL, + *rd_InNoRoutes = NULL, *rd_OutNoRoutes = NULL, *rd_InAddrErrors = NULL, + *rd_InTruncatedPkts = NULL, + *rd_InCsumErrors = NULL, *rd_InUnknownProtos = NULL; if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP4 , "errors" , NULL , "errors" @@ -2537,11 +2503,14 @@ int do_proc_net_netstat(int update_every, usec_t dt) { rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InHdrErrors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InNoRoutes = rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_OutNoRoutes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InHdrErrors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_InAddrErrors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_InUnknownProtos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InTruncatedPkts = rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); } rrddim_set_by_pointer(st, rd_InDiscards, (collected_number)snmp_root.ip_InDiscards); @@ -2549,7 +2518,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) { rrddim_set_by_pointer(st, rd_InHdrErrors, (collected_number)snmp_root.ip_InHdrErrors); rrddim_set_by_pointer(st, rd_InAddrErrors, (collected_number)snmp_root.ip_InAddrErrors); rrddim_set_by_pointer(st, rd_InUnknownProtos, (collected_number)snmp_root.ip_InUnknownProtos); + rrddim_set_by_pointer(st, rd_InNoRoutes, (collected_number)ipext_InNoRoutes); rrddim_set_by_pointer(st, rd_OutNoRoutes, (collected_number)snmp_root.ip_OutNoRoutes); + rrddim_set_by_pointer(st, rd_InTruncatedPkts, (collected_number)ipext_InTruncatedPkts); + rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)ipext_InCsumErrors); rrdset_done(st); } @@ -2571,7 +2543,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_packets)) { st_packets = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP4 , "icmp" , NULL , "icmp" @@ -2580,7 +2552,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "packets/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_ICMP + , NETDATA_CHART_PRIO_IPV4_ICMP_PACKETS , update_every , RRDSET_TYPE_LINE ); @@ -2602,7 +2574,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st_errors)) { st_errors = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP4 , "icmp_errors" , NULL , "icmp" @@ -2611,7 +2583,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "packets/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_ICMP + 1 + , NETDATA_CHART_PRIO_IPV4_ICMP_ERRORS , update_every , RRDSET_TYPE_LINE ); @@ -2678,7 +2650,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP4 , "icmpmsg" , NULL , "icmp" @@ -2687,7 +2659,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "packets/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_ICMP + 2 + , NETDATA_CHART_PRIO_IPV4_ICMP_MESSAGES , update_every , RRDSET_TYPE_LINE ); @@ -2754,16 +2726,16 @@ int 
do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP , "tcpsock" , NULL , "tcp" , NULL - , "IPv4 TCP Connections" + , "TCP Connections" , "active connections" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_TCP + , NETDATA_CHART_PRIO_IP_TCP_ESTABLISHED_CONNS , update_every , RRDSET_TYPE_LINE ); @@ -2787,7 +2759,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP , "tcppackets" , NULL , "tcp" @@ -2796,7 +2768,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "packets/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_TCP + 4 + , NETDATA_CHART_PRIO_IP_TCP_PACKETS , update_every , RRDSET_TYPE_LINE ); @@ -2826,7 +2798,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP , "tcperrors" , NULL , "tcp" @@ -2835,7 +2807,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "packets/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_TCP + 20 + , NETDATA_CHART_PRIO_IP_TCP_ERRORS , update_every , RRDSET_TYPE_LINE ); @@ -2864,7 +2836,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP , "tcpopens" , NULL , "tcp" @@ -2873,7 +2845,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "connections/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_TCP + 5 + , NETDATA_CHART_PRIO_IP_TCP_OPENS , update_every , RRDSET_TYPE_LINE ); @@ -2903,7 +2875,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP , "tcphandshake" , NULL , "tcp" @@ -2912,7 +2884,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "events/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_TCP + 30 + , NETDATA_CHART_PRIO_IP_TCP_HANDSHAKE , update_every , RRDSET_TYPE_LINE ); @@ -2946,7 +2918,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP4 , "udppackets" , NULL , "udp" @@ -2955,7 +2927,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "packets/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_UDP + , NETDATA_CHART_PRIO_IPV4_UDP_PACKETS , update_every , RRDSET_TYPE_LINE ); @@ -2991,7 +2963,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP4 , "udperrors" , NULL , "udp" @@ -3000,7 +2972,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "events/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_UDP + 10 + , NETDATA_CHART_PRIO_IPV4_UDP_ERRORS , update_every , RRDSET_TYPE_LINE ); @@ -3044,7 +3016,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP4 , "udplite" , NULL , "udplite" @@ -3053,7 +3025,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "packets/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_UDPLITE + , NETDATA_CHART_PRIO_IPV4_UDPLITE_PACKETS , update_every 
, RRDSET_TYPE_LINE ); @@ -3078,7 +3050,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { if(unlikely(!st)) { st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP + RRD_TYPE_NET_IP4 , "udplite_errors" , NULL , "udplite" @@ -3087,7 +3059,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , "packets/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_UDPLITE + 10 + , NETDATA_CHART_PRIO_IPV4_UDPLITE_ERRORS , update_every , RRDSET_TYPE_LINE); diff --git a/collectors/proc.plugin/proc_net_sockstat.c b/collectors/proc.plugin/proc_net_sockstat.c index e94b891ca87e26..b0feab5fa4d377 100644 --- a/collectors/proc.plugin/proc_net_sockstat.c +++ b/collectors/proc.plugin/proc_net_sockstat.c @@ -228,16 +228,16 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { if(unlikely(!st)) { st = rrdset_create_localhost( - "ipv4" + "ip" , "sockstat_sockets" , NULL , "sockets" , NULL - , "IPv4 Sockets Used" + , "Sockets used for all address families" , "sockets" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_SOCKETS + , NETDATA_CHART_PRIO_IP_SOCKETS , update_every , RRDSET_TYPE_LINE ); @@ -272,7 +272,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { , NULL , "tcp" , NULL - , "IPv4 TCP Sockets" + , "TCP Sockets" , "sockets" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME @@ -310,11 +310,11 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { , NULL , "tcp" , NULL - , "IPv4 TCP Sockets Memory" + , "TCP Sockets Memory" , "KiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_TCP_MEM + , NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS_MEM , update_every , RRDSET_TYPE_AREA ); @@ -347,7 +347,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { , "sockets" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_UDP + , NETDATA_CHART_PRIO_IPV4_UDP_SOCKETS , update_every , RRDSET_TYPE_LINE ); @@ -380,7 +380,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { , "KiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_UDP_MEM + , NETDATA_CHART_PRIO_IPV4_UDP_SOCKETS_MEM , update_every , RRDSET_TYPE_AREA ); @@ -413,7 +413,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { , "sockets" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_UDPLITE + , NETDATA_CHART_PRIO_IPV4_UDPLITE_SOCKETS , update_every , RRDSET_TYPE_LINE ); @@ -479,7 +479,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { , "fragments" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_FRAGMENTS + , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_SOCKETS , update_every , RRDSET_TYPE_LINE ); @@ -512,7 +512,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { , "KiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME - , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_MEM + , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_SOCKETS_MEM , update_every , RRDSET_TYPE_AREA ); diff --git a/collectors/proc.plugin/proc_net_sockstat6.c b/collectors/proc.plugin/proc_net_sockstat6.c index 065cf605592e3b..16e0248af895b0 100644 --- a/collectors/proc.plugin/proc_net_sockstat6.c +++ b/collectors/proc.plugin/proc_net_sockstat6.c @@ -130,7 +130,7 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) { , "sockets" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME - , NETDATA_CHART_PRIO_IPV6_TCP + , NETDATA_CHART_PRIO_IPV6_TCP_SOCKETS , update_every , RRDSET_TYPE_LINE ); @@ -163,7 +163,7 
@@ int do_proc_net_sockstat6(int update_every, usec_t dt) { , "sockets" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME - , NETDATA_CHART_PRIO_IPV6_UDP + , NETDATA_CHART_PRIO_IPV6_UDP_SOCKETS , update_every , RRDSET_TYPE_LINE ); @@ -196,7 +196,7 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) { , "sockets" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME - , NETDATA_CHART_PRIO_IPV6_UDPLITE + , NETDATA_CHART_PRIO_IPV6_UDPLITE_SOCKETS , update_every , RRDSET_TYPE_LINE ); @@ -229,7 +229,7 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) { , "sockets" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME - , NETDATA_CHART_PRIO_IPV6_RAW + , NETDATA_CHART_PRIO_IPV6_RAW_SOCKETS , update_every , RRDSET_TYPE_LINE ); @@ -262,7 +262,7 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) { , "fragments" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME - , NETDATA_CHART_PRIO_IPV6_FRAGMENTS + , NETDATA_CHART_PRIO_IPV6_FRAGMENTS_SOCKETS , update_every , RRDSET_TYPE_LINE ); diff --git a/collectors/proc.plugin/proc_net_softnet_stat.c b/collectors/proc.plugin/proc_net_softnet_stat.c index dfd372b2a9553e..2f01b8859815ac 100644 --- a/collectors/proc.plugin/proc_net_softnet_stat.c +++ b/collectors/proc.plugin/proc_net_softnet_stat.c @@ -111,12 +111,12 @@ int do_proc_net_softnet_stat(int update_every, usec_t dt) { if(do_per_core) { for(l = 0; l < lines ;l++) { char id[50+1]; - snprintfz(id, 50, "cpu%zu_softnet_stat", l); + snprintfz(id, sizeof(id) - 1,"cpu%zu_softnet_stat", l); st = rrdset_find_active_bytype_localhost("cpu", id); if(unlikely(!st)) { char title[100+1]; - snprintfz(title, 100, "CPU softnet_stat"); + snprintfz(title, sizeof(title) - 1, "CPU softnet_stat"); st = rrdset_create_localhost( "cpu" diff --git a/collectors/proc.plugin/proc_net_wireless.c b/collectors/proc.plugin/proc_net_wireless.c index 08ab2eada186ef..c7efa33350a4cb 100644 --- a/collectors/proc.plugin/proc_net_wireless.c +++ b/collectors/proc.plugin/proc_net_wireless.c @@ -85,12 +85,13 @@ static struct netwireless { static void netwireless_free_st(struct netwireless *wireless_dev) { - if (wireless_dev->st_status) rrdset_is_obsolete(wireless_dev->st_status); - if (wireless_dev->st_link) rrdset_is_obsolete(wireless_dev->st_link); - if (wireless_dev->st_level) rrdset_is_obsolete(wireless_dev->st_level); - if (wireless_dev->st_noise) rrdset_is_obsolete(wireless_dev->st_noise); - if (wireless_dev->st_discarded_packets) rrdset_is_obsolete(wireless_dev->st_discarded_packets); - if (wireless_dev->st_missed_beacon) rrdset_is_obsolete(wireless_dev->st_missed_beacon); + if (wireless_dev->st_status) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_status); + if (wireless_dev->st_link) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_link); + if (wireless_dev->st_level) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_level); + if (wireless_dev->st_noise) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_noise); + if (wireless_dev->st_discarded_packets) + rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_discarded_packets); + if (wireless_dev->st_missed_beacon) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_missed_beacon); wireless_dev->st_status = NULL; wireless_dev->st_link = NULL; diff --git a/collectors/proc.plugin/proc_pagetypeinfo.c b/collectors/proc.plugin/proc_pagetypeinfo.c index e5318ce8fb95b4..fc5496c630a73e 100644 --- a/collectors/proc.plugin/proc_pagetypeinfo.c +++ 
b/collectors/proc.plugin/proc_pagetypeinfo.c @@ -211,7 +211,7 @@ int do_proc_pagetypeinfo(int update_every, usec_t dt) { ); for (o = 0; o < pageorders_cnt; o++) { char id[3+1]; - snprintfz(id, 3, "%lu", o); + snprintfz(id, sizeof(id) - 1, "%lu", o); char name[20+1]; dim_name(name, o, pagesize); @@ -234,7 +234,7 @@ int do_proc_pagetypeinfo(int update_every, usec_t dt) { // "pagetype Node" + NUMA-NodeId + ZoneName + TypeName char setid[13+1+2+1+MAX_ZONETYPE_NAME+1+MAX_PAGETYPE_NAME+1]; - snprintfz(setid, 13+1+2+1+MAX_ZONETYPE_NAME+1+MAX_PAGETYPE_NAME, "pagetype_Node%d_%s_%s", pgl->node, pgl->zone, pgl->type); + snprintfz(setid, sizeof(setid) - 1, "pagetype_Node%d_%s_%s", pgl->node, pgl->zone, pgl->type); // Skip explicitly refused charts if (simple_pattern_matches(filter_types, setid)) @@ -260,14 +260,14 @@ int do_proc_pagetypeinfo(int update_every, usec_t dt) { ); char node[50+1]; - snprintfz(node, 50, "node%d", pgl->node); + snprintfz(node, sizeof(node) - 1, "node%d", pgl->node); rrdlabels_add(st_nodezonetype[p]->rrdlabels, "node_id", node, RRDLABEL_SRC_AUTO); rrdlabels_add(st_nodezonetype[p]->rrdlabels, "node_zone", pgl->zone, RRDLABEL_SRC_AUTO); rrdlabels_add(st_nodezonetype[p]->rrdlabels, "node_type", pgl->type, RRDLABEL_SRC_AUTO); for (o = 0; o < pageorders_cnt; o++) { char dimid[3+1]; - snprintfz(dimid, 3, "%lu", o); + snprintfz(dimid, sizeof(dimid) - 1, "%lu", o); char dimname[20+1]; dim_name(dimname, o, pagesize); diff --git a/collectors/proc.plugin/proc_softirqs.c b/collectors/proc.plugin/proc_softirqs.c index ccf46cb8aa6471..5f0502f66598d9 100644 --- a/collectors/proc.plugin/proc_softirqs.c +++ b/collectors/proc.plugin/proc_softirqs.c @@ -197,10 +197,10 @@ int do_proc_softirqs(int update_every, usec_t dt) { if (unlikely(core_sum == 0)) continue; // try next core char id[50 + 1]; - snprintfz(id, 50, "cpu%d_softirqs", c); + snprintfz(id, sizeof(id) - 1, "cpu%d_softirqs", c); char title[100 + 1]; - snprintfz(title, 100, "CPU softirqs"); + snprintfz(title, sizeof(title) - 1, "CPU softirqs"); core_st[c] = rrdset_create_localhost( "cpu" @@ -218,7 +218,7 @@ int do_proc_softirqs(int update_every, usec_t dt) { ); char core[50+1]; - snprintfz(core, 50, "cpu%d", c); + snprintfz(core, sizeof(core) - 1, "cpu%d", c); rrdlabels_add(core_st[c]->rrdlabels, "cpu", core, RRDLABEL_SRC_AUTO); } diff --git a/collectors/proc.plugin/proc_spl_kstat_zfs.c b/collectors/proc.plugin/proc_spl_kstat_zfs.c index 428ef0d3299c81..27178b60fe0f48 100644 --- a/collectors/proc.plugin/proc_spl_kstat_zfs.c +++ b/collectors/proc.plugin/proc_spl_kstat_zfs.c @@ -240,7 +240,7 @@ DICTIONARY *zfs_pools = NULL; void disable_zfs_pool_state(struct zfs_pool *pool) { if (pool->st) - rrdset_is_obsolete(pool->st); + rrdset_is_obsolete___safe_from_collector_thread(pool->st); pool->st = NULL; @@ -335,7 +335,10 @@ int do_proc_spl_kstat_zfs_pool_state(int update_every, usec_t dt) if (likely(do_zfs_pool_state)) { DIR *dir = opendir(dirname); if (unlikely(!dir)) { - collector_error("Cannot read directory '%s'", dirname); + if (errno == ENOENT) + collector_info("Cannot read directory '%s'", dirname); + else + collector_error("Cannot read directory '%s'", dirname); return 1; } diff --git a/collectors/proc.plugin/proc_stat.c b/collectors/proc.plugin/proc_stat.c index a4f76796b60412..84160f22f78c6c 100644 --- a/collectors/proc.plugin/proc_stat.c +++ b/collectors/proc.plugin/proc_stat.c @@ -1038,7 +1038,7 @@ int do_proc_stat(int update_every, usec_t dt) { ); char corebuf[50+1]; - snprintfz(corebuf, 50, "cpu%zu", core); + snprintfz(corebuf, 
sizeof(corebuf) - 1, "cpu%zu", core); rrdlabels_add(cpuidle_charts[core].st->rrdlabels, "cpu", corebuf, RRDLABEL_SRC_AUTO); char cpuidle_dim_id[RRD_ID_LENGTH_MAX + 1]; diff --git a/collectors/proc.plugin/sys_block_zram.c b/collectors/proc.plugin/sys_block_zram.c index f9166ace00b3c8..dac7cac0f4e6e2 100644 --- a/collectors/proc.plugin/sys_block_zram.c +++ b/collectors/proc.plugin/sys_block_zram.c @@ -3,7 +3,7 @@ #include "plugin_proc.h" #define PLUGIN_PROC_MODULE_ZRAM_NAME "/sys/block/zram" -#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(st) +#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete___safe_from_collector_thread(st); (st) = NULL; } } while(st) typedef struct mm_stat { unsigned long long orig_data_size; diff --git a/collectors/proc.plugin/sys_class_drm.c b/collectors/proc.plugin/sys_class_drm.c index 284662cf65e593..3ed1fb8757b56e 100644 --- a/collectors/proc.plugin/sys_class_drm.c +++ b/collectors/proc.plugin/sys_class_drm.c @@ -648,13 +648,17 @@ static int read_clk_freq_file(procfile **p_ff, const char *const pathname, colle *p_ff = procfile_open(pathname, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); if(unlikely(!*p_ff)) return -2; } - + if(unlikely(NULL == (*p_ff = procfile_readall(*p_ff)))) return -3; for(size_t l = 0; l < procfile_lines(*p_ff) ; l++) { + char *str_with_units = NULL; + if((*p_ff)->lines->lines[l].words >= 3 && !strcmp(procfile_lineword((*p_ff), l, 2), "*")) //format: X: collected_number * + str_with_units = procfile_lineword((*p_ff), l, 1); + else if ((*p_ff)->lines->lines[l].words == 2 && !strcmp(procfile_lineword((*p_ff), l, 1), "*")) //format: collected_number * + str_with_units = procfile_lineword((*p_ff), l, 0); - if((*p_ff)->lines->lines[l].words >= 3 && !strcmp(procfile_lineword((*p_ff), l, 2), "*")){ - char *str_with_units = procfile_lineword((*p_ff), l, 1); + if (str_with_units) { char *delim = strchr(str_with_units, 'M'); char str_without_units[10]; memcpy(str_without_units, str_with_units, delim - str_with_units); @@ -707,7 +711,7 @@ static int do_rrd_util_gpu(struct card *const c){ else { collector_error("Cannot read util_gpu for %s: [%s]", c->pathname, c->id.marketing_name); freez((void *) c->pathname_util_gpu); - rrdset_is_obsolete(c->st_util_gpu); + rrdset_is_obsolete___safe_from_collector_thread(c->st_util_gpu); return 1; } } @@ -721,7 +725,7 @@ static int do_rrd_util_mem(struct card *const c){ else { collector_error("Cannot read util_mem for %s: [%s]", c->pathname, c->id.marketing_name); freez((void *) c->pathname_util_mem); - rrdset_is_obsolete(c->st_util_mem); + rrdset_is_obsolete___safe_from_collector_thread(c->st_util_mem); return 1; } } @@ -735,7 +739,7 @@ static int do_rrd_clk_gpu(struct card *const c){ else { collector_error("Cannot read clk_gpu for %s: [%s]", c->pathname, c->id.marketing_name); freez((void *) c->pathname_clk_gpu); - rrdset_is_obsolete(c->st_clk_gpu); + rrdset_is_obsolete___safe_from_collector_thread(c->st_clk_gpu); return 1; } } @@ -749,7 +753,7 @@ static int do_rrd_clk_mem(struct card *const c){ else { collector_error("Cannot read clk_mem for %s: [%s]", c->pathname, c->id.marketing_name); freez((void *) c->pathname_clk_mem); - rrdset_is_obsolete(c->st_clk_mem); + rrdset_is_obsolete___safe_from_collector_thread(c->st_clk_mem); return 1; } } @@ -771,8 +775,8 @@ static int do_rrd_vram(struct card *const c){ collector_error("Cannot read used_vram for %s: [%s]", c->pathname, c->id.marketing_name); freez((void *) c->pathname_mem_used_vram); 
freez((void *) c->pathname_mem_total_vram); - rrdset_is_obsolete(c->st_mem_usage_perc_vram); - rrdset_is_obsolete(c->st_mem_usage_vram); + rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_perc_vram); + rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_vram); return 1; } } @@ -794,8 +798,8 @@ static int do_rrd_vis_vram(struct card *const c){ collector_error("Cannot read used_vis_vram for %s: [%s]", c->pathname, c->id.marketing_name); freez((void *) c->pathname_mem_used_vis_vram); freez((void *) c->pathname_mem_total_vis_vram); - rrdset_is_obsolete(c->st_mem_usage_perc_vis_vram); - rrdset_is_obsolete(c->st_mem_usage_vis_vram); + rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_perc_vis_vram); + rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_vis_vram); return 1; } } @@ -817,8 +821,8 @@ static int do_rrd_gtt(struct card *const c){ collector_error("Cannot read used_gtt for %s: [%s]", c->pathname, c->id.marketing_name); freez((void *) c->pathname_mem_used_gtt); freez((void *) c->pathname_mem_total_gtt); - rrdset_is_obsolete(c->st_mem_usage_perc_gtt); - rrdset_is_obsolete(c->st_mem_usage_gtt); + rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_perc_gtt); + rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_gtt); return 1; } } diff --git a/collectors/proc.plugin/sys_class_power_supply.c b/collectors/proc.plugin/sys_class_power_supply.c index 8687ecb552c37e..3f793b9c658f88 100644 --- a/collectors/proc.plugin/sys_class_power_supply.c +++ b/collectors/proc.plugin/sys_class_power_supply.c @@ -65,7 +65,7 @@ void power_supply_free(struct power_supply *ps) { // free capacity structure if(likely(ps->capacity)) { - if(likely(ps->capacity->st)) rrdset_is_obsolete(ps->capacity->st); + if(likely(ps->capacity->st)) rrdset_is_obsolete___safe_from_collector_thread(ps->capacity->st); freez(ps->capacity->filename); if(likely(ps->capacity->fd != -1)) close(ps->capacity->fd); files_num--; @@ -89,7 +89,7 @@ void power_supply_free(struct power_supply *ps) { } // free properties - if(likely(pr->st)) rrdset_is_obsolete(pr->st); + if(likely(pr->st)) rrdset_is_obsolete___safe_from_collector_thread(pr->st); freez(pr->name); freez(pr->title); freez(pr->units); diff --git a/collectors/proc.plugin/sys_devices_pci_aer.c b/collectors/proc.plugin/sys_devices_pci_aer.c index 134426238a6879..563ebf0515bed5 100644 --- a/collectors/proc.plugin/sys_devices_pci_aer.c +++ b/collectors/proc.plugin/sys_devices_pci_aer.c @@ -240,8 +240,8 @@ int do_proc_sys_devices_pci_aer(int update_every, usec_t dt __maybe_unused) { continue; if(!a->st) { - const char *title; - const char *context; + const char *title = ""; + const char *context = ""; switch(a->type) { case AER_DEV_NONFATAL: @@ -268,6 +268,11 @@ int do_proc_sys_devices_pci_aer(int update_every, usec_t dt __maybe_unused) { title = "PCI Root-Port Advanced Error Reporting (AER) Fatal Errors"; context = "pci.rootport_aer_fatal"; break; + + default: + title = "Unknown PCI Advanced Error Reporting"; + context = "pci.unknown_aer"; + break; } char id[RRD_ID_LENGTH_MAX + 1]; diff --git a/collectors/proc.plugin/sys_devices_system_edac_mc.c b/collectors/proc.plugin/sys_devices_system_edac_mc.c index 0947f61f090730..fdaa22cb700f40 100644 --- a/collectors/proc.plugin/sys_devices_system_edac_mc.c +++ b/collectors/proc.plugin/sys_devices_system_edac_mc.c @@ -265,22 +265,22 @@ int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt __maybe_unuse char buffer[1024 + 1]; - if(read_edac_mc_rank_file(m->name, 
d->name, "dimm_dev_type", buffer, 1024)) + if (read_edac_mc_rank_file(m->name, d->name, "dimm_dev_type", buffer, 1024)) rrdlabels_add(d->st->rrdlabels, "dimm_dev_type", buffer, RRDLABEL_SRC_AUTO); - if(read_edac_mc_rank_file(m->name, d->name, "dimm_edac_mode", buffer, 1024)) + if (read_edac_mc_rank_file(m->name, d->name, "dimm_edac_mode", buffer, 1024)) rrdlabels_add(d->st->rrdlabels, "dimm_edac_mode", buffer, RRDLABEL_SRC_AUTO); - if(read_edac_mc_rank_file(m->name, d->name, "dimm_label", buffer, 1024)) + if (read_edac_mc_rank_file(m->name, d->name, "dimm_label", buffer, 1024)) rrdlabels_add(d->st->rrdlabels, "dimm_label", buffer, RRDLABEL_SRC_AUTO); - if(read_edac_mc_rank_file(m->name, d->name, "dimm_location", buffer, 1024)) + if (read_edac_mc_rank_file(m->name, d->name, "dimm_location", buffer, 1024)) rrdlabels_add(d->st->rrdlabels, "dimm_location", buffer, RRDLABEL_SRC_AUTO); - if(read_edac_mc_rank_file(m->name, d->name, "dimm_mem_type", buffer, 1024)) + if (read_edac_mc_rank_file(m->name, d->name, "dimm_mem_type", buffer, 1024)) rrdlabels_add(d->st->rrdlabels, "dimm_mem_type", buffer, RRDLABEL_SRC_AUTO); - if(read_edac_mc_rank_file(m->name, d->name, "size", buffer, 1024)) + if (read_edac_mc_rank_file(m->name, d->name, "size", buffer, 1024)) rrdlabels_add(d->st->rrdlabels, "size", buffer, RRDLABEL_SRC_AUTO); d->ce.rd = rrddim_add(d->st, "correctable", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); diff --git a/collectors/proc.plugin/sys_fs_btrfs.c b/collectors/proc.plugin/sys_fs_btrfs.c index da89411bd78852..f1d6fe720f80a1 100644 --- a/collectors/proc.plugin/sys_fs_btrfs.c +++ b/collectors/proc.plugin/sys_fs_btrfs.c @@ -196,8 +196,8 @@ static inline int collect_btrfs_commits_stats(BTRFS_NODE *node, int update_every static inline void btrfs_free_commits_stats(BTRFS_NODE *node){ if(node->st_commits){ - rrdset_is_obsolete(node->st_commits); - rrdset_is_obsolete(node->st_commit_timings); + rrdset_is_obsolete___safe_from_collector_thread(node->st_commits); + rrdset_is_obsolete___safe_from_collector_thread(node->st_commit_timings); } freez(node->commit_stats_filename); node->commit_stats_filename = NULL; @@ -211,7 +211,7 @@ static inline void btrfs_free_disk(BTRFS_DISK *d) { static inline void btrfs_free_device(BTRFS_DEVICE *d) { if(d->st_error_stats) - rrdset_is_obsolete(d->st_error_stats); + rrdset_is_obsolete___safe_from_collector_thread(d->st_error_stats); freez(d->error_stats_filename); freez(d); } @@ -220,16 +220,16 @@ static inline void btrfs_free_node(BTRFS_NODE *node) { // collector_info("BTRFS: destroying '%s'", node->id); if(node->st_allocation_disks) - rrdset_is_obsolete(node->st_allocation_disks); + rrdset_is_obsolete___safe_from_collector_thread(node->st_allocation_disks); if(node->st_allocation_data) - rrdset_is_obsolete(node->st_allocation_data); + rrdset_is_obsolete___safe_from_collector_thread(node->st_allocation_data); if(node->st_allocation_metadata) - rrdset_is_obsolete(node->st_allocation_metadata); + rrdset_is_obsolete___safe_from_collector_thread(node->st_allocation_metadata); if(node->st_allocation_system) - rrdset_is_obsolete(node->st_allocation_system); + rrdset_is_obsolete___safe_from_collector_thread(node->st_allocation_system); freez(node->allocation_data_bytes_used_filename); freez(node->allocation_data_total_bytes_filename); @@ -392,14 +392,14 @@ static inline int find_btrfs_devices(BTRFS_NODE *node, const char *path) { continue; } - collector_info("BTRFS: device found '%s'", de->d_name); + // internal_error("BTRFS: device found '%s'", de->d_name); // 
-------------------------------------------------------------------- // search for it for(d = node->devices ; d ; d = d->next) { if(str2ll(de->d_name, NULL) == d->id){ - collector_info("BTRFS: existing device id '%d'", d->id); + // collector_info("BTRFS: existing device id '%d'", d->id); break; } } @@ -411,11 +411,11 @@ static inline int find_btrfs_devices(BTRFS_NODE *node, const char *path) { d = callocz(sizeof(BTRFS_DEVICE), 1); d->id = str2ll(de->d_name, NULL); - collector_info("BTRFS: new device with id '%d'", d->id); + // collector_info("BTRFS: new device with id '%d'", d->id); snprintfz(filename, FILENAME_MAX, "%s/%d/error_stats", path, d->id); d->error_stats_filename = strdupz(filename); - collector_info("BTRFS: error_stats_filename '%s'", filename); + // collector_info("BTRFS: error_stats_filename '%s'", filename); // link it d->next = node->devices; @@ -795,7 +795,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { snprintfz(id, RRD_ID_LENGTH_MAX, "disk_%s", node->id); snprintfz(name, RRD_ID_LENGTH_MAX, "disk_%s", node->label); - snprintfz(title, 200, "BTRFS Physical Disk Allocation"); + snprintfz(title, sizeof(title) - 1, "BTRFS Physical Disk Allocation"); netdata_fix_chart_id(id); netdata_fix_chart_name(name); @@ -854,7 +854,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { snprintfz(id, RRD_ID_LENGTH_MAX, "data_%s", node->id); snprintfz(name, RRD_ID_LENGTH_MAX, "data_%s", node->label); - snprintfz(title, 200, "BTRFS Data Allocation"); + snprintfz(title, sizeof(title) - 1, "BTRFS Data Allocation"); netdata_fix_chart_id(id); netdata_fix_chart_name(name); @@ -898,7 +898,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { snprintfz(id, RRD_ID_LENGTH_MAX, "metadata_%s", node->id); snprintfz(name, RRD_ID_LENGTH_MAX, "metadata_%s", node->label); - snprintfz(title, 200, "BTRFS Metadata Allocation"); + snprintfz(title, sizeof(title) - 1, "BTRFS Metadata Allocation"); netdata_fix_chart_id(id); netdata_fix_chart_name(name); @@ -944,7 +944,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { snprintfz(id, RRD_ID_LENGTH_MAX, "system_%s", node->id); snprintfz(name, RRD_ID_LENGTH_MAX, "system_%s", node->label); - snprintfz(title, 200, "BTRFS System Allocation"); + snprintfz(title, sizeof(title) - 1, "BTRFS System Allocation"); netdata_fix_chart_id(id); netdata_fix_chart_name(name); @@ -988,7 +988,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { snprintfz(id, RRD_ID_LENGTH_MAX, "commits_%s", node->id); snprintfz(name, RRD_ID_LENGTH_MAX, "commits_%s", node->label); - snprintfz(title, 200, "BTRFS Commits"); + snprintfz(title, sizeof(title) - 1, "BTRFS Commits"); netdata_fix_chart_id(id); netdata_fix_chart_name(name); @@ -1021,7 +1021,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { snprintfz(id, RRD_ID_LENGTH_MAX, "commits_perc_time_%s", node->id); snprintfz(name, RRD_ID_LENGTH_MAX, "commits_perc_time_%s", node->label); - snprintfz(title, 200, "BTRFS Commits Time Share"); + snprintfz(title, sizeof(title) - 1, "BTRFS Commits Time Share"); netdata_fix_chart_id(id); netdata_fix_chart_name(name); @@ -1055,7 +1055,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { snprintfz(id, RRD_ID_LENGTH_MAX, "commit_timings_%s", node->id); snprintfz(name, RRD_ID_LENGTH_MAX, "commit_timings_%s", node->label); - snprintfz(title, 200, "BTRFS Commit Timings"); + snprintfz(title, sizeof(title) - 1, "BTRFS Commit Timings"); netdata_fix_chart_id(id); netdata_fix_chart_name(name); @@ -1101,7 +1101,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { snprintfz(id, RRD_ID_LENGTH_MAX, 
"device_errors_dev%d_%s", d->id, node->id); snprintfz(name, RRD_ID_LENGTH_MAX, "device_errors_dev%d_%s", d->id, node->label); - snprintfz(title, 200, "BTRFS Device Errors"); + snprintfz(title, sizeof(title) - 1, "BTRFS Device Errors"); netdata_fix_chart_id(id); netdata_fix_chart_name(name); diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md deleted file mode 100644 index 41d5b62e09abf6..00000000000000 --- a/collectors/python.d.plugin/adaptec_raid/README.md +++ /dev/null @@ -1,103 +0,0 @@ - - -# Adaptec RAID controller collector - -Collects logical and physical devices metrics using `arcconf` command-line utility. - -Executed commands: - -- `sudo -n arcconf GETCONFIG 1 LD` -- `sudo -n arcconf GETCONFIG 1 PD` - -## Requirements - -The module uses `arcconf`, which can only be executed by `root`. It uses -`sudo` and assumes that it is configured such that the `netdata` user can execute `arcconf` as root without a password. - -- Add to your `/etc/sudoers` file: - -`which arcconf` shows the full path to the binary. - -```bash -netdata ALL=(root) NOPASSWD: /path/to/arcconf -``` - -- Reset Netdata's systemd - unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux - distributions with systemd) - -The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute `arcconf` using `sudo`. - - -As the `root` user, do the following: - -```cmd -mkdir /etc/systemd/system/netdata.service.d -echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf -systemctl daemon-reload -systemctl restart netdata.service -``` - -## Charts - -- Logical Device Status -- Physical Device State -- Physical Device S.M.A.R.T warnings -- Physical Device Temperature - -## Enable the collector - -The `adaptec_raid` collector is disabled by default. To enable it, use `edit-config` from the -Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` -file. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d.conf -``` - -Change the value of the `adaptec_raid` setting to `yes`. Save the file and restart the Netdata Agent with `sudo -systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. - -## Configuration - -Edit the `python.d/adaptec_raid.conf` configuration file using `edit-config` from the -Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/adaptec_raid.conf -``` - -![image](https://user-images.githubusercontent.com/22274335/47278133-6d306680-d601-11e8-87c2-cc9c0f42d686.png) - - - - -### Troubleshooting - -To troubleshoot issues with the `adaptec_raid` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. 
If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `adaptec_raid` module in debug mode: - -```bash -./python.d.plugin adaptec_raid debug trace -``` - diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md new file mode 120000 index 00000000000000..97a103eb9f1cc2 --- /dev/null +++ b/collectors/python.d.plugin/adaptec_raid/README.md @@ -0,0 +1 @@ +integrations/adaptecraid.md \ No newline at end of file diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py index bb59d88e1d3c46..1995ad681083db 100644 --- a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py +++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py @@ -87,7 +87,7 @@ def find_pds(d): elif row.startswith('Temperature'): v = row.split(':')[-1].split()[0] pd.temperature = v - elif row.startswith('NCQ status'): + elif row.startswith(('NCQ status', 'Device Phy')) or not row: if pd.id and pd.state and pd.smart_warnings: pds.append(pd) pd = PD() diff --git a/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md b/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md new file mode 100644 index 00000000000000..13d22ba54f1c31 --- /dev/null +++ b/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md @@ -0,0 +1,204 @@ + + +# AdaptecRAID + + + + + +Plugin: python.d.plugin +Module: adaptec_raid + + + +## Overview + +This collector monitors Adaptec RAID hardware storage controller metrics about both physical and logical drives. + + +It uses the arcconf command line utility (from adaptec) to monitor your raid controller. + +Executed commands: + - `sudo -n arcconf GETCONFIG 1 LD` + - `sudo -n arcconf GETCONFIG 1 PD` + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + +The module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password. + +### Default Behavior + +#### Auto-Detection + +After all the permissions are satisfied, netdata should be to execute commands via the arcconf command line utility + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per AdaptecRAID instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| adaptec_raid.ld_status | a dimension per logical device | bool | +| adaptec_raid.pd_state | a dimension per physical device | bool | +| adaptec_raid.smart_warnings | a dimension per physical device | count | +| adaptec_raid.temperature | a dimension per physical device | celsius | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ adaptec_raid_ld_status ](https://github.com/netdata/netdata/blob/master/health/health.d/adaptec_raid.conf) | adaptec_raid.ld_status | logical device status is failed or degraded | +| [ adaptec_raid_pd_state ](https://github.com/netdata/netdata/blob/master/health/health.d/adaptec_raid.conf) | adaptec_raid.pd_state | physical device state is not online | + + +## Setup + +### Prerequisites + +#### Grant permissions for netdata, to run arcconf as sudoer + +The module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password. + +Add to your /etc/sudoers file: +which arcconf shows the full path to the binary. + +```bash +netdata ALL=(root) NOPASSWD: /path/to/arcconf +``` + + +#### Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd) + +The default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute arcconf using sudo. + +As root user, do the following: + +```bash +mkdir /etc/systemd/system/netdata.service.d +echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf +systemctl daemon-reload +systemctl restart netdata.service +``` + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/adaptec_raid.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/adaptec_raid.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | + +
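+To illustrate what the job actually executes (helpful when verifying the sudo prerequisite above), here is a minimal, hypothetical Python sketch. It is not the `adaptec_raid` module's real implementation; it only shows the two `arcconf` commands listed in the Overview being run through passwordless `sudo`.
+
+```python
+# Illustrative sketch only -- not the adaptec_raid module's actual code.
+import subprocess
+
+ARCCONF_LD = ['sudo', '-n', 'arcconf', 'GETCONFIG', '1', 'LD']  # logical devices
+ARCCONF_PD = ['sudo', '-n', 'arcconf', 'GETCONFIG', '1', 'PD']  # physical devices
+
+def run(cmd):
+    # '-n' makes sudo fail instead of prompting for a password,
+    # which is why the NOPASSWD sudoers entry above is required.
+    completed = subprocess.run(cmd, capture_output=True, text=True, check=False)
+    return completed.stdout.splitlines()
+
+ld_lines = run(ARCCONF_LD)  # parsed for logical device status
+pd_lines = run(ARCCONF_PD)  # parsed for state, S.M.A.R.T warnings and temperature
+```
+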
+ +#### Examples + +##### Basic + +A basic example configuration per job + +```yaml +job_name: + name: my_job_name + update_every: 1 # the JOB's data collection frequency + priority: 60000 # the JOB's order on the dashboard + penalty: yes # the JOB's penalty + autodetection_retry: 0 # the JOB's re-check interval in seconds + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `adaptec_raid` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin adaptec_raid debug trace + ``` + + diff --git a/collectors/python.d.plugin/adaptec_raid/metadata.yaml b/collectors/python.d.plugin/adaptec_raid/metadata.yaml index 7ee4ce7c256225..c69baff4a83a64 100644 --- a/collectors/python.d.plugin/adaptec_raid/metadata.yaml +++ b/collectors/python.d.plugin/adaptec_raid/metadata.yaml @@ -27,8 +27,8 @@ modules: It uses the arcconf command line utility (from adaptec) to monitor your raid controller. Executed commands: - - sudo -n arcconf GETCONFIG 1 LD - - sudo -n arcconf GETCONFIG 1 PD + - `sudo -n arcconf GETCONFIG 1 LD` + - `sudo -n arcconf GETCONFIG 1 PD` supported_platforms: include: [] exclude: [] diff --git a/collectors/python.d.plugin/alarms/README.md b/collectors/python.d.plugin/alarms/README.md deleted file mode 100644 index 0f956b291c4431..00000000000000 --- a/collectors/python.d.plugin/alarms/README.md +++ /dev/null @@ -1,89 +0,0 @@ - - -# Alarms - -This collector creates an 'Alarms' menu with one line plot showing alarm states over time. Alarm states are mapped to integer values according to the below default mapping. Any alarm status types not in this mapping will be ignored (Note: This mapping can be changed by editing the `status_map` in the `alarms.conf` file). If you would like to learn more about the different alarm statuses check out the docs [here](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alarm-statuses). - -``` -{ - 'CLEAR': 0, - 'WARNING': 1, - 'CRITICAL': 2 -} -``` - -## Charts - -Below is an example of the chart produced when running `stress-ng --all 2` for a few minutes. You can see the various warning and critical alarms raised. - -![alarms collector](https://user-images.githubusercontent.com/1153921/101641493-0b086a80-39ef-11eb-9f55-0713e5dfb19f.png) - -## Configuration - -Enable the collector and [restart Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md). - -```bash -cd /etc/netdata/ -sudo ./edit-config python.d.conf -# Set `alarms: no` to `alarms: yes` -sudo systemctl restart netdata -``` - -If needed, edit the `python.d/alarms.conf` configuration file using `edit-config` from the your agent's [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is usually at `/etc/netdata`. 
- -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/alarms.conf -``` - -The `alarms` specific part of the `alarms.conf` file should look like this: - -```yaml -# what url to pull data from -local: - url: 'http://127.0.0.1:19999/api/v1/alarms?all' - # define how to map alarm status to numbers for the chart - status_map: - CLEAR: 0 - WARNING: 1 - CRITICAL: 2 - # set to true to include a chart with calculated alarm values over time - collect_alarm_values: false - # define the type of chart for plotting status over time e.g. 'line' or 'stacked' - alarm_status_chart_type: 'line' - # a "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only - # alarms with "cpu" or "load" in alarm name. Default includes all. - alarm_contains_words: '' - # a "," separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude - # all alarms with "cpu" or "load" in alarm name. Default excludes None. - alarm_excludes_words: '' -``` - -It will default to pulling all alarms at each time step from the Netdata rest api at `http://127.0.0.1:19999/api/v1/alarms?all` -### Troubleshooting - -To troubleshoot issues with the `alarms` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `alarms` module in debug mode: - -```bash -./python.d.plugin alarms debug trace -``` - diff --git a/collectors/python.d.plugin/alarms/README.md b/collectors/python.d.plugin/alarms/README.md new file mode 120000 index 00000000000000..85759ae6c648b7 --- /dev/null +++ b/collectors/python.d.plugin/alarms/README.md @@ -0,0 +1 @@ +integrations/netdata_agent_alarms.md \ No newline at end of file diff --git a/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md b/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md new file mode 100644 index 00000000000000..9fb69878a70441 --- /dev/null +++ b/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md @@ -0,0 +1,201 @@ + + +# Netdata Agent alarms + +Plugin: python.d.plugin +Module: alarms + + + +## Overview + +This collector creates an 'Alarms' menu with one line plot of `alarms.status`. + + +Alarm status is read from the Netdata agent rest api [`/api/v1/alarms?all`](https://learn.netdata.cloud/api#/alerts/alerts1). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +It discovers instances of Netdata running on localhost, and gathers metrics from `http://127.0.0.1:19999/api/v1/alarms?all`. `CLEAR` status is mapped to `0`, `WARNING` to `1` and `CRITICAL` to `2`. Also, by default all alarms produced will be monitored. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. 
+ +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Netdata Agent alarms instance + +These metrics refer to the entire monitored application. + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| alarms.status | a dimension per alarm representing the latest status of the alarm. | status | +| alarms.values | a dimension per alarm representing the latest collected value of the alarm. | value | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/alarms.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/alarms.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| url | Netdata agent alarms endpoint to collect from. Can be local or remote so long as reachable by agent. | http://127.0.0.1:19999/api/v1/alarms?all | yes | +| status_map | Mapping of alarm status to integer number that will be the metric value collected. | {"CLEAR": 0, "WARNING": 1, "CRITICAL": 2} | yes | +| collect_alarm_values | set to true to include a chart with calculated alarm values over time. | no | yes | +| alarm_status_chart_type | define the type of chart for plotting status over time e.g. 'line' or 'stacked'. | line | yes | +| alarm_contains_words | A "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only alarms with "cpu" or "load" in alarm name. Default includes all. | | yes | +| alarm_excludes_words | A "," separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude all alarms with "cpu" or "load" in alarm name. Default excludes None. | | yes | +| update_every | Sets the default data collection frequency. | 10 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | + +
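
As a rough illustration of how the `status_map` above is applied, here is a minimal sketch (not part of the collector) that queries the same endpoint and converts alarm statuses to numbers. It assumes the `requests` package is installed and that the response exposes an `alarms` object with a `status` field per alarm; check `/api/v1/alarms?all` on your agent to confirm.

```python
# Illustrative sketch: fetch alarms from a local agent and apply a status_map
# like the one the collector uses. Statuses outside the map are ignored.
import requests

STATUS_MAP = {"CLEAR": 0, "WARNING": 1, "CRITICAL": 2}

resp = requests.get("http://127.0.0.1:19999/api/v1/alarms?all", timeout=5)
resp.raise_for_status()
alarms = resp.json().get("alarms", {})

for name, alarm in alarms.items():
    status = alarm.get("status")
    if status in STATUS_MAP:
        print(f"{name}: {status} -> {STATUS_MAP[status]}")
```
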
+ +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + url: 'http://127.0.0.1:19999/api/v1/alarms?all' + +``` +##### Advanced + +An advanced example configuration with multiple jobs collecting different subsets of alarms for plotting on different charts. +"ML" job will collect status and values for all alarms with "ml_" in the name. Default job will collect status for all other alarms. + + +
Config + +```yaml +ML: + update_every: 5 + url: 'http://127.0.0.1:19999/api/v1/alarms?all' + status_map: + CLEAR: 0 + WARNING: 1 + CRITICAL: 2 + collect_alarm_values: true + alarm_status_chart_type: 'stacked' + alarm_contains_words: 'ml_' + +Default: + update_every: 5 + url: 'http://127.0.0.1:19999/api/v1/alarms?all' + status_map: + CLEAR: 0 + WARNING: 1 + CRITICAL: 2 + collect_alarm_values: false + alarm_status_chart_type: 'stacked' + alarm_excludes_words: 'ml_' + +``` +
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `alarms` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin alarms debug trace + ``` + + diff --git a/collectors/python.d.plugin/am2320/README.md b/collectors/python.d.plugin/am2320/README.md deleted file mode 100644 index b8a6acb0b3aaf4..00000000000000 --- a/collectors/python.d.plugin/am2320/README.md +++ /dev/null @@ -1,76 +0,0 @@ - - -# AM2320 sensor monitoring with netdata - -Displays a graph of the temperature and humidity from a AM2320 sensor. - -## Requirements - - Adafruit Circuit Python AM2320 library - - Adafruit AM2320 I2C sensor - - Python 3 (Adafruit libraries are not Python 2.x compatible) - - -It produces the following charts: -1. **Temperature** -2. **Humidity** - -## Configuration - -Edit the `python.d/am2320.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/am2320.conf -``` - -Raspberry Pi Instructions: - -Hardware install: -Connect the am2320 to the Raspberry Pi I2C pins - -Raspberry Pi 3B/4 Pins: - -- Board 3.3V (pin 1) to sensor VIN (pin 1) -- Board SDA (pin 3) to sensor SDA (pin 2) -- Board GND (pin 6) to sensor GND (pin 3) -- Board SCL (pin 5) to sensor SCL (pin 4) - -You may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. The resistors go from VDD to SCL and SDA each. - -Software install: -- `sudo pip3 install adafruit-circuitpython-am2320` -- edit `/etc/netdata/netdata.conf` -- find `[plugin:python.d]` -- add `command options = -ppython3` -- save the file. -- restart the netdata service. -- check the dashboard. - -### Troubleshooting - -To troubleshoot issues with the `am2320` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `am2320` module in debug mode: - -```bash -./python.d.plugin am2320 debug trace -``` - diff --git a/collectors/python.d.plugin/am2320/README.md b/collectors/python.d.plugin/am2320/README.md new file mode 120000 index 00000000000000..0bc5ea90e839d6 --- /dev/null +++ b/collectors/python.d.plugin/am2320/README.md @@ -0,0 +1 @@ +integrations/am2320.md \ No newline at end of file diff --git a/collectors/python.d.plugin/am2320/integrations/am2320.md b/collectors/python.d.plugin/am2320/integrations/am2320.md new file mode 100644 index 00000000000000..72b351eb555013 --- /dev/null +++ b/collectors/python.d.plugin/am2320/integrations/am2320.md @@ -0,0 +1,181 @@ + + +# AM2320 + + + + + +Plugin: python.d.plugin +Module: am2320 + + + +## Overview + +This collector monitors AM2320 sensor metrics about temperature and humidity. + +It retrieves temperature and humidity values by contacting an AM2320 sensor over i2c. + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +Assuming prerequisites are met, the collector will try to connect to the sensor via i2c + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per AM2320 instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| am2320.temperature | temperature | celsius | +| am2320.humidity | humidity | percentage | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Sensor connection to a Raspberry Pi + +Connect the am2320 to the Raspberry Pi I2C pins + +Raspberry Pi 3B/4 Pins: + +- Board 3.3V (pin 1) to sensor VIN (pin 1) +- Board SDA (pin 3) to sensor SDA (pin 2) +- Board GND (pin 6) to sensor GND (pin 3) +- Board SCL (pin 5) to sensor SCL (pin 4) + +You may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. The resistors go from VDD to SCL and SDA each. + + +#### Software requirements + +Install the Adafruit Circuit Python AM2320 library: + +`sudo pip3 install adafruit-circuitpython-am2320` + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/am2320.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/am2320.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. 
+ +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | + +
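
To check the wiring and library installation independently of Netdata, you can try a minimal read with the Adafruit library mentioned in the prerequisites. This is only a sketch and assumes I2C is enabled on the board and `adafruit-circuitpython-am2320` is installed.

```python
# Quick sensor check over I2C, outside of Netdata.
# Assumes adafruit-circuitpython-am2320 is installed (see prerequisites).
import board
import busio
import adafruit_am2320

i2c = busio.I2C(board.SCL, board.SDA)
sensor = adafruit_am2320.AM2320(i2c)

print("temperature (celsius):", sensor.temperature)
print("humidity (percentage):", sensor.relative_humidity)
```

If this script cannot reach the sensor, the collector will not be able to either, so resolve the wiring or I2C setup first.
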
+ +#### Examples + +##### Local sensor + +A basic JOB configuration + +```yaml +local_sensor: + name: 'Local AM2320' + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `am2320` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin am2320 debug trace + ``` + + diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md deleted file mode 100644 index c86ca354ab3cac..00000000000000 --- a/collectors/python.d.plugin/beanstalk/README.md +++ /dev/null @@ -1,156 +0,0 @@ - - -# Beanstalk collector - -Provides server and tube-level statistics. - -## Requirements - -- `python-beanstalkc` - -**Server statistics:** - -1. **Cpu usage** in cpu time - - - user - - system - -2. **Jobs rate** in jobs/s - - - total - - timeouts - -3. **Connections rate** in connections/s - - - connections - -4. **Commands rate** in commands/s - - - put - - peek - - peek-ready - - peek-delayed - - peek-buried - - reserve - - use - - watch - - ignore - - delete - - release - - bury - - kick - - stats - - stats-job - - stats-tube - - list-tubes - - list-tube-used - - list-tubes-watched - - pause-tube - -5. **Current tubes** in tubes - - - tubes - -6. **Current jobs** in jobs - - - urgent - - ready - - reserved - - delayed - - buried - -7. **Current connections** in connections - - - written - - producers - - workers - - waiting - -8. **Binlog** in records/s - - - written - - migrated - -9. **Uptime** in seconds - - - uptime - -**Per tube statistics:** - -1. **Jobs rate** in jobs/s - - - jobs - -2. **Jobs** in jobs - - - using - - ready - - reserved - - delayed - - buried - -3. **Connections** in connections - - - using - - waiting - - watching - -4. **Commands** in commands/s - - - deletes - - pauses - -5. **Pause** in seconds - - - since - - left - -## Configuration - -Edit the `python.d/beanstalk.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/beanstalk.conf -``` - -Sample: - -```yaml -host : '127.0.0.1' -port : 11300 -``` - -If no configuration is given, module will attempt to connect to beanstalkd on `127.0.0.1:11300` address - - - - -### Troubleshooting - -To troubleshoot issues with the `beanstalk` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `beanstalk` module in debug mode: - -```bash -./python.d.plugin beanstalk debug trace -``` - diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md new file mode 120000 index 00000000000000..4efe13889a9669 --- /dev/null +++ b/collectors/python.d.plugin/beanstalk/README.md @@ -0,0 +1 @@ +integrations/beanstalk.md \ No newline at end of file diff --git a/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md b/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md new file mode 100644 index 00000000000000..5095c0c284a6d2 --- /dev/null +++ b/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md @@ -0,0 +1,219 @@ + + +# Beanstalk + + + + + +Plugin: python.d.plugin +Module: beanstalk + + + +## Overview + +Monitor Beanstalk metrics to enhance job queueing and processing efficiency. Track job rates, processing times, and queue lengths for better task management. + +The collector uses the `beanstalkc` python module to connect to a `beanstalkd` service and gather metrics. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +If no configuration is given, module will attempt to connect to beanstalkd on 127.0.0.1:11300 address. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Beanstalk instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| beanstalk.cpu_usage | user, system | cpu time | +| beanstalk.jobs_rate | total, timeouts | jobs/s | +| beanstalk.connections_rate | connections | connections/s | +| beanstalk.commands_rate | put, peek, peek-ready, peek-delayed, peek-buried, reserve, use, watch, ignore, delete, bury, kick, stats, stats-job, stats-tube, list-tubes, list-tube-used, list-tubes-watched, pause-tube | commands/s | +| beanstalk.connections_rate | tubes | tubes | +| beanstalk.current_jobs | urgent, ready, reserved, delayed, buried | jobs | +| beanstalk.current_connections | written, producers, workers, waiting | connections | +| beanstalk.binlog | written, migrated | records/s | +| beanstalk.uptime | uptime | seconds | + +### Per tube + +Metrics related to Beanstalk tubes. Each tube produces its own set of the following metrics. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| beanstalk.jobs_rate | jobs | jobs/s | +| beanstalk.jobs | urgent, ready, reserved, delayed, buried | jobs | +| beanstalk.connections | using, waiting, watching | connections | +| beanstalk.commands | deletes, pauses | commands/s | +| beanstalk.pause | since, left | seconds | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ beanstalk_server_buried_jobs ](https://github.com/netdata/netdata/blob/master/health/health.d/beanstalkd.conf) | beanstalk.current_jobs | number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs. | + + +## Setup + +### Prerequisites + +#### beanstalkc python module + +The collector requires the `beanstalkc` python module to be installed. + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/beanstalk.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/beanstalk.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | +| host | IP or URL to a beanstalk service. | 127.0.0.1 | no | +| port | Port to the IP or URL to a beanstalk service. | 11300 | no | + +
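
If you want to verify connectivity before defining a job, the sketch below uses the same `beanstalkc` module the collector depends on (see prerequisites). It is illustrative only; the printed keys follow beanstalkd's own stat names.

```python
# Rough connectivity check against a beanstalkd service using beanstalkc.
# Host and port below mirror the defaults; adjust them for a remote server.
import beanstalkc

conn = beanstalkc.Connection(host="127.0.0.1", port=11300)

server_stats = conn.stats()                 # server-wide statistics
print("uptime:", server_stats.get("uptime"))
print("current-jobs-ready:", server_stats.get("current-jobs-ready"))

for tube in conn.tubes():                   # per-tube statistics
    tube_stats = conn.stats_tube(tube)
    print(tube, "->", tube_stats.get("current-jobs-ready"))

conn.close()
```
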
+ +#### Examples + +##### Remote beanstalk server + +A basic remote beanstalk server + +```yaml +remote: + name: 'beanstalk' + host: '1.2.3.4' + port: 11300 + +``` +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
Config
+
+```yaml
+localhost:
+  name: 'local_beanstalk'
+  host: '127.0.0.1'
+  port: 11300
+
+remote_job:
+  name: 'remote_beanstalk'
+  host: '192.0.2.1'
+  port: 11300
+
+```
+
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `beanstalk` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin beanstalk debug trace + ``` + + diff --git a/collectors/python.d.plugin/beanstalk/metadata.yaml b/collectors/python.d.plugin/beanstalk/metadata.yaml index b6ff2f11686336..7dff9cb3a1440b 100644 --- a/collectors/python.d.plugin/beanstalk/metadata.yaml +++ b/collectors/python.d.plugin/beanstalk/metadata.yaml @@ -8,7 +8,7 @@ modules: link: "https://beanstalkd.github.io/" categories: - data-collection.message-brokers - - data-collection.task-queues + #- data-collection.task-queues icon_filename: "beanstalk.svg" related_resources: integrations: diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md deleted file mode 100644 index aa173f385204ac..00000000000000 --- a/collectors/python.d.plugin/bind_rndc/README.md +++ /dev/null @@ -1,102 +0,0 @@ - - -# ISC Bind collector - -Collects Name server summary performance statistics using `rndc` tool. - -## Requirements - -- Version of bind must be 9.6 + -- Netdata must have permissions to run `rndc stats` - -It produces: - -1. **Name server statistics** - - - requests - - responses - - success - - auth_answer - - nonauth_answer - - nxrrset - - failure - - nxdomain - - recursion - - duplicate - - rejections - -2. **Incoming queries** - - - RESERVED0 - - A - - NS - - CNAME - - SOA - - PTR - - MX - - TXT - - X25 - - AAAA - - SRV - - NAPTR - - A6 - - DS - - RSIG - - DNSKEY - - SPF - - ANY - - DLV - -3. **Outgoing queries** - -- Same as Incoming queries - -## Configuration - -Edit the `python.d/bind_rndc.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/bind_rndc.conf -``` - -Sample: - -```yaml -local: - named_stats_path : '/var/log/bind/named.stats' -``` - -If no configuration is given, module will attempt to read named.stats file at `/var/log/bind/named.stats` - - - - -### Troubleshooting - -To troubleshoot issues with the `bind_rndc` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `bind_rndc` module in debug mode: - -```bash -./python.d.plugin bind_rndc debug trace -``` - diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md new file mode 120000 index 00000000000000..03a182ae8d736b --- /dev/null +++ b/collectors/python.d.plugin/bind_rndc/README.md @@ -0,0 +1 @@ +integrations/isc_bind_rndc.md \ No newline at end of file diff --git a/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md b/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md new file mode 100644 index 00000000000000..163f8282c3818e --- /dev/null +++ b/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md @@ -0,0 +1,215 @@ + + +# ISC Bind (RNDC) + + + + + +Plugin: python.d.plugin +Module: bind_rndc + + + +## Overview + +Monitor ISCBind (RNDC) performance for optimal DNS server operations. Monitor query rates, response times, and error rates to ensure reliable DNS service delivery. + +This collector uses the `rndc` tool to dump (named.stats) statistics then read them to gather Bind Name Server summary performance metrics. + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +If no configuration is given, the collector will attempt to read named.stats file at `/var/log/bind/named.stats` + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per ISC Bind (RNDC) instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| bind_rndc.name_server_statistics | requests, rejected_queries, success, failure, responses, duplicate, recursion, nxrrset, nxdomain, non_auth_answer, auth_answer, dropped_queries | stats | +| bind_rndc.incoming_queries | a dimension per incoming query type | queries | +| bind_rndc.outgoing_queries | a dimension per outgoing query type | queries | +| bind_rndc.stats_size | stats_size | MiB | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ bind_rndc_stats_file_size ](https://github.com/netdata/netdata/blob/master/health/health.d/bind_rndc.conf) | bind_rndc.stats_size | BIND statistics-file size | + + +## Setup + +### Prerequisites + +#### Minimum bind version and permissions + +Version of bind must be >=9.6 and the Netdata user must have permissions to run `rndc stats` + +#### Setup log rotate for bind stats + +BIND appends logs at EVERY RUN. It is NOT RECOMMENDED to set `update_every` below 30 sec. +It is STRONGLY RECOMMENDED to create a `bind-rndc.conf` file for logrotate. + +To set up BIND to dump stats do the following: + +1. Add to 'named.conf.options' options {}: +`statistics-file "/var/log/bind/named.stats";` + +2. Create bind/ directory in /var/log: +`cd /var/log/ && mkdir bind` + +3. Change owner of directory to 'bind' user: +`chown bind bind/` + +4. 
RELOAD (NOT restart) BIND: +`systemctl reload bind9.service` + +5. Run as a root 'rndc stats' to dump (BIND will create named.stats in new directory) + +To allow Netdata to run 'rndc stats' change '/etc/bind/rndc.key' group to netdata: +`chown :netdata rndc.key` + +Last, BUT NOT least, is to create bind-rndc.conf in logrotate.d/: +``` +/var/log/bind/named.stats { + + daily + rotate 4 + compress + delaycompress + create 0644 bind bind + missingok + postrotate + rndc reload > /dev/null + endscript +} +``` +To test your logrotate conf file run as root: +`logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode)` + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/bind_rndc.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/bind_rndc.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| named_stats_path | Path to the named stats file, after being dumped by `rndc`. | /var/log/bind/named.stats | no |
+
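
As a quick sanity check that `rndc stats` works and that the stats file is where the `named_stats_path` option expects it, you can run something like the sketch below. It is illustrative only and assumes `rndc` is in the PATH and the invoking user is allowed to run `rndc stats` (see prerequisites); the reported size corresponds to what the collector charts as `bind_rndc.stats_size`.

```python
# Trigger a stats dump the same way the collector does and report the size
# of the stats file. Adjust NAMED_STATS_PATH if you changed named_stats_path.
import os
import subprocess

NAMED_STATS_PATH = "/var/log/bind/named.stats"

subprocess.run(["rndc", "stats"], check=True)        # appends a new dump to the file
size_mib = os.path.getsize(NAMED_STATS_PATH) / (1024 * 1024)
print(f"{NAMED_STATS_PATH}: {size_mib:.2f} MiB")
```
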
+ +#### Examples + +##### Local bind stats + +Define a local path to bind stats file + +```yaml +local: + named_stats_path: '/var/log/bind/named.stats' + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `bind_rndc` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin bind_rndc debug trace + ``` + + diff --git a/collectors/python.d.plugin/bind_rndc/metadata.yaml b/collectors/python.d.plugin/bind_rndc/metadata.yaml index 1e9fb24fe00b5f..e3568e448b4ea5 100644 --- a/collectors/python.d.plugin/bind_rndc/metadata.yaml +++ b/collectors/python.d.plugin/bind_rndc/metadata.yaml @@ -4,7 +4,7 @@ modules: plugin_name: python.d.plugin module_name: bind_rndc monitored_instance: - name: ISCBind (RNDC) + name: ISC Bind (RNDC) link: "https://www.isc.org/bind/" categories: - data-collection.dns-and-dhcp-servers diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md deleted file mode 100644 index ea439775445689..00000000000000 --- a/collectors/python.d.plugin/boinc/README.md +++ /dev/null @@ -1,64 +0,0 @@ - - -# BOINC collector - -Monitors task counts for the Berkeley Open Infrastructure Networking Computing (BOINC) distributed computing client using the same RPC interface that the BOINC monitoring GUI does. - -It provides charts tracking the total number of tasks and active tasks, as well as ones tracking each of the possible states for tasks. - -## Configuration - -Edit the `python.d/boinc.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/boinc.conf -``` - -BOINC requires use of a password to access it's RPC interface. You can -find this password in the `gui_rpc_auth.cfg` file in your BOINC directory. - -By default, the module will try to auto-detect the password by looking -in `/var/lib/boinc` for this file (this is the location most Linux -distributions use for a system-wide BOINC installation), so things may -just work without needing configuration for the local system. - -You can monitor remote systems as well: - -```yaml -remote: - hostname: some-host - password: some-password -``` - - - - -### Troubleshooting - -To troubleshoot issues with the `boinc` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `boinc` module in debug mode: - -```bash -./python.d.plugin boinc debug trace -``` - diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md new file mode 120000 index 00000000000000..22c10ca17c42e8 --- /dev/null +++ b/collectors/python.d.plugin/boinc/README.md @@ -0,0 +1 @@ +integrations/boinc.md \ No newline at end of file diff --git a/collectors/python.d.plugin/boinc/integrations/boinc.md b/collectors/python.d.plugin/boinc/integrations/boinc.md new file mode 100644 index 00000000000000..d6874d4559c2eb --- /dev/null +++ b/collectors/python.d.plugin/boinc/integrations/boinc.md @@ -0,0 +1,204 @@ + + +# BOINC + + + + + +Plugin: python.d.plugin +Module: boinc + + + +## Overview + +This collector monitors task counts for the Berkeley Open Infrastructure Networking Computing (BOINC) distributed computing client. + +It uses the same RPC interface that the BOINC monitoring GUI does. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for this file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per BOINC instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| boinc.tasks | Total, Active | tasks | +| boinc.states | New, Downloading, Ready to Run, Compute Errors, Uploading, Uploaded, Aborted, Failed Uploads | tasks | +| boinc.sched | Uninitialized, Preempted, Scheduled | tasks | +| boinc.process | Uninitialized, Executing, Suspended, Aborted, Quit, Copy Pending | tasks | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes | +| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes | +| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf) | boinc.states | average number of compute errors over the last 10 minutes | +| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf) | boinc.states | average number of failed uploads over the last 10 minutes | + + +## Setup + +### Prerequisites + +#### Boinc RPC interface + +BOINC requires use of a password to access it's RPC interface. 
You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory. + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/boinc.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/boinc.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | +| hostname | Define a hostname where boinc is running. | localhost | no | +| port | The port of boinc RPC interface. | | no | +| password | Provide a password to connect to a boinc RPC interface. | | no | + +
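
If you are unsure what to put in the `password` option, the sketch below shows one way to read it from the default system-wide location mentioned above. This is only a helper sketch; adjust the path for a custom BOINC data directory.

```python
# Read the BOINC GUI RPC password, which is what the `password` option expects.
# /var/lib/boinc is the system-wide default mentioned in this document.
from pathlib import Path


def read_boinc_rpc_password(boinc_dir: str = "/var/lib/boinc") -> str:
    auth_file = Path(boinc_dir) / "gui_rpc_auth.cfg"
    return auth_file.read_text().strip()


if __name__ == "__main__":
    print("RPC password:", read_boinc_rpc_password())
```
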
+ +#### Examples + +##### Configuration of a remote boinc instance + +A basic JOB configuration for a remote boinc instance + +```yaml +remote: + hostname: '1.2.3.4' + port: 1234 + password: 'some-password' + +``` +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
Config + +```yaml +localhost: + name: 'local' + host: '127.0.0.1' + port: 1234 + password: 'some-password' + +remote_job: + name: 'remote' + host: '192.0.2.1' + port: 1234 + password: some-other-password + +``` +
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `boinc` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin boinc debug trace + ``` + + diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md deleted file mode 100644 index 555491ad7b2661..00000000000000 --- a/collectors/python.d.plugin/ceph/README.md +++ /dev/null @@ -1,71 +0,0 @@ - - -# CEPH collector - -Monitors the ceph cluster usage and consumption data of a server, and produces: - -- Cluster statistics (usage, available, latency, objects, read/write rate) -- OSD usage -- OSD latency -- Pool usage -- Pool read/write operations -- Pool read/write rate -- number of objects per pool - -## Requirements - -- `rados` python module -- Granting read permissions to ceph group from keyring file - -```shell -# chmod 640 /etc/ceph/ceph.client.admin.keyring -``` - -## Configuration - -Edit the `python.d/ceph.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/ceph.conf -``` - -Sample: - -```yaml -local: - config_file: '/etc/ceph/ceph.conf' - keyring_file: '/etc/ceph/ceph.client.admin.keyring' -``` - - - - -### Troubleshooting - -To troubleshoot issues with the `ceph` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `ceph` module in debug mode: - -```bash -./python.d.plugin ceph debug trace -``` - diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md new file mode 120000 index 00000000000000..654248b701f7bb --- /dev/null +++ b/collectors/python.d.plugin/ceph/README.md @@ -0,0 +1 @@ +integrations/ceph.md \ No newline at end of file diff --git a/collectors/python.d.plugin/ceph/integrations/ceph.md b/collectors/python.d.plugin/ceph/integrations/ceph.md new file mode 100644 index 00000000000000..cfda01fbe785fa --- /dev/null +++ b/collectors/python.d.plugin/ceph/integrations/ceph.md @@ -0,0 +1,194 @@ + + +# Ceph + + + + + +Plugin: python.d.plugin +Module: ceph + + + +## Overview + +This collector monitors Ceph metrics about Cluster statistics, OSD usage, latency and Pool statistics. + +Uses the `rados` python module to connect to a Ceph cluster. + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Ceph instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ceph.general_usage | avail, used | KiB | +| ceph.general_objects | cluster | objects | +| ceph.general_bytes | read, write | KiB/s | +| ceph.general_operations | read, write | operations | +| ceph.general_latency | apply, commit | milliseconds | +| ceph.pool_usage | a dimension per Ceph Pool | KiB | +| ceph.pool_objects | a dimension per Ceph Pool | objects | +| ceph.pool_read_bytes | a dimension per Ceph Pool | KiB/s | +| ceph.pool_write_bytes | a dimension per Ceph Pool | KiB/s | +| ceph.pool_read_operations | a dimension per Ceph Pool | operations | +| ceph.pool_write_operations | a dimension per Ceph Pool | operations | +| ceph.osd_usage | a dimension per Ceph OSD | KiB | +| ceph.osd_size | a dimension per Ceph OSD | KiB | +| ceph.apply_latency | a dimension per Ceph OSD | milliseconds | +| ceph.commit_latency | a dimension per Ceph OSD | milliseconds | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ ceph_cluster_space_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/ceph.conf) | ceph.general_usage | cluster disk space utilization | + + +## Setup + +### Prerequisites + +#### `rados` python module + +Make sure the `rados` python module is installed + +#### Granting read permissions to ceph group from keyring file + +Execute: `chmod 640 /etc/ceph/ceph.client.admin.keyring` + +#### Create a specific rados_id + +You can optionally create a rados_id to use instead of admin + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/ceph.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/ceph.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| config_file | Ceph config file. | | yes |
+| keyring_file | Ceph keyring file. The netdata user must be added to the ceph group, and the keyring file must be group-readable. | | yes |
+| rados_id | A rados user id to use for connecting to the Ceph cluster. | admin | no |
+
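
To confirm the config/keyring pair works before defining a job, you can try a small connectivity check with the same `rados` bindings the collector needs (see prerequisites). This is a sketch only; treat the exact stat keys as indicative and verify them against your cluster.

```python
# Connectivity sketch using the python rados bindings. Paths mirror the
# basic example job; change rados_id if you created a dedicated user.
import rados

cluster = rados.Rados(
    conffile="/etc/ceph/ceph.conf",
    conf={"keyring": "/etc/ceph/ceph.client.admin.keyring"},
    rados_id="admin",
)
cluster.connect()
try:
    stats = cluster.get_cluster_stats()   # overall usage: kb, kb_used, kb_avail, num_objects
    print("cluster usage (KiB used/avail):", stats["kb_used"], "/", stats["kb_avail"])
    print("objects:", stats["num_objects"])
finally:
    cluster.shutdown()
```
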
+ +#### Examples + +##### Basic local Ceph cluster + +A basic configuration to connect to a local Ceph cluster. + +```yaml +local: + config_file: '/etc/ceph/ceph.conf' + keyring_file: '/etc/ceph/ceph.client.admin.keyring' + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `ceph` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin ceph debug trace + ``` + + diff --git a/collectors/python.d.plugin/changefinder/README.md b/collectors/python.d.plugin/changefinder/README.md deleted file mode 100644 index 0e9bab88757fda..00000000000000 --- a/collectors/python.d.plugin/changefinder/README.md +++ /dev/null @@ -1,241 +0,0 @@ - - -# Online change point detection with Netdata - -This collector uses the Python [changefinder](https://github.com/shunsukeaihara/changefinder) library to -perform [online](https://en.wikipedia.org/wiki/Online_machine_learning) [changepoint detection](https://en.wikipedia.org/wiki/Change_detection) -on your Netdata charts and/or dimensions. - -Instead of this collector just _collecting_ data, it also does some computation on the data it collects to return a -changepoint score for each chart or dimension you configure it to work on. This is -an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm so there is no batch step -to train the model, instead it evolves over time as more data arrives. That makes this particular algorithm quite cheap -to compute at each step of data collection (see the notes section below for more details) and it should scale fairly -well to work on lots of charts or hosts (if running on a parent node for example). - -> As this is a somewhat unique collector and involves often subjective concepts like changepoints and anomalies, we would love to hear any feedback on it from the community. Please let us know on the [community forum](https://community.netdata.cloud/t/changefinder-collector-feedback/972) or drop us a note at [analytics-ml-team@netdata.cloud](mailto:analytics-ml-team@netdata.cloud) for any and all feedback, both positive and negative. This sort of feedback is priceless to help us make complex features more useful. - -## Charts - -Two charts are available: - -### ChangeFinder Scores (`changefinder.scores`) - -This chart shows the percentile of the score that is output from the ChangeFinder library (it is turned off by default -but available with `show_scores: true`). - -A high observed score is more likely to be a valid changepoint worth exploring, even more so when multiple charts or -dimensions have high changepoint scores at the same time or very close together. - -### ChangeFinder Flags (`changefinder.flags`) - -This chart shows `1` or `0` if the latest score has a percentile value that exceeds the `cf_threshold` threshold. By -default, any scores that are in the 99th or above percentile will raise a flag on this chart. 
- -The raw changefinder score itself can be a little noisy and so limiting ourselves to just periods where it surpasses -the 99th percentile can help manage the "[signal to noise ratio](https://en.wikipedia.org/wiki/Signal-to-noise_ratio)" -better. - -The `cf_threshold` parameter might be one you want to play around with to tune things specifically for the workloads on -your node and the specific charts you want to monitor. For example, maybe the 95th percentile might work better for you -than the 99th percentile. - -Below is an example of the chart produced by this collector. The first 3/4 of the period looks normal in that we see a -few individual changes being picked up somewhat randomly over time. But then at around 14:59 towards the end of the -chart we see two periods with 'spikes' of multiple changes for a small period of time. This is the sort of pattern that -might be a sign something on the system that has changed sufficiently enough to merit some investigation. - -![changepoint-collector](https://user-images.githubusercontent.com/2178292/108773528-665de980-7556-11eb-895d-798669bcd695.png) - -## Requirements - -- This collector will only work with Python 3 and requires the packages below be installed. - -```bash -# become netdata user -sudo su -s /bin/bash netdata -# install required packages for the netdata user -pip3 install --user numpy==1.19.5 changefinder==0.03 scipy==1.5.4 -``` - -**Note**: if you need to tell Netdata to use Python 3 then you can pass the below command in the python plugin section -of your `netdata.conf` file. - -```yaml -[ plugin:python.d ] - # update every = 1 - command options = -ppython3 -``` - -## Configuration - -Install the Python requirements above, enable the collector and restart Netdata. - -```bash -cd /etc/netdata/ -sudo ./edit-config python.d.conf -# Set `changefinder: no` to `changefinder: yes` -sudo systemctl restart netdata -``` - -The configuration for the changefinder collector defines how it will behave on your system and might take some -experimentation with over time to set it optimally for your node. Out of the box, the config comes with -some [sane defaults](https://www.netdata.cloud/blog/redefining-monitoring-netdata/) to get you started that try to -balance the flexibility and power of the ML models with the goal of being as cheap as possible in term of cost on the -node resources. - -_**Note**: If you are unsure about any of the below configuration options then it's best to just ignore all this and -leave the `changefinder.conf` file alone to begin with. Then you can return to it later if you would like to tune things -a bit more once the collector is running for a while and you have a feeling for its performance on your node._ - -Edit the `python.d/changefinder.conf` configuration file using `edit-config` from the your -agent's [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is usually at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/changefinder.conf -``` - -The default configuration should look something like this. Here you can see each parameter (with sane defaults) and some -information about each one and what it does. - -```yaml -# - -# JOBS (data collection sources) - -# Pull data from local Netdata node. -local: - - # A friendly name for this job. - name: 'local' - - # What host to pull data from. 
- host: '127.0.0.1:19999' - - # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc. - charts_regex: 'system\..*' - - # Charts to exclude, useful if you would like to exclude some specific charts. - # Note: should be a ',' separated string like 'chart.name,chart.name'. - charts_to_exclude: '' - - # Get ChangeFinder scores 'per_dim' or 'per_chart'. - mode: 'per_chart' - - # Default parameters that can be passed to the changefinder library. - cf_r: 0.5 - cf_order: 1 - cf_smooth: 15 - - # The percentile above which scores will be flagged. - cf_threshold: 99 - - # The number of recent scores to use when calculating the percentile of the changefinder score. - n_score_samples: 14400 - - # Set to true if you also want to chart the percentile scores in addition to the flags. - # Mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time. - show_scores: false -``` - -## Troubleshooting - -To see any relevant log messages you can use a command like below. - -```bash -grep 'changefinder' /var/log/netdata/error.log -``` - -If you would like to log in as `netdata` user and run the collector in debug mode to see more detail. - -```bash -# become netdata user -sudo su -s /bin/bash netdata -# run collector in debug using `nolock` option if netdata is already running the collector itself. -/usr/libexec/netdata/plugins.d/python.d.plugin changefinder debug trace nolock -``` - -## Notes - -- It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into it's - typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly - this is because it can take a while to build up a proper distribution of previous scores in over to convert the raw - score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have - already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then - should 'settle down' once it has built up enough history. This is a typical characteristic of online machine learning - approaches which need some initial window of time before they can be useful. -- As this collector does most of the work in Python itself, you may want to try it out first on a test or development - system to get a sense of its performance characteristics on a node similar to where you would like to use it. -- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) vm running Ubuntu 18.04 LTS and not doing any work some of the - typical performance characteristics we saw from running this collector (with defaults) were: - - A runtime (`netdata.runtime_changefinder`) of ~30ms. - - Typically ~1% additional cpu usage. - - About ~85mb of ram (`apps.mem`) being continually used by the `python.d.plugin` under default configuration. - -## Useful links and further reading - -- [PyPi changefinder](https://pypi.org/project/changefinder/) reference page. -- [GitHub repo](https://github.com/shunsukeaihara/changefinder) for the changefinder library. -- Relevant academic papers: - - Yamanishi K, Takeuchi J. A unifying framework for detecting outliers and change points from nonstationary time - series data. 8th ACM SIGKDD international conference on Knowledge discovery and data mining - KDD02. 2002: - 676. ([pdf](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.12.3469&rep=rep1&type=pdf)) - - Kawahara Y, Sugiyama M. 
Sequential Change-Point Detection Based on Direct Density-Ratio Estimation. SIAM - International Conference on Data Mining. 2009: - 389–400. ([pdf](https://onlinelibrary.wiley.com/doi/epdf/10.1002/sam.10124)) - - Liu S, Yamada M, Collier N, Sugiyama M. Change-point detection in time-series data by relative density-ratio - estimation. Neural Networks. Jul.2013 43:72–83. [PubMed: 23500502] ([pdf](https://arxiv.org/pdf/1203.0453.pdf)) - - T. Iwata, K. Nakamura, Y. Tokusashi, and H. Matsutani, “Accelerating Online Change-Point Detection Algorithm using - 10 GbE FPGA NIC,” Proc. International European Conference on Parallel and Distributed Computing (Euro-Par’18) - Workshops, vol.11339, pp.506–517, Aug. - 2018 ([pdf](https://www.arc.ics.keio.ac.jp/~matutani/papers/iwata_heteropar2018.pdf)) -- The [ruptures](https://github.com/deepcharles/ruptures) python package is also a good place to learn more about - changepoint detection (mostly offline as opposed to online but deals with similar concepts). -- A nice [blog post](https://techrando.com/2019/08/14/a-brief-introduction-to-change-point-detection-using-python/) - showing some of the other options and libraries for changepoint detection in Python. -- [Bayesian changepoint detection](https://github.com/hildensia/bayesian_changepoint_detection) library - we may explore - implementing a collector for this or integrating this approach into this collector at a future date if there is - interest and it proves computationaly feasible. -- You might also find the - Netdata [anomalies collector](https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin/anomalies) - interesting. -- [Anomaly Detection](https://en.wikipedia.org/wiki/Anomaly_detection) wikipedia page. -- [Anomaly Detection YouTube playlist](https://www.youtube.com/playlist?list=PL6Zhl9mK2r0KxA6rB87oi4kWzoqGd5vp0) - maintained by [andrewm4894](https://github.com/andrewm4894/) from Netdata. -- [awesome-TS-anomaly-detection](https://github.com/rob-med/awesome-TS-anomaly-detection) Github list of useful tools, - libraries and resources. -- [Mendeley public group](https://www.mendeley.com/community/interesting-anomaly-detection-papers/) with some - interesting anomaly detection papers we have been reading. -- Good [blog post](https://www.anodot.com/blog/what-is-anomaly-detection/) from Anodot on time series anomaly detection. - Anodot also have some great whitepapers in this space too that some may find useful. -- Novelty and outlier detection in - the [scikit-learn documentation](https://scikit-learn.org/stable/modules/outlier_detection.html). - -### Troubleshooting - -To troubleshoot issues with the `changefinder` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
-
-```bash
-cd /usr/libexec/netdata/plugins.d/
-sudo su -s /bin/bash netdata
-```
-
-Now you can manually run the `changefinder` module in debug mode:
-
-```bash
-./python.d.plugin changefinder debug trace
-```
-
diff --git a/collectors/python.d.plugin/changefinder/README.md b/collectors/python.d.plugin/changefinder/README.md
new file mode 120000
index 00000000000000..0ca704eb1f491f
--- /dev/null
+++ b/collectors/python.d.plugin/changefinder/README.md
@@ -0,0 +1 @@
+integrations/python.d_changefinder.md
\ No newline at end of file
diff --git a/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md b/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md
new file mode 100644
index 00000000000000..c338c93741a55a
--- /dev/null
+++ b/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md
@@ -0,0 +1,217 @@
+
+
+# python.d changefinder
+
+Plugin: python.d.plugin
+Module: changefinder
+
+
+
+## Overview
+
+This collector uses the Python [changefinder](https://github.com/shunsukeaihara/changefinder) library to
+perform [online](https://en.wikipedia.org/wiki/Online_machine_learning) [changepoint detection](https://en.wikipedia.org/wiki/Change_detection)
+on your Netdata charts and/or dimensions.
+
+
+Instead of this collector just _collecting_ data, it also does some computation on the data it collects to return a changepoint score for each chart or dimension you configure it to work on. This is an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm so there is no batch step to train the model, instead it evolves over time as more data arrives. That makes this particular algorithm quite cheap to compute at each step of data collection (see the notes section below for more details) and it should scale fairly well to work on lots of charts or hosts (if running on a parent node for example).
+
+### Notes
+
+- It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into its
+  typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly
+  this is because it can take a while to build up a proper distribution of previous scores in order to convert the raw
+  score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have
+  already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then
+  should 'settle down' once it has built up enough history. This is a typical characteristic of online machine learning
+  approaches which need some initial window of time before they can be useful.
+- As this collector does most of the work in Python itself, you may want to try it out first on a test or development
+  system to get a sense of its performance characteristics on a node similar to where you would like to use it.
+- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) vm running Ubuntu 18.04 LTS and not doing any work some of the
+  typical performance characteristics we saw from running this collector (with defaults) were:
+  - A runtime (`netdata.runtime_changefinder`) of ~30ms.
+  - Typically ~1% additional cpu usage.
+  - About ~85mb of ram (`apps.mem`) being continually used by the `python.d.plugin` under default configuration.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
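+
+To make the scoring described above concrete, here is a minimal stand-alone sketch of the same idea outside of Netdata (an illustration, not the collector's actual code). It assumes the `changefinder` package's `ChangeFinder(r, order, smooth).update()` API plus `scipy.stats.percentileofscore`, and mirrors the `cf_r`, `cf_order`, `cf_smooth`, `cf_threshold` and `n_score_samples` options documented below.
+
+```python
+from collections import deque
+
+import changefinder
+import numpy as np
+from scipy.stats import percentileofscore
+
+cf = changefinder.ChangeFinder(r=0.5, order=1, smooth=15)  # cf_r, cf_order, cf_smooth
+recent_scores = deque(maxlen=14400)                        # n_score_samples
+cf_threshold = 99                                          # percentile above which we flag
+
+# Synthetic series with a level shift halfway through.
+series = np.concatenate([np.random.normal(0, 1, 500), np.random.normal(10, 1, 500)])
+
+for x in series:
+    score = cf.update(x)                                   # raw ChangeFinder score
+    recent_scores.append(score)
+    percentile = percentileofscore(list(recent_scores), score)
+    flag = int(percentile >= cf_threshold)                 # what changefinder.flags charts
+```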
+ + +### Default Behavior + +#### Auto-Detection + +By default this collector will work over all `system.*` charts. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per python.d changefinder instance + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| changefinder.scores | a dimension per chart | score | +| changefinder.flags | a dimension per chart | flag | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Python Requirements + +This collector will only work with Python 3 and requires the packages below be installed. + +```bash +# become netdata user +sudo su -s /bin/bash netdata +# install required packages for the netdata user +pip3 install --user numpy==1.19.5 changefinder==0.03 scipy==1.5.4 +``` + +**Note**: if you need to tell Netdata to use Python 3 then you can pass the below command in the python plugin section +of your `netdata.conf` file. + +```yaml +[ plugin:python.d ] + # update every = 1 + command options = -ppython3 +``` + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/changefinder.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/changefinder.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| charts_regex | what charts to pull data for - A regex like `system\..*\|` or `system\..*\|apps.cpu\|apps.mem` etc. | system\..* | yes |
+| charts_to_exclude | charts to exclude, useful if you would like to exclude some specific charts. note: should be a ',' separated string like 'chart.name,chart.name'. | | no |
+| mode | get ChangeFinder scores 'per_dim' or 'per_chart'. | per_chart | yes |
+| cf_r | default parameters that can be passed to the changefinder library. | 0.5 | no |
+| cf_order | default parameters that can be passed to the changefinder library. | 1 | no |
+| cf_smooth | default parameters that can be passed to the changefinder library. | 15 | no |
+| cf_threshold | the percentile above which scores will be flagged. | 99 | no |
+| n_score_samples | the number of recent scores to use when calculating the percentile of the changefinder score. | 14400 | no |
+| show_scores | set to true if you also want to chart the percentile scores in addition to the flags. (mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time) | no | no |
+
+</details>
+ +#### Examples + +##### Default + +Default configuration. + +```yaml +local: + name: 'local' + host: '127.0.0.1:19999' + charts_regex: 'system\..*' + charts_to_exclude: '' + mode: 'per_chart' + cf_r: 0.5 + cf_order: 1 + cf_smooth: 15 + cf_threshold: 99 + n_score_samples: 14400 + show_scores: false + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `changefinder` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin changefinder debug trace + ``` + +### Debug Mode + + + +### Log Messages + + + + diff --git a/collectors/python.d.plugin/changefinder/metadata.yaml b/collectors/python.d.plugin/changefinder/metadata.yaml index 6dcd903e72a890..170d9146a117ec 100644 --- a/collectors/python.d.plugin/changefinder/metadata.yaml +++ b/collectors/python.d.plugin/changefinder/metadata.yaml @@ -5,55 +5,187 @@ modules: module_name: changefinder monitored_instance: name: python.d changefinder - link: '' + link: "" categories: - data-collection.other - icon_filename: '' + icon_filename: "" related_resources: integrations: list: [] info_provided_to_referring_integrations: - description: '' - keywords: [] + description: "" + keywords: + - change detection + - anomaly detection + - machine learning + - ml most_popular: false overview: data_collection: - metrics_description: '' - method_description: '' + metrics_description: | + This collector uses the Python [changefinder](https://github.com/shunsukeaihara/changefinder) library to + perform [online](https://en.wikipedia.org/wiki/Online_machine_learning) [changepoint detection](https://en.wikipedia.org/wiki/Change_detection) + on your Netdata charts and/or dimensions. + method_description: > + Instead of this collector just _collecting_ data, it also does some computation on the data it collects to return a + changepoint score for each chart or dimension you configure it to work on. This is + an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm so there is no batch step + to train the model, instead it evolves over time as more data arrives. That makes this particular algorithm quite cheap + to compute at each step of data collection (see the notes section below for more details) and it should scale fairly + well to work on lots of charts or hosts (if running on a parent node for example). + + ### Notes + - It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into it's + typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly + this is because it can take a while to build up a proper distribution of previous scores in over to convert the raw + score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have + already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then + should 'settle down' once it has built up enough history. 
This is a typical characteristic of online machine learning + approaches which need some initial window of time before they can be useful. + - As this collector does most of the work in Python itself, you may want to try it out first on a test or development + system to get a sense of its performance characteristics on a node similar to where you would like to use it. + - On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) vm running Ubuntu 18.04 LTS and not doing any work some of the + typical performance characteristics we saw from running this collector (with defaults) were: + - A runtime (`netdata.runtime_changefinder`) of ~30ms. + - Typically ~1% additional cpu usage. + - About ~85mb of ram (`apps.mem`) being continually used by the `python.d.plugin` under default configuration. supported_platforms: include: [] exclude: [] multi_instance: true additional_permissions: - description: '' + description: "" default_behavior: auto_detection: - description: '' + description: "By default this collector will work over all `system.*` charts." limits: - description: '' + description: "" performance_impact: - description: '' + description: "" setup: prerequisites: - list: [] + list: + - title: Python Requirements + description: | + This collector will only work with Python 3 and requires the packages below be installed. + + ```bash + # become netdata user + sudo su -s /bin/bash netdata + # install required packages for the netdata user + pip3 install --user numpy==1.19.5 changefinder==0.03 scipy==1.5.4 + ``` + + **Note**: if you need to tell Netdata to use Python 3 then you can pass the below command in the python plugin section + of your `netdata.conf` file. + + ```yaml + [ plugin:python.d ] + # update every = 1 + command options = -ppython3 + ``` configuration: file: - name: '' - description: '' + name: python.d/changefinder.conf + description: "" options: - description: '' + description: | + There are 2 sections: + + * Global variables + * One or more JOBS that can define multiple different instances to monitor. + + The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + + Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + + Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. folding: - title: '' + title: "Config options" enabled: true - list: [] + list: + - name: charts_regex + description: what charts to pull data for - A regex like `system\..*|` or `system\..*|apps.cpu|apps.mem` etc. + default_value: "system\\..*" + required: true + - name: charts_to_exclude + description: | + charts to exclude, useful if you would like to exclude some specific charts. + note: should be a ',' separated string like 'chart.name,chart.name'. + default_value: "" + required: false + - name: mode + description: get ChangeFinder scores 'per_dim' or 'per_chart'. + default_value: "per_chart" + required: true + - name: cf_r + description: default parameters that can be passed to the changefinder library. + default_value: 0.5 + required: false + - name: cf_order + description: default parameters that can be passed to the changefinder library. + default_value: 1 + required: false + - name: cf_smooth + description: default parameters that can be passed to the changefinder library. 
+ default_value: 15 + required: false + - name: cf_threshold + description: the percentile above which scores will be flagged. + default_value: 99 + required: false + - name: n_score_samples + description: the number of recent scores to use when calculating the percentile of the changefinder score. + default_value: 14400 + required: false + - name: show_scores + description: | + set to true if you also want to chart the percentile scores in addition to the flags. (mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time) + default_value: false + required: false examples: folding: enabled: true - title: '' - list: [] + title: "Config" + list: + - name: Default + description: Default configuration. + folding: + enabled: false + config: | + local: + name: 'local' + host: '127.0.0.1:19999' + charts_regex: 'system\..*' + charts_to_exclude: '' + mode: 'per_chart' + cf_r: 0.5 + cf_order: 1 + cf_smooth: 15 + cf_threshold: 99 + n_score_samples: 14400 + show_scores: false troubleshooting: problems: - list: [] + list: + - name: "Debug Mode" + description: | + If you would like to log in as `netdata` user and run the collector in debug mode to see more detail. + + ```bash + # become netdata user + sudo su -s /bin/bash netdata + # run collector in debug using `nolock` option if netdata is already running the collector itself. + /usr/libexec/netdata/plugins.d/python.d.plugin changefinder debug trace nolock + ``` + - name: "Log Messages" + description: | + To see any relevant log messages you can use a command like below. + + ```bash + grep 'changefinder' /var/log/netdata/error.log + grep 'changefinder' /var/log/netdata/collector.log + ``` alerts: [] metrics: folding: diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md deleted file mode 100644 index 2397b74789c319..00000000000000 --- a/collectors/python.d.plugin/dovecot/README.md +++ /dev/null @@ -1,128 +0,0 @@ - - -# Dovecot collector - -Provides statistics information from Dovecot server. - -Statistics are taken from dovecot socket by executing `EXPORT global` command. -More information about dovecot stats can be found on [project wiki page.](http://wiki2.dovecot.org/Statistics) - -Module isn't compatible with new statistic api (v2.3), but you are still able to use the module with Dovecot v2.3 -by following [upgrading steps.](https://wiki2.dovecot.org/Upgrading/2.3). - -**Requirement:** -Dovecot UNIX socket with R/W permissions for user `netdata` or Dovecot with configured TCP/IP socket. - -Module gives information with following charts: - -1. **sessions** - - - active sessions - -2. **logins** - - - logins - -3. **commands** - number of IMAP commands - - - commands - -4. **Faults** - - - minor - - major - -5. **Context Switches** - - - voluntary - - involuntary - -6. **disk** in bytes/s - - - read - - write - -7. **bytes** in bytes/s - - - read - - write - -8. **number of syscalls** in syscalls/s - - - read - - write - -9. **lookups** - number of lookups per second - - - path - - attr - -10. **hits** - number of cache hits - - - hits - -11. **attempts** - authorization attempts - - - success - - failure - -12. **cache** - cached authorization hits - - - hit - - miss - -## Configuration - -Edit the `python.d/dovecot.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. 
- -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/dovecot.conf -``` - -Sample: - -```yaml -localtcpip: - name : 'local' - host : '127.0.0.1' - port : 24242 - -localsocket: - name : 'local' - socket : '/var/run/dovecot/stats' -``` - -If no configuration is given, module will attempt to connect to dovecot using unix socket localized in `/var/run/dovecot/stats` - - - - -### Troubleshooting - -To troubleshoot issues with the `dovecot` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `dovecot` module in debug mode: - -```bash -./python.d.plugin dovecot debug trace -``` - diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md new file mode 120000 index 00000000000000..c4749cedce0686 --- /dev/null +++ b/collectors/python.d.plugin/dovecot/README.md @@ -0,0 +1 @@ +integrations/dovecot.md \ No newline at end of file diff --git a/collectors/python.d.plugin/dovecot/integrations/dovecot.md b/collectors/python.d.plugin/dovecot/integrations/dovecot.md new file mode 100644 index 00000000000000..4e7952765057fe --- /dev/null +++ b/collectors/python.d.plugin/dovecot/integrations/dovecot.md @@ -0,0 +1,197 @@ + + +# Dovecot + + + + + +Plugin: python.d.plugin +Module: dovecot + + + +## Overview + +This collector monitors Dovecot metrics about sessions, logins, commands, page faults and more. + +It uses the dovecot socket and executes the `EXPORT global` command to get the statistics. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +If no configuration is given, the collector will attempt to connect to dovecot using unix socket localized in `/var/run/dovecot/stats` + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Dovecot instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| dovecot.sessions | active sessions | number | +| dovecot.logins | logins | number | +| dovecot.commands | commands | commands | +| dovecot.faults | minor, major | faults | +| dovecot.context_switches | voluntary, involuntary | switches | +| dovecot.io | read, write | KiB/s | +| dovecot.net | read, write | kilobits/s | +| dovecot.syscalls | read, write | syscalls/s | +| dovecot.lookup | path, attr | number/s | +| dovecot.cache | hits | hits/s | +| dovecot.auth | ok, failed | attempts | +| dovecot.auth_cache | hit, miss | number | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Dovecot configuration + +The Dovecot UNIX socket should have R/W permissions for user netdata, or Dovecot should be configured with a TCP/IP socket. + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/dovecot.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/dovecot.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| socket | Use this socket to communicate with Dovecot. | /var/run/dovecot/stats | no |
+| host | Instead of using a socket, you can point the collector to an IP address for Dovecot statistics. | | no |
+| port | Used in combination with host, configures the port Dovecot listens on. | | no |
+
+</details>
+ +#### Examples + +##### Local TCP + +A basic TCP configuration. + +
Config + +```yaml +localtcpip: + name: 'local' + host: '127.0.0.1' + port: 24242 + +``` +
+ +##### Local socket + +A basic local socket configuration + +
Config + +```yaml +localsocket: + name: 'local' + socket: '/var/run/dovecot/stats' + +``` +
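+
+If you want to check outside of Netdata that the stats socket responds at all, the sketch below reproduces the kind of query the collector performs. It assumes the `EXPORT global` command mentioned above is sent over the UNIX socket and that the reply is a tab-separated header row followed by a value row; the exact wire format may vary between Dovecot versions, so treat this as a rough illustration only.
+
+```python
+import socket
+
+STATS_SOCKET = "/var/run/dovecot/stats"  # same default path as the collector
+
+# Connect to the (pre-2.3) stats socket and ask for the global counters.
+sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+sock.connect(STATS_SOCKET)
+sock.sendall(b"EXPORT\tglobal\n")
+reply = sock.recv(65536).decode()
+sock.close()
+
+lines = reply.splitlines()
+print(dict(zip(lines[0].split("\t"), lines[1].split("\t"))))
+```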
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `dovecot` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin dovecot debug trace + ``` + + diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md deleted file mode 100644 index 63ec7a2985a6d5..00000000000000 --- a/collectors/python.d.plugin/example/README.md +++ /dev/null @@ -1,38 +0,0 @@ - - -# Example module in Python - -You can add custom data collectors using Python. - -Netdata provides an [example python data collection module](https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin/example). - -If you want to write your own collector, read our [writing a new Python module](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial. - - -### Troubleshooting - -To troubleshoot issues with the `example` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `example` module in debug mode: - -```bash -./python.d.plugin example debug trace -``` - diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md new file mode 120000 index 00000000000000..55877a99ab64d6 --- /dev/null +++ b/collectors/python.d.plugin/example/README.md @@ -0,0 +1 @@ +integrations/example_collector.md \ No newline at end of file diff --git a/collectors/python.d.plugin/example/integrations/example_collector.md b/collectors/python.d.plugin/example/integrations/example_collector.md new file mode 100644 index 00000000000000..7dded67ba95832 --- /dev/null +++ b/collectors/python.d.plugin/example/integrations/example_collector.md @@ -0,0 +1,171 @@ + + +# Example collector + +Plugin: python.d.plugin +Module: example + + + +## Overview + +Example collector that generates some random numbers as metrics. + +If you want to write your own collector, read our [writing a new Python module](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial. + + +The `get_data()` function uses `random.randint()` to generate a random number which will be collected as a metric. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. 
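+
+Since the module generates random numbers rather than probing a real service, there is nothing to auto-detect. Conceptually, its data function boils down to something like the simplified stand-alone sketch below (not the shipped module, which plugs into the python.d framework):
+
+```python
+import random
+
+def get_data(num_lines=4, lower=0, upper=100):
+    """Return one random value per dimension, in the spirit of the example module's get_data()."""
+    return {"random{0}".format(i): random.randint(lower, upper) for i in range(num_lines)}
+
+print(get_data())  # e.g. {'random0': 42, 'random1': 7, 'random2': 93, 'random3': 18}
+```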
+ +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Example collector instance + +These metrics refer to the entire monitored application. + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| example.random | random | number | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/example.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/example.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| num_lines | The number of lines to create. | 4 | no | +| lower | The lower bound of numbers to randomly sample from. | 0 | no | +| upper | The upper bound of numbers to randomly sample from. | 100 | no | +| update_every | Sets the default data collection frequency. | 1 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | + +
+ +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +four_lines: + name: "Four Lines" + update_every: 1 + priority: 60000 + penalty: yes + autodetection_retry: 0 + num_lines: 4 + lower: 0 + upper: 100 + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `example` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin example debug trace + ``` + + diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md deleted file mode 100644 index bc00ab7c6e1566..00000000000000 --- a/collectors/python.d.plugin/exim/README.md +++ /dev/null @@ -1,64 +0,0 @@ - - -# Exim collector - -Simple module executing `exim -bpc` to grab exim queue. -This command can take a lot of time to finish its execution thus it is not recommended to run it every second. - -## Requirements - -The module uses the `exim` binary, which can only be executed as root by default. We need to allow other users to `exim` binary. We solve that adding `queue_list_requires_admin` statement in exim configuration and set to `false`, because it is `true` by default. On many Linux distributions, the default location of `exim` configuration is in `/etc/exim.conf`. - -1. Edit the `exim` configuration with your preferred editor and add: -`queue_list_requires_admin = false` -2. Restart `exim` and Netdata - -*WHM (CPanel) server* - -On a WHM server, you can reconfigure `exim` over the WHM interface with the following steps. - -1. Login to WHM -2. Navigate to Service Configuration --> Exim Configuration Manager --> tab Advanced Editor -3. Scroll down to the button **Add additional configuration setting** and click on it. -4. In the new dropdown which will appear above we need to find and choose: -`queue_list_requires_admin` and set to `false` -5. Scroll to the end and click the **Save** button. - -It produces only one chart: - -1. **Exim Queue Emails** - - - emails - -Configuration is not needed. - - - - -### Troubleshooting - -To troubleshoot issues with the `exim` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `exim` module in debug mode: - -```bash -./python.d.plugin exim debug trace -``` - diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md new file mode 120000 index 00000000000000..f1f2ef9f927dd8 --- /dev/null +++ b/collectors/python.d.plugin/exim/README.md @@ -0,0 +1 @@ +integrations/exim.md \ No newline at end of file diff --git a/collectors/python.d.plugin/exim/integrations/exim.md b/collectors/python.d.plugin/exim/integrations/exim.md new file mode 100644 index 00000000000000..f0ae33d3eeb881 --- /dev/null +++ b/collectors/python.d.plugin/exim/integrations/exim.md @@ -0,0 +1,181 @@ + + +# Exim + + + + + +Plugin: python.d.plugin +Module: exim + + + +## Overview + +This collector monitors Exim mail queue. + +It uses the `exim` command line binary to get the statistics. + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +Assuming setup prerequisites are met, the collector will try to gather statistics using the method described above, even without any configuration. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Exim instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exim.qemails | emails | emails | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Exim configuration - local installation + +The module uses the `exim` binary, which can only be executed as root by default. We need to allow other users to `exim` binary. We solve that adding `queue_list_requires_admin` statement in exim configuration and set to `false`, because it is `true` by default. On many Linux distributions, the default location of `exim` configuration is in `/etc/exim.conf`. + +1. Edit the `exim` configuration with your preferred editor and add: +`queue_list_requires_admin = false` +2. Restart `exim` and Netdata + + +#### Exim configuration - WHM (CPanel) server + +On a WHM server, you can reconfigure `exim` over the WHM interface with the following steps. + +1. Login to WHM +2. Navigate to Service Configuration --> Exim Configuration Manager --> tab Advanced Editor +3. Scroll down to the button **Add additional configuration setting** and click on it. +4. In the new dropdown which will appear above we need to find and choose: +`queue_list_requires_admin` and set to `false` +5. Scroll to the end and click the **Save** button. + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/exim.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). 
+ +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/exim.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | +| command | Path and command to the `exim` binary | exim -bpc | no | + +
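+
+In essence, on every update the job runs the configured `command` and reads a single integer back. A stand-alone Python equivalent of that step (assuming `exim` is on the `PATH` and the prerequisites above are met) looks roughly like this:
+
+```python
+import subprocess
+
+# `exim -bpc` prints the number of messages currently in the queue.
+output = subprocess.check_output(["exim", "-bpc"], text=True)
+print({"emails": int(output.strip())})  # maps to the exim.qemails chart
+```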
+ +#### Examples + +##### Local exim install + +A basic local exim install + +```yaml +local: + command: 'exim -bpc' + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `exim` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin exim debug trace + ``` + + diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md deleted file mode 100644 index 41276d5f760558..00000000000000 --- a/collectors/python.d.plugin/fail2ban/README.md +++ /dev/null @@ -1,105 +0,0 @@ - - -# Fail2ban collector - -Monitors the fail2ban log file to show all bans for all active jails. - -## Requirements - -The `fail2ban.log` file must be readable by the user `netdata`: - -- change the file ownership and access permissions. -- update `/etc/logrotate.d/fail2ban` to persists the changes after rotating the log file. - -
- Click to expand the instruction. - -To change the file ownership and access permissions, execute the following: - -```shell -sudo chown root:netdata /var/log/fail2ban.log -sudo chmod 640 /var/log/fail2ban.log -``` - -To persist the changes after rotating the log file, add `create 640 root netdata` to the `/etc/logrotate.d/fail2ban`: - -```shell -/var/log/fail2ban.log { - - weekly - rotate 4 - compress - - delaycompress - missingok - postrotate - fail2ban-client flushlogs 1>/dev/null - endscript - - # If fail2ban runs as non-root it still needs to have write access - # to logfiles. - # create 640 fail2ban adm - create 640 root netdata -} -``` - -
- -## Charts - -- Failed attempts in attempts/s -- Bans in bans/s -- Banned IP addresses (since the last restart of netdata) in ips - -## Configuration - -Edit the `python.d/fail2ban.conf` configuration file using `edit-config` from the -Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/fail2ban.conf -``` - -Sample: - -```yaml -local: - log_path: '/var/log/fail2ban.log' - conf_path: '/etc/fail2ban/jail.local' - exclude: 'dropbear apache' -``` - -If no configuration is given, module will attempt to read log file at `/var/log/fail2ban.log` and conf file -at `/etc/fail2ban/jail.local`. If conf file is not found default jail is `ssh`. - - - - -### Troubleshooting - -To troubleshoot issues with the `fail2ban` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `fail2ban` module in debug mode: - -```bash -./python.d.plugin fail2ban debug trace -``` - diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md new file mode 120000 index 00000000000000..642a8bcf533105 --- /dev/null +++ b/collectors/python.d.plugin/fail2ban/README.md @@ -0,0 +1 @@ +integrations/fail2ban.md \ No newline at end of file diff --git a/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md b/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md new file mode 100644 index 00000000000000..a7116be5e47067 --- /dev/null +++ b/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md @@ -0,0 +1,209 @@ + + +# Fail2ban + + + + + +Plugin: python.d.plugin +Module: fail2ban + + + +## Overview + +Monitor Fail2ban performance for prime intrusion prevention operations. Monitor ban counts, jail statuses, and failed login attempts to ensure robust network security. + + +It collects metrics through reading the default log and configuration files of fail2ban. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The `fail2ban.log` file must be readable by the user `netdata`. + - change the file ownership and access permissions. + - update `/etc/logrotate.d/fail2ban`` to persist the changes after rotating the log file. + +To change the file ownership and access permissions, execute the following: + +```shell +sudo chown root:netdata /var/log/fail2ban.log +sudo chmod 640 /var/log/fail2ban.log +``` + +To persist the changes after rotating the log file, add `create 640 root netdata` to the `/etc/logrotate.d/fail2ban`: + +```shell +/var/log/fail2ban.log { + + weekly + rotate 4 + compress + + delaycompress + missingok + postrotate + fail2ban-client flushlogs 1>/dev/null + endscript + + # If fail2ban runs as non-root it still needs to have write access + # to logfiles. 
+ # create 640 fail2ban adm + create 640 root netdata +} +``` + + +### Default Behavior + +#### Auto-Detection + +By default the collector will attempt to read log file at /var/log/fail2ban.log and conf file at /etc/fail2ban/jail.local. +If conf file is not found default jail is ssh. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Fail2ban instance + +These metrics refer to the entire monitored application. + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| fail2ban.failed_attempts | a dimension per jail | attempts/s | +| fail2ban.bans | a dimension per jail | bans/s | +| fail2ban.banned_ips | a dimension per jail | ips | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/fail2ban.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/fail2ban.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| log_path | path to fail2ban.log. | /var/log/fail2ban.log | no | +| conf_path | path to jail.local/jail.conf. | /etc/fail2ban/jail.local | no | +| conf_dir | path to jail.d/. | /etc/fail2ban/jail.d/ | no | +| exclude | jails you want to exclude from autodetection. | | no | +| update_every | Sets the default data collection frequency. | 1 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | + +
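+
+For a rough idea of the log parsing involved, the sketch below counts bans and distinct banned IPs per jail straight from `fail2ban.log`. The regular expression is an assumption about the usual log format (lines such as `... [sshd] Ban 203.0.113.7`), not the collector's actual implementation:
+
+```python
+import re
+from collections import Counter
+
+BAN_RE = re.compile(r"\[(?P<jail>[^\]]+)\]\s+Ban\s+(?P<ip>\S+)")
+
+bans_per_jail = Counter()
+banned_ips = {}
+
+with open("/var/log/fail2ban.log") as log:
+    for line in log:
+        match = BAN_RE.search(line)
+        if match:
+            bans_per_jail[match["jail"]] += 1
+            banned_ips.setdefault(match["jail"], set()).add(match["ip"])
+
+print(bans_per_jail)
+print({jail: len(ips) for jail, ips in banned_ips.items()})
+```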
+ +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +local: + log_path: '/var/log/fail2ban.log' + conf_path: '/etc/fail2ban/jail.local' + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `fail2ban` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin fail2ban debug trace + ``` + +### Debug Mode + + + + diff --git a/collectors/python.d.plugin/fail2ban/metadata.yaml b/collectors/python.d.plugin/fail2ban/metadata.yaml index 80aa68b624b472..61f762679c9cda 100644 --- a/collectors/python.d.plugin/fail2ban/metadata.yaml +++ b/collectors/python.d.plugin/fail2ban/metadata.yaml @@ -35,29 +35,29 @@ modules: The `fail2ban.log` file must be readable by the user `netdata`. - change the file ownership and access permissions. - update `/etc/logrotate.d/fail2ban`` to persist the changes after rotating the log file. - + To change the file ownership and access permissions, execute the following: - + ```shell sudo chown root:netdata /var/log/fail2ban.log sudo chmod 640 /var/log/fail2ban.log ``` - + To persist the changes after rotating the log file, add `create 640 root netdata` to the `/etc/logrotate.d/fail2ban`: - + ```shell /var/log/fail2ban.log { - + weekly rotate 4 compress - + delaycompress missingok postrotate fail2ban-client flushlogs 1>/dev/null endscript - + # If fail2ban runs as non-root it still needs to have write access # to logfiles. # create 640 fail2ban adm @@ -67,7 +67,8 @@ modules: default_behavior: auto_detection: description: | - By default the collector will attempt to read log file at /var/log/fail2ban.log and conf file at /etc/fail2ban/jail.local. If conf file is not found default jail is ssh. + By default the collector will attempt to read log file at /var/log/fail2ban.log and conf file at /etc/fail2ban/jail.local. + If conf file is not found default jail is ssh. limits: description: "" performance_impact: @@ -77,19 +78,19 @@ modules: list: [] configuration: file: - name: "" + name: python.d/fail2ban.conf description: "" options: description: | There are 2 sections: - + * Global variables * One or more JOBS that can define multiple different instances to monitor. - + The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - + Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - + Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. folding: title: Config options @@ -146,7 +147,26 @@ modules: conf_path: '/etc/fail2ban/jail.local' troubleshooting: problems: - list: [] + list: + - name: Debug Mode + description: | + To troubleshoot issues with the `fail2ban` module, run the `python.d.plugin` with the debug option enabled. + The output will give you the output of the data collection job or error messages on why the collector isn't working. 
+ + First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's + not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the + plugin's directory, switch to the `netdata` user. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + sudo su -s /bin/bash netdata + ``` + + Now you can manually run the `fail2ban` module in debug mode: + + ```bash + ./python.d.plugin fail2ban debug trace + ``` alerts: [] metrics: folding: diff --git a/collectors/python.d.plugin/gearman/README.md b/collectors/python.d.plugin/gearman/README.md deleted file mode 100644 index 329c34726c5d7c..00000000000000 --- a/collectors/python.d.plugin/gearman/README.md +++ /dev/null @@ -1,73 +0,0 @@ - - -# Gearman collector - -Monitors Gearman worker statistics. A chart is shown for each job as well as one showing a summary of all workers. - -Note: Charts may show as a line graph rather than an area -graph if you load Netdata with no jobs running. To change -this go to "Settings" > "Which dimensions to show?" and -select "All". - -Plugin can obtain data from tcp socket **OR** unix socket. - -**Requirement:** -Socket MUST be readable by netdata user. - -It produces: - - * Workers queued - * Workers idle - * Workers running - -## Configuration - -Edit the `python.d/gearman.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/gearman.conf -``` - -```yaml -localhost: - name : 'local' - host : 'localhost' - port : 4730 - - # TLS information can be provided as well - tls : no - cert : /path/to/cert - key : /path/to/key -``` - -When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:4730`. - -### Troubleshooting - -To troubleshoot issues with the `gearman` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `gearman` module in debug mode: - -```bash -./python.d.plugin gearman debug trace -``` - diff --git a/collectors/python.d.plugin/gearman/README.md b/collectors/python.d.plugin/gearman/README.md new file mode 120000 index 00000000000000..70189d69860f35 --- /dev/null +++ b/collectors/python.d.plugin/gearman/README.md @@ -0,0 +1 @@ +integrations/gearman.md \ No newline at end of file diff --git a/collectors/python.d.plugin/gearman/integrations/gearman.md b/collectors/python.d.plugin/gearman/integrations/gearman.md new file mode 100644 index 00000000000000..3923d14017749c --- /dev/null +++ b/collectors/python.d.plugin/gearman/integrations/gearman.md @@ -0,0 +1,210 @@ + + +# Gearman + + + + + +Plugin: python.d.plugin +Module: gearman + + + +## Overview + +Monitor Gearman metrics for proficient system task distribution. 
Track job counts, worker statuses, and queue lengths for effective distributed task management. + +This collector connects to a Gearman instance via either TCP or unix socket. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +When no configuration file is found, the collector tries to connect to TCP/IP socket: localhost:4730. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Gearman instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| gearman.total_jobs | Pending, Running | Jobs | + +### Per gearman job + +Metrics related to Gearman jobs. Each job produces its own set of the following metrics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| gearman.single_job | Pending, Idle, Runnning | Jobs | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ gearman_workers_queued ](https://github.com/netdata/netdata/blob/master/health/health.d/gearman.conf) | gearman.single_job | average number of queued jobs over the last 10 minutes | + + +## Setup + +### Prerequisites + +#### Socket permissions + +The gearman UNIX socket should have read permission for user netdata. + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/gearman.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/gearman.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
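+As a quick way to verify what this collector will see before filling in `host` and `port`, the hypothetical standalone script below queries Gearman's text admin protocol directly (the `status` command returns one tab-separated line per registered job: name, queued, running, available workers). It is only a sketch of the data source, not the collector's implementation; the host and port are assumptions matching the defaults listed below.
+
+```python
+# Hypothetical check, not part of Netdata: ask gearmand for its job status
+# over the plain-text admin protocol, the same information this collector charts.
+import socket
+
+
+def gearman_status(host='localhost', port=4730, timeout=2.0):
+    """Return {job_name: (queued, running, available_workers)}."""
+    with socket.create_connection((host, port), timeout=timeout) as sock:
+        sock.sendall(b'status\n')
+        raw = b''
+        while not raw.endswith(b'.\n'):
+            chunk = sock.recv(4096)
+            if not chunk:
+                break
+            raw += chunk
+    jobs = {}
+    for line in raw.decode().splitlines():
+        if not line or line == '.':
+            continue
+        name, queued, running, workers = line.split('\t')
+        jobs[name] = (int(queued), int(running), int(workers))
+    return jobs
+
+
+if __name__ == '__main__':
+    for job, (queued, running, workers) in gearman_status().items():
+        print(f'{job}: queued={queued} running={running} workers={workers}')
+```
+
+If this prints your jobs, the same `host` and `port` values should work in the job definition below.
+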
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | +| host | URL or IP where gearman is running. | localhost | no | +| port | Port of URL or IP where gearman is running. | 4730 | no | +| tls | Use tls to connect to gearman. | false | no | +| cert | Provide a certificate file if needed to connect to a TLS gearman instance. | | no | +| key | Provide a key file if needed to connect to a TLS gearman instance. | | no | + +
+ +#### Examples + +##### Local gearman service + +A basic host and port gearman configuration for localhost. + +```yaml +localhost: + name: 'local' + host: 'localhost' + port: 4730 + +``` +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
Config + +```yaml +localhost: + name: 'local' + host: 'localhost' + port: 4730 + +remote: + name: 'remote' + host: '192.0.2.1' + port: 4730 + +``` +
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `gearman` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin gearman debug trace + ``` + + diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md deleted file mode 100644 index f86fa6d04e53a7..00000000000000 --- a/collectors/python.d.plugin/go_expvar/README.md +++ /dev/null @@ -1,342 +0,0 @@ - - -# Go applications collector - -Monitors Go application that exposes its metrics with the use of `expvar` package from the Go standard library. The package produces charts for Go runtime memory statistics and optionally any number of custom charts. - -The `go_expvar` module produces the following charts: - -1. **Heap allocations** in kB - - - alloc: size of objects allocated on the heap - - inuse: size of allocated heap spans - -2. **Stack allocations** in kB - - - inuse: size of allocated stack spans - -3. **MSpan allocations** in kB - - - inuse: size of allocated mspan structures - -4. **MCache allocations** in kB - - - inuse: size of allocated mcache structures - -5. **Virtual memory** in kB - - - sys: size of reserved virtual address space - -6. **Live objects** - - - live: number of live objects in memory - -7. **GC pauses average** in ns - - - avg: average duration of all GC stop-the-world pauses - -## Monitoring Go applications - -Netdata can be used to monitor running Go applications that expose their metrics with -the use of the [expvar package](https://golang.org/pkg/expvar/) included in Go standard library. - -The `expvar` package exposes these metrics over HTTP and is very easy to use. -Consider this minimal sample below: - -```go -package main - -import ( - _ "expvar" - "net/http" -) - -func main() { - http.ListenAndServe("127.0.0.1:8080", nil) -} -``` - -When imported this way, the `expvar` package registers a HTTP handler at `/debug/vars` that -exposes Go runtime's memory statistics in JSON format. You can inspect the output by opening -the URL in your browser (or by using `wget` or `curl`). - -Sample output: - -```json -{ -"cmdline": ["./expvar-demo-binary"], -"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, } -} -``` - -You can of course expose and monitor your own variables as well. -Here is a sample Go application that exposes a few custom variables: - -```go -package main - -import ( - "expvar" - "net/http" - "runtime" - "time" -) - -func main() { - - tick := time.NewTicker(1 * time.Second) - num_go := expvar.NewInt("runtime.goroutines") - counters := expvar.NewMap("counters") - counters.Set("cnt1", new(expvar.Int)) - counters.Set("cnt2", new(expvar.Float)) - - go http.ListenAndServe(":8080", nil) - - for { - select { - case <- tick.C: - num_go.Set(int64(runtime.NumGoroutine())) - counters.Add("cnt1", 1) - counters.AddFloat("cnt2", 1.452) - } - } -} -``` - -Apart from the runtime memory stats, this application publishes two counters and the -number of currently running Goroutines and updates these stats every second. 
- -In the next section, we will cover how to monitor and chart these exposed stats with -the use of `netdata`s `go_expvar` module. - -### Using Netdata go_expvar module - -The `go_expvar` module is disabled by default. To enable it, edit `python.d.conf` (to edit it on your system run -`/etc/netdata/edit-config python.d.conf`), and change the `go_expvar` variable to `yes`: - -``` -# Enable / Disable python.d.plugin modules -#default_run: yes -# -# If "default_run" = "yes" the default for all modules is enabled (yes). -# Setting any of these to "no" will disable it. -# -# If "default_run" = "no" the default for all modules is disabled (no). -# Setting any of these to "yes" will enable it. -... -go_expvar: yes -... -``` - -Next, we need to edit the module configuration file (found at `/etc/netdata/python.d/go_expvar.conf` by default) (to -edit it on your system run `/etc/netdata/edit-config python.d/go_expvar.conf`). The module configuration consists of -jobs, where each job can be used to monitor a separate Go application. Let's see a sample job configuration: - -``` -# /etc/netdata/python.d/go_expvar.conf - -app1: - name : 'app1' - url : 'http://127.0.0.1:8080/debug/vars' - collect_memstats: true - extra_charts: {} -``` - -Let's go over each of the defined options: - -``` -name: 'app1' -``` - -This is the job name that will appear at the Netdata dashboard. -If not defined, the job_name (top level key) will be used. - -``` -url: 'http://127.0.0.1:8080/debug/vars' -``` - -This is the URL of the expvar endpoint. As the expvar handler can be installed -in a custom path, the whole URL has to be specified. This value is mandatory. - -``` -collect_memstats: true -``` - -Whether to enable collecting stats about Go runtime's memory. You can find more -information about the exposed values at the [runtime package docs](https://golang.org/pkg/runtime/#MemStats). - -``` -extra_charts: {} -``` - -Enables the user to specify custom expvars to monitor and chart. -Will be explained in more detail below. - -**Note: if `collect_memstats` is disabled and no `extra_charts` are defined, the plugin will -disable itself, as there will be no data to collect!** - -Apart from these options, each job supports options inherited from Netdata's `python.d.plugin` -and its base `UrlService` class. These are: - -``` -update_every: 1 # the job's data collection frequency -priority: 60000 # the job's order on the dashboard -user: admin # use when the expvar endpoint is protected by HTTP Basic Auth -password: sekret # use when the expvar endpoint is protected by HTTP Basic Auth -``` - -### Monitoring custom vars with go_expvar - -Now, memory stats might be useful, but what if you want Netdata to monitor some custom values -that your Go application exposes? The `go_expvar` module can do that as well with the use of -the `extra_charts` configuration variable. - -The `extra_charts` variable is a YaML list of Netdata chart definitions. -Each chart definition has the following keys: - -``` -id: Netdata chart ID -options: a key-value mapping of chart options -lines: a list of line definitions -``` - -**Note: please do not use dots in the chart or line ID field. 
-See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.** - -Please see these two links to the official Netdata documentation for more information about the values: - -- [External plugins - charts](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#chart) -- [Chart variables](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#global-variables-order-and-chart) - -**Line definitions** - -Each chart can define multiple lines (dimensions). -A line definition is a key-value mapping of line options. -Each line can have the following options: - -``` -# mandatory -expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint -expvar_type: value type; supported are "float" or "int" -id: the id of this line/dimension in Netdata - -# optional - Netdata defaults are used if these options are not defined -name: '' -algorithm: absolute -multiplier: 1 -divisor: 100 if expvar_type == float, 1 if expvar_type == int -hidden: False -``` - -Please see the following link for more information about the options and their default values: -[External plugins - dimensions](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#dimension) - -Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map; -All dicts in the resulting JSON document are then flattened to one level. -Expvar names are joined together with '.' when flattening. - -Example: - -``` -{ - "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983}, - "runtime.goroutines": 5 -} -``` - -In the above case, the exported variables will be available under `runtime.goroutines`, -`counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision, -the first defined key wins and all subsequent keys with the same name are ignored. - -## Enable the collector - -The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d.conf -``` - -Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl -restart netdata`, or the appropriate method for your system, to finish enabling the `go_expvar` collector. - -## Configuration - -Edit the `python.d/go_expvar.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/go_expvar.conf -``` - -The configuration below matches the second Go application described above. -Netdata will monitor and chart memory stats for the application, as well as a custom chart of -running goroutines and two dummy counters. 
- -``` -app1: - name : 'app1' - url : 'http://127.0.0.1:8080/debug/vars' - collect_memstats: true - extra_charts: - - id: "runtime_goroutines" - options: - name: num_goroutines - title: "runtime: number of goroutines" - units: goroutines - family: runtime - context: expvar.runtime.goroutines - chart_type: line - lines: - - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines} - - id: "foo_counters" - options: - name: counters - title: "some random counters" - units: awesomeness - family: counters - context: expvar.foo.counters - chart_type: line - lines: - - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1} - - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2} -``` - -**Netdata charts example** - -The images below show how do the final charts in Netdata look. - -![Memory stats charts](https://cloud.githubusercontent.com/assets/15180106/26762052/62b4af58-493b-11e7-9e69-146705acfc2c.png) - -![Custom charts](https://cloud.githubusercontent.com/assets/15180106/26762051/62ae915e-493b-11e7-8518-bd25a3886650.png) - - -### Troubleshooting - -To troubleshoot issues with the `go_expvar` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `go_expvar` module in debug mode: - -```bash -./python.d.plugin go_expvar debug trace -``` - diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md new file mode 120000 index 00000000000000..f28a82f343c268 --- /dev/null +++ b/collectors/python.d.plugin/go_expvar/README.md @@ -0,0 +1 @@ +integrations/go_applications_expvar.md \ No newline at end of file diff --git a/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md b/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md new file mode 100644 index 00000000000000..8d61fa2ae9fe99 --- /dev/null +++ b/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md @@ -0,0 +1,335 @@ + + +# Go applications (EXPVAR) + + + + + +Plugin: python.d.plugin +Module: go_expvar + + + +## Overview + +This collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. It produces charts for Go runtime memory statistics and optionally any number of custom charts. + +It connects via http to gather the metrics exposed via the `expvar` package. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels. + + + +### Per Go applications (EXPVAR) instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| expvar.memstats.heap | alloc, inuse | KiB | +| expvar.memstats.stack | inuse | KiB | +| expvar.memstats.mspan | inuse | KiB | +| expvar.memstats.mcache | inuse | KiB | +| expvar.memstats.live_objects | live | objects | +| expvar.memstats.sys | sys | KiB | +| expvar.memstats.gc_pauses | avg | ns | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable the go_expvar collector + +The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file. + +```bash +cd /etc/netdata # Replace this path with your Netdata config directory, if different +sudo ./edit-config python.d.conf +``` + +Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. + + +#### Sample `expvar` usage in a Go application + +The `expvar` package exposes metrics over HTTP and is very easy to use. +Consider this minimal sample below: + +```go +package main + +import ( + _ "expvar" + "net/http" +) + +func main() { + http.ListenAndServe("127.0.0.1:8080", nil) +} +``` + +When imported this way, the `expvar` package registers a HTTP handler at `/debug/vars` that +exposes Go runtime's memory statistics in JSON format. You can inspect the output by opening +the URL in your browser (or by using `wget` or `curl`). + +Sample output: + +```json +{ +"cmdline": ["./expvar-demo-binary"], +"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, } +} +``` + +You can of course expose and monitor your own variables as well. +Here is a sample Go application that exposes a few custom variables: + +```go +package main + +import ( + "expvar" + "net/http" + "runtime" + "time" +) + +func main() { + + tick := time.NewTicker(1 * time.Second) + num_go := expvar.NewInt("runtime.goroutines") + counters := expvar.NewMap("counters") + counters.Set("cnt1", new(expvar.Int)) + counters.Set("cnt2", new(expvar.Float)) + + go http.ListenAndServe(":8080", nil) + + for { + select { + case <- tick.C: + num_go.Set(int64(runtime.NumGoroutine())) + counters.Add("cnt1", 1) + counters.AddFloat("cnt2", 1.452) + } + } +} +``` + +Apart from the runtime memory stats, this application publishes two counters and the +number of currently running Goroutines and updates these stats every second. + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/go_expvar.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/go_expvar.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. 
+ +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application. + + +
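+Before writing an `extra_charts` definition, it can help to see exactly which flattened key names your application exposes. The hypothetical helper below is not part of Netdata; it fetches `/debug/vars` (the URL is an assumption matching the sample application shown earlier) and joins nested map keys with dots, in the spirit of the flattening behaviour described under the example further down.
+
+```python
+# Hypothetical helper, not part of Netdata: list candidate `expvar_key` names
+# by fetching /debug/vars and flattening nested objects into dot-joined keys.
+import json
+from urllib.request import urlopen
+
+
+def flatten(obj, prefix=''):
+    """Flatten nested dicts, e.g. {"counters": {"cnt1": 1}} -> {"counters.cnt1": 1}."""
+    flat = {}
+    for key, value in obj.items():
+        name = f'{prefix}.{key}' if prefix else key
+        if isinstance(value, dict):
+            flat.update(flatten(value, name))
+        else:
+            flat[name] = value
+    return flat
+
+
+if __name__ == '__main__':
+    with urlopen('http://127.0.0.1:8080/debug/vars', timeout=2) as resp:
+        data = json.load(resp)
+    for key, value in sorted(flatten(data).items()):
+        print(key, '=', value)
+```
+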
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | +| url | the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. | | yes | +| user | If the URL is password protected, this is the username to use. | | no | +| pass | If the URL is password protected, this is the password to use. | | no | +| collect_memstats | Enables charts for Go runtime's memory statistics. | | no | +| extra_charts | Defines extra data/charts to monitor, please see the example below. | | no | + +
+ +#### Examples + +##### Monitor a Go app1 application + +The example below sets a configuration for a Go application, called `app1`. Besides the `memstats`, the application also exposes two counters and the number of currently running Goroutines and updates these stats every second. + +The `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable. + +The `extra_charts` variable is a YaML list of Netdata chart definitions. +Each chart definition has the following keys: + +``` +id: Netdata chart ID +options: a key-value mapping of chart options +lines: a list of line definitions +``` + +**Note: please do not use dots in the chart or line ID field. +See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.** + +Please see these two links to the official Netdata documentation for more information about the values: + +- [External plugins - charts](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#chart) +- [Chart variables](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#global-variables-order-and-chart) + +**Line definitions** + +Each chart can define multiple lines (dimensions). +A line definition is a key-value mapping of line options. +Each line can have the following options: + +``` +# mandatory +expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint +expvar_type: value type; supported are "float" or "int" +id: the id of this line/dimension in Netdata + +# optional - Netdata defaults are used if these options are not defined +name: '' +algorithm: absolute +multiplier: 1 +divisor: 100 if expvar_type == float, 1 if expvar_type == int +hidden: False +``` + +Please see the following link for more information about the options and their default values: +[External plugins - dimensions](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#dimension) + +Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map; +All dicts in the resulting JSON document are then flattened to one level. +Expvar names are joined together with '.' when flattening. + +Example: + +``` +{ + "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983}, + "runtime.goroutines": 5 +} +``` + +In the above case, the exported variables will be available under `runtime.goroutines`, +`counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision, +the first defined key wins and all subsequent keys with the same name are ignored. + + +```yaml +app1: + name : 'app1' + url : 'http://127.0.0.1:8080/debug/vars' + collect_memstats: true + extra_charts: + - id: "runtime_goroutines" + options: + name: num_goroutines + title: "runtime: number of goroutines" + units: goroutines + family: runtime + context: expvar.runtime.goroutines + chart_type: line + lines: + - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines} + - id: "foo_counters" + options: + name: counters + title: "some random counters" + units: awesomeness + family: counters + context: expvar.foo.counters + chart_type: line + lines: + - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1} + - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2} + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `go_expvar` collector, run the `python.d.plugin` with the debug option enabled. 
The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin go_expvar debug trace + ``` + + diff --git a/collectors/python.d.plugin/go_expvar/metadata.yaml b/collectors/python.d.plugin/go_expvar/metadata.yaml index 92669dd9c087bc..9419b024a654eb 100644 --- a/collectors/python.d.plugin/go_expvar/metadata.yaml +++ b/collectors/python.d.plugin/go_expvar/metadata.yaml @@ -4,7 +4,7 @@ modules: plugin_name: python.d.plugin module_name: go_expvar monitored_instance: - name: Go applications + name: Go applications (EXPVAR) link: "https://pkg.go.dev/expvar" categories: - data-collection.apm @@ -39,6 +39,16 @@ modules: setup: prerequisites: list: + - title: "Enable the go_expvar collector" + description: | + The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file. + + ```bash + cd /etc/netdata # Replace this path with your Netdata config directory, if different + sudo ./edit-config python.d.conf + ``` + + Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. - title: "Sample `expvar` usage in a Go application" description: | The `expvar` package exposes metrics over HTTP and is very easy to use. 
diff --git a/collectors/python.d.plugin/haproxy/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py index 6f94c9a0712cd3..f412febb7849d4 100644 --- a/collectors/python.d.plugin/haproxy/haproxy.chart.py +++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py @@ -44,6 +44,7 @@ 'bctime', 'health_sup', 'health_sdown', + 'health_smaint', 'health_bdown', 'health_idle' ] @@ -167,6 +168,10 @@ 'options': [None, 'Backend Servers In UP State', 'health servers', 'health', 'haproxy_hs.up', 'line'], 'lines': [] }, + 'health_smaint': { + 'options': [None, 'Backend Servers In MAINT State', 'maintenance servers', 'health', 'haproxy_hs.maint', 'line'], + 'lines': [] + }, 'health_bdown': { 'options': [None, 'Is Backend Failed?', 'boolean', 'health', 'haproxy_hb.down', 'line'], 'lines': [] @@ -267,6 +272,8 @@ def _get_stat_data(self): if server_status(server, name, 'UP')]) stat_data['hsdown_' + idx] = len([server for server in self.data['servers'] if server_status(server, name, 'DOWN')]) + stat_data['hsmaint_' + idx] = len([server for server in self.data['servers'] + if server_status(server, name, 'MAINT')]) stat_data['hbdown_' + idx] = 1 if backend.get('status') == 'DOWN' else 0 for metric in BACKEND_METRICS: stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0 @@ -321,6 +328,7 @@ def create_charts(self): BACKEND_METRICS[metric]['divisor']]) self.definitions['health_sup']['lines'].append(['hsup_' + idx, name, 'absolute']) self.definitions['health_sdown']['lines'].append(['hsdown_' + idx, name, 'absolute']) + self.definitions['health_smaint']['lines'].append(['hsmaint_' + idx, name, 'absolute']) self.definitions['health_bdown']['lines'].append(['hbdown_' + idx, name, 'absolute']) @@ -352,7 +360,7 @@ def is_server(server): def server_status(server, backend_name, status='DOWN'): - return server.get('# pxname') == backend_name and server.get('status') == status + return server.get('# pxname') == backend_name and server.get('status').partition(' ')[0] == status def url_remove_params(url): diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md deleted file mode 100644 index b42da734627250..00000000000000 --- a/collectors/python.d.plugin/hddtemp/README.md +++ /dev/null @@ -1,61 +0,0 @@ - - -# Hard drive temperature collector - -Monitors disk temperatures from one or more `hddtemp` daemons. - -**Requirement:** -Running `hddtemp` in daemonized mode with access on tcp port - -It produces one chart **Temperature** with dynamic number of dimensions (one per disk) - -## Configuration - -Edit the `python.d/hddtemp.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/hddtemp.conf -``` - -Sample: - -```yaml -update_every: 3 -host: "127.0.0.1" -port: 7634 -``` - -If no configuration is given, module will attempt to connect to hddtemp daemon on `127.0.0.1:7634` address - - - - -### Troubleshooting - -To troubleshoot issues with the `hddtemp` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. 
If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `hddtemp` module in debug mode: - -```bash -./python.d.plugin hddtemp debug trace -``` - diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md new file mode 120000 index 00000000000000..95c7593f803357 --- /dev/null +++ b/collectors/python.d.plugin/hddtemp/README.md @@ -0,0 +1 @@ +integrations/hdd_temperature.md \ No newline at end of file diff --git a/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md b/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md new file mode 100644 index 00000000000000..4a1504f0777d41 --- /dev/null +++ b/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md @@ -0,0 +1,217 @@ + + +# HDD temperature + + + + + +Plugin: python.d.plugin +Module: hddtemp + + + +## Overview + +This collector monitors disk temperatures. + + +It uses the `hddtemp` daemon to gather the metrics. + + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634` + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per HDD temperature instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hddtemp.temperatures | a dimension per disk | Celsius | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Run `hddtemp` in daemon mode + +You can execute `hddtemp` in TCP/IP daemon mode by using the `-d` argument. + +So running `hddtemp -d` would run the daemon, by default on port 7634. + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/hddtemp.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/hddtemp.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. 
+
+By default this collector will try to autodetect disks (autodetection works only for disks whose names start with "sd"). However, this can be overridden by setting the `devices` option to an array of desired disks; the sketch below shows one way to check which device names the daemon reports.
+
+
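+If you are unsure whether the daemon is reachable, or which device names it reports (useful when filling in the `devices` option), you can read its TCP output directly. The snippet below is a rough, hypothetical sketch rather than the collector's code; it assumes hddtemp's usual pipe-separated daemon record and the default host and port from the table below.
+
+```python
+# Hypothetical check, not part of Netdata: print the disks and temperatures
+# the hddtemp daemon reports on its TCP port.
+import socket
+
+
+def read_hddtemp(host='127.0.0.1', port=7634, timeout=2.0):
+    """Return {device: (model, temperature, unit)} parsed from the daemon output."""
+    with socket.create_connection((host, port), timeout=timeout) as sock:
+        raw = b''
+        while True:
+            chunk = sock.recv(4096)
+            if not chunk:
+                break
+            raw += chunk
+    # A record looks like: |/dev/sda|Samsung SSD 860|34|C||/dev/sdb|WDC WD20|36|C|
+    fields = [field for field in raw.decode(errors='replace').split('|') if field]
+    disks = {}
+    for i in range(0, len(fields) - 3, 4):
+        device, model, temperature, unit = fields[i:i + 4]
+        disks[device] = (model, temperature, unit)
+    return disks
+
+
+if __name__ == '__main__':
+    for device, (model, temperature, unit) in read_hddtemp().items():
+        print(f'{device} ({model}): {temperature} {unit}')
+```
+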
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 1 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no | +| devices | Array of desired disks to detect, in case their name doesn't start with `sd`. | | no | +| host | The IP or HOSTNAME to connect to. | localhost | yes | +| port | The port to connect to. | 7634 | no | + +
+ +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +localhost: + name: 'local' + host: '127.0.0.1' + port: 7634 + +``` +##### Custom disk names + +An example defining the disk names to detect. + +
Config + +```yaml +localhost: + name: 'local' + host: '127.0.0.1' + port: 7634 + devices: + - customdisk1 + - customdisk2 + +``` +
+ +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
Config + +```yaml +localhost: + name: 'local' + host: '127.0.0.1' + port: 7634 + +remote_job: + name : 'remote' + host : 'http://192.0.2.1:2812' + +``` +
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `hddtemp` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin hddtemp debug trace + ``` + + diff --git a/collectors/python.d.plugin/hddtemp/metadata.yaml b/collectors/python.d.plugin/hddtemp/metadata.yaml index ee62dc96da4348..d8b56fc66ec9f8 100644 --- a/collectors/python.d.plugin/hddtemp/metadata.yaml +++ b/collectors/python.d.plugin/hddtemp/metadata.yaml @@ -105,7 +105,7 @@ modules: examples: folding: enabled: true - title: "" + title: "Config" list: - name: Basic description: A basic example configuration. diff --git a/collectors/python.d.plugin/hpssa/README.md b/collectors/python.d.plugin/hpssa/README.md deleted file mode 100644 index 12b25047588d4b..00000000000000 --- a/collectors/python.d.plugin/hpssa/README.md +++ /dev/null @@ -1,106 +0,0 @@ - - -# HP Smart Storage Arrays collector - -Monitors controller, cache module, logical and physical drive state and temperature using `ssacli` tool. - -Executed commands: - -- `sudo -n ssacli ctrl all show config detail` - -## Requirements: - -This module uses `ssacli`, which can only be executed by root. It uses -`sudo` and assumes that it is configured such that the `netdata` user can execute `ssacli` as root without a password. - -- Add to your `/etc/sudoers` file: - -`which ssacli` shows the full path to the binary. - -```bash -netdata ALL=(root) NOPASSWD: /path/to/ssacli -``` - -- Reset Netdata's systemd - unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux - distributions with systemd) - -The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute `ssacli` using `sudo`. - -As the `root` user, do the following: - -```cmd -mkdir /etc/systemd/system/netdata.service.d -echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf -systemctl daemon-reload -systemctl restart netdata.service -``` - -## Charts - -- Controller status -- Controller temperature -- Logical drive status -- Physical drive status -- Physical drive temperature - -## Enable the collector - -The `hpssa` collector is disabled by default. To enable it, use `edit-config` from the -Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` -file. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d.conf -``` - -Change the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl -restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. 
- -## Configuration - -Edit the `python.d/hpssa.conf` configuration file using `edit-config` from the -Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/hpssa.conf -``` - -If `ssacli` cannot be found in the `PATH`, configure it in `hpssa.conf`. - -```yaml -ssacli_path: /usr/sbin/ssacli -``` - -Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate -method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. - -### Troubleshooting - -To troubleshoot issues with the `hpssa` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `hpssa` module in debug mode: - -```bash -./python.d.plugin hpssa debug trace -``` - diff --git a/collectors/python.d.plugin/hpssa/README.md b/collectors/python.d.plugin/hpssa/README.md new file mode 120000 index 00000000000000..82802d8b475b56 --- /dev/null +++ b/collectors/python.d.plugin/hpssa/README.md @@ -0,0 +1 @@ +integrations/hp_smart_storage_arrays.md \ No newline at end of file diff --git a/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md b/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md new file mode 100644 index 00000000000000..d46cc9065ea5f6 --- /dev/null +++ b/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md @@ -0,0 +1,205 @@ + + +# HP Smart Storage Arrays + + + + + +Plugin: python.d.plugin +Module: hpssa + + + +## Overview + +This collector monitors HP Smart Storage Arrays metrics about operational statuses and temperatures. + +It uses the command line tool `ssacli`. The exact command used is `sudo -n ssacli ctrl all show config detail` + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +If no configuration is provided, the collector will try to execute the `ssacli` binary. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per HP Smart Storage Arrays instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hpssa.ctrl_status | ctrl_{adapter slot}_status, cache_{adapter slot}_status, battery_{adapter slot}_status per adapter | Status | +| hpssa.ctrl_temperature | ctrl_{adapter slot}_temperature, cache_{adapter slot}_temperature per adapter | Celsius | +| hpssa.ld_status | a dimension per logical drive | Status | +| hpssa.pd_status | a dimension per physical drive | Status | +| hpssa.pd_temperature | a dimension per physical drive | Celsius | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable the hpssa collector + +The `hpssa` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file. + +```bash +cd /etc/netdata # Replace this path with your Netdata config directory, if different +sudo ./edit-config python.d.conf +``` + +Change the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. + + +#### Allow user netdata to execute `ssacli` as root. + +This module uses `ssacli`, which can only be executed by root. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `ssacli` as root without a password. + +- Add to your `/etc/sudoers` file: + +`which ssacli` shows the full path to the binary. + +```bash +netdata ALL=(root) NOPASSWD: /path/to/ssacli +``` + +- Reset Netdata's systemd + unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux + distributions with systemd) + +The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute `ssacli` using `sudo`. + +As the `root` user, do the following: + +```cmd +mkdir /etc/systemd/system/netdata.service.d +echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf +systemctl daemon-reload +systemctl restart netdata.service +``` + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/hpssa.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/hpssa.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
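+Because the collector shells out to `ssacli` through `sudo -n`, a common failure mode is an incomplete sudoers rule. The hypothetical check below mirrors the command quoted in the overview so you can test the setup as the `netdata` user (for example via `sudo -u netdata python3 check_ssacli.py`, a file name chosen here purely for illustration). The `/usr/sbin/ssacli` path is an assumption; use the output of `which ssacli` on your system.
+
+```python
+# Hypothetical sanity check, not part of Netdata: confirm that ssacli can be
+# executed non-interactively through sudo, the way this collector invokes it.
+import subprocess
+
+# Adjust the binary path to match `which ssacli` on your system (assumption).
+CMD = ['sudo', '-n', '/usr/sbin/ssacli', 'ctrl', 'all', 'show', 'config', 'detail']
+
+try:
+    result = subprocess.run(CMD, capture_output=True, text=True, timeout=30)
+except FileNotFoundError as err:
+    raise SystemExit(f'command not found: {err}')
+
+if result.returncode != 0:
+    # A non-zero exit here usually means the sudoers rule above is missing or wrong.
+    raise SystemExit(f'ssacli failed ({result.returncode}): {result.stderr.strip()}')
+
+# Show only the lines the collector cares about: statuses and temperatures.
+for line in result.stdout.splitlines():
+    if 'Status' in line or 'Temperature' in line:
+        print(line.strip())
+```
+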
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | +| ssacli_path | Path to the `ssacli` command line utility. Configure this if `ssacli` is not in the $PATH | | no | +| use_sudo | Whether or not to use `sudo` to execute `ssacli` | True | no | + +
+ +#### Examples + +##### Local simple config + +A basic configuration, specyfing the path to `ssacli` + +```yaml +local: + ssacli_path: /usr/sbin/ssacli + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `hpssa` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin hpssa debug trace + ``` + + diff --git a/collectors/python.d.plugin/hpssa/metadata.yaml b/collectors/python.d.plugin/hpssa/metadata.yaml index dc91f05e441de7..7871cc276635eb 100644 --- a/collectors/python.d.plugin/hpssa/metadata.yaml +++ b/collectors/python.d.plugin/hpssa/metadata.yaml @@ -40,6 +40,16 @@ modules: setup: prerequisites: list: + - title: 'Enable the hpssa collector' + description: | + The `hpssa` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file. + + ```bash + cd /etc/netdata # Replace this path with your Netdata config directory, if different + sudo ./edit-config python.d.conf + ``` + + Change the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. - title: 'Allow user netdata to execute `ssacli` as root.' description: | This module uses `ssacli`, which can only be executed by root. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `ssacli` as root without a password. diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md deleted file mode 100644 index 25bbf738e2c25a..00000000000000 --- a/collectors/python.d.plugin/icecast/README.md +++ /dev/null @@ -1,67 +0,0 @@ - - -# Icecast collector - -Monitors the number of listeners for active sources. - -## Requirements - -- icecast version >= 2.4.0 - -It produces the following charts: - -1. **Listeners** in listeners - -- source number - -## Configuration - -Edit the `python.d/icecast.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/icecast.conf -``` - -Needs only `url` to server's `/status-json.xsl` - -Here is an example for remote server: - -```yaml -remote: - url : 'http://1.2.3.4:8443/status-json.xsl' -``` - -Without configuration, module attempts to connect to `http://localhost:8443/status-json.xsl` - - - - -### Troubleshooting - -To troubleshoot issues with the `icecast` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. 
- -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `icecast` module in debug mode: - -```bash -./python.d.plugin icecast debug trace -``` - diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md new file mode 120000 index 00000000000000..db3c1b57286867 --- /dev/null +++ b/collectors/python.d.plugin/icecast/README.md @@ -0,0 +1 @@ +integrations/icecast.md \ No newline at end of file diff --git a/collectors/python.d.plugin/icecast/integrations/icecast.md b/collectors/python.d.plugin/icecast/integrations/icecast.md new file mode 100644 index 00000000000000..12d7d59ee855f1 --- /dev/null +++ b/collectors/python.d.plugin/icecast/integrations/icecast.md @@ -0,0 +1,166 @@ + + +# Icecast + + + + + +Plugin: python.d.plugin +Module: icecast + + + +## Overview + +This collector monitors Icecast listener counts. + +It connects to an icecast URL and uses the `status-json.xsl` endpoint to retrieve statistics. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +Without configuration, the collector attempts to connect to http://localhost:8443/status-json.xsl + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Icecast instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| icecast.listeners | a dimension for each active source | listeners | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Icecast minimum version + +Needs at least icecast version >= 2.4.0 + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/icecast.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/icecast.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
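+To confirm that the endpoint is reachable and see which sources it exposes, you can query `status-json.xsl` yourself. The script below is a hypothetical helper, not part of Netdata; the URL is an assumption matching the default in the table below, and the field names follow Icecast's usual `status-json.xsl` layout.
+
+```python
+# Hypothetical helper, not part of Netdata: print listeners per active source
+# from the same status-json.xsl endpoint this collector reads.
+import json
+from urllib.request import urlopen
+
+URL = 'http://localhost:8443/status-json.xsl'  # assumption: the default shown below
+
+with urlopen(URL, timeout=2) as resp:
+    stats = json.load(resp)
+
+sources = stats.get('icestats', {}).get('source', [])
+# Icecast returns a single object (not a list) when only one source is active.
+if isinstance(sources, dict):
+    sources = [sources]
+
+for source in sources:
+    name = source.get('server_name') or source.get('listenurl', 'unknown')
+    print(name, source.get('listeners', 0), 'listeners')
+```
+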
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| url | The URL (and port) of the Icecast server. It must also include the `/status-json.xsl` path. | http://localhost:8443/status-json.xsl | no |
+| user | Username to use to connect to `url` if it's password protected. | | no |
+| pass | Password to use to connect to `url` if it's password protected. | | no |
+
+
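+
+As an illustrative sketch only (the hostname and credentials below are placeholders, not shipped defaults), the `url`, `user` and `pass` options from the table above can be combined for a password-protected status page:
+
+```yaml
+protected_remote:
+  name: 'protected'
+  url: 'https://icecast.example.com:8443/status-json.xsl'
+  user: 'admin'
+  pass: 'changeme'
+
+```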
+ +#### Examples + +##### Remote Icecast server + +Configure a remote icecast server + +```yaml +remote: + url: 'http://1.2.3.4:8443/status-json.xsl' + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `icecast` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin icecast debug trace + ``` + + diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md deleted file mode 100644 index c990ae34f0ab15..00000000000000 --- a/collectors/python.d.plugin/ipfs/README.md +++ /dev/null @@ -1,74 +0,0 @@ - - -# IPFS collector - -Collects [`IPFS`](https://ipfs.io) basic information like file system bandwidth, peers and repo metrics. - -## Charts - -It produces the following charts: - -- Bandwidth in `kilobits/s` -- Peers in `peers` -- Repo Size in `GiB` -- Repo Objects in `objects` - -## Configuration - -Edit the `python.d/ipfs.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/ipfs.conf -``` - - - -Calls to the following endpoints are disabled due to `IPFS` bugs: - -- `/api/v0/stats/repo` (https://github.com/ipfs/go-ipfs/issues/3874) -- `/api/v0/pin/ls` (https://github.com/ipfs/go-ipfs/issues/7528) - -Can be enabled in the collector configuration file. - -The configuration needs only `url` to `IPFS` server, here is an example for 2 `IPFS` instances: - -```yaml -localhost: - url: 'http://localhost:5001' - -remote: - url: 'http://203.0.113.10::5001' -``` - - - - -### Troubleshooting - -To troubleshoot issues with the `ipfs` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `ipfs` module in debug mode: - -```bash -./python.d.plugin ipfs debug trace -``` - diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md new file mode 120000 index 00000000000000..eee6a07b2a0e8b --- /dev/null +++ b/collectors/python.d.plugin/ipfs/README.md @@ -0,0 +1 @@ +integrations/ipfs.md \ No newline at end of file diff --git a/collectors/python.d.plugin/ipfs/integrations/ipfs.md b/collectors/python.d.plugin/ipfs/integrations/ipfs.md new file mode 100644 index 00000000000000..77dc745aabf936 --- /dev/null +++ b/collectors/python.d.plugin/ipfs/integrations/ipfs.md @@ -0,0 +1,203 @@ + + +# IPFS + + + + + +Plugin: python.d.plugin +Module: ipfs + + + +## Overview + +This collector monitors IPFS server metrics about its quality and performance. + +It connects to an http endpoint of the IPFS server to collect the metrics + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +If the endpoint is accessible by the Agent, netdata will autodetect it + +#### Limits + +Calls to the following endpoints are disabled due to IPFS bugs: + +/api/v0/stats/repo (https://github.com/ipfs/go-ipfs/issues/3874) +/api/v0/pin/ls (https://github.com/ipfs/go-ipfs/issues/7528) + + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per IPFS instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ipfs.bandwidth | in, out | kilobits/s | +| ipfs.peers | peers | peers | +| ipfs.repo_size | avail, size | GiB | +| ipfs.repo_objects | objects, pinned, recursive_pins | objects | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ ipfs_datastore_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/ipfs.conf) | ipfs.repo_size | IPFS datastore utilization | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/ipfs.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/ipfs.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
+ +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | The JOB's name as it will appear at the dashboard (by default is the job_name) | job_name | no | +| url | URL to the IPFS API | no | yes | +| repoapi | Collect repo metrics. | no | no | +| pinapi | Set status of IPFS pinned object polling. | no | no | + +
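+
+The global-versus-job behavior described above can be sketched as follows (job names and addresses are hypothetical, not shipped defaults): a top-level `update_every` applies to every job unless a job sets its own value.
+
+```yaml
+# global option, applies to all jobs below unless overridden
+update_every: 10
+
+local:
+  url: 'http://localhost:5001'
+
+slow_remote:
+  url: 'http://192.0.2.1:5001'
+  update_every: 30  # per-job override of the global value
+
+```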
+ +#### Examples + +##### Basic (default out-of-the-box) + +A basic example configuration, one job will run at a time. Autodetect mechanism uses it by default. + +```yaml +localhost: + name: 'local' + url: 'http://localhost:5001' + repoapi: no + pinapi: no + +``` +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
Config + +```yaml +localhost: + name: 'local' + url: 'http://localhost:5001' + repoapi: no + pinapi: no + +remote_host: + name: 'remote' + url: 'http://192.0.2.1:5001' + repoapi: no + pinapi: no + +``` +
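+
+##### Repo and pin statistics enabled
+
+A sketch that switches on the `repoapi` and `pinapi` options documented above. As noted in the Limits section, the corresponding IPFS endpoints have known upstream issues, so enable them with care.
+
+```yaml
+localhost:
+  name: 'local'
+  url: 'http://localhost:5001'
+  repoapi: yes
+  pinapi: yes
+
+```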
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `ipfs` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin ipfs debug trace + ``` + + diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md deleted file mode 100644 index 1ad5ad42c5be35..00000000000000 --- a/collectors/python.d.plugin/litespeed/README.md +++ /dev/null @@ -1,95 +0,0 @@ - - -# LiteSpeed collector - -Collects web server performance metrics for network, connection, requests, and cache. - -It produces: - -1. **Network Throughput HTTP** in kilobits/s - - - in - - out - -2. **Network Throughput HTTPS** in kilobits/s - - - in - - out - -3. **Connections HTTP** in connections - - - free - - used - -4. **Connections HTTPS** in connections - - - free - - used - -5. **Requests** in requests/s - - - requests - -6. **Requests In Processing** in requests - - - processing - -7. **Public Cache Hits** in hits/s - - - hits - -8. **Private Cache Hits** in hits/s - - - hits - -9. **Static Hits** in hits/s - - - hits - -## Configuration - -Edit the `python.d/litespeed.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/litespeed.conf -``` - -```yaml -local: - path : 'PATH' -``` - -If no configuration is given, module will use "/tmp/lshttpd/". - - - - -### Troubleshooting - -To troubleshoot issues with the `litespeed` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `litespeed` module in debug mode: - -```bash -./python.d.plugin litespeed debug trace -``` - diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md new file mode 120000 index 00000000000000..e7418b3dcc40c5 --- /dev/null +++ b/collectors/python.d.plugin/litespeed/README.md @@ -0,0 +1 @@ +integrations/litespeed.md \ No newline at end of file diff --git a/collectors/python.d.plugin/litespeed/integrations/litespeed.md b/collectors/python.d.plugin/litespeed/integrations/litespeed.md new file mode 100644 index 00000000000000..87f2d0b12abadb --- /dev/null +++ b/collectors/python.d.plugin/litespeed/integrations/litespeed.md @@ -0,0 +1,170 @@ + + +# Litespeed + + + + + +Plugin: python.d.plugin +Module: litespeed + + + +## Overview + +Examine Litespeed metrics for insights into web server operations. Analyze request rates, response times, and error rates for efficient web service delivery. + +The collector uses the statistics under /tmp/lshttpd to gather the metrics. + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +If no configuration is present, the collector will attempt to read files under /tmp/lshttpd/. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Litespeed instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| litespeed.net_throughput | in, out | kilobits/s | +| litespeed.net_throughput | in, out | kilobits/s | +| litespeed.connections | free, used | conns | +| litespeed.connections | free, used | conns | +| litespeed.requests | requests | requests/s | +| litespeed.requests_processing | processing | requests | +| litespeed.cache | hits | hits/s | +| litespeed.cache | hits | hits/s | +| litespeed.static | hits | hits/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/litespeed.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/litespeed.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. 
+ +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| path | Use a different path than the default, where the litespeed stats files reside. | /tmp/lshttpd/ | no |
+
+
+ +#### Examples + +##### Set the path to statistics + +Change the path for the litespeed stats files + +```yaml +localhost: + name: 'local' + path: '/tmp/lshttpd' + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `litespeed` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin litespeed debug trace + ``` + + diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md deleted file mode 100644 index 1af4d0ea765add..00000000000000 --- a/collectors/python.d.plugin/megacli/README.md +++ /dev/null @@ -1,109 +0,0 @@ - - -# MegaRAID controller collector - -Collects adapter, physical drives and battery stats using `megacli` command-line tool. - -Executed commands: - -- `sudo -n megacli -LDPDInfo -aAll` -- `sudo -n megacli -AdpBbuCmd -a0` - -## Requirements - -The module uses `megacli`, which can only be executed by `root`. It uses -`sudo` and assumes that it is configured such that the `netdata` user can execute `megacli` as root without a password. - -- Add to your `/etc/sudoers` file: - -`which megacli` shows the full path to the binary. - -```bash -netdata ALL=(root) NOPASSWD: /path/to/megacli -``` - -- Reset Netdata's systemd - unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux - distributions with systemd) - -The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute `megacli` using `sudo`. - - -As the `root` user, do the following: - -```cmd -mkdir /etc/systemd/system/netdata.service.d -echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf -systemctl daemon-reload -systemctl restart netdata.service -``` - -## Charts - -- Adapter State -- Physical Drives Media Errors -- Physical Drives Predictive Failures -- Battery Relative State of Charge -- Battery Cycle Count - -## Enable the collector - -The `megacli` collector is disabled by default. To enable it, use `edit-config` from the -Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` -file. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d.conf -``` - -Change the value of the `megacli` setting to `yes`. Save the file and restart the Netdata Agent -with `sudo systemctl restart netdata`, or the appropriate method for your system. - -## Configuration - -Edit the `python.d/megacli.conf` configuration file using `edit-config` from the -Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. 
- -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/megacli.conf -``` - -Battery stats disabled by default. To enable them, modify `megacli.conf`. - -```yaml -do_battery: yes -``` - -Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate -method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. - - -### Troubleshooting - -To troubleshoot issues with the `megacli` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `megacli` module in debug mode: - -```bash -./python.d.plugin megacli debug trace -``` - diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md new file mode 120000 index 00000000000000..e5df4d41df2ffc --- /dev/null +++ b/collectors/python.d.plugin/megacli/README.md @@ -0,0 +1 @@ +integrations/megacli.md \ No newline at end of file diff --git a/collectors/python.d.plugin/megacli/integrations/megacli.md b/collectors/python.d.plugin/megacli/integrations/megacli.md new file mode 100644 index 00000000000000..0c4af78a9bee04 --- /dev/null +++ b/collectors/python.d.plugin/megacli/integrations/megacli.md @@ -0,0 +1,220 @@ + + +# MegaCLI + + + + + +Plugin: python.d.plugin +Module: megacli + + + +## Overview + +Examine MegaCLI metrics with Netdata for insights into RAID controller performance. Improve your RAID controller efficiency with real-time MegaCLI metrics. + +Collects adapter, physical drives and battery stats using megacli command-line tool + +Executed commands: + + - `sudo -n megacli -LDPDInfo -aAll` + - `sudo -n megacli -AdpBbuCmd -a0` + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + +The module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password. + +### Default Behavior + +#### Auto-Detection + +After all the permissions are satisfied, netdata should be to execute commands via the megacli command line utility + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per MegaCLI instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| megacli.adapter_degraded | a dimension per adapter | is degraded | +| megacli.pd_media_error | a dimension per physical drive | errors/s | +| megacli.pd_predictive_failure | a dimension per physical drive | failures/s | + +### Per battery + +Metrics related to Battery Backup Units, each BBU provides its own set of the following metrics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| megacli.bbu_relative_charge | adapter {battery id} | percentage | +| megacli.bbu_cycle_count | adapter {battery id} | cycle count | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ megacli_adapter_state ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.adapter_degraded | adapter is in the degraded state (0: false, 1: true) | +| [ megacli_pd_media_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.pd_media_error | number of physical drive media errors | +| [ megacli_pd_predictive_failures ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.pd_predictive_failure | number of physical drive predictive failures | +| [ megacli_bbu_relative_charge ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.bbu_relative_charge | average battery backup unit (BBU) relative state of charge over the last 10 seconds | +| [ megacli_bbu_cycle_count ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.bbu_cycle_count | average battery backup unit (BBU) charge cycles count over the last 10 seconds | + + +## Setup + +### Prerequisites + +#### Grant permissions for netdata, to run megacli as sudoer + +The module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password. + +Add to your /etc/sudoers file: +which megacli shows the full path to the binary. + +```bash +netdata ALL=(root) NOPASSWD: /path/to/megacli +``` + + +#### Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd) + +The default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute arcconf using sudo. + +As root user, do the following: + +```bash +mkdir /etc/systemd/system/netdata.service.d +echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf +systemctl daemon-reload +systemctl restart netdata.service +``` + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/megacli.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/megacli.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. 
+ +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| do_battery | Whether to collect battery statistics (adds an additional `megacli -AdpBbuCmd -a0` call). | no | no |
+
+
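+
+For instance, battery backup unit metrics can be switched on with the `do_battery` option from the table above; a minimal sketch:
+
+```yaml
+do_battery: yes
+
+```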
+ +#### Examples + +##### Basic + +A basic example configuration per job + +```yaml +job_name: + name: myname + update_every: 1 + priority: 60000 + penalty: yes + autodetection_retry: 0 + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `megacli` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin megacli debug trace + ``` + + diff --git a/collectors/python.d.plugin/megacli/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py index ef35ff63f43742..8222092a800094 100644 --- a/collectors/python.d.plugin/megacli/megacli.chart.py +++ b/collectors/python.d.plugin/megacli/megacli.chart.py @@ -91,7 +91,7 @@ def battery_charts(bats): RE_ADAPTER = re.compile( - r'Adapter #([0-9]+) State(?:\s+)?: ([a-zA-Z]+)' + r'Adapter #([0-9]+) State(?:\s+)?: ([a-zA-Z ]+)' ) RE_VD = re.compile( @@ -124,14 +124,14 @@ def find_batteries(d): class Adapter: def __init__(self, n, state): self.id = n - self.state = int(state == 'Degraded') + # TODO: Rewrite all of this + self.state = int(state in ("Partially Degraded", "Degraded", "Failed")) def data(self): return { 'adapter_{0}_degraded'.format(self.id): self.state, } - class PD: def __init__(self, n, media_err, predict_fail): self.id = n diff --git a/collectors/python.d.plugin/megacli/metadata.yaml b/collectors/python.d.plugin/megacli/metadata.yaml index f75a8d2ab6f134..4a2ba43ee5ed5f 100644 --- a/collectors/python.d.plugin/megacli/metadata.yaml +++ b/collectors/python.d.plugin/megacli/metadata.yaml @@ -27,8 +27,8 @@ modules: Executed commands: - sudo -n megacli -LDPDInfo -aAll - sudo -n megacli -AdpBbuCmd -a0 + - `sudo -n megacli -LDPDInfo -aAll` + - `sudo -n megacli -AdpBbuCmd -a0` supported_platforms: include: [] exclude: [] diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md deleted file mode 100644 index 612bd49d7d7949..00000000000000 --- a/collectors/python.d.plugin/memcached/README.md +++ /dev/null @@ -1,122 +0,0 @@ - - -# Memcached collector - -Collects memory-caching system performance metrics. It reads server response to stats command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats)). - - -1. **Network** in kilobytes/s - - - read - - written - -2. **Connections** per second - - - current - - rejected - - total - -3. **Items** in cluster - - - current - - total - -4. **Evicted and Reclaimed** items - - - evicted - - reclaimed - -5. **GET** requests/s - - - hits - - misses - -6. **GET rate** rate in requests/s - - - rate - -7. **SET rate** rate in requests/s - - - rate - -8. **DELETE** requests/s - - - hits - - misses - -9. **CAS** requests/s - - - hits - - misses - - bad value - -10. **Increment** requests/s - - - hits - - misses - -11. **Decrement** requests/s - - - hits - - misses - -12. **Touch** requests/s - - - hits - - misses - -13. 
**Touch rate** rate in requests/s - - - rate - -## Configuration - -Edit the `python.d/memcached.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/memcached.conf -``` - -Sample: - -```yaml -localtcpip: - name : 'local' - host : '127.0.0.1' - port : 24242 -``` - -If no configuration is given, module will attempt to connect to memcached instance on `127.0.0.1:11211` address. - - - - -### Troubleshooting - -To troubleshoot issues with the `memcached` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `memcached` module in debug mode: - -```bash -./python.d.plugin memcached debug trace -``` - diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md new file mode 120000 index 00000000000000..2cb76d33c06255 --- /dev/null +++ b/collectors/python.d.plugin/memcached/README.md @@ -0,0 +1 @@ +integrations/memcached.md \ No newline at end of file diff --git a/collectors/python.d.plugin/memcached/integrations/memcached.md b/collectors/python.d.plugin/memcached/integrations/memcached.md new file mode 100644 index 00000000000000..113b86c8c96acb --- /dev/null +++ b/collectors/python.d.plugin/memcached/integrations/memcached.md @@ -0,0 +1,215 @@ + + +# Memcached + + + + + +Plugin: python.d.plugin +Module: memcached + + + +## Overview + +Monitor Memcached metrics for proficient in-memory key-value store operations. Track cache hits, misses, and memory usage for efficient data caching. + +It reads server response to stats command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats)). + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +If no configuration is given, collector will attempt to connect to memcached instance on `127.0.0.1:11211` address. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Memcached instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| memcached.cache | available, used | MiB | +| memcached.net | in, out | kilobits/s | +| memcached.connections | current, rejected, total | connections/s | +| memcached.items | current, total | items | +| memcached.evicted_reclaimed | reclaimed, evicted | items | +| memcached.get | hints, misses | requests | +| memcached.get_rate | rate | requests/s | +| memcached.set_rate | rate | requests/s | +| memcached.delete | hits, misses | requests | +| memcached.cas | hits, misses, bad value | requests | +| memcached.increment | hits, misses | requests | +| memcached.decrement | hits, misses | requests | +| memcached.touch | hits, misses | requests | +| memcached.touch_rate | rate | requests/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ memcached_cache_memory_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf) | memcached.cache | cache memory utilization | +| [ memcached_cache_fill_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf) | memcached.cache | average rate the cache fills up (positive), or frees up (negative) space over the last hour | +| [ memcached_out_of_cache_space_time ](https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf) | memcached.cache | estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/memcached.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/memcached.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| host | the host to connect to. | 127.0.0.1 | no | +| port | the port to connect to. | 11211 | no | +| update_every | Sets the default data collection frequency. | 10 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | + +
+ +#### Examples + +##### localhost + +An example configuration for localhost. + +```yaml +localhost: + name: 'local' + host: 'localhost' + port: 11211 + +``` +##### localipv4 + +An example configuration for localipv4. + +
Config + +```yaml +localhost: + name: 'local' + host: '127.0.0.1' + port: 11211 + +``` +
+ +##### localipv6 + +An example configuration for localipv6. + +
Config + +```yaml +localhost: + name: 'local' + host: '::1' + port: 11211 + +``` +
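+
+##### Remote instance
+
+A sketch for a Memcached server running on another host; the address below is a placeholder.
+
+
Config
+
+```yaml
+remote:
+  name: 'remote'
+  host: '192.0.2.1'
+  port: 11211
+
+```
+
+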
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `memcached` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin memcached debug trace + ``` + + diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md deleted file mode 100644 index f762de0d3702ae..00000000000000 --- a/collectors/python.d.plugin/monit/README.md +++ /dev/null @@ -1,78 +0,0 @@ - - -# Monit collector - -Monit monitoring module. Data is grabbed from stats XML interface (exists for a long time, but not mentioned in official -documentation). Mostly this plugin shows statuses of monit targets, i.e. -[statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks). - -1. **Filesystems** - - - Filesystems - - Directories - - Files - - Pipes - -2. **Applications** - - - Processes (+threads/childs) - - Programs - -3. **Network** - - - Hosts (+latency) - - Network interfaces - -## Configuration - -Edit the `python.d/monit.conf` configuration file using `edit-config` from the -Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically -at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/monit.conf -``` - -Sample: - -```yaml -local: - name: 'local' - url: 'http://localhost:2812' - user: : admin - pass: : monit -``` - -If no configuration is given, module will attempt to connect to monit as `http://localhost:2812`. - - - - -### Troubleshooting - -To troubleshoot issues with the `monit` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `monit` module in debug mode: - -```bash -./python.d.plugin monit debug trace -``` - diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md new file mode 120000 index 00000000000000..ac69496f40408c --- /dev/null +++ b/collectors/python.d.plugin/monit/README.md @@ -0,0 +1 @@ +integrations/monit.md \ No newline at end of file diff --git a/collectors/python.d.plugin/monit/integrations/monit.md b/collectors/python.d.plugin/monit/integrations/monit.md new file mode 100644 index 00000000000000..18219141d609f3 --- /dev/null +++ b/collectors/python.d.plugin/monit/integrations/monit.md @@ -0,0 +1,214 @@ + + +# Monit + + + + + +Plugin: python.d.plugin +Module: monit + + + +## Overview + +This collector monitors Monit targets such as filesystems, directories, files, FIFO pipes and more. 
+ + +It gathers data from Monit's XML interface. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, this collector will attempt to connect to Monit at `http://localhost:2812` + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Monit instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| monit.filesystems | a dimension per target | filesystems | +| monit.directories | a dimension per target | directories | +| monit.files | a dimension per target | files | +| monit.fifos | a dimension per target | pipes | +| monit.programs | a dimension per target | programs | +| monit.services | a dimension per target | processes | +| monit.process_uptime | a dimension per target | seconds | +| monit.process_threads | a dimension per target | threads | +| monit.process_childrens | a dimension per target | children | +| monit.hosts | a dimension per target | hosts | +| monit.host_latency | a dimension per target | milliseconds | +| monit.networks | a dimension per target | interfaces | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/monit.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/monit.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 1 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no | +| url | The URL to fetch Monit's metrics. | http://localhost:2812 | yes | +| user | Username in case the URL is password protected. | | no | +| pass | Password in case the URL is password protected. | | no | + +
+ +#### Examples + +##### Basic + +A basic configuration example. + +```yaml +localhost: + name : 'local' + url : 'http://localhost:2812' + +``` +##### Basic Authentication + +Example using basic username and password in order to authenticate. + +
Config + +```yaml +localhost: + name : 'local' + url : 'http://localhost:2812' + user: 'foo' + pass: 'bar' + +``` +
+ +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
Config + +```yaml +localhost: + name: 'local' + url: 'http://localhost:2812' + +remote_job: + name: 'remote' + url: 'http://192.0.2.1:2812' + +``` +
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `monit` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin monit debug trace + ``` + + diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md deleted file mode 100644 index ccc4e712b14902..00000000000000 --- a/collectors/python.d.plugin/nsd/README.md +++ /dev/null @@ -1,91 +0,0 @@ - - -# NSD collector - -Uses the `nsd-control stats_noreset` command to provide `nsd` statistics. - -## Requirements - -- Version of `nsd` must be 4.0+ -- Netdata must have permissions to run `nsd-control stats_noreset` - -It produces: - -1. **Queries** - - - queries - -2. **Zones** - - - master - - slave - -3. **Protocol** - - - udp - - udp6 - - tcp - - tcp6 - -4. **Query Type** - - - A - - NS - - CNAME - - SOA - - PTR - - HINFO - - MX - - NAPTR - - TXT - - AAAA - - SRV - - ANY - -5. **Transfer** - - - NOTIFY - - AXFR - -6. **Return Code** - - - NOERROR - - FORMERR - - SERVFAIL - - NXDOMAIN - - NOTIMP - - REFUSED - - YXDOMAIN - -Configuration is not needed. - - - - -### Troubleshooting - -To troubleshoot issues with the `nsd` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `nsd` module in debug mode: - -```bash -./python.d.plugin nsd debug trace -``` - diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md new file mode 120000 index 00000000000000..59fcfe49134540 --- /dev/null +++ b/collectors/python.d.plugin/nsd/README.md @@ -0,0 +1 @@ +integrations/name_server_daemon.md \ No newline at end of file diff --git a/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md b/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md new file mode 100644 index 00000000000000..0e66c44ebe414b --- /dev/null +++ b/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md @@ -0,0 +1,199 @@ + + +# Name Server Daemon + + + + + +Plugin: python.d.plugin +Module: nsd + + + +## Overview + +This collector monitors NSD statistics like queries, zones, protocols, query types and more. + + +It uses the `nsd-control stats_noreset` command to gather metrics. + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +If permissions are satisfied, the collector will be able to run `nsd-control stats_noreset`, thus collecting metrics. 
+ +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Name Server Daemon instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| nsd.queries | queries | queries/s | +| nsd.zones | master, slave | zones | +| nsd.protocols | udp, udp6, tcp, tcp6 | queries/s | +| nsd.type | A, NS, CNAME, SOA, PTR, HINFO, MX, NAPTR, TXT, AAAA, SRV, ANY | queries/s | +| nsd.transfer | NOTIFY, AXFR | queries/s | +| nsd.rcode | NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED, YXDOMAIN | queries/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### NSD version + +The version of `nsd` must be 4.0+. + + +#### Provide Netdata the permissions to run the command + +Netdata must have permissions to run the `nsd-control stats_noreset` command. + +You can: + +- Add "netdata" user to "nsd" group: + ``` + usermod -aG nsd netdata + ``` +- Add Netdata to sudoers + 1. Edit the sudoers file: + ``` + visudo -f /etc/sudoers.d/netdata + ``` + 2. Add the entry: + ``` + Defaults:netdata !requiretty + netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset + ``` + + > Note that you will need to set the `command` option to `sudo /usr/sbin/nsd-control stats_noreset` if you use this method. + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/nsd.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/nsd.conf +``` +#### Options + +This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize it's data collection behavior. + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 30 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | +| command | The command to run | nsd-control stats_noreset | no | + +
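+
+If you granted access through sudoers (see the prerequisites above), the `command` option needs the `sudo` prefix, as the prerequisite note points out; a minimal sketch:
+
+```yaml
+local:
+  name: 'nsd_local'
+  command: 'sudo /usr/sbin/nsd-control stats_noreset'
+
+```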
+ +#### Examples + +##### Basic + +A basic configuration example. + +```yaml +local: + name: 'nsd_local' + command: 'nsd-control stats_noreset' + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `nsd` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin nsd debug trace + ``` + + diff --git a/collectors/python.d.plugin/nsd/metadata.yaml b/collectors/python.d.plugin/nsd/metadata.yaml index bd0a256f3dbf17..f5e2c46b0adc33 100644 --- a/collectors/python.d.plugin/nsd/metadata.yaml +++ b/collectors/python.d.plugin/nsd/metadata.yaml @@ -40,6 +40,9 @@ modules: setup: prerequisites: list: + - title: NSD version + description: | + The version of `nsd` must be 4.0+. - title: Provide Netdata the permissions to run the command description: | Netdata must have permissions to run the `nsd-control stats_noreset` command. diff --git a/collectors/python.d.plugin/openldap/README.md b/collectors/python.d.plugin/openldap/README.md deleted file mode 100644 index eddf40b2cbb20d..00000000000000 --- a/collectors/python.d.plugin/openldap/README.md +++ /dev/null @@ -1,102 +0,0 @@ - - -# OpenLDAP collector - -Provides statistics information from openldap (slapd) server. -Statistics are taken from LDAP monitoring interface. Manual page, slapd-monitor(5) is available. - -**Requirement:** - -- Follow instructions from to activate monitoring interface. -- Install python ldap module `pip install ldap` or `yum install python-ldap` -- Modify openldap.conf with your credentials - -### Module gives information with following charts: - -1. **connections** - - - total connections number - -2. **Bytes** - - - sent - -3. **operations** - - - completed - - initiated - -4. **referrals** - - - sent - -5. **entries** - - - sent - -6. **ldap operations** - - - bind - - search - - unbind - - add - - delete - - modify - - compare - -7. **waiters** - - - read - - write - -## Configuration - -Edit the `python.d/openldap.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/openldap.conf -``` - -Sample: - -```yaml -openldap: - name : 'local' - username : "cn=monitor,dc=superb,dc=eu" - password : "testpass" - server : 'localhost' - port : 389 -``` - - - - -### Troubleshooting - -To troubleshoot issues with the `openldap` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `openldap` module in debug mode: - -```bash -./python.d.plugin openldap debug trace -``` - diff --git a/collectors/python.d.plugin/openldap/README.md b/collectors/python.d.plugin/openldap/README.md new file mode 120000 index 00000000000000..45f36b9b92ea6b --- /dev/null +++ b/collectors/python.d.plugin/openldap/README.md @@ -0,0 +1 @@ +integrations/openldap.md \ No newline at end of file diff --git a/collectors/python.d.plugin/openldap/integrations/openldap.md b/collectors/python.d.plugin/openldap/integrations/openldap.md new file mode 100644 index 00000000000000..a9480a490c3915 --- /dev/null +++ b/collectors/python.d.plugin/openldap/integrations/openldap.md @@ -0,0 +1,215 @@ + + +# OpenLDAP + + + + + +Plugin: python.d.plugin +Module: openldap + + + +## Overview + +This collector monitors OpenLDAP metrics about connections, operations, referrals and more. + +Statistics are taken from the monitoring interface of a openLDAP (slapd) server + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +This collector doesn't work until all the prerequisites are checked. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per OpenLDAP instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| openldap.total_connections | connections | connections/s | +| openldap.traffic_stats | sent | KiB/s | +| openldap.operations_status | completed, initiated | ops/s | +| openldap.referrals | sent | referrals/s | +| openldap.entries | sent | entries/s | +| openldap.ldap_operations | bind, search, unbind, add, delete, modify, compare | ops/s | +| openldap.waiters | write, read | waiters/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Configure the openLDAP server to expose metrics to monitor it. + +Follow instructions from https://www.openldap.org/doc/admin24/monitoringslapd.html to activate monitoring interface. + + +#### Install python-ldap module + +Install python ldap module + +1. From pip package manager + +```bash +pip install ldap +``` + +2. With apt package manager (in most deb based distros) + + +```bash +apt-get install python-ldap +``` + + +3. With yum package manager (in most rpm based distros) + + +```bash +yum install python-ldap +``` + + +#### Insert credentials for Netdata to access openLDAP server + +Use the `ldappasswd` utility to set a password for the username you will use. + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/openldap.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). 
+ +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/openldap.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
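To make that layout concrete, here is a sketch of how one global option and two jobs could sit together in `python.d/openldap.conf`. The bind DN, password and hostnames are placeholders, and the second job only shows how the TLS-related options from the table below combine with a per-job override of `update_every`:

```yaml
# global section: applies to every job unless a job overrides it
update_every: 10

# plain LDAP job
local:
  name: 'local'
  username: 'cn=monitor,dc=example,dc=org'
  password: 'secret'
  server: 'localhost'
  port: 389

# LDAPS job, overriding the global update_every
secure:
  name: 'secure'
  username: 'cn=monitor,dc=example,dc=org'
  password: 'secret'
  server: 'ldap.example.org'
  port: 636
  use_tls: True
  update_every: 5
```

The top-level keys `local` and `secure` are the `job_name` values mentioned above; `name` only changes what is shown on the dashboard.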
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | +| username | The bind user with right to access monitor statistics | | yes | +| password | The password for the binded user | | yes | +| server | The listening address of the LDAP server. In case of TLS, use the hostname which the certificate is published for. | | yes | +| port | The listening port of the LDAP server. Change to 636 port in case of TLS connection. | 389 | yes | +| use_tls | Make True if a TLS connection is used over ldaps:// | no | no | +| use_start_tls | Make True if a TLS connection is used over ldap:// | no | no | +| cert_check | False if you want to ignore certificate check | True | yes | +| timeout | Seconds to timeout if no connection exist | | yes | + +
+ +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +username: "cn=admin" +password: "pass" +server: "localhost" +port: "389" +check_cert: True +timeout: 1 + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `openldap` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin openldap debug trace + ``` + + diff --git a/collectors/python.d.plugin/oracledb/README.md b/collectors/python.d.plugin/oracledb/README.md deleted file mode 100644 index 315816de0e3445..00000000000000 --- a/collectors/python.d.plugin/oracledb/README.md +++ /dev/null @@ -1,115 +0,0 @@ - - -# OracleDB collector - -Monitors the performance and health metrics of the Oracle database. - -## Requirements - -- `oracledb` package. - -It produces following charts: - -- session activity - - Session Count - - Session Limit Usage - - Logons -- disk activity - - Physical Disk Reads/Writes - - Sorts On Disk - - Full Table Scans -- database and buffer activity - - Database Wait Time Ratio - - Shared Pool Free Memory - - In-Memory Sorts Ratio - - SQL Service Response Time - - User Rollbacks - - Enqueue Timeouts -- cache - - Cache Hit Ratio - - Global Cache Blocks Events -- activities - - Activities -- wait time - - Wait Time -- tablespace - - Size - - Usage - - Usage In Percent -- allocated space - - Size - - Usage - - Usage In Percent - -## prerequisite - -To use the Oracle module do the following: - -1. Install `oracledb` package ([link](https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html)). - -2. Create a read-only `netdata` user with proper access to your Oracle Database Server. - -Connect to your Oracle database with an administrative user and execute: - -```SQL -CREATE USER netdata IDENTIFIED BY ; - -GRANT CONNECT TO netdata; -GRANT SELECT_CATALOG_ROLE TO netdata; -``` - -## Configuration - -Edit the `python.d/oracledb.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/oracledb.conf -``` - -```yaml -local: - user: 'netdata' - password: 'secret' - server: 'localhost:1521' - service: 'XE' - - -remote: - user: 'netdata' - password: 'secret' - server: '10.0.0.1:1521' - service: 'XE' -``` - -All parameters are required. Without them module will fail to start. - - -### Troubleshooting - -To troubleshoot issues with the `oracledb` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `oracledb` module in debug mode: - -```bash -./python.d.plugin oracledb debug trace -``` - diff --git a/collectors/python.d.plugin/oracledb/README.md b/collectors/python.d.plugin/oracledb/README.md new file mode 120000 index 00000000000000..a75e3611e57644 --- /dev/null +++ b/collectors/python.d.plugin/oracledb/README.md @@ -0,0 +1 @@ +integrations/oracle_db.md \ No newline at end of file diff --git a/collectors/python.d.plugin/oracledb/integrations/oracle_db.md b/collectors/python.d.plugin/oracledb/integrations/oracle_db.md new file mode 100644 index 00000000000000..30557c0214f221 --- /dev/null +++ b/collectors/python.d.plugin/oracledb/integrations/oracle_db.md @@ -0,0 +1,226 @@ + + +# Oracle DB + + + + + +Plugin: python.d.plugin +Module: oracledb + + + +## Overview + +This collector monitors OracleDB database metrics about sessions, tables, memory and more. + +It collects the metrics via the supported database client library + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +In order for this collector to work, it needs a read-only user `netdata` in the RDBMS. + + +### Default Behavior + +#### Auto-Detection + +When the requirements are met, databases on the local host on port 1521 will be auto-detected + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + +These metrics refer to the entire monitored application. + +### Per Oracle DB instance + + + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| oracledb.session_count | total, active | sessions | +| oracledb.session_limit_usage | usage | % | +| oracledb.logons | logons | events/s | +| oracledb.physical_disk_read_writes | reads, writes | events/s | +| oracledb.sorts_on_disks | sorts | events/s | +| oracledb.full_table_scans | full table scans | events/s | +| oracledb.database_wait_time_ratio | wait time ratio | % | +| oracledb.shared_pool_free_memory | free memory | % | +| oracledb.in_memory_sorts_ratio | in-memory sorts | % | +| oracledb.sql_service_response_time | time | seconds | +| oracledb.user_rollbacks | rollbacks | events/s | +| oracledb.enqueue_timeouts | enqueue timeouts | events/s | +| oracledb.cache_hit_ration | buffer, cursor, library, row | % | +| oracledb.global_cache_blocks | corrupted, lost | events/s | +| oracledb.activity | parse count, execute count, user commits, user rollbacks | events/s | +| oracledb.wait_time | application, configuration, administrative, concurrency, commit, network, user I/O, system I/O, scheduler, other | ms | +| oracledb.tablespace_size | a dimension per active tablespace | KiB | +| oracledb.tablespace_usage | a dimension per active tablespace | KiB | +| oracledb.tablespace_usage_in_percent | a dimension per active tablespace | % | +| oracledb.allocated_size | a dimension per active tablespace | B | +| oracledb.allocated_usage | a dimension per active tablespace | B | +| oracledb.allocated_usage_in_percent | a dimension per active tablespace | % | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install the python-oracledb package + +You can follow the official guide below to install the required package: + +Source: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html + + +#### Create a read only user for netdata + +Follow the official instructions for your oracle RDBMS to create a read-only user for netdata. The operation may follow this approach + +Connect to your Oracle database with an administrative user and execute: + +```bash +CREATE USER netdata IDENTIFIED BY ; + +GRANT CONNECT TO netdata; +GRANT SELECT_CATALOG_ROLE TO netdata; +``` + + +#### Edit the configuration + +Edit the configuration troubleshooting: + +1. Provide a valid user for the netdata collector to access the database +2. Specify the network target this database is listening. + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/oracledb.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/oracledb.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
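Before filling in the options below, it can save time to confirm outside of Netdata that the read-only user and the connect string actually work. A quick sanity check with the `sqlplus` client, assuming you have it installed and using the same placeholder host, port and service name as the example jobs further down:

```bash
# connect as the read-only netdata user created in the prerequisites
# (replace 'secret', the host, port and service name with your own values)
sqlplus netdata/secret@//localhost:1521/XE
```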
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| user | The username for the user account. | no | yes | +| password | The password for the user account. | no | yes | +| server | The IP address or hostname (and port) of the Oracle Database Server. | no | yes | +| service | The Oracle Database service name. To view the services available on your server run this query, `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. | no | yes | +| protocol | one of the strings "tcp" or "tcps" indicating whether to use unencrypted network traffic or encrypted network traffic | no | yes | + +
+ +#### Examples + +##### Basic + +A basic example configuration, two jobs described for two databases. + +```yaml +local: + user: 'netdata' + password: 'secret' + server: 'localhost:1521' + service: 'XE' + protocol: 'tcps' + +remote: + user: 'netdata' + password: 'secret' + server: '10.0.0.1:1521' + service: 'XE' + protocol: 'tcps' + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `oracledb` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin oracledb debug trace + ``` + + diff --git a/collectors/python.d.plugin/pandas/README.md b/collectors/python.d.plugin/pandas/README.md deleted file mode 100644 index 19b11d5be41c74..00000000000000 --- a/collectors/python.d.plugin/pandas/README.md +++ /dev/null @@ -1,96 +0,0 @@ -# Ingest structured data (Pandas) - - - Pandas - - -[Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python. -If you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html), -either locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector. - -The collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based -preprocessing, before feeding to Netdata. - -## Requirements - -This collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`. - -```bash -sudo pip install pandas requests -``` - -Note: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as well. - -```bash -sudo pip install 'sqlalchemy<2.0' psycopg2-binary -``` - -## Configuration - -Below is an example configuration to query some json weather data from [Open-Meteo](https://open-meteo.com), -do some data wrangling on it and save in format as expected by Netdata. 
- -```yaml -# example pulling some hourly temperature data -temperature: - name: "temperature" - update_every: 3 - chart_configs: - - name: "temperature_by_city" - title: "Temperature By City" - family: "temperature.today" - context: "pandas.temperature" - type: "line" - units: "Celsius" - df_steps: > - pd.DataFrame.from_dict( - {city: requests.get( - f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m' - ).json()['hourly']['temperature_2m'] - for (city,lat,lng) - in [ - ('dublin', 53.3441, -6.2675), - ('athens', 37.9792, 23.7166), - ('london', 51.5002, -0.1262), - ('berlin', 52.5235, 13.4115), - ('paris', 48.8567, 2.3510), - ] - } - ); # use dictionary comprehension to make multiple requests; - df.describe(); # get aggregate stats for each city; - df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max; - df.rename(columns={'index':'city'}); # some column renaming; - df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city; - df.rename(columns={0:'degrees'}); # some column renaming; - pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label; - df.rename(columns={0:'measurement'}); # some column renaming; - df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want; - df.sort_index(); # sort by city name; - df.transpose(); # transpose so its just one wide row; -``` - -`chart_configs` is a list of dictionary objects where each one defines the sequence of `df_steps` to be run using [`pandas`](https://pandas.pydata.org/), -and the `name`, `title` etc to define the -[CHART variables](https://github.com/netdata/netdata/blob/master/docs/guides/python-collector.md#create-charts) -that will control how the results will look in netdata. - -The example configuration above would result in a `data` dictionary like the below being collected by Netdata -at each time step. They keys in this dictionary will be the "dimensions" of the chart. - -```javascript -{'athens_max': 26.2, 'athens_mean': 19.45952380952381, 'athens_min': 12.2, 'berlin_max': 17.4, 'berlin_mean': 10.764285714285714, 'berlin_min': 5.7, 'dublin_max': 15.3, 'dublin_mean': 12.008928571428571, 'dublin_min': 6.6, 'london_max': 18.9, 'london_mean': 12.510714285714286, 'london_min': 5.2, 'paris_max': 19.4, 'paris_mean': 12.054166666666665, 'paris_min': 4.8} -``` - -Which, given the above configuration would end up as a chart like below in Netdata. - -![pandas collector temperature example chart](https://user-images.githubusercontent.com/2178292/195075312-8ce8cf68-5172-48e3-af09-104ffecfcdd6.png) - -## Notes -- Each line in `df_steps` must return a pandas -[DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) object (`df`) at each step. -- You can use -[this colab notebook](https://colab.research.google.com/drive/1VYrddSegZqGtkWGFuiUbMbUk5f3rW6Hi?usp=sharing) -to mock up and work on your `df_steps` iteratively before adding them to your config. -- This collector is expecting one row in the final pandas DataFrame. It is that first row that will be taken -as the most recent values for each dimension on each chart using (`df.to_dict(orient='records')[0]`). -See [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html). 
diff --git a/collectors/python.d.plugin/pandas/README.md b/collectors/python.d.plugin/pandas/README.md new file mode 120000 index 00000000000000..2fabe63c15fbdb --- /dev/null +++ b/collectors/python.d.plugin/pandas/README.md @@ -0,0 +1 @@ +integrations/pandas.md \ No newline at end of file diff --git a/collectors/python.d.plugin/pandas/integrations/pandas.md b/collectors/python.d.plugin/pandas/integrations/pandas.md new file mode 100644 index 00000000000000..83c5c66b1bb94f --- /dev/null +++ b/collectors/python.d.plugin/pandas/integrations/pandas.md @@ -0,0 +1,365 @@ + + +# Pandas + + + + + +Plugin: python.d.plugin +Module: pandas + + + +## Overview + +[Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python. +If you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html), +either locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector. + +This collector can be used to collect pretty much anything that can be read by Pandas, and then processed by Pandas. + + +The collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding to Netdata. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + +This collector is expecting one row in the final pandas DataFrame. It is that first row that will be taken +as the most recent values for each dimension on each chart using (`df.to_dict(orient='records')[0]`). +See [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html)." + + +### Per Pandas instance + +These metrics refer to the entire monitored application. + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Python Requirements + +This collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`. + +```bash +sudo pip install pandas requests +``` + +Note: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as well. + +```bash +sudo pip install 'sqlalchemy<2.0' psycopg2-binary +``` + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/pandas.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). 
+ +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/pandas.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
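The note in the Metrics section above (one row in the final DataFrame, read with `df.to_dict(orient='records')[0]`) is easier to see with a small standalone sketch. This is not how the plugin is invoked; it is just the same pandas operations that the "API CSV Example" below chains together in `df_steps`, assuming `pandas` is installed and the public demo server is reachable:

```python
import pandas as pd

# step 1: read one minute of CPU data from the public Netdata demo server
# (the same URL the "API CSV Example" below uses)
df = pd.read_csv(
    'https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60',
    storage_options={'User-Agent': 'netdata'},
)
df = df.drop('time', axis=1)           # step 2: drop the timestamp column
df = df.mean().to_frame().transpose()  # step 3: collapse to a single summary row
# the collector reports the first (and only) row as the chart dimensions
print(df.to_dict(orient='records')[0])
```

Each line of a real `df_steps` value plays the role of one of the `df = ...` assignments here, with `df` always holding the previous step's result.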
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| chart_configs | an array of chart configuration dictionaries | [] | yes | +| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes | +| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes | +| chart_configs.family | [family](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#families) of the chart to be displayed in the dashboard. | None | yes | +| chart_configs.context | [context](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes | +| chart_configs.type | the type of the chart to be displayed in the dashboard. | None | yes | +| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes | +| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes | +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | + +
+ +#### Examples + +##### Temperature API Example + +example pulling some hourly temperature data, a chart for today forecast (mean,min,max) and another chart for current. + +
Config + +```yaml +temperature: + name: "temperature" + update_every: 5 + chart_configs: + - name: "temperature_forecast_by_city" + title: "Temperature By City - Today Forecast" + family: "temperature.today" + context: "pandas.temperature" + type: "line" + units: "Celsius" + df_steps: > + pd.DataFrame.from_dict( + {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m'] + for (city,lat,lng) + in [ + ('dublin', 53.3441, -6.2675), + ('athens', 37.9792, 23.7166), + ('london', 51.5002, -0.1262), + ('berlin', 52.5235, 13.4115), + ('paris', 48.8567, 2.3510), + ('madrid', 40.4167, -3.7033), + ('new_york', 40.71, -74.01), + ('los_angeles', 34.05, -118.24), + ] + } + ); + df.describe(); # get aggregate stats for each city; + df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max; + df.rename(columns={'index':'city'}); # some column renaming; + df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city; + df.rename(columns={0:'degrees'}); # some column renaming; + pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label; + df.rename(columns={0:'measurement'}); # some column renaming; + df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want; + df.sort_index(); # sort by city name; + df.transpose(); # transpose so its just one wide row; + - name: "temperature_current_by_city" + title: "Temperature By City - Current" + family: "temperature.current" + context: "pandas.temperature" + type: "line" + units: "Celsius" + df_steps: > + pd.DataFrame.from_dict( + {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}¤t_weather=true').json()['current_weather'] + for (city,lat,lng) + in [ + ('dublin', 53.3441, -6.2675), + ('athens', 37.9792, 23.7166), + ('london', 51.5002, -0.1262), + ('berlin', 52.5235, 13.4115), + ('paris', 48.8567, 2.3510), + ('madrid', 40.4167, -3.7033), + ('new_york', 40.71, -74.01), + ('los_angeles', 34.05, -118.24), + ] + } + ); + df.transpose(); + df[['temperature']]; + df.transpose(); + +``` +
+ +##### API CSV Example + +example showing a read_csv from a url and some light pandas data wrangling. + +
Config + +```yaml +example_csv: + name: "example_csv" + update_every: 2 + chart_configs: + - name: "london_system_cpu" + title: "London System CPU - Ratios" + family: "london_system_cpu" + context: "pandas" + type: "line" + units: "n" + df_steps: > + pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'}); + df.drop('time', axis=1); + df.mean().to_frame().transpose(); + df.apply(lambda row: (row.user / row.system), axis = 1).to_frame(); + df.rename(columns={0:'average_user_system_ratio'}); + df*100; + +``` +
+ +##### API JSON Example + +example showing a read_json from a url and some light pandas data wrangling. + +
Config + +```yaml +example_json: + name: "example_json" + update_every: 2 + chart_configs: + - name: "london_system_net" + title: "London System Net - Total Bandwidth" + family: "london_system_net" + context: "pandas" + type: "area" + units: "kilobits/s" + df_steps: > + pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']); + df.drop('time', axis=1); + abs(df); + df.sum(axis=1).to_frame(); + df.rename(columns={0:'total_bandwidth'}); + +``` +
+ +##### XML Example + +example showing a read_xml from a url and some light pandas data wrangling. + +
Config + +```yaml +example_xml: + name: "example_xml" + update_every: 2 + line_sep: "|" + chart_configs: + - name: "temperature_forcast" + title: "Temperature Forecast" + family: "temp" + context: "pandas.temp" + type: "line" + units: "celsius" + df_steps: > + pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')| + df.rename(columns={'value': 'dublin'})| + df[['dublin']]| + +``` +
+ +##### SQL Example + +example showing a read_sql from a postgres database using sqlalchemy. + +
Config + +```yaml +sql: + name: "sql" + update_every: 5 + chart_configs: + - name: "sql" + title: "SQL Example" + family: "sql.example" + context: "example" + type: "line" + units: "percent" + df_steps: > + pd.read_sql_query( + sql='\ + select \ + random()*100 as metric_1, \ + random()*100 as metric_2 \ + ', + con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata') + ); + +``` +
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `pandas` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin pandas debug trace + ``` + + diff --git a/collectors/python.d.plugin/pandas/metadata.yaml b/collectors/python.d.plugin/pandas/metadata.yaml index 28a1d3b212d533..92ee1e986f6d37 100644 --- a/collectors/python.d.plugin/pandas/metadata.yaml +++ b/collectors/python.d.plugin/pandas/metadata.yaml @@ -5,7 +5,7 @@ modules: module_name: pandas monitored_instance: name: Pandas - link: https://learn.netdata.cloud/docs/data-collection/generic-data-collection/structured-data-pandas + link: https://pandas.pydata.org/ categories: - data-collection.generic-data-collection icon_filename: pandas.png @@ -26,8 +26,6 @@ modules: either locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector. This collector can be used to collect pretty much anything that can be read by Pandas, and then processed by Pandas. - - More detailed information can be found in the Netdata documentation [here](https://learn.netdata.cloud/docs/data-collection/generic-data-collection/structured-data-pandas). method_description: | The collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding to Netdata. supported_platforms: @@ -92,11 +90,11 @@ modules: default_value: None required: true - name: chart_configs.family - description: "[family](https://learn.netdata.cloud/docs/data-collection/chart-dimensions-contexts-and-families#family) of the chart to be displayed in the dashboard." + description: "[family](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#families) of the chart to be displayed in the dashboard." default_value: None required: true - name: chart_configs.context - description: "[context](https://learn.netdata.cloud/docs/data-collection/chart-dimensions-contexts-and-families#context) of the chart to be displayed in the dashboard." + description: "[context](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#contexts) of the chart to be displayed in the dashboard." default_value: None required: true - name: chart_configs.type diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md deleted file mode 100644 index ba5565499a65b3..00000000000000 --- a/collectors/python.d.plugin/postfix/README.md +++ /dev/null @@ -1,59 +0,0 @@ - - -# Postfix collector - -Monitors MTA email queue statistics using [postqueue](http://www.postfix.org/postqueue.1.html) tool. - -The collector executes `postqueue -p` to get Postfix queue statistics. - -## Requirements - -Postfix has internal access controls that limit activities on the mail queue. By default, all users are allowed to view -the queue. If your system is configured with stricter access controls, you need to grant the `netdata` user access to -view the mail queue. 
In order to do it, add `netdata` to `authorized_mailq_users` in the `/etc/postfix/main.cf` file. - -See the `authorized_mailq_users` setting in -the [Postfix documentation](https://www.postfix.org/postconf.5.html) for more details. - -## Charts - -It produces only two charts: - -1. **Postfix Queue Emails** - - - emails - -2. **Postfix Queue Emails Size** in KB - - - size - -## Configuration - -Configuration is not needed. -### Troubleshooting - -To troubleshoot issues with the `postfix` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `postfix` module in debug mode: - -```bash -./python.d.plugin postfix debug trace -``` - diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md new file mode 120000 index 00000000000000..c62eb5c247bb07 --- /dev/null +++ b/collectors/python.d.plugin/postfix/README.md @@ -0,0 +1 @@ +integrations/postfix.md \ No newline at end of file diff --git a/collectors/python.d.plugin/postfix/integrations/postfix.md b/collectors/python.d.plugin/postfix/integrations/postfix.md new file mode 100644 index 00000000000000..2bb99922c7744f --- /dev/null +++ b/collectors/python.d.plugin/postfix/integrations/postfix.md @@ -0,0 +1,151 @@ + + +# Postfix + + + + + +Plugin: python.d.plugin +Module: postfix + + + +## Overview + +Keep an eye on Postfix metrics for efficient mail server operations. +Improve your mail server performance with Netdata's real-time metrics and built-in alerts. + + +Monitors MTA email queue statistics using [postqueue](http://www.postfix.org/postqueue.1.html) tool. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +Postfix has internal access controls that limit activities on the mail queue. By default, all users are allowed to view the queue. If your system is configured with stricter access controls, you need to grant the `netdata` user access to view the mail queue. In order to do it, add `netdata` to `authorized_mailq_users` in the `/etc/postfix/main.cf` file. +See the `authorized_mailq_users` setting in the [Postfix documentation](https://www.postfix.org/postconf.5.html) for more details. + + +### Default Behavior + +#### Auto-Detection + +The collector executes `postqueue -p` to get Postfix queue statistics. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Postfix instance + +These metrics refer to the entire monitored application. + + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| postfix.qemails | emails | emails | +| postfix.qsize | size | KiB | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +There is no configuration file. +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
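If queue access has been restricted on your system (see the note about `authorized_mailq_users` in the Overview), the change can be applied with `postconf`. The user list below is only an example; keep whichever users your policy already allows:

```bash
# let the netdata user view the mail queue, then reload Postfix
postconf -e 'authorized_mailq_users = root, netdata'
postfix reload
```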
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 1 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | + +
+ +#### Examples +There are no configuration examples. + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `postfix` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin postfix debug trace + ``` + + diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md deleted file mode 100644 index 3b0c55b97d9336..00000000000000 --- a/collectors/python.d.plugin/puppet/README.md +++ /dev/null @@ -1,90 +0,0 @@ - - -# Puppet collector - -Monitor status of Puppet Server and Puppet DB. - -Following charts are drawn: - -1. **JVM Heap** - - - committed (allocated from OS) - - used (actual use) - -2. **JVM Non-Heap** - - - committed (allocated from OS) - - used (actual use) - -3. **CPU Usage** - - - execution - - GC (taken by garbage collection) - -4. **File Descriptors** - - - max - - used - -## Configuration - -Edit the `python.d/puppet.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/puppet.conf -``` - -```yaml -puppetdb: - url: 'https://fqdn.example.com:8081' - tls_cert_file: /path/to/client.crt - tls_key_file: /path/to/client.key - autodetection_retry: 1 - -puppetserver: - url: 'https://fqdn.example.com:8140' - autodetection_retry: 1 -``` - -When no configuration is given, module uses `https://fqdn.example.com:8140`. - -### notes - -- Exact Fully Qualified Domain Name of the node should be used. -- Usually Puppet Server/DB startup time is VERY long. So, there should - be quite reasonable retry count. -- Secure PuppetDB config may require client certificate. Not applies - to default PuppetDB configuration though. - - - - -### Troubleshooting - -To troubleshoot issues with the `puppet` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `puppet` module in debug mode: - -```bash -./python.d.plugin puppet debug trace -``` - diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md new file mode 120000 index 00000000000000..b6c4c83f930244 --- /dev/null +++ b/collectors/python.d.plugin/puppet/README.md @@ -0,0 +1 @@ +integrations/puppet.md \ No newline at end of file diff --git a/collectors/python.d.plugin/puppet/integrations/puppet.md b/collectors/python.d.plugin/puppet/integrations/puppet.md new file mode 100644 index 00000000000000..ca190b576c53de --- /dev/null +++ b/collectors/python.d.plugin/puppet/integrations/puppet.md @@ -0,0 +1,215 @@ + + +# Puppet + + + + + +Plugin: python.d.plugin +Module: puppet + + + +## Overview + +This collector monitors Puppet metrics about JVM Heap, Non-Heap, CPU usage and file descriptors.' + + +It uses Puppet's metrics API endpoint to gather the metrics. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, this collector will use `https://fqdn.example.com:8140` as the URL to look for metrics. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Puppet instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| puppet.jvm | committed, used | MiB | +| puppet.jvm | committed, used | MiB | +| puppet.cpu | execution, GC | percentage | +| puppet.fdopen | used | descriptors | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/puppet.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/puppet.conf +``` +#### Options + +This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize it's data collection behavior. + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + +> Notes: +> - Exact Fully Qualified Domain Name of the node should be used. 
+> - Usually Puppet Server/DB startup time is VERY long. So, there should be quite reasonable retry count. +> - A secured PuppetDB config may require a client certificate. This does not apply to the default PuppetDB configuration though. + + +
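When PuppetDB does require a client certificate, a common approach is to reuse the Puppet agent's own certificate and key. A sketch, assuming the default ssldir `/etc/puppetlabs/puppet/ssl` and that the Netdata user is permitted to read the key file:

```yaml
puppetdb:
  url: 'https://fqdn.example.com:8081'
  tls_cert_file: /etc/puppetlabs/puppet/ssl/certs/fqdn.example.com.pem
  tls_key_file: /etc/puppetlabs/puppet/ssl/private_keys/fqdn.example.com.pem
  autodetection_retry: 1
```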
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| url | HTTP or HTTPS URL, exact Fully Qualified Domain Name of the node should be used. | https://fqdn.example.com:8081 | yes | +| tls_verify | Control HTTPS server certificate verification. | False | no | +| tls_ca_file | Optional CA (bundle) file to use | | no | +| tls_cert_file | Optional client certificate file | | no | +| tls_key_file | Optional client key file | | no | +| update_every | Sets the default data collection frequency. | 30 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | + +
+ +#### Examples + +##### Basic + +A basic example configuration + +```yaml +puppetserver: + url: 'https://fqdn.example.com:8140' + autodetection_retry: 1 + +``` +##### TLS Certificate + +An example using a TLS certificate + +
Config + +```yaml +puppetdb: + url: 'https://fqdn.example.com:8081' + tls_cert_file: /path/to/client.crt + tls_key_file: /path/to/client.key + autodetection_retry: 1 + +``` +
+ +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
Config + +```yaml +puppetserver1: + url: 'https://fqdn.example.com:8140' + autodetection_retry: 1 + +puppetserver2: + url: 'https://fqdn.example2.com:8140' + autodetection_retry: 1 + +``` +
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `puppet` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin puppet debug trace + ``` + + diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in index 681ceb403ae509..86fea209c5d488 100644 --- a/collectors/python.d.plugin/python.d.plugin.in +++ b/collectors/python.d.plugin/python.d.plugin.in @@ -222,8 +222,11 @@ class ModuleConfig: def __init__(self, name, config=None): self.name = name self.config = config or OrderedDict() + self.is_stock = False def load(self, abs_path): + if not IS_ATTY: + self.is_stock = abs_path.startswith(DIRS.modules_stock_config) self.config.update(load_config(abs_path) or dict()) def defaults(self): @@ -242,6 +245,7 @@ class ModuleConfig: config = OrderedDict() config.update(job_config) config['job_name'] = job_name + config['__is_stock'] = self.is_stock for k, v in self.defaults().items(): config.setdefault(k, v) @@ -309,7 +313,8 @@ class JobsConfigsBuilder: return None configs = config.create_jobs() - self.log.info("[{0}] built {1} job(s) configs".format(module_name, len(configs))) + if not config.is_stock: + self.log.info("[{0}] built {1} job(s) configs".format(module_name, len(configs))) self.apply_defaults(configs, self.module_defaults) self.apply_defaults(configs, self.job_defaults) @@ -338,6 +343,7 @@ class Job(threading.Thread): self.autodetection_retry = config['autodetection_retry'] self.checks = self.inf self.job = None + self.is_stock = config.get('__is_stock', False) self.status = JOB_STATUS_INIT def is_inited(self): @@ -350,8 +356,14 @@ class Job(threading.Thread): return self.job.name def check(self): + if self.is_stock: + self.job.logger.mute() + ok = self.job.check() + + self.job.logger.unmute() self.checks -= self.checks != self.inf and not ok + return ok def create(self): @@ -503,7 +515,6 @@ class FileLockRegistry: name = "docker" + name[7:] return name - def register(self, name): name = self.rename(name) if name in self.locks: @@ -582,8 +593,8 @@ class Plugin: try: statuses = JobsStatuses().from_file(abs_path) except Exception as error: - self.log.error("[{0}] config file invalid YAML format: {1}".format( - module_name, ' '.join([v.strip() for v in str(error).split('\n')]))) + self.log.error("'{0}' invalid JSON format: {1}".format( + abs_path, ' '.join([v.strip() for v in str(error).split('\n')]))) return None self.log.debug("'{0}' is loaded".format(abs_path)) return statuses @@ -685,12 +696,14 @@ class Plugin: try: ok = job.check() except Exception as error: - self.log.warning("{0}[{1}] : unhandled exception on check : {2}, skipping the job".format( - job.module_name, job.real_name, repr(error))) + if not job.is_stock: + self.log.warning("{0}[{1}] : unhandled exception on check : {2}, skipping the job".format( + job.module_name, job.real_name, repr(error))) job.status = JOB_STATUS_DROPPED continue if not ok: - self.log.info('{0}[{1}] : check failed'.format(job.module_name, job.real_name)) + if not job.is_stock: 
+ self.log.info('{0}[{1}] : check failed'.format(job.module_name, job.real_name)) job.status = JOB_STATUS_RECOVERING if job.need_to_recheck() else JOB_STATUS_DROPPED continue self.log.info('{0}[{1}] : check success'.format(job.module_name, job.real_name)) @@ -876,6 +889,17 @@ def main(): cmd = parse_command_line() log = PythonDLogger() + level = os.getenv('NETDATA_LOG_LEVEL') or str() + level = level.lower() + if level == 'debug': + log.logger.severity = 'DEBUG' + elif level == 'info': + log.logger.severity = 'INFO' + elif level == 'warn' or level == 'warning': + log.logger.severity = 'WARNING' + elif level == 'err' or level == 'error': + log.logger.severity = 'ERROR' + if cmd.debug: log.logger.severity = 'DEBUG' if cmd.trace: diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py index a7acc23b664590..3f122e1d9de7c1 100644 --- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py +++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py @@ -8,7 +8,7 @@ from bases.charts import Charts, ChartError, create_runtime_chart from bases.collection import safe_print -from bases.loggers import PythonDLimitedLogger +from bases.loggers import PythonDLogger from third_party.monotonic import monotonic from time import sleep, time @@ -62,7 +62,7 @@ def clean_module_name(name): return name -class SimpleService(PythonDLimitedLogger, object): +class SimpleService(PythonDLogger, object): """ Prototype of Service class. Implemented basic functionality to run jobs by `python.d.plugin` @@ -73,7 +73,7 @@ def __init__(self, configuration, name=''): :param configuration: :param name: """ - PythonDLimitedLogger.__init__(self) + PythonDLogger.__init__(self) self.configuration = configuration self.order = list() self.definitions = dict() diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py index 1faf036a4bac53..76129d376fe620 100644 --- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py +++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py @@ -6,8 +6,6 @@ import urllib3 -from distutils.version import StrictVersion as version - from bases.FrameworkServices.SimpleService import SimpleService try: @@ -15,28 +13,11 @@ except AttributeError: pass -# https://github.com/urllib3/urllib3/blob/master/CHANGES.rst#19-2014-07-04 -# New retry logic and urllib3.util.retry.Retry configuration object. 
(Issue https://github.com/urllib3/urllib3/pull/326) -URLLIB3_MIN_REQUIRED_VERSION = '1.9' URLLIB3_VERSION = urllib3.__version__ URLLIB3 = 'urllib3' - -def version_check(): - if version(URLLIB3_VERSION) >= version(URLLIB3_MIN_REQUIRED_VERSION): - return - - err = '{0} version: {1}, minimum required version: {2}, please upgrade'.format( - URLLIB3, - URLLIB3_VERSION, - URLLIB3_MIN_REQUIRED_VERSION, - ) - raise Exception(err) - - class UrlService(SimpleService): def __init__(self, configuration=None, name=None): - version_check() SimpleService.__init__(self, configuration=configuration, name=name) self.debug("{0} version: {1}".format(URLLIB3, URLLIB3_VERSION)) self.url = self.configuration.get('url') diff --git a/collectors/python.d.plugin/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py index 47f196a6de32e0..7ae8ab0c12a288 100644 --- a/collectors/python.d.plugin/python_modules/bases/loggers.py +++ b/collectors/python.d.plugin/python_modules/bases/loggers.py @@ -4,6 +4,8 @@ # SPDX-License-Identifier: GPL-3.0-or-later import logging +import os +import stat import traceback from sys import exc_info @@ -15,39 +17,46 @@ from bases.collection import on_try_except_finally, unicode_str +LOGGING_LEVELS = { + 'CRITICAL': 50, + 'ERROR': 40, + 'WARNING': 30, + 'INFO': 20, + 'DEBUG': 10, + 'NOTSET': 0, +} -LOGGING_LEVELS = {'CRITICAL': 50, - 'ERROR': 40, - 'WARNING': 30, - 'INFO': 20, - 'DEBUG': 10, - 'NOTSET': 0} -DEFAULT_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s : %(message)s' -DEFAULT_LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S' +def is_stderr_connected_to_journal(): + journal_stream = os.environ.get("JOURNAL_STREAM") + if not journal_stream: + return False -PYTHON_D_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s: %(module_name)s[%(job_name)s] : %(message)s' -PYTHON_D_LOG_NAME = 'python.d' + colon_index = journal_stream.find(":") + if colon_index <= 0: + return False + device, inode = journal_stream[:colon_index], journal_stream[colon_index + 1:] -def limiter(log_max_count=30, allowed_in_seconds=60): - def on_decorator(func): + try: + device_number, inode_number = os.fstat(2)[stat.ST_DEV], os.fstat(2)[stat.ST_INO] + except OSError: + return False - def on_call(*args): - current_time = args[0]._runtime_counters.start_mono - lc = args[0]._logger_counters + return str(device_number) == device and str(inode_number) == inode - if lc.logged and lc.logged % log_max_count == 0: - if current_time - lc.time_to_compare <= allowed_in_seconds: - lc.dropped += 1 - return - lc.time_to_compare = current_time - lc.logged += 1 - func(*args) +is_journal = is_stderr_connected_to_journal() + +DEFAULT_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s : %(message)s' +PYTHON_D_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s: %(module_name)s[%(job_name)s] : %(message)s' + +if is_journal: + DEFAULT_LOG_LINE_FORMAT = '%(name)s %(levelname)s : %(message)s' + PYTHON_D_LOG_LINE_FORMAT = '%(name)s %(levelname)s: %(module_name)s[%(job_name)s] : %(message)s ' - return on_call - return on_decorator +DEFAULT_LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S' +PYTHON_D_LOG_NAME = 'python.d' def add_traceback(func): @@ -66,27 +75,16 @@ def on_call(*args): return on_call -class LoggerCounters: - def __init__(self): - self.logged = 0 - self.dropped = 0 - self.time_to_compare = time() - - def __repr__(self): - return 'LoggerCounter(logged: {logged}, dropped: {dropped})'.format(logged=self.logged, - dropped=self.dropped) - - class BaseLogger(object): - def __init__(self, logger_name, 
log_fmt=DEFAULT_LOG_LINE_FORMAT, date_fmt=DEFAULT_LOG_TIME_FORMAT, - handler=logging.StreamHandler): - """ - :param logger_name: - :param log_fmt: - :param date_fmt: - :param handler: - """ + def __init__( + self, + logger_name, + log_fmt=DEFAULT_LOG_LINE_FORMAT, + date_fmt=DEFAULT_LOG_TIME_FORMAT, + handler=logging.StreamHandler, + ): self.logger = logging.getLogger(logger_name) + self._muted = False if not self.has_handlers(): self.severity = 'INFO' self.logger.addHandler(handler()) @@ -96,11 +94,6 @@ def __repr__(self): return ''.format(name=self.logger.name) def set_formatter(self, fmt, date_fmt=DEFAULT_LOG_TIME_FORMAT): - """ - :param fmt: - :param date_fmt: - :return: - """ if self.has_handlers(): self.logger.handlers[0].setFormatter(logging.Formatter(fmt=fmt, datefmt=date_fmt)) @@ -113,43 +106,48 @@ def severity(self): @severity.setter def severity(self, level): - """ - :param level: or - :return: - """ if level in LOGGING_LEVELS: self.logger.setLevel(LOGGING_LEVELS[level]) + def _log(self, level, *msg, **kwargs): + if not self._muted: + self.logger.log(level, ' '.join(map(unicode_str, msg)), **kwargs) + def debug(self, *msg, **kwargs): - self.logger.debug(' '.join(map(unicode_str, msg)), **kwargs) + self._log(logging.DEBUG, *msg, **kwargs) def info(self, *msg, **kwargs): - self.logger.info(' '.join(map(unicode_str, msg)), **kwargs) + self._log(logging.INFO, *msg, **kwargs) def warning(self, *msg, **kwargs): - self.logger.warning(' '.join(map(unicode_str, msg)), **kwargs) + self._log(logging.WARN, *msg, **kwargs) def error(self, *msg, **kwargs): - self.logger.error(' '.join(map(unicode_str, msg)), **kwargs) + self._log(logging.ERROR, *msg, **kwargs) - def alert(self, *msg, **kwargs): - self.logger.critical(' '.join(map(unicode_str, msg)), **kwargs) + def alert(self, *msg, **kwargs): + self._log(logging.CRITICAL, *msg, **kwargs) @on_try_except_finally(on_finally=(exit, 1)) def fatal(self, *msg, **kwargs): - self.logger.critical(' '.join(map(unicode_str, msg)), **kwargs) + self._log(logging.CRITICAL, *msg, **kwargs) + + def mute(self): + self._muted = True + + def unmute(self): + self._muted = False class PythonDLogger(object): - def __init__(self, logger_name=PYTHON_D_LOG_NAME, log_fmt=PYTHON_D_LOG_LINE_FORMAT): - """ - :param logger_name: - :param log_fmt: - """ + def __init__( + self, + logger_name=PYTHON_D_LOG_NAME, + log_fmt=PYTHON_D_LOG_LINE_FORMAT, + ): self.logger = BaseLogger(logger_name, log_fmt=log_fmt) self.module_name = 'plugin' self.job_name = 'main' - self._logger_counters = LoggerCounters() _LOG_TRACEBACK = False @@ -162,45 +160,39 @@ def log_traceback(self, value): PythonDLogger._LOG_TRACEBACK = value def debug(self, *msg): - self.logger.debug(*msg, extra={'module_name': self.module_name, - 'job_name': self.job_name or self.module_name}) + self.logger.debug(*msg, extra={ + 'module_name': self.module_name, + 'job_name': self.job_name or self.module_name, + }) def info(self, *msg): - self.logger.info(*msg, extra={'module_name': self.module_name, - 'job_name': self.job_name or self.module_name}) + self.logger.info(*msg, extra={ + 'module_name': self.module_name, + 'job_name': self.job_name or self.module_name, + }) def warning(self, *msg): - self.logger.warning(*msg, extra={'module_name': self.module_name, - 'job_name': self.job_name or self.module_name}) + self.logger.warning(*msg, extra={ + 'module_name': self.module_name, + 'job_name': self.job_name or self.module_name, + }) @add_traceback def error(self, *msg): - self.logger.error(*msg, extra={'module_name': 
self.module_name, - 'job_name': self.job_name or self.module_name}) + self.logger.error(*msg, extra={ + 'module_name': self.module_name, + 'job_name': self.job_name or self.module_name, + }) @add_traceback def alert(self, *msg): - self.logger.alert(*msg, extra={'module_name': self.module_name, - 'job_name': self.job_name or self.module_name}) + self.logger.alert(*msg, extra={ + 'module_name': self.module_name, + 'job_name': self.job_name or self.module_name, + }) def fatal(self, *msg): - self.logger.fatal(*msg, extra={'module_name': self.module_name, - 'job_name': self.job_name or self.module_name}) - - -class PythonDLimitedLogger(PythonDLogger): - @limiter() - def info(self, *msg): - PythonDLogger.info(self, *msg) - - @limiter() - def warning(self, *msg): - PythonDLogger.warning(self, *msg) - - @limiter() - def error(self, *msg): - PythonDLogger.error(self, *msg) - - @limiter() - def alert(self, *msg): - PythonDLogger.alert(self, *msg) + self.logger.fatal(*msg, extra={ + 'module_name': self.module_name, + 'job_name': self.job_name or self.module_name, + }) diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md deleted file mode 100644 index 527ce4c316c146..00000000000000 --- a/collectors/python.d.plugin/rethinkdbs/README.md +++ /dev/null @@ -1,77 +0,0 @@ - - -# RethinkDB collector - -Collects database server and cluster statistics. - -Following charts are drawn: - -1. **Connected Servers** - - - connected - - missing - -2. **Active Clients** - - - active - -3. **Queries** per second - - - queries - -4. **Documents** per second - - - documents - -## Configuration - -Edit the `python.d/rethinkdbs.conf` configuration file using `edit-config` from the -Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically -at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/rethinkdbs.conf -``` - -```yaml -localhost: - name: 'local' - host: '127.0.0.1' - port: 28015 - user: "user" - password: "pass" -``` - -When no configuration file is found, module tries to connect to `127.0.0.1:28015`. - - - - -### Troubleshooting - -To troubleshoot issues with the `rethinkdbs` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `rethinkdbs` module in debug mode: - -```bash -./python.d.plugin rethinkdbs debug trace -``` - diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md new file mode 120000 index 00000000000000..78ddcfa18e2c66 --- /dev/null +++ b/collectors/python.d.plugin/rethinkdbs/README.md @@ -0,0 +1 @@ +integrations/rethinkdb.md \ No newline at end of file diff --git a/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md b/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md new file mode 100644 index 00000000000000..ab51c05149d621 --- /dev/null +++ b/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md @@ -0,0 +1,190 @@ + + +# RethinkDB + + + + + +Plugin: python.d.plugin +Module: rethinkdbs + + + +## Overview + +This collector monitors metrics about RethinkDB clusters and database servers. + +It uses the `rethinkdb` python module to connect to a RethinkDB server instance and gather statistics. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +When no configuration file is found, the collector tries to connect to 127.0.0.1:28015. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per RethinkDB instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| rethinkdb.cluster_connected_servers | connected, missing | servers | +| rethinkdb.cluster_clients_active | active | clients | +| rethinkdb.cluster_queries | queries | queries/s | +| rethinkdb.cluster_documents | reads, writes | documents/s | + +### Per database server + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| rethinkdb.client_connections | connections | connections | +| rethinkdb.clients_active | active | clients | +| rethinkdb.queries | queries | queries/s | +| rethinkdb.documents | reads, writes | documents/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Required python module + +The collector requires the `rethinkdb` python module to be installed. + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/rethinkdbs.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/rethinkdbs.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. 
+ +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | +| host | Hostname or ip of the RethinkDB server. | localhost | no | +| port | Port to connect to the RethinkDB server. | 28015 | no | +| user | The username to use to connect to the RethinkDB server. | admin | no | +| password | The password to use to connect to the RethinkDB server. | | no | +| timeout | Set a connect timeout to the RethinkDB server. | 2 | no | + +
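Before adding jobs, it can help to confirm the prerequisite above: that the `rethinkdb` Python module is importable and that the endpoint the collector auto-detects is reachable. The sketch below is illustrative only and is not part of the collector; the host and port are the assumed defaults from this page (`127.0.0.1:28015`).

```python
# Illustrative pre-flight check for the rethinkdbs collector (not collector code).
# Assumes the default endpoint documented above: 127.0.0.1:28015.
import importlib.util
import socket

HOST, PORT = "127.0.0.1", 28015

# 1. Is the `rethinkdb` driver importable by this interpreter?
if importlib.util.find_spec("rethinkdb") is None:
    print("rethinkdb module not found; install it for the interpreter running python.d.plugin")
else:
    print("rethinkdb module is importable")

# 2. Does anything accept TCP connections on the default driver port?
try:
    with socket.create_connection((HOST, PORT), timeout=2):
        print(f"TCP connection to {HOST}:{PORT} succeeded")
except OSError as err:
    print(f"cannot reach {HOST}:{PORT}: {err}")
```

If both checks pass but charts still do not appear, the debug steps in the Troubleshooting section below are the next stop.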
+ +#### Examples + +##### Local RethinkDB server + +An example of a configuration for a local RethinkDB server + +```yaml +localhost: + name: 'local' + host: '127.0.0.1' + port: 28015 + user: "user" + password: "pass" + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `rethinkdbs` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin rethinkdbs debug trace + ``` + + diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md deleted file mode 100644 index b7f2fcb14d4b47..00000000000000 --- a/collectors/python.d.plugin/retroshare/README.md +++ /dev/null @@ -1,70 +0,0 @@ - - -# RetroShare collector - -Monitors application bandwidth, peers and DHT metrics. - -This module will monitor one or more `RetroShare` applications, depending on your configuration. - -## Charts - -This module produces the following charts: - -- Bandwidth in `kilobits/s` -- Peers in `peers` -- DHT in `peers` - - -## Configuration - -Edit the `python.d/retroshare.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/retroshare.conf -``` - -Here is an example for 2 servers: - -```yaml -localhost: - url : 'http://localhost:9090' - user : "user" - password : "pass" - -remote: - url : 'http://203.0.113.1:9090' - user : "user" - password : "pass" -``` - - - -### Troubleshooting - -To troubleshoot issues with the `retroshare` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `retroshare` module in debug mode: - -```bash -./python.d.plugin retroshare debug trace -``` - diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md new file mode 120000 index 00000000000000..4e4c2cdb74d520 --- /dev/null +++ b/collectors/python.d.plugin/retroshare/README.md @@ -0,0 +1 @@ +integrations/retroshare.md \ No newline at end of file diff --git a/collectors/python.d.plugin/retroshare/integrations/retroshare.md b/collectors/python.d.plugin/retroshare/integrations/retroshare.md new file mode 100644 index 00000000000000..4fc003c6f909eb --- /dev/null +++ b/collectors/python.d.plugin/retroshare/integrations/retroshare.md @@ -0,0 +1,191 @@ + + +# RetroShare + + + + + +Plugin: python.d.plugin +Module: retroshare + + + +## Overview + +This collector monitors RetroShare statistics such as application bandwidth, peers, and DHT metrics. + +It connects to the RetroShare web interface to gather metrics. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +The collector will attempt to connect and detect a RetroShare web interface through http://localhost:9090, even without any configuration. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per RetroShare instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| retroshare.bandwidth | Upload, Download | kilobits/s | +| retroshare.peers | All friends, Connected friends | peers | +| retroshare.dht | DHT nodes estimated, RS nodes estimated | peers | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ retroshare_dht_working ](https://github.com/netdata/netdata/blob/master/health/health.d/retroshare.conf) | retroshare.dht | number of DHT peers | + + +## Setup + +### Prerequisites + +#### RetroShare web interface + +RetroShare needs to be configured to enable the RetroShare WEB Interface and allow access from the Netdata host. + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/retroshare.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/retroshare.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. 
+ +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | +| url | The URL to the RetroShare Web UI. | http://localhost:9090 | no | + +
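A quick way to confirm the prerequisite above (web interface enabled and reachable from the Netdata host) is to request the UI URL directly. The sketch below is illustrative and assumes the default `url` of `http://localhost:9090`; an HTTP error response such as 401 still indicates the interface is listening.

```python
# Illustrative reachability check for the RetroShare web interface (not collector code).
# The URL is the assumed default from the `url` option above.
from urllib.error import HTTPError, URLError
from urllib.request import urlopen

URL = "http://localhost:9090"

try:
    with urlopen(URL, timeout=2) as response:
        print(f"{URL} answered with HTTP {response.status}")
except HTTPError as err:
    # Any HTTP response (including 401/403) means the web interface is up.
    print(f"{URL} answered with HTTP {err.code}; the interface is listening")
except URLError as err:
    print(f"cannot reach {URL}: {err.reason}")
```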
+ +#### Examples + +##### Local RetroShare Web UI + +A basic configuration for a RetroShare server running on localhost. + +
Config + +```yaml +localhost: + name: 'local retroshare' + url: 'http://localhost:9090' + +``` +
+ +##### Remote RetroShare Web UI + +A basic configuration for a remote RetroShare server. + +
Config + +```yaml +remote: + name: 'remote retroshare' + url: 'http://1.2.3.4:9090' + +``` +
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `retroshare` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin retroshare debug trace + ``` + + diff --git a/collectors/python.d.plugin/riakkv/README.md b/collectors/python.d.plugin/riakkv/README.md deleted file mode 100644 index e822c551ebae8b..00000000000000 --- a/collectors/python.d.plugin/riakkv/README.md +++ /dev/null @@ -1,149 +0,0 @@ - - -# Riak KV collector - -Collects database stats from `/stats` endpoint. - -## Requirements - -- An accessible `/stats` endpoint. See [the Riak KV configuration reference documentation](https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces) - for how to enable this. - -The following charts are included, which are mostly derived from the metrics -listed -[here](https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#riak-metrics-to-graph). - -1. **Throughput** in operations/s - -- **KV operations** - - gets - - puts - -- **Data type updates** - - counters - - sets - - maps - -- **Search queries** - - queries - -- **Search documents** - - indexed - -- **Strong consistency operations** - - gets - - puts - -2. **Latency** in milliseconds - -- **KV latency** of the past minute - - get (mean, median, 95th / 99th / 100th percentile) - - put (mean, median, 95th / 99th / 100th percentile) - -- **Data type latency** of the past minute - - counter_merge (mean, median, 95th / 99th / 100th percentile) - - set_merge (mean, median, 95th / 99th / 100th percentile) - - map_merge (mean, median, 95th / 99th / 100th percentile) - -- **Search latency** of the past minute - - query (median, min, max, 95th / 99th percentile) - - index (median, min, max, 95th / 99th percentile) - -- **Strong consistency latency** of the past minute - - get (mean, median, 95th / 99th / 100th percentile) - - put (mean, median, 95th / 99th / 100th percentile) - -3. **Erlang VM metrics** - -- **System counters** - - processes - -- **Memory allocation** in MB - - processes.allocated - - processes.used - -4. 
**General load / health metrics** - -- **Siblings encountered in KV operations** during the past minute - - get (mean, median, 95th / 99th / 100th percentile) - -- **Object size in KV operations** during the past minute in KB - - get (mean, median, 95th / 99th / 100th percentile) - -- **Message queue length** in unprocessed messages - - vnodeq_size (mean, median, 95th / 99th / 100th percentile) - -- **Index operations** encountered by Search - - errors - -- **Protocol buffer connections** - - active - -- **Repair operations coordinated by this node** - - read - -- **Active finite state machines by kind** - - get - - put - - secondary_index - - list_keys - -- **Rejected finite state machines** - - get - - put - -- **Number of writes to Search failed due to bad data format by reason** - - bad_entry - - extract_fail - -## Configuration - -Edit the `python.d/riakkv.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/riakkv.conf -``` - -The module needs to be passed the full URL to Riak's stats endpoint. -For example: - -```yaml -myriak: - url: http://myriak.example.com:8098/stats -``` - -With no explicit configuration given, the module will attempt to connect to -`http://localhost:8098/stats`. - -The default update frequency for the plugin is set to 2 seconds as Riak -internally updates the metrics every second. If we were to update the metrics -every second, the resulting graph would contain odd jitter. -### Troubleshooting - -To troubleshoot issues with the `riakkv` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `riakkv` module in debug mode: - -```bash -./python.d.plugin riakkv debug trace -``` - diff --git a/collectors/python.d.plugin/riakkv/README.md b/collectors/python.d.plugin/riakkv/README.md new file mode 120000 index 00000000000000..f43ece09ba3b0f --- /dev/null +++ b/collectors/python.d.plugin/riakkv/README.md @@ -0,0 +1 @@ +integrations/riakkv.md \ No newline at end of file diff --git a/collectors/python.d.plugin/riakkv/integrations/riakkv.md b/collectors/python.d.plugin/riakkv/integrations/riakkv.md new file mode 100644 index 00000000000000..2e8279bc3122f9 --- /dev/null +++ b/collectors/python.d.plugin/riakkv/integrations/riakkv.md @@ -0,0 +1,220 @@ + + +# RiakKV + + + + + +Plugin: python.d.plugin +Module: riakkv + + + +## Overview + +This collector monitors RiakKV metrics about throughput, latency, resources and more.' + + +This collector reads the database stats from the `/stats` endpoint. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +If the /stats endpoint is accessible, RiakKV instances on the local host running on port 8098 will be autodetected. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per RiakKV instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| riak.kv.throughput | gets, puts | operations/s | +| riak.dt.vnode_updates | counters, sets, maps | operations/s | +| riak.search | queries | queries/s | +| riak.search.documents | indexed | documents/s | +| riak.consistent.operations | gets, puts | operations/s | +| riak.kv.latency.get | mean, median, 95, 99, 100 | ms | +| riak.kv.latency.put | mean, median, 95, 99, 100 | ms | +| riak.dt.latency.counter_merge | mean, median, 95, 99, 100 | ms | +| riak.dt.latency.set_merge | mean, median, 95, 99, 100 | ms | +| riak.dt.latency.map_merge | mean, median, 95, 99, 100 | ms | +| riak.search.latency.query | median, min, 95, 99, 999, max | ms | +| riak.search.latency.index | median, min, 95, 99, 999, max | ms | +| riak.consistent.latency.get | mean, median, 95, 99, 100 | ms | +| riak.consistent.latency.put | mean, median, 95, 99, 100 | ms | +| riak.vm | processes | total | +| riak.vm.memory.processes | allocated, used | MB | +| riak.kv.siblings_encountered.get | mean, median, 95, 99, 100 | siblings | +| riak.kv.objsize.get | mean, median, 95, 99, 100 | KB | +| riak.search.vnodeq_size | mean, median, 95, 99, 100 | messages | +| riak.search.index | errors | errors | +| riak.core.protobuf_connections | active | connections | +| riak.core.repairs | read | repairs | +| riak.core.fsm_active | get, put, secondary index, list keys | fsms | +| riak.core.fsm_rejected | get, put | fsms | +| riak.search.index | bad_entry, extract_fail | writes | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ riakkv_1h_kv_get_mean_latency ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to client over the last hour | +| [ riakkv_kv_get_slow ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour | +| [ riakkv_1h_kv_put_mean_latency ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last hour | +| [ riakkv_kv_put_slow ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour | +| [ riakkv_vm_high_process_count 
](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.vm | number of processes running in the Erlang VM | +| [ riakkv_list_keys_active ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.core.fsm_active | number of currently running list keys finite state machines | + + +## Setup + +### Prerequisites + +#### Configure RiakKV to enable /stats endpoint + +You can follow the RiakKV configuration reference documentation for how to enable this. + +Source : https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/riakkv.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/riakkv.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| url | The url of the server | no | yes | + +
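Because the collector simply reads the JSON document served at `/stats`, you can preview what it will see with a few lines of Python. This is an illustrative sketch, not collector code; the URL is the assumed auto-detection default (`http://localhost:8098/stats`), and the names printed are whatever your node actually exposes.

```python
# Illustrative peek at the Riak stats document the riakkv collector reads.
# Assumes the auto-detected default URL mentioned above.
import json
from urllib.request import urlopen

URL = "http://localhost:8098/stats"

with urlopen(URL, timeout=2) as response:
    stats = json.load(response)

print(f"{len(stats)} stats exposed by {URL}")
# Show a handful of metric names to compare against the charts listed above.
for name in sorted(stats)[:10]:
    print(f"  {name} = {stats[name]}")
```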
+ +#### Examples + +##### Basic (default) + +A basic example configuration per job + +```yaml +local: +url: 'http://localhost:8098/stats' + +``` +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
Config + +```yaml +local: + url: 'http://localhost:8098/stats' + +remote: + url: 'http://192.0.2.1:8098/stats' + +``` +
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `riakkv` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin riakkv debug trace + ``` + + diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md deleted file mode 100644 index 8fe133fd510794..00000000000000 --- a/collectors/python.d.plugin/samba/README.md +++ /dev/null @@ -1,144 +0,0 @@ - - -# Samba collector - -Monitors the performance metrics of Samba file sharing using `smbstatus` command-line tool. - -Executed commands: - -- `sudo -n smbstatus -P` - -## Requirements - -- `smbstatus` program -- `sudo` program -- `smbd` must be compiled with profiling enabled -- `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level` - -The module uses `smbstatus`, which can only be executed by `root`. It uses -`sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a -password. - -- Add to your `/etc/sudoers` file: - -`which smbstatus` shows the full path to the binary. - -```bash -netdata ALL=(root) NOPASSWD: /path/to/smbstatus -``` - -- Reset Netdata's systemd - unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux - distributions with systemd) - -The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute `smbstatus` using `sudo`. - - -As the `root` user, do the following: - -```cmd -mkdir /etc/systemd/system/netdata.service.d -echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf -systemctl daemon-reload -systemctl restart netdata.service -``` - -## Charts - -1. **Syscall R/Ws** in kilobytes/s - - - sendfile - - recvfile - -2. **Smb2 R/Ws** in kilobytes/s - - - readout - - writein - - readin - - writeout - -3. **Smb2 Create/Close** in operations/s - - - create - - close - -4. **Smb2 Info** in operations/s - - - getinfo - - setinfo - -5. **Smb2 Find** in operations/s - - - find - -6. **Smb2 Notify** in operations/s - - - notify - -7. **Smb2 Lesser Ops** as counters - - - tcon - - negprot - - tdis - - cancel - - logoff - - flush - - lock - - keepalive - - break - - sessetup - -## Enable the collector - -The `samba` collector is disabled by default. To enable it, use `edit-config` from the -Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` -file. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d.conf -``` - -Change the value of the `samba` setting to `yes`. 
Save the file and restart the Netdata Agent with `sudo systemctl -restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. - -## Configuration - -Edit the `python.d/samba.conf` configuration file using `edit-config` from the -Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/samba.conf -``` - - - - -### Troubleshooting - -To troubleshoot issues with the `samba` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `samba` module in debug mode: - -```bash -./python.d.plugin samba debug trace -``` - diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md new file mode 120000 index 00000000000000..3b63bbab6615bf --- /dev/null +++ b/collectors/python.d.plugin/samba/README.md @@ -0,0 +1 @@ +integrations/samba.md \ No newline at end of file diff --git a/collectors/python.d.plugin/samba/integrations/samba.md b/collectors/python.d.plugin/samba/integrations/samba.md new file mode 100644 index 00000000000000..1bd1664ee69440 --- /dev/null +++ b/collectors/python.d.plugin/samba/integrations/samba.md @@ -0,0 +1,221 @@ + + +# Samba + + + + + +Plugin: python.d.plugin +Module: samba + + + +## Overview + +This collector monitors the performance metrics of Samba file sharing. + +It is using the `smbstatus` command-line tool. + +Executed commands: + +- `sudo -n smbstatus -P` + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + +`smbstatus` is used, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password. + + +### Default Behavior + +#### Auto-Detection + +After all the permissions are satisfied, the `smbstatus -P` binary is executed. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Samba instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| syscall.rw | sendfile, recvfile | KiB/s | +| smb2.rw | readout, writein, readin, writeout | KiB/s | +| smb2.create_close | create, close | operations/s | +| smb2.get_set_info | getinfo, setinfo | operations/s | +| smb2.find | find | operations/s | +| smb2.notify | notify | operations/s | +| smb2.sm_counters | tcon, negprot, tdis, cancel, logoff, flush, lock, keepalive, break, sessetup | count | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable the samba collector + +The `samba` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file. + +```bash +cd /etc/netdata # Replace this path with your Netdata config directory, if different +sudo ./edit-config python.d.conf +``` +Change the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. + + +#### Permissions and programs + +To run the collector you need: + +- `smbstatus` program +- `sudo` program +- `smbd` must be compiled with profiling enabled +- `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level` + +The module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password. + +- add to your `/etc/sudoers` file: + + `which smbstatus` shows the full path to the binary. + + ```bash + netdata ALL=(root) NOPASSWD: /path/to/smbstatus + ``` + +- Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd) + + The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute `smbstatus` using `sudo`. + + + As the `root` user, do the following: + + ```cmd + mkdir /etc/systemd/system/netdata.service.d + echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf + systemctl daemon-reload + systemctl restart netdata.service + ``` + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/samba.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/samba.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. 
+ +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | + +
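To sanity-check the sudoers rule described in the prerequisites, you can try the exact command the collector relies on and confirm it runs without a password prompt. The sketch below is illustrative only; run it as the `netdata` user (for example after `sudo -u netdata -s`).

```python
# Illustrative check that `sudo -n smbstatus -P` works without a password,
# as required by the prerequisites above. Not part of Netdata itself.
import subprocess

CMD = ["sudo", "-n", "smbstatus", "-P"]

try:
    result = subprocess.run(CMD, capture_output=True, text=True, timeout=10)
except FileNotFoundError:
    print("sudo is not installed or not in PATH")
else:
    if result.returncode == 0:
        lines = result.stdout.splitlines()
        print(f"smbstatus -P produced {len(lines)} lines of profiling output")
    else:
        print(f"command failed (exit {result.returncode}): {result.stderr.strip()}")
```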
+ +#### Examples + +##### Basic + +A basic example configuration. + +
Config + +```yaml +my_job_name: + name: my_name + update_every: 1 + +``` +
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `samba` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin samba debug trace + ``` + + diff --git a/collectors/python.d.plugin/samba/metadata.yaml b/collectors/python.d.plugin/samba/metadata.yaml index 43bca208ed194c..ec31e047555773 100644 --- a/collectors/python.d.plugin/samba/metadata.yaml +++ b/collectors/python.d.plugin/samba/metadata.yaml @@ -23,9 +23,9 @@ modules: metrics_description: "This collector monitors the performance metrics of Samba file sharing." method_description: | It is using the `smbstatus` command-line tool. - + Executed commands: - + - `sudo -n smbstatus -P` supported_platforms: include: [] @@ -44,32 +44,41 @@ modules: setup: prerequisites: list: + - title: Enable the samba collector + description: | + The `samba` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file. + + ```bash + cd /etc/netdata # Replace this path with your Netdata config directory, if different + sudo ./edit-config python.d.conf + ``` + Change the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. - title: Permissions and programs description: | To run the collector you need: - + - `smbstatus` program - `sudo` program - `smbd` must be compiled with profiling enabled - `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level` - + The module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password. - + - add to your `/etc/sudoers` file: - + `which smbstatus` shows the full path to the binary. - + ```bash netdata ALL=(root) NOPASSWD: /path/to/smbstatus ``` - + - Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd) - + The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute `smbstatus` using `sudo`. - - + + As the `root` user, do the following: - + ```cmd mkdir /etc/systemd/system/netdata.service.d echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf @@ -82,14 +91,14 @@ modules: options: description: | There are 2 sections: - + * Global variables * One or more JOBS that can define multiple different instances to monitor. 
- + The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - + Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - + Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. folding: title: "Config options" diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md deleted file mode 100644 index 7ee31bd6728015..00000000000000 --- a/collectors/python.d.plugin/sensors/README.md +++ /dev/null @@ -1,55 +0,0 @@ - - -# Linux machine sensors collector - -Reads system sensors information (temperature, voltage, electric current, power, etc.). - -Charts are created dynamically. - -## Configuration - -Edit the `python.d/sensors.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/sensors.conf -``` - -### possible issues - -There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) -when ACPI sensors are being accessed. We are tracking such cases in -issue [#827](https://github.com/netdata/netdata/issues/827). Please join this discussion for help. - -When `lm-sensors` doesn't work on your device (e.g. for RPi temperatures), -use [the legacy bash collector](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/sensors/README.md) - - -### Troubleshooting - -To troubleshoot issues with the `sensors` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `sensors` module in debug mode: - -```bash -./python.d.plugin sensors debug trace -``` - diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md new file mode 120000 index 00000000000000..4e92b088274370 --- /dev/null +++ b/collectors/python.d.plugin/sensors/README.md @@ -0,0 +1 @@ +integrations/linux_sensors_lm-sensors.md \ No newline at end of file diff --git a/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md b/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md new file mode 100644 index 00000000000000..e426c8c839b989 --- /dev/null +++ b/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md @@ -0,0 +1,187 @@ + + +# Linux Sensors (lm-sensors) + + + + + +Plugin: python.d.plugin +Module: sensors + + + +## Overview + +Examine Linux Sensors metrics with Netdata for insights into hardware health and performance. + +Enhance your system's reliability with real-time hardware health insights. + + +Reads system sensors information (temperature, voltage, electric current, power, etc.) 
via [lm-sensors](https://hwmon.wiki.kernel.org/lm_sensors). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +The following type of sensors are auto-detected: +- temperature - fan - voltage - current - power - energy - humidity + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per chip + +Metrics related to chips. Each chip provides a set of the following metrics, each having the chip name in the metric name as reported by `sensors -u`. + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| sensors.temperature | a dimension per sensor | Celsius | +| sensors.voltage | a dimension per sensor | Volts | +| sensors.current | a dimension per sensor | Ampere | +| sensors.power | a dimension per sensor | Watt | +| sensors.fan | a dimension per sensor | Rotations/min | +| sensors.energy | a dimension per sensor | Joule | +| sensors.humidity | a dimension per sensor | Percent | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/sensors.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/sensors.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| types | The types of sensors to collect. | temperature, fan, voltage, current, power, energy, humidity | yes | +| update_every | Sets the default data collection frequency. | 1 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | + +
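To decide which families to keep in the `types` option, it can help to see what `sensors -u` reports on the host. The sketch below is illustrative, not collector code; the prefix-to-type mapping follows common lm-sensors naming (for example, voltages appear as `inN`) and is an assumption of this sketch.

```python
# Illustrative summary of sensor families reported by `sensors -u`.
# The prefix mapping below is an assumption based on common lm-sensors naming.
import re
import subprocess
from collections import Counter

PREFIXES = {
    "temp": "temperature",
    "fan": "fan",
    "in": "voltage",
    "curr": "current",
    "power": "power",
    "energy": "energy",
    "humidity": "humidity",
}

output = subprocess.run(["sensors", "-u"], capture_output=True, text=True).stdout

counts = Counter()
# Feature lines look like "  temp1_input: 45.000"; count them by prefix.
for match in re.finditer(r"^\s+([a-z]+)\d*_input:", output, flags=re.MULTILINE):
    counts[PREFIXES.get(match.group(1), match.group(1))] += 1

for sensor_type, count in sorted(counts.items()):
    print(f"{sensor_type}: {count} sensor(s)")
```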
+ +#### Examples + +##### Default + +Default configuration. + +```yaml +types: + - temperature + - fan + - voltage + - current + - power + - energy + - humidity + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `sensors` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin sensors debug trace + ``` + +### lm-sensors doesn't work on your device + + + +### ACPI ring buffer errors are printed + + + + diff --git a/collectors/python.d.plugin/sensors/metadata.yaml b/collectors/python.d.plugin/sensors/metadata.yaml index c3f681915a9323..d7cb2206fd4dc1 100644 --- a/collectors/python.d.plugin/sensors/metadata.yaml +++ b/collectors/python.d.plugin/sensors/metadata.yaml @@ -117,7 +117,16 @@ modules: - humidity troubleshooting: problems: - list: [] + list: + - name: lm-sensors doesn't work on your device + description: | + When `lm-sensors` doesn't work on your device (e.g. for RPi temperatures), + use [the legacy bash collector](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/sensors/README.md) + - name: ACPI ring buffer errors are printed + description: | + There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) + when ACPI sensors are being accessed. We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827). + Please join this discussion for help. alerts: [] metrics: folding: diff --git a/collectors/python.d.plugin/sensors/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py index 701bf641472cf0..0d9de3750fad9c 100644 --- a/collectors/python.d.plugin/sensors/sensors.chart.py +++ b/collectors/python.d.plugin/sensors/sensors.chart.py @@ -66,7 +66,7 @@ LIMITS = { 'temperature': [-127, 1000], - 'voltage': [-127, 127], + 'voltage': [-400, 400], 'current': [-127, 127], 'fan': [0, 65535] } diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md deleted file mode 100644 index e79348b05fd5c4..00000000000000 --- a/collectors/python.d.plugin/smartd_log/README.md +++ /dev/null @@ -1,148 +0,0 @@ - - -# Storage devices collector - -Monitors `smartd` log files to collect HDD/SSD S.M.A.R.T attributes. - -## Requirements - -- `smartmontools` - -It produces following charts for SCSI devices: - -1. **Read Error Corrected** - -2. **Read Error Uncorrected** - -3. **Write Error Corrected** - -4. **Write Error Uncorrected** - -5. **Verify Error Corrected** - -6. **Verify Error Uncorrected** - -7. **Temperature** - -For ATA devices: - -1. **Read Error Rate** - -2. **Seek Error Rate** - -3. **Soft Read Error Rate** - -4. **Write Error Rate** - -5. **SATA Interface Downshift** - -6. **UDMA CRC Error Count** - -7. **Throughput Performance** - -8. **Seek Time Performance** - -9. **Start/Stop Count** - -10. **Power-On Hours Count** - -11. **Power Cycle Count** - -12. **Unexpected Power Loss** - -13. **Spin-Up Time** - -14. **Spin-up Retries** - -15. **Calibration Retries** - -16. 
**Temperature** - -17. **Reallocated Sectors Count** - -18. **Reserved Block Count** - -19. **Program Fail Count** - -20. **Erase Fail Count** - -21. **Wear Leveller Worst Case Erase Count** - -22. **Unused Reserved NAND Blocks** - -23. **Reallocation Event Count** - -24. **Current Pending Sector Count** - -25. **Offline Uncorrectable Sector Count** - -26. **Percent Lifetime Used** - -## prerequisite - -`smartd` must be running with `-A` option to write smartd attribute information to files. - -For this you need to set `smartd_opts` (or `SMARTD_ARGS`, check _smartd.service_ content) in `/etc/default/smartmontools`: - -``` -# dump smartd attrs info every 600 seconds -smartd_opts="-A /var/log/smartd/ -i 600" -``` - -You may need to create the smartd directory before smartd will write to it: - -```sh -mkdir -p /var/log/smartd -``` - -Otherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontools` (default location). See also for more info on the `-A --attributelog=PREFIX` command. - -`smartd` appends logs at every run. It's strongly recommended to use `logrotate` for smartd files. - -## Configuration - -Edit the `python.d/smartd_log.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/smartd_log.conf -``` - -```yaml -local: - log_path : '/var/log/smartd/' -``` - -If no configuration is given, module will attempt to read log files in `/var/log/smartd/` directory. - - - - -### Troubleshooting - -To troubleshoot issues with the `smartd_log` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `smartd_log` module in debug mode: - -```bash -./python.d.plugin smartd_log debug trace -``` - diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md new file mode 120000 index 00000000000000..63aad6c8506180 --- /dev/null +++ b/collectors/python.d.plugin/smartd_log/README.md @@ -0,0 +1 @@ +integrations/s.m.a.r.t..md \ No newline at end of file diff --git a/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md b/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md new file mode 100644 index 00000000000000..5c5b569e9dc8df --- /dev/null +++ b/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md @@ -0,0 +1,223 @@ + + +# S.M.A.R.T. + + + + + +Plugin: python.d.plugin +Module: smartd_log + + + +## Overview + +This collector monitors HDD/SSD S.M.A.R.T. metrics about drive health and performance. + + +It reads `smartd` log files to collect the metrics. + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. 
+ + +### Default Behavior + +#### Auto-Detection + +Upon satisfying the prerequisites, the collector will auto-detect metrics if written in either `/var/log/smartd/` or `/var/lib/smartmontools/`. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + +The metrics listed below are split in terms of availability on device type, SCSI or ATA. + +### Per S.M.A.R.T. instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | SCSI | ATA | +|:------|:----------|:----|:---:|:---:| +| smartd_log.read_error_rate | a dimension per device | value | | • | +| smartd_log.seek_error_rate | a dimension per device | value | | • | +| smartd_log.soft_read_error_rate | a dimension per device | errors | | • | +| smartd_log.write_error_rate | a dimension per device | value | | • | +| smartd_log.read_total_err_corrected | a dimension per device | errors | • | | +| smartd_log.read_total_unc_errors | a dimension per device | errors | • | | +| smartd_log.write_total_err_corrected | a dimension per device | errors | • | | +| smartd_log.write_total_unc_errors | a dimension per device | errors | • | | +| smartd_log.verify_total_err_corrected | a dimension per device | errors | • | | +| smartd_log.verify_total_unc_errors | a dimension per device | errors | • | | +| smartd_log.sata_interface_downshift | a dimension per device | events | | • | +| smartd_log.udma_crc_error_count | a dimension per device | errors | | • | +| smartd_log.throughput_performance | a dimension per device | value | | • | +| smartd_log.seek_time_performance | a dimension per device | value | | • | +| smartd_log.start_stop_count | a dimension per device | events | | • | +| smartd_log.power_on_hours_count | a dimension per device | hours | | • | +| smartd_log.power_cycle_count | a dimension per device | events | | • | +| smartd_log.unexpected_power_loss | a dimension per device | events | | • | +| smartd_log.spin_up_time | a dimension per device | ms | | • | +| smartd_log.spin_up_retries | a dimension per device | retries | | • | +| smartd_log.calibration_retries | a dimension per device | retries | | • | +| smartd_log.airflow_temperature_celsius | a dimension per device | celsius | | • | +| smartd_log.temperature_celsius | a dimension per device | celsius | • | • | +| smartd_log.reallocated_sectors_count | a dimension per device | sectors | | • | +| smartd_log.reserved_block_count | a dimension per device | percentage | | • | +| smartd_log.program_fail_count | a dimension per device | errors | | • | +| smartd_log.erase_fail_count | a dimension per device | failures | | • | +| smartd_log.wear_leveller_worst_case_erase_count | a dimension per device | erases | | • | +| smartd_log.unused_reserved_nand_blocks | a dimension per device | blocks | | • | +| smartd_log.reallocation_event_count | a dimension per device | events | | • | +| smartd_log.current_pending_sector_count | a dimension per device | sectors | | • | +| smartd_log.offline_uncorrectable_sector_count | a dimension per device | sectors | | • | +| smartd_log.percent_lifetime_used | a dimension per device | percentage | | • | +| 
smartd_log.media_wearout_indicator | a dimension per device | percentage | | • | +| smartd_log.nand_writes_1gib | a dimension per device | GiB | | • | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Configure `smartd` to write attribute information to files. + +`smartd` must be running with `-A` option to write `smartd` attribute information to files. + +For this you need to set `smartd_opts` (or `SMARTD_ARGS`, check _smartd.service_ content) in `/etc/default/smartmontools`: + +``` +# dump smartd attrs info every 600 seconds +smartd_opts="-A /var/log/smartd/ -i 600" +``` + +You may need to create the smartd directory before smartd will write to it: + +```sh +mkdir -p /var/log/smartd +``` + +Otherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontools` (default location). See also for more info on the `-A --attributelog=PREFIX` command. + +`smartd` appends logs at every run. It's strongly recommended to use `logrotate` for smartd files. + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/smartd_log.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/smartd_log.conf +``` +#### Options + +This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize it's data collection behavior. + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| log_path | path to smartd log files. | /var/log/smartd | yes | +| exclude_disks | Space-separated patterns. If the pattern is in the drive name, the module will not collect data for it. | | no | +| age | Time in minutes since the last dump to file. | 30 | no | +| update_every | Sets the default data collection frequency. | 1 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | + +
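Beyond the basic `log_path`-only job shown in the examples below, the filtering options in the table can be combined in a single job. A minimal, hedged sketch (the job name, device patterns and values are illustrative, not recommendations):

```yaml
# Illustrative job: read smartd attribute dumps from the default path,
# skip devices whose names match the given patterns, and ignore dumps
# older than 10 minutes.
filtered_disks:
  log_path: '/var/log/smartd/'
  exclude_disks: 'nvme0 sdb'   # space-separated name patterns (placeholders)
  age: 10                      # minutes since the last dump to the file
```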
+ +#### Examples + +##### Basic + +A basic configuration example. + +```yaml +custom: + name: smartd_log + log_path: '/var/log/smartd/' + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `smartd_log` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin smartd_log debug trace + ``` + + diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py index dc4e95decd0bb8..a896164dfc6580 100644 --- a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py +++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py @@ -39,6 +39,7 @@ ATTR172 = '172' ATTR173 = '173' ATTR174 = '174' +ATTR177 = '177' ATTR180 = '180' ATTR183 = '183' ATTR190 = '190' @@ -50,6 +51,8 @@ ATTR202 = '202' ATTR206 = '206' ATTR233 = '233' +ATTR241 = '241' +ATTR242 = '242' ATTR249 = '249' ATTR_READ_ERR_COR = 'read-total-err-corrected' ATTR_READ_ERR_UNC = 'read-total-unc-errors' @@ -114,6 +117,8 @@ 'offline_uncorrectable_sector_count', 'percent_lifetime_used', 'media_wearout_indicator', + 'total_lbas_written', + 'total_lbas_read', ] CHARTS = { @@ -329,7 +334,7 @@ 'media_wearout_indicator': { 'options': [None, 'Media Wearout Indicator', 'percentage', 'wear', 'smartd_log.media_wearout_indicator', 'line'], 'lines': [], - 'attrs': [ATTR233], + 'attrs': [ATTR233, ATTR177], 'algo': ABSOLUTE, }, 'nand_writes_1gib': { @@ -338,6 +343,18 @@ 'attrs': [ATTR249], 'algo': ABSOLUTE, }, + 'total_lbas_written': { + 'options': [None, 'Total LBAs Written', 'sectors', 'wear', 'smartd_log.total_lbas_written', 'line'], + 'lines': [], + 'attrs': [ATTR241], + 'algo': ABSOLUTE, + }, + 'total_lbas_read': { + 'options': [None, 'Total LBAs Read', 'sectors', 'wear', 'smartd_log.total_lbas_read', 'line'], + 'lines': [], + 'attrs': [ATTR242], + 'algo': ABSOLUTE, + }, } # NOTE: 'parse_temp' decodes ATA 194 raw value. Not heavily tested. Written by @Ferroin @@ -519,6 +536,7 @@ def ata_attribute_factory(value): elif name in [ ATTR1, ATTR7, + ATTR177, ATTR202, ATTR206, ATTR233, diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md deleted file mode 100644 index f39d9bab679e6b..00000000000000 --- a/collectors/python.d.plugin/spigotmc/README.md +++ /dev/null @@ -1,61 +0,0 @@ - - -# SpigotMC collector - -Performs basic monitoring for Spigot Minecraft servers. - -It provides two charts, one tracking server-side ticks-per-second in -1, 5 and 15 minute averages, and one tracking the number of currently -active users. - -This is not compatible with Spigot plugins which change the format of -the data returned by the `tps` or `list` console commands. - -## Configuration - -Edit the `python.d/spigotmc.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. 
- -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/spigotmc.conf -``` - -```yaml -host: localhost -port: 25575 -password: pass -``` - -By default, a connection to port 25575 on the local system is attempted with an empty password. - - - - -### Troubleshooting - -To troubleshoot issues with the `spigotmc` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `spigotmc` module in debug mode: - -```bash -./python.d.plugin spigotmc debug trace -``` - diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md new file mode 120000 index 00000000000000..66e5c9c47d0b33 --- /dev/null +++ b/collectors/python.d.plugin/spigotmc/README.md @@ -0,0 +1 @@ +integrations/spigotmc.md \ No newline at end of file diff --git a/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md b/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md new file mode 100644 index 00000000000000..55ec8fa224947b --- /dev/null +++ b/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md @@ -0,0 +1,216 @@ + + +# SpigotMC + + + + + +Plugin: python.d.plugin +Module: spigotmc + + + +## Overview + +This collector monitors SpigotMC server performance, in the form of ticks per second average, memory utilization, and active users. + + +It sends the `tps`, `list` and `online` commands to the Server, and gathers the metrics from the responses. + + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per SpigotMC instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| spigotmc.tps | 1 Minute Average, 5 Minute Average, 15 Minute Average | ticks | +| spigotmc.users | Users | users | +| spigotmc.mem | used, allocated, max | MiB | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable the Remote Console Protocol + +Under your SpigotMC server's `server.properties` configuration file, you should set `enable-rcon` to `true`. + +This will allow the Server to listen and respond to queries over the rcon protocol. 
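As a reference sketch of that prerequisite, the relevant `server.properties` entries would look like the following; `enable-rcon` is the setting named above, while `rcon.port` and `rcon.password` are the standard companion keys (the values shown are illustrative):

```
# Enable the remote console protocol (values are examples)
enable-rcon=true
rcon.port=25575
rcon.password=choose-a-strong-password
```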
+ + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/spigotmc.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/spigotmc.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 1 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | +| host | The host's IP to connect to. | localhost | yes | +| port | The port the remote console is listening on. | 25575 | yes | +| password | Remote console password if any. | | no | + +
+ +#### Examples + +##### Basic + +A basic configuration example. + +```yaml +local: + name: local_server + host: 127.0.0.1 + port: 25575 + +``` +##### Basic Authentication + +An example using basic password for authentication with the remote console. + +
Config + +```yaml +local: + name: local_server_pass + host: 127.0.0.1 + port: 25575 + password: 'foobar' + +``` +
+ +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
Config + +```yaml +local_server: + name : my_local_server + host : 127.0.0.1 + port: 25575 + +remote_server: + name : another_remote_server + host : 192.0.2.1 + port: 25575 + +``` +
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `spigotmc` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin spigotmc debug trace + ``` + + diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md deleted file mode 100644 index da5349184f56c5..00000000000000 --- a/collectors/python.d.plugin/squid/README.md +++ /dev/null @@ -1,81 +0,0 @@ - - -# Squid collector - -Monitors one or more squid instances depending on configuration. - -It produces following charts: - -1. **Client Bandwidth** in kilobits/s - - - in - - out - - hits - -2. **Client Requests** in requests/s - - - requests - - hits - - errors - -3. **Server Bandwidth** in kilobits/s - - - in - - out - -4. **Server Requests** in requests/s - - - requests - - errors - -## Configuration - -Edit the `python.d/squid.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/squid.conf -``` - -```yaml -priority : 50000 - -local: - request : 'cache_object://localhost:3128/counters' - host : 'localhost' - port : 3128 -``` - -Without any configuration module will try to autodetect where squid presents its `counters` data - - - - -### Troubleshooting - -To troubleshoot issues with the `squid` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `squid` module in debug mode: - -```bash -./python.d.plugin squid debug trace -``` - diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md new file mode 120000 index 00000000000000..c4e5a03d773053 --- /dev/null +++ b/collectors/python.d.plugin/squid/README.md @@ -0,0 +1 @@ +integrations/squid.md \ No newline at end of file diff --git a/collectors/python.d.plugin/squid/integrations/squid.md b/collectors/python.d.plugin/squid/integrations/squid.md new file mode 100644 index 00000000000000..6599826da606fb --- /dev/null +++ b/collectors/python.d.plugin/squid/integrations/squid.md @@ -0,0 +1,199 @@ + + +# Squid + + + + + +Plugin: python.d.plugin +Module: squid + + + +## Overview + +This collector monitors statistics about the Squid Clients and Servers, like bandwidth and requests. + + +It collects metrics from the endpoint where Squid exposes its `counters` data. 
+ +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, this collector will try to autodetect where Squid presents its `counters` data, by trying various configurations. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Squid instance + +These metrics refer to each monitored Squid instance. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| squid.clients_net | in, out, hits | kilobits/s | +| squid.clients_requests | requests, hits, errors | requests/s | +| squid.servers_net | in, out | kilobits/s | +| squid.servers_requests | requests, errors | requests/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Configure Squid's Cache Manager + +Take a look at [Squid's official documentation](https://wiki.squid-cache.org/Features/CacheManager/Index#controlling-access-to-the-cache-manager) on how to configure access to the Cache Manager. + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/squid.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/squid.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 1 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no | +| host | The host to connect to. | | yes | +| port | The port to connect to. | | yes | +| request | The URL to request from Squid. | | yes | + +
+ +#### Examples + +##### Basic + +A basic configuration example. + +```yaml +example_job_name: + name: 'local' + host: 'localhost' + port: 3128 + request: 'cache_object://localhost:3128/counters' + +``` +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
Config + +```yaml +local_job: + name: 'local' + host: '127.0.0.1' + port: 3128 + request: 'cache_object://127.0.0.1:3128/counters' + +remote_job: + name: 'remote' + host: '192.0.2.1' + port: 3128 + request: 'cache_object://192.0.2.1:3128/counters' + +``` +
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `squid` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin squid debug trace + ``` + + diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md deleted file mode 100644 index 923d6238f78461..00000000000000 --- a/collectors/python.d.plugin/tomcat/README.md +++ /dev/null @@ -1,76 +0,0 @@ - - -# Apache Tomcat collector - -Presents memory utilization of tomcat containers. - -Charts: - -1. **Requests** per second - - - accesses - -2. **Volume** in KB/s - - - volume - -3. **Threads** - - - current - - busy - -4. **JVM Free Memory** in MB - - - jvm - -## Configuration - -Edit the `python.d/tomcat.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/tomcat.conf -``` - -```yaml -localhost: - name : 'local' - url : 'http://127.0.0.1:8080/manager/status?XML=true' - user : 'tomcat_username' - pass : 'secret_tomcat_password' -``` - -Without configuration, module attempts to connect to `http://localhost:8080/manager/status?XML=true`, without any credentials. -So it will probably fail. - - - - -### Troubleshooting - -To troubleshoot issues with the `tomcat` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `tomcat` module in debug mode: - -```bash -./python.d.plugin tomcat debug trace -``` - diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md new file mode 120000 index 00000000000000..997090c35102a9 --- /dev/null +++ b/collectors/python.d.plugin/tomcat/README.md @@ -0,0 +1 @@ +integrations/tomcat.md \ No newline at end of file diff --git a/collectors/python.d.plugin/tomcat/integrations/tomcat.md b/collectors/python.d.plugin/tomcat/integrations/tomcat.md new file mode 100644 index 00000000000000..883f29dd31ca11 --- /dev/null +++ b/collectors/python.d.plugin/tomcat/integrations/tomcat.md @@ -0,0 +1,203 @@ + + +# Tomcat + + + + + +Plugin: python.d.plugin +Module: tomcat + + + +## Overview + +This collector monitors Tomcat metrics about bandwidth, processing time, threads and more. + + +It parses the information provided by the http endpoint of the `/manager/status` in XML format + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +You need to provide a username and password to access the webserver's status page. Create a separate user with read-only rights for this particular endpoint. + +### Default Behavior + +#### Auto-Detection + +If the Netdata Agent and the Tomcat webserver are on the same host, the module attempts, without any configuration, to connect to http://localhost:8080/manager/status?XML=true without credentials, so it will probably fail. + +#### Limits + +This module does not support SSL communication. If you want a Netdata Agent to monitor a Tomcat deployment, you shouldn't monitor it over a public network (the public internet), because credentials are passed by Netdata over an unencrypted connection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Tomcat instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| tomcat.accesses | accesses, errors | requests/s | +| tomcat.bandwidth | sent, received | KiB/s | +| tomcat.processing_time | processing time | seconds | +| tomcat.threads | current, busy | current threads | +| tomcat.jvm | free, eden, survivor, tenured, code cache, compressed, metaspace | MiB | +| tomcat.jvm_eden | used, committed, max | MiB | +| tomcat.jvm_survivor | used, committed, max | MiB | +| tomcat.jvm_tenured | used, committed, max | MiB | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Create a read-only `netdata` user, to monitor the `/status` endpoint. + +This is necessary for configuring the collector. + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/tomcat.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/tomcat.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options per job + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| url | The URL of the Tomcat server's status endpoint. Always add the suffix ?XML=true. | no | yes | +| user | A valid user with read permission to access the /manager/status endpoint of the server. Required if the endpoint is password protected | no | no | +| pass | A valid password for the user in question. Required if the endpoint is password protected | no | no | +| connector_name | The connector component that communicates with a web connector via the AJP protocol, e.g ajp-bio-8009 | | no | + +
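None of the bundled examples below use `connector_name`; as a hedged sketch, a job restricted to a single AJP connector could look like this (the connector name reuses the table's own example, and the credentials are placeholders):

```yaml
# Illustrative job limiting collection to one AJP connector
ajp_connector:
  name: 'local'
  url: 'http://127.0.0.1:8080/manager/status?XML=true'
  user: 'tomcat_username'        # placeholder credentials
  pass: 'secret_tomcat_password'
  connector_name: 'ajp-bio-8009'
```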
+ +#### Examples + +##### Basic + +A basic example configuration + +```yaml +localhost: + name : 'local' + url : 'http://localhost:8080/manager/status?XML=true' + +``` +##### Using an IPv4 endpoint + +A typical configuration using an IPv4 endpoint + +
Config + +```yaml +local_ipv4: + name : 'local' + url : 'http://127.0.0.1:8080/manager/status?XML=true' + +``` +
+ +##### Using an IPv6 endpoint + +A typical configuration using an IPv6 endpoint + +
Config + +```yaml +local_ipv6: + name : 'local' + url : 'http://[::1]:8080/manager/status?XML=true' + +``` +
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `tomcat` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin tomcat debug trace + ``` + + diff --git a/collectors/python.d.plugin/tomcat/metadata.yaml b/collectors/python.d.plugin/tomcat/metadata.yaml index c22f4f58b37035..e685260736f33c 100644 --- a/collectors/python.d.plugin/tomcat/metadata.yaml +++ b/collectors/python.d.plugin/tomcat/metadata.yaml @@ -45,7 +45,7 @@ modules: prerequisites: list: - title: Create a read-only `netdata` user, to monitor the `/status` endpoint. - description: You will need this configuring the collector + description: This is necessary for configuring the collector. configuration: file: name: "python.d/tomcat.conf" diff --git a/collectors/python.d.plugin/tor/README.md b/collectors/python.d.plugin/tor/README.md deleted file mode 100644 index 15f7e22823f131..00000000000000 --- a/collectors/python.d.plugin/tor/README.md +++ /dev/null @@ -1,89 +0,0 @@ - - -# Tor collector - -Connects to the Tor control port to collect traffic statistics. - -## Requirements - -- `tor` program -- `stem` python package - -It produces only one chart: - -1. **Traffic** - - - read - - write - -## Configuration - -Edit the `python.d/tor.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/tor.conf -``` - -Needs only `control_port`. - -Here is an example for local server: - -```yaml -update_every : 1 -priority : 60000 - -local_tcp: - name: 'local' - control_port: 9051 - password: # if required - -local_socket: - name: 'local' - control_port: '/var/run/tor/control' - password: # if required -``` - -### prerequisite - -Add to `/etc/tor/torrc`: - -``` -ControlPort 9051 -``` - -For more options please read the manual. - -Without configuration, module attempts to connect to `127.0.0.1:9051`. - - - - -### Troubleshooting - -To troubleshoot issues with the `tor` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `tor` module in debug mode: - -```bash -./python.d.plugin tor debug trace -``` - diff --git a/collectors/python.d.plugin/tor/README.md b/collectors/python.d.plugin/tor/README.md new file mode 120000 index 00000000000000..7c20cd40ad9934 --- /dev/null +++ b/collectors/python.d.plugin/tor/README.md @@ -0,0 +1 @@ +integrations/tor.md \ No newline at end of file diff --git a/collectors/python.d.plugin/tor/integrations/tor.md b/collectors/python.d.plugin/tor/integrations/tor.md new file mode 100644 index 00000000000000..0e57fa793af378 --- /dev/null +++ b/collectors/python.d.plugin/tor/integrations/tor.md @@ -0,0 +1,197 @@ + + +# Tor + + + + + +Plugin: python.d.plugin +Module: tor + + + +## Overview + +This collector monitors Tor bandwidth traffic . + +It connects to the Tor control port to collect traffic statistics. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +If no configuration is provided the collector will try to connect to 127.0.0.1:9051 to detect a running tor instance. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Tor instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| tor.traffic | read, write | KiB/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Required python module + +The `stem` python library needs to be installed. + + +#### Required Tor configuration + +Add to /etc/tor/torrc: + +ControlPort 9051 + +For more options please read the manual. + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/tor.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/tor.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | +| control_addr | Tor control IP address | 127.0.0.1 | no | +| control_port | Tor control port. Can be either a tcp port, or a path to a socket file. | 9051 | no | +| password | Tor control password | | no | + +
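The examples below only cover local instances; as a hedged sketch, a job for a Tor daemon on another machine would set `control_addr` (the address is illustrative, and the remote Tor must expose its control port to the network):

```yaml
remote_tcp:
  name: 'remote'
  control_addr: 192.0.2.1   # illustrative address
  control_port: 9051
  password: # if required
```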
+ +#### Examples + +##### Local TCP + +A basic TCP configuration. `control_addr` is omitted and will default to `127.0.0.1`. + +
Config + +```yaml +local_tcp: + name: 'local' + control_port: 9051 + password: # if required + +``` +
+ +##### Local socket + +A basic local socket configuration. + +
Config + +```yaml +local_socket: + name: 'local' + control_port: '/var/run/tor/control' + password: # if required + +``` +
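For the socket example above to work, Tor itself must expose a control socket at that path; assuming a stock Tor install, the corresponding `torrc` line would be roughly:

```
ControlSocket /var/run/tor/control
```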
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `tor` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin tor debug trace + ``` + + diff --git a/collectors/python.d.plugin/tor/metadata.yaml b/collectors/python.d.plugin/tor/metadata.yaml index d0ecc1a43d4d70..8647eca2320ae0 100644 --- a/collectors/python.d.plugin/tor/metadata.yaml +++ b/collectors/python.d.plugin/tor/metadata.yaml @@ -39,6 +39,9 @@ modules: setup: prerequisites: list: + - title: 'Required python module' + description: | + The `stem` python library needs to be installed. - title: 'Required Tor configuration' description: | Add to /etc/tor/torrc: diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md deleted file mode 100644 index 393be9fc5886bb..00000000000000 --- a/collectors/python.d.plugin/uwsgi/README.md +++ /dev/null @@ -1,75 +0,0 @@ - - -# uWSGI collector - -Monitors performance metrics exposed by [`Stats Server`](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html). - - -Following charts are drawn: - -1. **Requests** - - - requests per second - - transmitted data - - average request time - -2. **Memory** - - - rss - - vsz - -3. **Exceptions** -4. **Harakiris** -5. **Respawns** - -## Configuration - -Edit the `python.d/uwsgi.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/uwsgi.conf -``` - -```yaml -socket: - name : 'local' - socket : '/tmp/stats.socket' - -localhost: - name : 'local' - host : 'localhost' - port : 1717 -``` - -When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:1717`. - - -### Troubleshooting - -To troubleshoot issues with the `uwsgi` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `uwsgi` module in debug mode: - -```bash -./python.d.plugin uwsgi debug trace -``` - diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md new file mode 120000 index 00000000000000..44b8559492a874 --- /dev/null +++ b/collectors/python.d.plugin/uwsgi/README.md @@ -0,0 +1 @@ +integrations/uwsgi.md \ No newline at end of file diff --git a/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md b/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md new file mode 100644 index 00000000000000..af58608bd0706a --- /dev/null +++ b/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md @@ -0,0 +1,219 @@ + + +# uWSGI + + + + + +Plugin: python.d.plugin +Module: uwsgi + + + +## Overview + +This collector monitors uWSGI metrics about requests, workers, memory and more. + +It collects every metric exposed from the stats server of uWSGI, either from the `stats.socket` or from the web server's TCP/IP socket. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This collector will auto-detect uWSGI instances deployed on the local host, running on port 1717, or exposing stats on socket `tmp/stats.socket`. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per uWSGI instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| uwsgi.requests | a dimension per worker | requests/s | +| uwsgi.tx | a dimension per worker | KiB/s | +| uwsgi.avg_rt | a dimension per worker | milliseconds | +| uwsgi.memory_rss | a dimension per worker | MiB | +| uwsgi.memory_vsz | a dimension per worker | MiB | +| uwsgi.exceptions | exceptions | exceptions | +| uwsgi.harakiris | harakiris | harakiris | +| uwsgi.respawns | respawns | respawns | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable the uWSGI Stats server + +Make sure that you uWSGI exposes it's metrics via a Stats server. + +Source: https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/uwsgi.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/uwsgi.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. 
+ +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | The JOB's name as it will appear at the dashboard (by default is the job_name) | job_name | no | +| socket | The 'path/to/uwsgistats.sock' | no | no | +| host | The host to connect to | no | no | +| port | The port to connect to | no | no | + +
+ +#### Examples + +##### Basic (default out-of-the-box) + +A basic example configuration. This is what the auto-detection mechanism uses by default. Since all of these JOBs have the same name, only one of them can run at a time. + +
Config + +```yaml +socket: + name : 'local' + socket : '/tmp/stats.socket' + +localhost: + name : 'local' + host : 'localhost' + port : 1717 + +localipv4: + name : 'local' + host : '127.0.0.1' + port : 1717 + +localipv6: + name : 'local' + host : '::1' + port : 1717 + +``` +
+ +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
Config + +```yaml +local: + name : 'local' + host : 'localhost' + port : 1717 + +remote: + name : 'remote' + host : '192.0.2.1' + port : 1717 + +``` +
+ + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `uwsgi` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin uwsgi debug trace + ``` + + diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md deleted file mode 100644 index d30a9fb1dc9f7c..00000000000000 --- a/collectors/python.d.plugin/varnish/README.md +++ /dev/null @@ -1,88 +0,0 @@ - - -# Varnish Cache collector - -Provides HTTP accelerator global, Backends (VBE) and Storages (SMF, SMA, MSE) statistics using `varnishstat` tool. - -Note that both, Varnish-Cache (free and open source) and Varnish-Plus (Commercial/Enterprise version), are supported. - -## Requirements - -- `netdata` user must be a member of the `varnish` group - -## Charts - -This module produces the following charts: - -- Connections Statistics in `connections/s` -- Client Requests in `requests/s` -- All History Hit Rate Ratio in `percent` -- Current Poll Hit Rate Ratio in `percent` -- Expired Objects in `expired/s` -- Least Recently Used Nuked Objects in `nuked/s` -- Number Of Threads In All Pools in `pools` -- Threads Statistics in `threads/s` -- Current Queue Length in `requests` -- Backend Connections Statistics in `connections/s` -- Requests To The Backend in `requests/s` -- ESI Statistics in `problems/s` -- Memory Usage in `MiB` -- Uptime in `seconds` - -For every backend (VBE): - -- Backend Response Statistics in `kilobits/s` - -For every storage (SMF, SMA, or MSE): - -- Storage Usage in `KiB` -- Storage Allocated Objects - -## Configuration - -Edit the `python.d/varnish.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/varnish.conf -``` - -Only one parameter is supported: - -```yaml -instance_name: 'name' -``` - -The name of the `varnishd` instance to get logs from. If not specified, the host name is used. - - - - -### Troubleshooting - -To troubleshoot issues with the `varnish` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `varnish` module in debug mode: - -```bash -./python.d.plugin varnish debug trace -``` - diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md new file mode 120000 index 00000000000000..194be2335a1901 --- /dev/null +++ b/collectors/python.d.plugin/varnish/README.md @@ -0,0 +1 @@ +integrations/varnish.md \ No newline at end of file diff --git a/collectors/python.d.plugin/varnish/integrations/varnish.md b/collectors/python.d.plugin/varnish/integrations/varnish.md new file mode 100644 index 00000000000000..da74dcf8f0e9c3 --- /dev/null +++ b/collectors/python.d.plugin/varnish/integrations/varnish.md @@ -0,0 +1,213 @@ + + +# Varnish + + + + + +Plugin: python.d.plugin +Module: varnish + + + +## Overview + +This collector monitors Varnish metrics about HTTP accelerator global, Backends (VBE) and Storages (SMF, SMA, MSE) statistics. + +Note that both, Varnish-Cache (free and open source) and Varnish-Plus (Commercial/Enterprise version), are supported. + + +It uses the `varnishstat` tool in order to collect the metrics. + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + +`netdata` user must be a member of the `varnish` group. + + +### Default Behavior + +#### Auto-Detection + +By default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Varnish instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| varnish.session_connection | accepted, dropped | connections/s | +| varnish.client_requests | received | requests/s | +| varnish.all_time_hit_rate | hit, miss, hitpass | percentage | +| varnish.current_poll_hit_rate | hit, miss, hitpass | percentage | +| varnish.cached_objects_expired | objects | expired/s | +| varnish.cached_objects_nuked | objects | nuked/s | +| varnish.threads_total | None | number | +| varnish.threads_statistics | created, failed, limited | threads/s | +| varnish.threads_queue_len | in queue | requests | +| varnish.backend_connections | successful, unhealthy, reused, closed, recycled, failed | connections/s | +| varnish.backend_requests | sent | requests/s | +| varnish.esi_statistics | errors, warnings | problems/s | +| varnish.memory_usage | free, allocated | MiB | +| varnish.uptime | uptime | seconds | + +### Per Backend + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| varnish.backend | header, body | kilobits/s | + +### Per Storage + + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| varnish.storage_usage | free, allocated | KiB | +| varnish.storage_alloc_objs | allocated | objects | + + + +## Alerts + +There are no alerts configured by default for this integration. 
+ + +## Setup + +### Prerequisites + +#### Provide the necessary permissions + +In order for the collector to work, you need to add the `netdata` user to the `varnish` user group, so that it can execute the `varnishstat` tool: + +``` +usermod -aG varnish netdata +``` + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/varnish.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/varnish.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| instance_name | the name of the varnishd instance to get logs from. If not specified, the local host name is used. | | yes | +| update_every | Sets the default data collection frequency. | 10 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | + +
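The example below leaves `instance_name` empty, which falls back to the local host name. If `varnishd` runs as a named instance (started with `-n`), the job should reference that name explicitly; a minimal sketch, with a placeholder instance name:

```yaml
named_instance:
  instance_name: 'cache01'   # placeholder; must match the name passed to varnishd -n
```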
+ +#### Examples + +##### Basic + +An example configuration. + +```yaml +job_name: + instance_name: '' + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `varnish` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin varnish debug trace + ``` + + diff --git a/collectors/python.d.plugin/varnish/metadata.yaml b/collectors/python.d.plugin/varnish/metadata.yaml index aa245c25fcb404..d31c1cf6fccef1 100644 --- a/collectors/python.d.plugin/varnish/metadata.yaml +++ b/collectors/python.d.plugin/varnish/metadata.yaml @@ -75,8 +75,8 @@ modules: enabled: true list: - name: instance_name - description: the name of the varnishd instance to get logs from. If not specified, the host name is used. - default_value: '' + description: the name of the varnishd instance to get logs from. If not specified, the local host name is used. + default_value: "" required: true - name: update_every description: Sets the default data collection frequency. diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md deleted file mode 100644 index ca08b0400985c3..00000000000000 --- a/collectors/python.d.plugin/w1sensor/README.md +++ /dev/null @@ -1,50 +0,0 @@ - - -# 1-Wire Sensors collector - -Monitors sensor temperature. - -On Linux these are supported by the wire, w1_gpio, and w1_therm modules. -Currently temperature sensors are supported and automatically detected. - -Charts are created dynamically based on the number of detected sensors. - -## Configuration - -Edit the `python.d/w1sensor.conf` configuration file using `edit-config` from the Netdata [config -directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/w1sensor.conf -``` - -An example of a working configuration can be found in the default [configuration file](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/w1sensor/w1sensor.conf) of this collector. - -### Troubleshooting - -To troubleshoot issues with the `w1sensor` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. 
- -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `w1sensor` module in debug mode: - -```bash -./python.d.plugin w1sensor debug trace -``` - diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md new file mode 120000 index 00000000000000..c0fa9cd1bb5f10 --- /dev/null +++ b/collectors/python.d.plugin/w1sensor/README.md @@ -0,0 +1 @@ +integrations/1-wire_sensors.md \ No newline at end of file diff --git a/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md b/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md new file mode 100644 index 00000000000000..fe3c05ba6fa836 --- /dev/null +++ b/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md @@ -0,0 +1,167 @@ + + +# 1-Wire Sensors + + + + + +Plugin: python.d.plugin +Module: w1sensor + + + +## Overview + +Monitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts. + +The collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +The collector will try to auto detect available 1-Wire devices. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per 1-Wire Sensors instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| w1sensor.temp | a dimension per sensor | Celsius | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Required Linux kernel modules + +Make sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded. + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/w1sensor.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/w1sensor.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | +| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | +| name_<1-Wire id> | This allows associating a human readable name with a sensor's 1-Wire identifier. | | no | + +
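+The 1-Wire identifiers used with the `name_<1-Wire id>` option can be listed from sysfs on Linux. This is an illustrative sketch only (device IDs and paths will differ on your system, and the modules may be built into your kernel):
+
+```bash
+# confirm the required kernel modules are loaded
+lsmod | grep -E '^(wire|w1_gpio|w1_therm)'
+# list the detected 1-Wire devices; temperature sensors typically appear as 28-*
+ls /sys/bus/w1/devices/
+```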
+ +#### Examples + +##### Provide human readable names + +Associate two 1-Wire identifiers with human readable names. + +```yaml +sensors: + name_00000022276e: 'Machine room' + name_00000022298f: 'Rack 12' + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin w1sensor debug trace + ``` + + diff --git a/collectors/python.d.plugin/zscores/README.md b/collectors/python.d.plugin/zscores/README.md deleted file mode 100644 index dcb685c989d89b..00000000000000 --- a/collectors/python.d.plugin/zscores/README.md +++ /dev/null @@ -1,158 +0,0 @@ -# Basic anomaly detection using Z-scores - -By using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis. - -This collector uses the [Netdata rest api](https://github.com/netdata/netdata/blob/master/web/api/README.md) to get the `mean` and `stddev` -for each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`). For each dimension -it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). Scores are then smoothed over -time (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score -at each time step. - -## Charts - -Two charts are produced: - -- **Z-Score** (`zscores.z`): This chart shows the calculated Z-Score per chart (or dimension if `mode='per_dim'`). -- **Z-Score >3** (`zscores.3stddev`): This chart shows a `1` if the absolute value of the Z-Score is greater than 3 or - a `0` otherwise. - -Below is an example of the charts produced by this collector and a typical example of how they would look when things -are 'normal' on the system. Most of the zscores tend to bounce randomly around a range typically between 0 to +3 (or -3 -to +3 if `z_abs: 'false'`), a few charts might stay steady at a more constant higher value depending on your -configuration and the typical workload on your system (typically those charts that do not change that much have a -smaller range of values on which to calculate a zscore and so tend to have a higher typical zscore). - -So really its a combination of the zscores values themselves plus, perhaps more importantly, how they change when -something strange occurs on your system which can be most useful. - -![zscores-collector-normal](https://user-images.githubusercontent.com/2178292/108776300-21d44d00-755a-11eb-92a4-ecb8f7d2f175.png) - -For example, if we go onto the system and run a command -like [`stress-ng --all 2`](https://wiki.ubuntu.com/Kernel/Reference/stress-ng) to create some stress, we see many charts -begin to have zscores that jump outside the typical range. 
When the absolute zscore for a chart is greater than 3 you -will see a corresponding line appear on the `zscores.3stddev` chart to make it a bit clearer what charts might be worth -looking at first (for more background information on why 3 stddev -see [here](https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule#:~:text=In%20the%20empirical%20sciences%20the,99.7%25%20probability%20as%20near%20certainty.)) -. - -In the example below we basically took a sledge hammer to our system so its not surprising that lots of charts light up -after we run the stress command. In a more realistic setting you might just see a handful of charts with strange zscores -and that could be a good indication of where to look first. - -![zscores-collector-abnormal](https://user-images.githubusercontent.com/2178292/108776316-28fb5b00-755a-11eb-80de-ec5d38089ecc.png) - -Then as the issue passes the zscores should settle back down into their normal range again as they are calculated in a -rolling and smoothed way (as defined by your `zscores.conf` file). - -![zscores-collector-normal-again](https://user-images.githubusercontent.com/2178292/108776439-4fb99180-755a-11eb-8bb7-b4df144cb44c.png) - -## Requirements - -This collector will only work with Python 3 and requires the below packages be installed. - -```bash -# become netdata user -sudo su -s /bin/bash netdata -# install required packages -pip3 install numpy pandas requests netdata-pandas==0.0.38 -``` - -## Configuration - -Install the underlying Python requirements, Enable the collector and restart Netdata. - -```bash -cd /etc/netdata/ -sudo ./edit-config python.d.conf -# Set `zscores: no` to `zscores: yes` -sudo systemctl restart netdata -``` - -The configuration for the zscores collector defines how it will behave on your system and might take some -experimentation with over time to set it optimally. Out of the box, the config comes with -some [sane defaults](https://www.netdata.cloud/blog/redefining-monitoring-netdata/) to get you started. - -If you are unsure about any of the below configuration options then it's best to just ignore all this and leave -the `zscores.conf` files alone to begin with. Then you can return to it later if you would like to tune things a bit -more once the collector is running for a while. - -Edit the `python.d/zscores.conf` configuration file using `edit-config` from the your -agent's [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory), which is -usually at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/zscores.conf -``` - -The default configuration should look something like this. Here you can see each parameter (with sane defaults) and some -information about each one and what it does. - -```bash -# what host to pull data from -host: '127.0.0.1:19999' -# What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc. 
-charts_regex: 'system\..*' -# length of time to base calculations off for mean and stddev -train_secs: 14400 # use last 4 hours to work out the mean and stddev for the zscore -# offset preceding latest data to ignore when calculating mean and stddev -offset_secs: 300 # ignore last 5 minutes of data when calculating the mean and stddev -# recalculate the mean and stddev every n steps of the collector -train_every_n: 900 # recalculate mean and stddev every 15 minutes -# smooth the z score by averaging it over last n values -z_smooth_n: 15 # take a rolling average of the last 15 zscore values to reduce sensitivity to temporary 'spikes' -# cap absolute value of zscore (before smoothing) for better stability -z_clip: 10 # cap each zscore at 10 so as to avoid really large individual zscores swamping any rolling average -# set z_abs: 'true' to make all zscores be absolute values only. -z_abs: 'true' -# burn in period in which to initially calculate mean and stddev on every step -burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or initial calculations fail to return -# mode can be to get a zscore 'per_dim' or 'per_chart' -mode: 'per_chart' # 'per_chart' means individual dimension level smoothed zscores will be aggregated to one zscore per chart per time step -# per_chart_agg is how you aggregate from dimension to chart when mode='per_chart' -per_chart_agg: 'mean' # 'absmax' will take the max absolute value across all dimensions but will maintain the sign. 'mean' will just average. -``` - -## Notes - -- Python 3 is required as the [`netdata-pandas`](https://github.com/netdata/netdata-pandas) package uses python async - libraries ([asks](https://pypi.org/project/asks/) and [trio](https://pypi.org/project/trio/)) to make asynchronous - calls to the netdata rest api to get the required data for each chart when calculating the mean and stddev. -- It may take a few hours or so for the collector to 'settle' into it's typical behaviour in terms of the scores you - will see in the normal running of your system. -- The zscore you see for each chart when using `mode: 'per_chart'` as actually an aggregated zscore across all the - dimensions on the underlying chart. -- If you set `mode: 'per_dim'` then you will see a zscore for each dimension on each chart as opposed to one per chart. -- As this collector does some calculations itself in python you may want to try it out first on a test or development - system to get a sense of its performance characteristics. Most of the work in calculating the mean and stddev will be - pushed down to the underlying Netdata C libraries via the rest api. But some data wrangling and calculations are then - done using [Pandas](https://pandas.pydata.org/) and [Numpy](https://numpy.org/) within the collector itself. -- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) vm running Ubuntu 18.04 LTS and not doing any work some of the - typical performance characteristics we saw from running this collector were: - - A runtime (`netdata.runtime_zscores`) of ~50ms when doing scoring and ~500ms when recalculating the mean and - stddev. - - Typically 3%-3.5% cpu usage from scoring, jumping to ~35% for one second when recalculating the mean and stddev. - - About ~50mb of ram (`apps.mem`) being continually used by the `python.d.plugin`. -- If you activate this collector on a fresh node, it might take a little while to build up enough data to calculate a - proper zscore. 
So until you actually have `train_secs` of available data the mean and stddev calculated will be subject - to more noise. -### Troubleshooting - -To troubleshoot issues with the `zscores` module, run the `python.d.plugin` with the debug option enabled. The -output will give you the output of the data collection job or error messages on why the collector isn't working. - -First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's -not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the -plugin's directory, switch to the `netdata` user. - -```bash -cd /usr/libexec/netdata/plugins.d/ -sudo su -s /bin/bash netdata -``` - -Now you can manually run the `zscores` module in debug mode: - -```bash -./python.d.plugin zscores debug trace -``` - diff --git a/collectors/python.d.plugin/zscores/README.md b/collectors/python.d.plugin/zscores/README.md new file mode 120000 index 00000000000000..159ce078713440 --- /dev/null +++ b/collectors/python.d.plugin/zscores/README.md @@ -0,0 +1 @@ +integrations/python.d_zscores.md \ No newline at end of file diff --git a/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md b/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md new file mode 100644 index 00000000000000..9d7d1c3d5ab0d8 --- /dev/null +++ b/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md @@ -0,0 +1,195 @@ + + +# python.d zscores + +Plugin: python.d.plugin +Module: zscores + + + +## Overview + +By using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis. + + +This collector uses the [Netdata rest api](https://github.com/netdata/netdata/blob/master/web/api/README.md) to get the `mean` and `stddev` +for each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`). + +For each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). Scores are then smoothed over +time (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score at each time step. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per python.d zscores instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| zscores.z | a dimension per chart or dimension | z | +| zscores.3stddev | a dimension per chart or dimension | count | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Python Requirements + +This collector will only work with Python 3 and requires the below packages be installed. 
+ +```bash +# become netdata user +sudo su -s /bin/bash netdata +# install required packages +pip3 install numpy pandas requests netdata-pandas==0.0.38 +``` + + + +### Configuration + +#### File + +The configuration file name for this integration is `python.d/zscores.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config python.d/zscores.conf +``` +#### Options + +There are 2 sections: + +* Global variables +* One or more JOBS that can define multiple different instances to monitor. + +The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. + +Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. + +Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| charts_regex | what charts to pull data for - A regex like `system\..*/` or `system\..*/apps.cpu/apps.mem` etc. | system\..* | yes | +| train_secs | length of time (in seconds) to base calculations off for mean and stddev. | 14400 | yes | +| offset_secs | offset (in seconds) preceding latest data to ignore when calculating mean and stddev. | 300 | yes | +| train_every_n | recalculate the mean and stddev every n steps of the collector. | 900 | yes | +| z_smooth_n | smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values. | 15 | yes | +| z_clip | cap absolute value of zscore (before smoothing) for better stability. | 10 | yes | +| z_abs | set z_abs: 'true' to make all zscores be absolute values only. | true | yes | +| burn_in | burn in period in which to initially calculate mean and stddev on every step. | 2 | yes | +| mode | mode can be to get a zscore 'per_dim' or 'per_chart'. | per_chart | yes | +| per_chart_agg | per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'. | mean | yes | +| update_every | Sets the default data collection frequency. | 5 | no | +| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | +| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | +| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | + +
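+To make the `z_clip`, `z_abs` and `z_smooth_n` options above more concrete, here is a minimal, illustrative sketch of the scoring math described in the Overview (not the collector's actual implementation):
+
+```python
+# Illustrative sketch only: clipped, optionally absolute z-score, then smoothing.
+def zscore(x, mean, stddev, z_clip=10, z_abs=True):
+    z = 0.0 if stddev == 0 else (x - mean) / stddev
+    z = max(min(z, z_clip), -z_clip)  # clip extreme values for stability
+    return abs(z) if z_abs else z
+
+# z_smooth_n: smooth by averaging the last n z-scores
+last_n = [zscore(x, mean=10.0, stddev=2.0) for x in (9.5, 10.2, 14.8, 16.1)]
+print(round(sum(last_n) / len(last_n), 2))  # 1.45
+```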
+ +#### Examples + +##### Default + +Default configuration. + +```yaml +local: + name: 'local' + host: '127.0.0.1:19999' + charts_regex: 'system\..*' + charts_to_exclude: 'system.uptime' + train_secs: 14400 + offset_secs: 300 + train_every_n: 900 + z_smooth_n: 15 + z_clip: 10 + z_abs: 'true' + burn_in: 2 + mode: 'per_chart' + per_chart_agg: 'mean' + +``` + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `zscores` collector, run the `python.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `python.d.plugin` to debug the collector: + + ```bash + ./python.d.plugin zscores debug trace + ``` + + diff --git a/collectors/slabinfo.plugin/README.md b/collectors/slabinfo.plugin/README.md deleted file mode 100644 index abcbe1e3fe926d..00000000000000 --- a/collectors/slabinfo.plugin/README.md +++ /dev/null @@ -1,36 +0,0 @@ - - -# slabinfo.plugin - -SLAB is a cache mechanism used by the Kernel to avoid fragmentation. - -Each internal structure (process, file descriptor, inode...) is stored within a SLAB. - -## configuring Netdata for slabinfo - -The plugin is disabled by default because it collects and displays a huge amount of metrics. -To enable it set `slabinfo = yes` in the `plugins` section of the `netdata.conf` configuration file. - -If you are using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), you will additionally need to install the `netdata-plugin-slabinfo` -package using your system package manager. - -There is currently no configuration needed for the plugin itself. - -As `/proc/slabinfo` is only readable by root, this plugin is setuid root. - -## For what use - -This slabinfo details allows to have clues on actions done on your system. -In the following screenshot, you can clearly see a `find` done on a ext4 filesystem (the number of `ext4_inode_cache` & `dentry` are rising fast), and a few seconds later, an admin issued a `echo 3 > /proc/sys/vm/drop_cached` as their count dropped. - -![netdata_slabinfo](https://user-images.githubusercontent.com/9157986/64433811-7f06e500-d0bf-11e9-8e1e-087497e61033.png) - - - diff --git a/collectors/slabinfo.plugin/README.md b/collectors/slabinfo.plugin/README.md new file mode 120000 index 00000000000000..4d4629a77f86a2 --- /dev/null +++ b/collectors/slabinfo.plugin/README.md @@ -0,0 +1 @@ +integrations/linux_kernel_slab_allocator_statistics.md \ No newline at end of file diff --git a/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md b/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md new file mode 100644 index 00000000000000..ce8115270b076c --- /dev/null +++ b/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md @@ -0,0 +1,131 @@ + + +# Linux kernel SLAB allocator statistics + + + + + +Plugin: slabinfo.plugin +Module: slabinfo.plugin + + + +## Overview + +Collects metrics on kernel SLAB cache utilization to monitor the low-level performance impact of workloads in the kernel. 
+
+
+The plugin parses `/proc/slabinfo`
+
+This collector is only supported on the following platforms:
+
+- Linux
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+This integration requires read access to `/proc/slabinfo`, which is accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to this file. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+Due to the large number of metrics generated by this integration, it is disabled by default and must be manually enabled inside `/etc/netdata/netdata.conf`.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+SLAB cache utilization metrics for the whole system.
+
+### Per Linux kernel SLAB allocator statistics instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mem.slabmemory | a dimension per cache | B |
+| mem.slabfilling | a dimension per cache | % |
+| mem.slabwaste | a dimension per cache | B |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Minimum setup
+
+If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-slabinfo`.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `netdata.conf`.
+Configuration for this specific integration is located in the `[plugins]` section within that file.
+
+The file format is a modified INI syntax. The general structure is:
+
+```ini
+[section1]
+  option1 = some value
+  option2 = some other value
+
+[section2]
+  option3 = some third value
+```
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config netdata.conf
+```
+#### Options
+
+
+
+
The main configuration file. + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| Enable plugin | As described above plugin is disabled by default, this option is used to enable plugin. | no | yes | + +
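+Based on the `Enable plugin` option above, turning the plugin on boils down to a single line in the `[plugins]` section of `netdata.conf`. A minimal sketch (the `slabinfo` key name follows the plugin's earlier README):
+
+```ini
+[plugins]
+  slabinfo = yes
+```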
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/slabinfo.plugin/metadata.yaml b/collectors/slabinfo.plugin/metadata.yaml index 7d135d611a5579..f19778297db64f 100644 --- a/collectors/slabinfo.plugin/metadata.yaml +++ b/collectors/slabinfo.plugin/metadata.yaml @@ -50,7 +50,9 @@ modules: description: "" setup: prerequisites: - list: [] + list: + - title: Minimum setup + description: "If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-slabinfo`." configuration: file: name: "netdata.conf" diff --git a/collectors/slabinfo.plugin/slabinfo.c b/collectors/slabinfo.plugin/slabinfo.c index 25b96e386ef9e0..9b9119a6ea1415 100644 --- a/collectors/slabinfo.plugin/slabinfo.c +++ b/collectors/slabinfo.plugin/slabinfo.c @@ -336,12 +336,11 @@ void usage(void) { } int main(int argc, char **argv) { - stderror = stderr; clocks_init(); + nd_log_initialize_for_external_plugins("slabinfo.plugin"); program_name = argv[0]; program_version = "0.1"; - error_log_syslog = 0; int update_every = 1, i, n, freq = 0; diff --git a/collectors/statsd.plugin/README.md b/collectors/statsd.plugin/README.md index dd74923ec03414..e3c8f9f81f1c40 100644 --- a/collectors/statsd.plugin/README.md +++ b/collectors/statsd.plugin/README.md @@ -36,7 +36,7 @@ Netdata ships with a few synthetic chart definitions to automatically present ap more uniform way. These synthetic charts are configuration files (you can create your own) that re-arrange statsd metrics into a more meaningful way. -On synthetic charts, we can have alarms as with any metric and chart. +On synthetic charts, we can have alerts as with any metric and chart. - [K6 load testing tool](https://k6.io) - **Description:** k6 is a developer-centric, free and open-source load testing tool built for making performance testing a productive and enjoyable experience. @@ -173,8 +173,8 @@ You can find the configuration at `/etc/netdata/netdata.conf`: # update every (flushInterval) = 1 # udp messages to process at once = 10 # create private charts for metrics matching = * - # max private charts allowed = 200 # max private charts hard limit = 1000 + # cleanup obsolete charts after secs = 0 # private charts memory mode = save # private charts history = 3996 # histograms and timers percentile (percentThreshold) = 95.00000 @@ -234,13 +234,11 @@ The default behavior is to use the same settings as the rest of the Netdata Agen - `private charts memory mode` - `private charts history` -### Optimize private metric charts visualization and storage +### Optimize private metric charts storage -If you have thousands of metrics, each with its own private chart, you may notice that your web browser becomes slow when you view the Netdata dashboard (this is a web browser issue we need to address at the Netdata UI). So, Netdata has a protection to stop creating charts when `max private charts allowed = 200` (soft limit) is reached. +For optimization reasons, Netdata imposes a hard limit on private metric charts. The limit is set via the `max private charts hard limit` setting (which defaults to 1000 charts). Metrics above this hard limit are still collected, but they can only be used in synthetic charts (once a metric is added to chart, it will be sent to backend servers too). -The metrics above this soft limit are still processed by Netdata, can be used in synthetic charts and will be available to be sent to backend time-series databases, up to `max private charts hard limit = 1000`. 
So, between 200 and 1000 charts, Netdata will still generate charts, but they will automatically be created with `memory mode = none` (Netdata will not maintain a database for them). These metrics will be sent to backend time series databases, if the backend configuration is set to `as collected`. - -Metrics above the hard limit are still collected, but they can only be used in synthetic charts (once a metric is added to chart, it will be sent to backend servers too). +If you have many ephemeral metrics collected (i.e. that you collect values for a certain amount of time), you can set the configuration option `set charts as obsolete after secs`. Setting a value in seconds here, means that Netdata will mark those metrics (and their private charts) as obsolete after the specified time has passed since the last sent metric value. Those charts will later be deleted according to the setting in `cleanup obsolete charts after secs`. Setting `set charts as obsolete after secs` to 0 (which is also the default value) will disable this functionality. Example private charts (automatically generated without any configuration): @@ -348,11 +346,11 @@ Using the above configuration `myapp` should get its own section on the dashboar - `gaps when not collected = yes|no`, enables or disables gaps on the charts of the application in case that no metrics are collected. - `memory mode` sets the memory mode for all charts of the application. The default is the global default for Netdata (not the global default for StatsD private charts). We suggest not to use this (we have commented it out in the example) and let your app use the global default for Netdata, which is our dbengine. -- `history` sets the size of the round robin database for this application. The default is the global default for Netdata (not the global default for StatsD private charts). This is only relevant if you use `memory mode = save`. Read more on our [metrics storage(]/docs/store/change-metrics-storage.md) doc. +- `history` sets the size of the round-robin database for this application. The default is the global default for Netdata (not the global default for StatsD private charts). This is only relevant if you use `memory mode = save`. Read more on our [metrics storage(]/docs/store/change-metrics-storage.md) doc. `[dictionary]` defines name-value associations. These are used to renaming metrics, when added to synthetic charts. Metric names are also defined at each `dimension` line. However, using the dictionary dimension names can be declared globally, for each app and is the only way to rename dimensions when using patterns. Of course the dictionary can be empty or missing. -Then, add any number of charts. Each chart should start with `[id]`. The chart will be called `app_name.id`. `family` controls the submenu on the dashboard. `context` controls the alarm templates. `priority` controls the ordering of the charts on the dashboard. The rest of the settings are informational. +Then, add any number of charts. Each chart should start with `[id]`. The chart will be called `app_name.id`. `family` controls the submenu on the dashboard. `context` controls the alert templates. `priority` controls the ordering of the charts on the dashboard. The rest of the settings are informational. Add any number of metrics to a chart, using `dimension` lines. These lines accept 5 space separated parameters: @@ -361,7 +359,7 @@ Add any number of metrics to a chart, using `dimension` lines. These lines accep 3. 
an optional selector (type) of the value to shown (see below) 4. an optional multiplier 5. an optional divider -6. optional flags, space separated and enclosed in quotes. All the external plugins `DIMENSION` flags can be used. Currently the only usable flag is `hidden`, to add the dimension, but not show it on the dashboard. This is usually needed to have the values available for percentage calculation, or use them in alarms. +6. optional flags, space separated and enclosed in quotes. All the external plugins `DIMENSION` flags can be used. Currently, the only usable flag is `hidden`, to add the dimension, but not show it on the dashboard. This is usually needed to have the values available for percentage calculation, or use them in alerts. So, the format is this: @@ -439,7 +437,7 @@ Use the dictionary in 2 ways: 1. set `dimension = myapp.metric1 ''` and have at the dictionary `myapp.metric1 = metric1 name` 2. set `dimension = myapp.metric1 'm1'` and have at the dictionary `m1 = metric1 name` -In both cases, the dimension will be added with ID `myapp.metric1` and will be named `metric1 name`. So, in alarms use either of the 2 as `${myapp.metric1}` or `${metric1 name}`. +In both cases, the dimension will be added with ID `myapp.metric1` and will be named `metric1 name`. So, in alerts use either of the 2 as `${myapp.metric1}` or `${metric1 name}`. > keep in mind that if you add multiple times the same StatsD metric to a chart, Netdata will append `TYPE` to the dimension ID, so `myapp.metric1` will be added as `myapp.metric1_last` or `myapp.metric1_events`, etc. If you add multiple times the same metric with the same `TYPE` to a chart, Netdata will also append an incremental counter to the dimension ID, i.e. `myapp.metric1_last1`, `myapp.metric1_last2`, etc. diff --git a/collectors/statsd.plugin/statsd.c b/collectors/statsd.plugin/statsd.c index 5422d2905956e3..9cc3a9d97952b8 100644 --- a/collectors/statsd.plugin/statsd.c +++ b/collectors/statsd.plugin/statsd.c @@ -95,6 +95,7 @@ typedef enum __attribute__((packed)) statsd_metric_options { STATSD_METRIC_OPTION_USEFUL = 0x00000080, // set when the charting thread finds the metric useful (i.e. 
used in a chart) STATSD_METRIC_OPTION_COLLECTION_FULL_LOGGED = 0x00000100, // set when the collection is full for this metric STATSD_METRIC_OPTION_UPDATED_CHART_METADATA = 0x00000200, // set when the private chart metadata have been updated via tags + STATSD_METRIC_OPTION_OBSOLETE = 0x00004000, // set when the metric is obsoleted } STATS_METRIC_OPTIONS; typedef enum __attribute__((packed)) statsd_metric_type { @@ -117,6 +118,7 @@ typedef struct statsd_metric { // metadata about data collection collected_number events; // the number of times this metric has been collected (never resets) uint32_t count; // the number of times this metric has been collected since the last flush + time_t last_collected; // timestamp of the last incoming value // the actual collected data union { @@ -268,6 +270,7 @@ static struct statsd { collected_number decimal_detail; uint32_t private_charts; uint32_t max_private_charts_hard; + uint32_t set_obsolete_after; STATSD_APP *apps; uint32_t recvmmsg_size; @@ -476,6 +479,16 @@ static inline int value_is_zinit(const char *value) { #define is_metric_checked(m) ((m)->options & STATSD_METRIC_OPTION_CHECKED) #define is_metric_useful_for_collection(m) (!is_metric_checked(m) || ((m)->options & STATSD_METRIC_OPTION_USEFUL)) +static inline void metric_update_counters_and_obsoletion(STATSD_METRIC *m) { + m->events++; + m->count++; + m->last_collected = now_realtime_sec(); + if (m->st && unlikely(rrdset_flag_check(m->st, RRDSET_FLAG_OBSOLETE))) { + rrdset_isnot_obsolete___safe_from_collector_thread(m->st); + m->options &= ~STATSD_METRIC_OPTION_OBSOLETE; + } +} + static inline void statsd_process_gauge(STATSD_METRIC *m, const char *value, const char *sampling) { if(!is_metric_useful_for_collection(m)) return; @@ -498,8 +511,7 @@ static inline void statsd_process_gauge(STATSD_METRIC *m, const char *value, con else m->gauge.value = statsd_parse_float(value, 1.0); - m->events++; - m->count++; + metric_update_counters_and_obsoletion(m); } } @@ -516,8 +528,7 @@ static inline void statsd_process_counter_or_meter(STATSD_METRIC *m, const char else { m->counter.value += llrintndd((NETDATA_DOUBLE) statsd_parse_int(value, 1) / statsd_parse_sampling_rate(sampling)); - m->events++; - m->count++; + metric_update_counters_and_obsoletion(m); } } @@ -559,8 +570,7 @@ static inline void statsd_process_histogram_or_timer(STATSD_METRIC *m, const cha m->histogram.ext->values[m->histogram.ext->used++] = v; } - m->events++; - m->count++; + metric_update_counters_and_obsoletion(m); } } @@ -597,8 +607,7 @@ static inline void statsd_process_set(STATSD_METRIC *m, const char *value) { #else dictionary_set(m->set.dict, value, NULL, 0); #endif - m->events++; - m->count++; + metric_update_counters_and_obsoletion(m); } } @@ -630,8 +639,7 @@ static inline void statsd_process_dictionary(STATSD_METRIC *m, const char *value } t->count++; - m->events++; - m->count++; + metric_update_counters_and_obsoletion(m); } } @@ -1627,6 +1635,9 @@ static inline RRDSET *statsd_private_rrdset_create( static inline void statsd_private_chart_gauge(STATSD_METRIC *m) { netdata_log_debug(D_STATSD, "updating private chart for gauge metric '%s'", m->name); + if(m->st && unlikely(rrdset_flag_check(m->st, RRDSET_FLAG_OBSOLETE))) + return; + if(unlikely(!m->st || m->options & STATSD_METRIC_OPTION_UPDATED_CHART_METADATA)) { m->options &= ~STATSD_METRIC_OPTION_UPDATED_CHART_METADATA; @@ -1667,6 +1678,9 @@ static inline void statsd_private_chart_gauge(STATSD_METRIC *m) { static inline void statsd_private_chart_counter_or_meter(STATSD_METRIC 
*m, const char *dim, const char *family) { netdata_log_debug(D_STATSD, "updating private chart for %s metric '%s'", dim, m->name); + if(m->st && unlikely(rrdset_flag_check(m->st, RRDSET_FLAG_OBSOLETE))) + return; + if(unlikely(!m->st || m->options & STATSD_METRIC_OPTION_UPDATED_CHART_METADATA)) { m->options &= ~STATSD_METRIC_OPTION_UPDATED_CHART_METADATA; @@ -1707,6 +1721,9 @@ static inline void statsd_private_chart_counter_or_meter(STATSD_METRIC *m, const static inline void statsd_private_chart_set(STATSD_METRIC *m) { netdata_log_debug(D_STATSD, "updating private chart for set metric '%s'", m->name); + if(m->st && unlikely(rrdset_flag_check(m->st, RRDSET_FLAG_OBSOLETE))) + return; + if(unlikely(!m->st || m->options & STATSD_METRIC_OPTION_UPDATED_CHART_METADATA)) { m->options &= ~STATSD_METRIC_OPTION_UPDATED_CHART_METADATA; @@ -1747,6 +1764,9 @@ static inline void statsd_private_chart_set(STATSD_METRIC *m) { static inline void statsd_private_chart_dictionary(STATSD_METRIC *m) { netdata_log_debug(D_STATSD, "updating private chart for dictionary metric '%s'", m->name); + if(m->st && unlikely(rrdset_flag_check(m->st, RRDSET_FLAG_OBSOLETE))) + return; + if(unlikely(!m->st || m->options & STATSD_METRIC_OPTION_UPDATED_CHART_METADATA)) { m->options &= ~STATSD_METRIC_OPTION_UPDATED_CHART_METADATA; @@ -1790,6 +1810,9 @@ static inline void statsd_private_chart_dictionary(STATSD_METRIC *m) { static inline void statsd_private_chart_timer_or_histogram(STATSD_METRIC *m, const char *dim, const char *family, const char *units) { netdata_log_debug(D_STATSD, "updating private chart for %s metric '%s'", dim, m->name); + if(m->st && unlikely(rrdset_flag_check(m->st, RRDSET_FLAG_OBSOLETE))) + return; + if(unlikely(!m->st || m->options & STATSD_METRIC_OPTION_UPDATED_CHART_METADATA)) { m->options &= ~STATSD_METRIC_OPTION_UPDATED_CHART_METADATA; @@ -1842,6 +1865,16 @@ static inline void statsd_private_chart_timer_or_histogram(STATSD_METRIC *m, con // -------------------------------------------------------------------------------------------------------------------- // statsd flush metrics +static inline void metric_check_obsoletion(STATSD_METRIC *m) { + if(statsd.set_obsolete_after && + !rrdset_flag_check(m->st, RRDSET_FLAG_OBSOLETE) && + m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && + m->last_collected + statsd.set_obsolete_after < now_realtime_sec()) { + rrdset_is_obsolete___safe_from_collector_thread(m->st); + m->options |= STATSD_METRIC_OPTION_OBSOLETE; + } +} + static inline void statsd_flush_gauge(STATSD_METRIC *m) { netdata_log_debug(D_STATSD, "flushing gauge metric '%s'", m->name); @@ -1855,6 +1888,8 @@ static inline void statsd_flush_gauge(STATSD_METRIC *m) { if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)))) statsd_private_chart_gauge(m); + + metric_check_obsoletion(m); } static inline void statsd_flush_counter_or_meter(STATSD_METRIC *m, const char *dim, const char *family) { @@ -1870,6 +1905,8 @@ static inline void statsd_flush_counter_or_meter(STATSD_METRIC *m, const char *d if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)))) statsd_private_chart_counter_or_meter(m, dim, family); + + metric_check_obsoletion(m); } static inline void statsd_flush_counter(STATSD_METRIC *m) { @@ -1896,6 +1933,8 @@ static inline void statsd_flush_set(STATSD_METRIC *m) { if(unlikely(m->options & 
STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)))) statsd_private_chart_set(m); + + metric_check_obsoletion(m); } static inline void statsd_flush_dictionary(STATSD_METRIC *m) { @@ -1924,6 +1963,8 @@ static inline void statsd_flush_dictionary(STATSD_METRIC *m) { dictionary_entries(m->dictionary.dict)); } } + + metric_check_obsoletion(m); } static inline void statsd_flush_timer_or_histogram(STATSD_METRIC *m, const char *dim, const char *family, const char *units) { @@ -1977,6 +2018,8 @@ static inline void statsd_flush_timer_or_histogram(STATSD_METRIC *m, const char if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)))) statsd_private_chart_timer_or_histogram(m, dim, family, units); + + metric_check_obsoletion(m); } static inline void statsd_flush_timer(STATSD_METRIC *m) { @@ -2283,7 +2326,7 @@ static inline void statsd_flush_index_metrics(STATSD_INDEX *index, void (*flush_ if(unlikely(is_metric_checked(m))) break; if(unlikely(!(m->options & STATSD_METRIC_OPTION_CHECKED_IN_APPS))) { - netdata_log_access("NEW STATSD METRIC '%s': '%s'", statsd_metric_type_string(m->type), m->name); + nd_log(NDLS_ACCESS, NDLP_DEBUG, "NEW STATSD METRIC '%s': '%s'", statsd_metric_type_string(m->type), m->name); check_if_metric_is_for_app(index, m); m->options |= STATSD_METRIC_OPTION_CHECKED_IN_APPS; } @@ -2326,8 +2369,20 @@ static inline void statsd_flush_index_metrics(STATSD_INDEX *index, void (*flush_ dfe_done(m); // flush all the useful metrics - for(m = index->first_useful; m ; m = m->next_useful) { + STATSD_METRIC *m_prev; + for(m_prev = m = index->first_useful; m ; m = m->next_useful) { flush_metric(m); + if (m->options & STATSD_METRIC_OPTION_OBSOLETE) { + if (m == index->first_useful) + index->first_useful = m->next_useful; + else + m_prev->next_useful = m->next_useful; + dictionary_del(index->dict, m->name); + index->useful--; + index->metrics--; + statsd.private_charts--; + } else + m_prev = m; } } @@ -2447,6 +2502,7 @@ void *statsd_main(void *ptr) { config_get(CONFIG_SECTION_STATSD, "create private charts for metrics matching", "*"), NULL, SIMPLE_PATTERN_EXACT, true); statsd.max_private_charts_hard = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts_hard); + statsd.set_obsolete_after = (size_t)config_get_number(CONFIG_SECTION_STATSD, "set charts as obsolete after secs", (long long)statsd.set_obsolete_after); statsd.decimal_detail = (collected_number)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail); statsd.tcp_idle_timeout = (size_t) config_get_number(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after seconds", (long long int)statsd.tcp_idle_timeout); statsd.private_charts_hidden = (unsigned int)config_get_boolean(CONFIG_SECTION_STATSD, "private charts hidden", statsd.private_charts_hidden); @@ -2458,7 +2514,7 @@ void *statsd_main(void *ptr) { } { char buffer[314 + 1]; - snprintfz(buffer, 314, "%0.1f%%", statsd.histogram_percentile); + snprintfz(buffer, sizeof(buffer) - 1, "%0.1f%%", statsd.histogram_percentile); statsd.histogram_percentile_str = strdupz(buffer); } diff --git a/collectors/systemd-journal.plugin/Makefile.am b/collectors/systemd-journal.plugin/Makefile.am index fd8f4ab2166c06..48f667c1bc03a7 100644 --- a/collectors/systemd-journal.plugin/Makefile.am +++ 
b/collectors/systemd-journal.plugin/Makefile.am @@ -5,6 +5,11 @@ MAINTAINERCLEANFILES = $(srcdir)/Makefile.in dist_noinst_DATA = \ README.md \ + systemd-journal-self-signed-certs.sh \ + forward_secure_sealing.md \ + active_journal_centralization_guide_no_encryption.md \ + passive_journal_centralization_guide_no_encryption.md \ + passive_journal_centralization_guide_self_signed_certs.md \ $(NULL) dist_libconfig_DATA = \ diff --git a/collectors/systemd-journal.plugin/README.md b/collectors/systemd-journal.plugin/README.md index e69de29bb2d1d6..c3c639045d7f30 100644 --- a/collectors/systemd-journal.plugin/README.md +++ b/collectors/systemd-journal.plugin/README.md @@ -0,0 +1,472 @@ + +# `systemd` journal plugin + +[KEY FEATURES](#key-features) | [JOURNAL SOURCES](#journal-sources) | [JOURNAL FIELDS](#journal-fields) | +[PLAY MODE](#play-mode) | [FULL TEXT SEARCH](#full-text-search) | [PERFORMANCE](#query-performance) | +[CONFIGURATION](#configuration-and-maintenance) | [FAQ](#faq) + +The `systemd` journal plugin by Netdata makes viewing, exploring and analyzing `systemd` journal logs simple and +efficient. +It automatically discovers available journal sources, allows advanced filtering, offers interactive visual +representations and supports exploring the logs of both individual servers and the logs on infrastructure wide +journal centralization servers. + +![image](https://github.com/netdata/netdata/assets/2662304/691b7470-ec56-430c-8b81-0c9e49012679) + +## Key features + +- Works on both **individual servers** and **journal centralization servers**. +- Supports `persistent` and `volatile` journals. +- Supports `system`, `user`, `namespaces` and `remote` journals. +- Allows filtering on **any journal field** or **field value**, for any time-frame. +- Allows **full text search** (`grep`) on all journal fields, for any time-frame. +- Provides a **histogram** for log entries over time, with a break down per field-value, for any field and any + time-frame. +- Works directly on journal files, without any other third-party components. +- Supports coloring log entries, the same way `journalctl` does. +- In PLAY mode provides the same experience as `journalctl -f`, showing new log entries immediately after they are + received. + +### Prerequisites + +`systemd-journal.plugin` is a Netdata Function Plugin. + +To protect your privacy, as with all Netdata Functions, a free Netdata Cloud user account is required to access it. +For more information check [this discussion](https://github.com/netdata/netdata/discussions/16136). + +### Limitations + +#### Plugin availability + +The following are limitations related to the availability of the plugin: + +- Netdata versions prior to 1.44 shipped in a docker container do not include this plugin. + The problem is that `libsystemd` is not available in Alpine Linux (there is a `libsystemd`, but it is a dummy that + returns failure on all calls). Starting with Netdata version 1.44, Netdata containers use a Debian base image + making this plugin available when Netdata is running in a container. +- For the same reason (lack of `systemd` support for Alpine Linux), the plugin is not available on `static` builds of + Netdata (which are based on `muslc`, not `glibc`). If your Netdata is installed in `/opt/netdata` you most likely have + a static build of Netdata. +- On old systemd systems (like Centos 7), the plugin runs always in "full data query" mode, which makes it slower. 
The
+  reason is that the systemd API is missing some important calls we need in order to use the field indexes of the `systemd` journal.
+  However, when running in this mode, the plugin also offers negative matches on the data (like filtering for all logs
+  that do not have some field set), and this is the reason "full data query" mode is also offered as an option even on
+  newer versions of `systemd`.
+
+#### `systemd` journal features
+
+The following are limitations related to the features of `systemd` journal:
+
+- This plugin assumes that binary field values are text fields with newlines in them. `systemd-journal` has the ability
+  to support binary fields, without specifying the nature of the binary data. However, binary fields are commonly used
+  to store log entries that include multiple lines of text. The plugin treats all binary fields as multi-line text.
+- This plugin does not support multiple values per field for any given log entry. `systemd` journal has the ability to
+  accept the same field key, multiple times, with multiple values on a single log entry. This plugin will present the
+  last value and ignore the others for this log entry.
+- This plugin will only read journal files located in `/var/log/journal` or `/run/log/journal`. `systemd-journal-remote` has the
+  ability to store journal files anywhere (user configured). If journal files are not located in `/var/log/journal`
+  or `/run/log/journal` (and any of their subdirectories), the plugin will not find them. A simple solution is to link
+  the other directories somewhere inside `/var/log/journal`. The plugin will pick them up, even if a sub-directory of
+  `/var/log/journal` is a link to a directory outside `/var/log/journal`.
+
+Other than the above, this plugin supports all features of `systemd` journals.
+
+## Journal Sources
+
+The plugin automatically detects the available journal sources, based on the journal files available in
+`/var/log/journal` (persistent logs) and `/run/log/journal` (volatile logs).
+
+![journal-sources](https://github.com/netdata/netdata/assets/2662304/28e63a3e-6809-4586-b3b0-80755f340e31)
+
+The plugin, by default, merges all journal sources together, to provide a unified view of all log messages available.
+
+> To improve query performance, we recommend selecting the relevant journal source, before doing more analysis on the
+> logs.
+
+### `system` journals
+
+`system` journals are the default journals available on all `systemd` based systems.
+
+`system` journals contain:
+
+- kernel log messages (via `kmsg`),
+- audit records, originating from the kernel audit subsystem,
+- messages received by `systemd-journald` via `syslog`,
+- messages received via the standard output and error of service units,
+- structured messages received via the native journal API.
+
+### `user` journals
+
+Unlike `journalctl`, the Netdata plugin allows viewing, exploring and querying the journal files of **all users**.
+
+By default, each user, with a UID outside the range of system users (0 - 999), dynamic service users,
+and the nobody user (65534), will get their own set of `user` journal files. For more information about
+this policy check [Users, Groups, UIDs and GIDs on systemd Systems](https://systemd.io/UIDS-GIDS/).
+
+Keep in mind that `user` journals are merged with the `system` journals when they are propagated to a journal
+centralization server. So, at the centralization server, the `remote` journals contain both the `system` and `user`
+journals of the sender. 
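+
+For example, on a host with persistent journals, the separate `system` and per-user journal files the plugin discovers can be listed directly. This is an illustrative sketch; the machine-id directory and the file names vary per system:
+
+```bash
+# persistent journals live under /var/log/journal/<machine-id>/
+ls /var/log/journal/"$(cat /etc/machine-id)"/
+# typical entries: system.journal plus user-<UID>.journal files, one set per user
+```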
+ +### `namespaces` journals + +The plugin auto-detects the namespaces available and provides a list of all namespaces at the "sources" list on the UI. + +Journal namespaces are both a mechanism for logically isolating the log stream of projects consisting +of one or more services from the rest of the system and a mechanism for improving performance. + +`systemd` service units may be assigned to a specific journal namespace through the `LogNamespace=` unit file setting. + +Keep in mind that namespaces require special configuration to be propagated to a journal centralization server. +This makes them a little more difficult to handle, from the administration perspective. + +### `remote` journals + +Remote journals are created by `systemd-journal-remote`. This `systemd` feature allows creating logs centralization +points within your infrastructure, based exclusively on `systemd`. + +Usually `remote` journals are named by the IP of the server sending these logs. The Netdata plugin automatically +extracts these IPs and performs a reverse DNS lookup to find their hostnames. When this is successful, +`remote` journals are named by the hostnames of the origin servers. + +For information about configuring a journal centralization server, +check [this FAQ item](#how-do-i-configure-a-journal-centralization-server). + +## Journal Fields + +`systemd` journals are designed to support multiple fields per log entry. The power of `systemd` journals is that, +unlike other log management systems, it supports dynamic and variable fields for each log message, +while all fields and their values are indexed for fast querying. + +This means that each application can log messages annotated with its own unique fields and values, and `systemd` +journals will automatically index all of them, without any configuration or manual action. + +For a description of the most frequent fields found in `systemd` journals, check `man systemd.journal-fields`. + +Fields found in the journal files are automatically added to the UI in multiple places to help you explore +and filter the data. + +The plugin automatically enriches certain fields to make them more user-friendly: + +- `_BOOT_ID`: the hex value is annotated with the timestamp of the first message encountered for this boot id. +- `PRIORITY`: the numeric value is replaced with the human-readable name of each priority. +- `SYSLOG_FACILITY`: the encoded value is replaced with the human-readable name of each facility. +- `ERRNO`: the numeric value is annotated with the short name of each value. +- `_UID` `_AUDIT_LOGINUID`, `_SYSTEMD_OWNER_UID`, `OBJECT_UID`, `OBJECT_SYSTEMD_OWNER_UID`, `OBJECT_AUDIT_LOGINUID`: + the local user database is consulted to annotate them with usernames. +- `_GID`, `OBJECT_GID`: the local group database is consulted to annotate them with group names. +- `_CAP_EFFECTIVE`: the encoded value is annotated with a human-readable list of the linux capabilities. +- `_SOURCE_REALTIME_TIMESTAMP`: the numeric value is annotated with human-readable datetime in UTC. +- `MESSAGE_ID`: for the known `MESSAGE_ID`s, the value is replaced with the well known name of the event. + +The values of all other fields are presented as found in the journals. + +> IMPORTANT: +> The UID and GID annotations are added during presentation and are taken from the server running the plugin. +> For `remote` sources, the names presented may not reflect the actual user and group names on the origin server. +> The numeric value will still be visible though, as-is on the origin server. 
+ +The annotations are not searchable with full-text search. They are only added for the presentation of the fields. + +### Journal fields as columns in the table + +All journal fields available in the journal files are offered as columns on the UI. Use the gear button above the table: + +![image](https://github.com/netdata/netdata/assets/2662304/cd75fb55-6821-43d4-a2aa-033792c7f7ac) + +### Journal fields as additional info to each log entry + +When you click a log line, the `info` sidebar will open on the right of the screen, to provide the full list of fields +related to this log line. You can close this `info` sidebar, by selecting the filter icon at its top. + +![image](https://github.com/netdata/netdata/assets/2662304/3207794c-a61b-444c-8ffe-6c07cbc90ae2) + +### Journal fields as filters + +The plugin presents a select list of fields as filters to the query, with counters for each of the possible values +for the field. This list can used to quickly check which fields and values are available for the entire time-frame +of the query. + +Internally the plugin has: + +1. A white-list of fields, to be presented as filters. +2. A black-list of fields, to prevent them from becoming filters. This list includes fields with a very high + cardinality, like timestamps, unique message ids, etc. This is mainly for protecting the server's performance, + to avoid building in memory indexes for the fields that almost each of their values is unique. + +Keep in mind that the values presented in the filters, and their sorting is affected by the "full data queries" +setting: + +![image](https://github.com/netdata/netdata/assets/2662304/ac710d46-07c2-487b-8ce3-e7f767b9ae0f) + +When "full data queries" is off, empty values are hidden and cannot be selected. This is due to a limitation of +`libsystemd` that does not allow negative or empty matches. Also, values with zero counters may appear in the list. + +When "full data queries" is on, Netdata is applying all filtering to the data (not `libsystemd`), but this means +that all the data of the entire time-frame, without any filtering applied, have to be read by the plugin to prepare +the response required. So, "full data queries" can be significantly slower over long time-frames. + +### Journal fields as histogram sources + +The plugin presents a histogram of the number of log entries across time. + +The data source of this histogram can be any of the fields that are available as filters. +For each of the values this field has, across the entire time-frame of the query, the histogram will get corresponding +dimensions, showing the number of log entries, per value, over time. + +The granularity of the histogram is adjusted automatically to have about 150 columns visible on screen. + +The histogram presented by the plugin is interactive: + +- **Zoom**, either with the global date-time picker, or the zoom tool in the histogram's toolbox. +- **Pan**, either with global date-time picker, or by dragging with the mouse the chart to the left or the right. +- **Click**, to quickly jump to the highlighted point in time in the log entries. + +![image](https://github.com/netdata/netdata/assets/2662304/d3dcb1d1-daf4-49cf-9663-91b5b3099c2d) + +## PLAY mode + +The plugin supports PLAY mode, to continuously update the screen with new log entries found in the journal files. +Just hit the "play" button at the top of the Netdata dashboard screen. 
+
+On centralized log servers, PLAY mode provides a unified view of all the new logs encountered across the entire
+infrastructure, from all hosts sending logs to the central logs server via `systemd-journal-remote`.
+
+## Full-text search
+
+The plugin supports searching for any text on all fields of the log entries.
+
+Full-text search is combined with the selected filters.
+
+The text box accepts asterisks `*` as wildcards. So, `a*b*c` means match anything that contains `a`, then `b` and
+then `c`, with anything between them.
+
+Spaces are treated as OR expressions, so `a*b c*d` means `a*b OR c*d`.
+
+Negative expressions are supported by prefixing any string with `!`. Example: `!systemd *` means match anything that
+does not contain `systemd` in any of its fields.
+
+## Query performance
+
+Journal files are designed to be accessed by multiple readers and one writer, concurrently.
+
+Readers (like this Netdata plugin) open the journal files and `libsystemd`, behind the scenes, maps regions
+of the files into memory to satisfy each query.
+
+On logs aggregation servers, the performance of the queries depends on the following factors:
+
+1. The **number of files** involved in each query.
+
+   This is why we suggest selecting a source when possible.
+
+2. The **speed of the disks** hosting the journal files.
+
+   Journal files perform a lot of reading while querying, so the faster the disks, the faster the query will finish.
+
+3. The **memory available** for caching parts of the files.
+
+   Increased memory will help the kernel cache the most frequently used parts of the journal files, avoiding disk I/O
+   and speeding up queries.
+
+4. The **number of filters** applied.
+
+   Queries are significantly faster when just a few filters are selected.
+
+In general, for a faster experience, **keep a low number of rows within the visible timeframe**.
+
+Even on long timeframes, selecting a couple of filters that will result in a **few tens of thousands** of log entries
+will provide fast responses, usually less than a second. On the contrary, viewing timeframes with **millions
+of entries** may result in longer delays.
+
+The plugin aborts journal queries when your browser cancels inflight requests. This allows you to work on the UI
+while there are background queries running.
+
+At the time of this writing, this Netdata plugin is about 25-30 times faster than `journalctl` on queries that access
+multiple journal files, over long time-frames.
+
+During the development of this plugin, we submitted, to `systemd`, a number of patches to improve `journalctl`
+performance by a factor of 14:
+
+-
+-
+-
+
+However, even after these patches are merged, `journalctl` will still be 2x slower than this Netdata plugin,
+on multi-journal queries.
+
+The problem lies in the way `libsystemd` handles multi-journal file queries. To overcome this problem,
+the Netdata plugin queries each file individually and then merges the results to be returned.
+This is transparent, thanks to the `facets` library in `libnetdata` that handles on-the-fly indexing, filtering,
+and searching of any dataset, independently of its source.
+
+## Performance at scale
+
+On busy logs servers, or when querying long timeframes that match millions of log entries, the plugin uses a sampling
+algorithm to allow it to respond promptly. It works like this:
+
+1. The latest 500k log entries are queried in full, evaluating all the fields of every single log entry.
+   This evaluation allows counting the unique values per field, updating the counters next to each value in the
+   filters section of the dashboard.
+2. When the latest 500k log entries have been processed and there are more data to read, the plugin divides a budget
+   of another 500k log entries evenly across the journal files matched by the query. So, it will continue to evaluate
+   all the fields of all log entries, up to the budget per file, aiming to fully query 1 million log entries in total.
+3. When the budget is hit for a given file, the plugin continues to scan log entries, but this time it does not
+   evaluate the fields and their values, so the counters per field and value are not updated. These unsampled log
+   entries are shown in the histogram with the label `[unsampled]`.
+4. The plugin continues to count `[unsampled]` entries until as many entries as the sampled ones have been counted
+   and at least 1% of the journal file has been processed.
+5. When the `[unsampled]` budget is exhausted, the plugin stops processing the journal file and, based on the
+   processing completed so far and the number of entries in the journal file, it estimates the remaining number of
+   log entries in that file. This is shown as `[estimated]` in the histogram.
+6. In systemd versions 254 or later, the plugin fetches the unique sequence number of each log entry and calculates
+   the percentage of the file matched by the query versus the total number of log entries in the journal file.
+7. In systemd versions prior to 254, the plugin estimates the number of entries the journal file contributes to the
+   query, using the number of log entries it matched versus the total duration the log file has entries for.
+
+The above allows the plugin to respond promptly even when the number of log entries in the journal files is several
+dozen million, while providing accurate estimations of the log entries over time in the histogram and enough counters
+in the fields filtering section to help users get an overview of the whole timeframe.
+
+Since the latest 500k log entries and 1% of all journal files (which are spread over time) are fully evaluated,
+including counting the number of appearances for each field value, the plugin usually provides an accurate
+representation of the whole timeframe.
+
+Keep in mind that although the plugin is quite effective and responds promptly when there are hundreds of journal
+files matching a query, response times may be longer when there are several thousands of smaller files. systemd
+versions 254+ attempt to solve this problem by allowing `systemd-journal-remote` to create larger files. However, for
+systemd versions prior to 254, `systemd-journal-remote` creates files of up to 32MB each, so on very busy journal
+centralization servers aggregating several thousands of log entries per second, the number of files can quickly grow
+to several tens of thousands. In such setups, the plugin should ideally skip processing journal files entirely,
+relying solely on estimations based on the sequence of files each file is part of. However, this has not been
+implemented yet. To improve query performance in such setups, the user has to query smaller timeframes.
+
+Another optimization, taking place on huge journal centralization points, is the initial scan of the database. The
+plugin needs to know the list of all journal files available, including the details of the first and the last message
+in each of them.
+When there are several thousands of files in a directory (as usually happens in `/var/log/journal/remote`),
+directory listing and examination of each file can take a considerable amount of time (even `ls -l` takes minutes).
+To work around this problem, the plugin uses `inotify` to receive file updates immediately and scans the library from
+the newest to the oldest file, allowing the user interface to work immediately after startup, for the most recent
+timeframes.
+
+### Best practices for better performance
+
+systemd-journal has been designed **first to be reliable** and then to be fast. It includes several mechanisms to
+ensure minimal data loss under all conditions (e.g. disk corruption, tampering, forward secure sealing) and, although
+it uses several techniques to keep its disk footprint small (like deduplication of log entries, linking of values and
+fields, compression), the disk footprint of journal files remains significantly higher compared to other log
+management solutions.
+
+The higher disk footprint results in higher disk I/O during querying, since a lot more data have to be read from disk
+to evaluate a query. Query performance at scale can greatly benefit from using a compressed filesystem (ext4, btrfs,
+zfs) to store systemd-journal files.
+
+systemd-journal files are cached by the operating system. There is no database server to serve queries. Each file is
+opened and the query runs by directly accessing the data in it.
+
+Therefore, systemd-journal relies on the caching layer of the operating system to optimize query performance. The more
+RAM the system has (even though it will be reported as `cache`, not as `used`), the faster the queries will get. The
+first time a timeframe is accessed, query performance will be slower, but further queries on the same timeframe will
+be significantly faster, since the journal data are now cached in memory.
+
+So, on busy logs centralization systems, query performance can be improved significantly by using a compressed
+filesystem for storing the journal files, and higher amounts of RAM.
+
+## Configuration and maintenance
+
+This Netdata plugin does not require any configuration or maintenance.
+
+## FAQ
+
+### Can I use this plugin on journal centralization servers?
+
+Yes. You can centralize your logs using `systemd-journal-remote`, and then install Netdata
+on this logs centralization server to explore the logs of all your infrastructure.
+
+This plugin will automatically provide multi-node views of your logs and also give you the ability to combine the logs
+of multiple servers, as you see fit.
+
+Check [configuring a logs centralization server](#how-do-i-configure-a-journal-centralization-server).
+
+### Can I use this plugin from a parent Netdata?
+
+Yes. When your nodes are connected to a Netdata parent, all their functions are available
+via the parent's UI. So, from the parent UI, you can access the functions of all your nodes.
+
+Keep in mind that, to protect your privacy, accessing Netdata functions requires a free Netdata Cloud account.
+
+### Is any of my data exposed to Netdata Cloud from this plugin?
+
+No. When you access the agent directly, none of your data passes through Netdata Cloud.
+You need a free Netdata Cloud account only to verify your identity and enable the use of
+Netdata Functions. Once this is done, all the data flows directly from your Netdata agent
+to your web browser.
+
+Also check [this discussion](https://github.com/netdata/netdata/discussions/16136).
+
+When you access Netdata via `https://app.netdata.cloud`, your data travels via Netdata Cloud,
+but it is not stored in Netdata Cloud. This is to allow you to access your Netdata agents from
+anywhere. All communication from/to Netdata Cloud is encrypted.
+
+### What are `volatile` and `persistent` journals?
+
+`systemd` `journald` allows creating both `volatile` journals in a `tmpfs` ram drive,
+and `persistent` journals stored on disk.
+
+`volatile` journals are particularly useful when the system monitored is sensitive to
+disk I/O, or does not have any writable disks at all.
+
+For more information check `man systemd-journald`.
+
+### I centralize my logs with Loki. Why use Netdata for my journals?
+
+`systemd` journals have almost infinite cardinality in their labels and all of them are indexed,
+even if every single message has unique fields and values.
+
+When you send `systemd` journal logs to Loki, even if you use the `relabel_rules` argument to
+`loki.source.journal` with a JSON format, you need to specify which of the fields from journald
+you want inherited by Loki. This means you need to know the most important fields beforehand.
+At the same time you lose all the flexibility `systemd` journal provides:
+**indexing on all fields and all their values**.
+
+Loki generally assumes that all logs are like a table. All entries in a stream share the same
+fields. But journald does exactly the opposite. Each log entry is unique and may have its own unique fields.
+
+So, Loki and `systemd-journal` are good for different use cases.
+
+`systemd-journal` already runs on your systems. You use it today. It is there inside all your systems,
+collecting the system and application logs. And for its use case, it has advantages over other
+centralization solutions. So, why not use it?
+
+### Is it worth building a `systemd` logs centralization server?
+
+Yes. It is simple, fast and the software to do it is already in your systems.
+
+For application and system logs, `systemd` journal is ideal, and the visibility you get by centralizing
+your system logs and using this Netdata plugin is unparalleled.
+
+### How do I configure a journal centralization server?
+
+A short summary to get a journal centralization server running can be found below.
+There are two strategies you can apply when it comes to a centralized server for `systemd` journal logs:
+
+1. _Active sources_, where the centralized server fetches the logs from each individual server.
+2. _Passive sources_, where the centralized server accepts a log stream from an individual server.
+
+For more options and references to the documentation, check `man systemd-journal-remote` and `man systemd-journal-upload`.
+
+#### _passive_ journal centralization without encryption
+
+If you want to set up passive journal centralization without encryption, [check out our guide on it](https://github.com/netdata/netdata/blob/master/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md).
+
+#### _passive_ journal centralization with encryption using self-signed certificates
+
+If you want to set up passive journal centralization using self-signed certificates for encryption, [check out our guide on it](https://github.com/netdata/netdata/blob/master/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md).
+
+#### Limitations when using a logs centralization server
+
+As of this writing, `namespaces` support by `systemd` is limited:
+
+- Docker containers cannot log to namespaces. Check [this issue](https://github.com/moby/moby/issues/41879).
+- `systemd-journal-upload` automatically uploads `system` and `user` journals, but not `namespaces` journals. For this
+  you need to spawn a `systemd-journal-upload` per namespace.
diff --git a/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md b/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md
new file mode 100644
index 00000000000000..cbed1e81e5a7b2
--- /dev/null
+++ b/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md
@@ -0,0 +1,126 @@
+# Active journal source without encryption
+
+This page will guide you through creating an active journal source without the use of encryption.
+
+Once you enable an active journal source on a server, `systemd-journal-gatewayd` will expose a REST API on TCP port 19531. This API can be used for querying the logs, exporting the logs, or monitoring new log entries, remotely.
+
+> ⚠️ **IMPORTANT**
+> These instructions will expose your logs to the network, without any encryption or authorization.
+> DO NOT USE THIS ON NON-TRUSTED NETWORKS.
+
+## Configuring an active journal source
+
+On the server whose logs you want to expose, install `systemd-journal-gateway`.
+
+```bash
+# change this according to your distro
+sudo apt-get install systemd-journal-gateway
+```
+
+Optionally, if you want to change the port (the default is `19531`), edit `systemd-journal-gatewayd.socket`
+
+```bash
+# edit the socket file
+sudo systemctl edit systemd-journal-gatewayd.socket
+```
+
+and add the following lines into the instructed place, and choose your desired port; save and exit.
+
+```bash
+[Socket]
+ListenStream=
+```
+
+Finally, enable it, so that it will start automatically upon receiving a connection:
+
+```bash
+# enable systemd-journal-gatewayd
+sudo systemctl daemon-reload
+sudo systemctl enable --now systemd-journal-gatewayd.socket
+```
+
+## Using the active journal source
+
+### Simple Logs Explorer
+
+`systemd-journal-gateway` provides a simple HTML5 application to browse the logs.
+
+To use it, open your web browser and navigate to:
+
+```
+http://server.ip:19531/browse
+```
+
+A simple page like this will be presented:
+
+![image](https://github.com/netdata/netdata/assets/2662304/4da88bf8-6398-468b-a359-68db0c9ad419)
+
+### Use it with `curl`
+
+`man systemd-journal-gatewayd` documents the supported API methods and provides examples to query the API using `curl` commands.
+
+### Copying the logs to a central journals server
+
+`systemd-journal-remote` has the ability to query instances of `systemd-journal-gatewayd` to fetch their logs, so that the central server fetches the logs, instead of waiting for the individual servers to push their logs to it.
+
+However, this kind of logs centralization has a key problem: **there is no guarantee that there will be no gaps in the logs replicated**. Theoretically, the REST API of `systemd-journal-gatewayd` supports querying past data, and `systemd-journal-remote` could keep track of the state of replication and automatically continue from the point it stopped last time. But it does not. So, currently the best logs centralization option is to use a **passive** centralization, where the clients push their logs to the server.
+
+Given these limitations, if you still want to configure an **active** journals centralization, this is what you need to do:
+
+On the centralization server install `systemd-journal-remote`:
+
+```bash
+# change this according to your distro
+sudo apt-get install systemd-journal-remote
+```
+
+Then, copy `systemd-journal-remote.service` to configure it for querying the active source:
+
+```bash
+# replace "clientX" with the name of the active client node
+sudo cp /lib/systemd/system/systemd-journal-remote.service /etc/systemd/system/systemd-journal-remote-clientX.service
+
+# edit it to make sure the ExecStart line is like this:
+# ExecStart=/usr/lib/systemd/systemd-journal-remote --url http://clientX:19531/entries?follow
+sudo nano /etc/systemd/system/systemd-journal-remote-clientX.service
+
+# reload systemd
+sudo systemctl daemon-reload
+```
+
+```bash
+# enable systemd-journal-remote
+sudo systemctl enable --now systemd-journal-remote-clientX.service
+```
+
+You can repeat this process to create as many `systemd-journal-remote` services as the active sources you have.
+
+## Verify it works
+
+To verify the central server is receiving logs, run this on the central server:
+
+```bash
+sudo ls -l /var/log/journal/remote/
+```
+
+You should see new files from the client's hostname or IP.
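+
+If you also want to inspect the fetched logs themselves, `journalctl` can read that directory directly (adjust the
+path if your setup stores the remote journals elsewhere):
+
+```bash
+# show the last 20 entries received from the active sources
+sudo journalctl -D /var/log/journal/remote/ -n 20 --no-pager
+```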
+ +Also, any of the new service files (`systemctl status systemd-journal-clientX`) should show something like this: + +```bash +● systemd-journal-clientX.service - Fetching systemd journal logs from 192.168.2.146 + Loaded: loaded (/etc/systemd/system/systemd-journal-clientX.service; enabled; preset: disabled) + Drop-In: /usr/lib/systemd/system/service.d + └─10-timeout-abort.conf + Active: active (running) since Wed 2023-10-18 07:35:52 EEST; 23min ago + Main PID: 77959 (systemd-journal) + Tasks: 2 (limit: 6928) + Memory: 7.7M + CPU: 518ms + CGroup: /system.slice/systemd-journal-clientX.service + ├─77959 /usr/lib/systemd/systemd-journal-remote --url "http://192.168.2.146:19531/entries?follow" + └─77962 curl "-HAccept: application/vnd.fdo.journal" --silent --show-error "http://192.168.2.146:19531/entries?follow" + +Oct 18 07:35:52 systemd-journal-server systemd[1]: Started systemd-journal-clientX.service - Fetching systemd journal logs from 192.168.2.146. +Oct 18 07:35:52 systemd-journal-server systemd-journal-remote[77959]: Spawning curl http://192.168.2.146:19531/entries?follow... +``` diff --git a/collectors/systemd-journal.plugin/forward_secure_sealing.md b/collectors/systemd-journal.plugin/forward_secure_sealing.md new file mode 100644 index 00000000000000..b41570d68c29ea --- /dev/null +++ b/collectors/systemd-journal.plugin/forward_secure_sealing.md @@ -0,0 +1,80 @@ +# Forward Secure Sealing (FSS) in Systemd-Journal + +Forward Secure Sealing (FSS) is a feature in the systemd journal designed to detect log file tampering. +Given that attackers often try to hide their actions by modifying or deleting log file entries, +FSS provides administrators with a mechanism to identify any such unauthorized alterations. + +## Importance +Logs are a crucial component of system monitoring and auditing. Ensuring their integrity means administrators can trust +the data, detect potential breaches, and trace actions back to their origins. Traditional methods to maintain this +integrity involve writing logs to external systems or printing them out. While these methods are effective, they are +not foolproof. FSS offers a more streamlined approach, allowing for log verification directly on the local system. + +## How FSS Works +FSS operates by "sealing" binary logs at regular intervals. This seal is a cryptographic operation, ensuring that any +tampering with the logs prior to the sealing can be detected. If an attacker modifies logs before they are sealed, +these changes become a permanent part of the sealed record, highlighting any malicious activity. + +The technology behind FSS is based on "Forward Secure Pseudo Random Generators" (FSPRG), a concept stemming from +academic research. + +Two keys are central to FSS: + +- **Sealing Key**: Kept on the system, used to seal the logs. +- **Verification Key**: Stored securely off-system, used to verify the sealed logs. + +Every so often, the sealing key is regenerated in a non-reversible process, ensuring that old keys are obsolete and the +latest logs are sealed with a fresh key. The off-site verification key can regenerate any past sealing key, allowing +administrators to verify older seals. If logs are tampered with, verification will fail, alerting administrators to the +breach. + +## Enabling FSS +To enable FSS, use the following command: + +```bash +journalctl --setup-keys +``` + +By default, systemd will seal the logs every 15 minutes. However, this interval can be adjusted using a flag during key +generation. 
For example, to seal logs every 10 seconds:
+
+```bash
+journalctl --setup-keys --interval=10s
+```
+
+## Verifying Journals
+After enabling FSS, you can verify the integrity of your logs using the verification key:
+
+```bash
+journalctl --verify
+```
+
+If any discrepancies are found, you'll be alerted, indicating potential tampering.
+
+## Disabling FSS
+Should you wish to disable FSS:
+
+**Delete the Sealing Key**: This stops new log entries from being sealed.
+
+```bash
+journalctl --rotate
+```
+
+**Rotate and Prune the Journals**: This will start a new unsealed journal and can remove old sealed journals.
+
+```bash
+journalctl --vacuum-time=1s
+```
+
+**Adjust Systemd Configuration (Optional)**: If you've made changes to facilitate FSS in `/etc/systemd/journald.conf`,
+consider reverting or adjusting those. Restart the systemd-journald service afterward:
+
+```bash
+systemctl restart systemd-journald
+```
+
+## Conclusion
+FSS is a significant advancement in maintaining log integrity. While not a replacement for all traditional integrity
+methods, it offers a valuable tool in the battle against unauthorized log tampering. By integrating FSS into your log
+management strategy, you ensure a more transparent, reliable, and tamper-evident logging system.
diff --git a/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md b/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md
new file mode 100644
index 00000000000000..b70c22033e1aef
--- /dev/null
+++ b/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md
@@ -0,0 +1,150 @@
+# Passive journal centralization without encryption
+
+This page will guide you through creating a passive journal centralization setup without the use of encryption.
+
+Once you centralize your infrastructure logs to a server, Netdata will automatically detect all the logs from all servers and organize them into sources.
+With the setup described in this document, journal files are identified by the IPs of the clients sending the logs. Netdata will automatically do
+reverse DNS lookups to find the names of the servers and name the sources on the dashboard accordingly.
+
+A _passive_ journal server waits for clients to push their logs to it, so in this setup we will:
+
+1. configure `systemd-journal-remote` on the server, to listen for incoming connections.
+2. configure `systemd-journal-upload` on the clients, to push their logs to the server.
+
+> ⚠️ **IMPORTANT**
+> These instructions will copy your logs to a central server, without any encryption or authorization.
+> DO NOT USE THIS ON NON-TRUSTED NETWORKS. + +## Server configuration + +On the centralization server install `systemd-journal-remote`: + +```bash +# change this according to your distro +sudo apt-get install systemd-journal-remote +``` + +Make sure the journal transfer protocol is `http`: + +```bash +sudo cp /lib/systemd/system/systemd-journal-remote.service /etc/systemd/system/ + +# edit it to make sure it says: +# --listen-http=-3 +# not: +# --listen-https=-3 +sudo nano /etc/systemd/system/systemd-journal-remote.service + +# reload systemd +sudo systemctl daemon-reload +``` + +Optionally, if you want to change the port (the default is `19532`), edit `systemd-journal-remote.socket` + +```bash +# edit the socket file +sudo systemctl edit systemd-journal-remote.socket +``` + +and add the following lines into the instructed place, and choose your desired port; save and exit. + +```bash +[Socket] +ListenStream= +``` + +Finally, enable it, so that it will start automatically upon receiving a connection: + +```bash +# enable systemd-journal-remote +sudo systemctl enable --now systemd-journal-remote.socket +sudo systemctl enable systemd-journal-remote.service +``` + +`systemd-journal-remote` is now listening for incoming journals from remote hosts. + +## Client configuration + +On the clients, install `systemd-journal-remote` (it includes `systemd-journal-upload`): + +```bash +# change this according to your distro +sudo apt-get install systemd-journal-remote +``` + +Edit `/etc/systemd/journal-upload.conf` and set the IP address and the port of the server, like so: + +```conf +[Upload] +URL=http://centralization.server.ip:19532 +``` + +Edit `systemd-journal-upload`, and add `Restart=always` to make sure the client will keep trying to push logs, even if the server is temporarily not there, like this: + +```bash +sudo systemctl edit systemd-journal-upload +``` + +At the top, add: + +```conf +[Service] +Restart=always +``` + +Enable and start `systemd-journal-upload`, like this: + +```bash +sudo systemctl enable systemd-journal-upload +sudo systemctl start systemd-journal-upload +``` + +## Verify it works + +To verify the central server is receiving logs, run this on the central server: + +```bash +sudo ls -l /var/log/journal/remote/ +``` + +You should see new files from the client's IP. + +Also, `systemctl status systemd-journal-remote` should show something like this: + +```bash +systemd-journal-remote.service - Journal Remote Sink Service + Loaded: loaded (/etc/systemd/system/systemd-journal-remote.service; indirect; preset: disabled) + Active: active (running) since Sun 2023-10-15 14:29:46 EEST; 2h 24min ago +TriggeredBy: ● systemd-journal-remote.socket + Docs: man:systemd-journal-remote(8) + man:journal-remote.conf(5) + Main PID: 2118153 (systemd-journal) + Status: "Processing requests..." + Tasks: 1 (limit: 154152) + Memory: 2.2M + CPU: 71ms + CGroup: /system.slice/systemd-journal-remote.service + └─2118153 /usr/lib/systemd/systemd-journal-remote --listen-http=-3 --output=/var/log/journal/remote/ +``` + +Note the `status: "Processing requests..."` and the PID under `CGroup`. 
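+
+If no files show up, a quick thing to check is whether the socket is actually listening on the expected port
+(19532 here; adjust it if you changed the default):
+
+```bash
+# confirm something is listening on the systemd-journal-remote port
+sudo ss -tlnp | grep 19532
+```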
+
+On the client `systemctl status systemd-journal-upload` should show something like this:
+
+```bash
+● systemd-journal-upload.service - Journal Remote Upload Service
+     Loaded: loaded (/lib/systemd/system/systemd-journal-upload.service; enabled; vendor preset: disabled)
+    Drop-In: /etc/systemd/system/systemd-journal-upload.service.d
+             └─override.conf
+     Active: active (running) since Sun 2023-10-15 10:39:04 UTC; 3h 17min ago
+       Docs: man:systemd-journal-upload(8)
+   Main PID: 4169 (systemd-journal)
+     Status: "Processing input..."
+      Tasks: 1 (limit: 13868)
+     Memory: 3.5M
+        CPU: 1.081s
+     CGroup: /system.slice/systemd-journal-upload.service
+             └─4169 /lib/systemd/systemd-journal-upload --save-state
+```
+
+Note the `Status: "Processing input..."` and the PID under `CGroup`.
diff --git a/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md b/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md
new file mode 100644
index 00000000000000..722d1ceae4e546
--- /dev/null
+++ b/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md
@@ -0,0 +1,250 @@
+# Passive journal centralization with encryption using self-signed certificates
+
+This page will guide you through creating a **passive** journal centralization setup using **self-signed certificates** for encryption and authorization.
+
+Once you centralize your infrastructure logs to a server, Netdata will automatically detect all the logs from all servers and organize them into sources. With the setup described in this document, on recent systemd versions, Netdata will automatically name all remote sources using the names of the clients, as they are described in their certificates (on older versions, the names will be IPs or reverse DNS lookups of the IPs).
+
+A **passive** journal server waits for clients to push their logs to it, so in this setup we will:
+
+1. configure a certificate authority and issue self-signed certificates for your servers.
+2. configure `systemd-journal-remote` on the server, to listen for incoming connections.
+3. configure `systemd-journal-upload` on the clients, to push their logs to the server.
+
+Keep in mind that the authorization involved works like this:
+
+1. The server (`systemd-journal-remote`) validates that the client (`systemd-journal-upload`) uses a trusted certificate (a certificate issued by the same certificate authority as its own).
+   So, **the server will accept logs from any client having a valid certificate**.
+2. The client (`systemd-journal-upload`) validates that the receiver (`systemd-journal-remote`) uses a trusted certificate (like the server does) and it also checks that the hostname or IP of the URL specified in its configuration matches one of the names or IPs of the server it connects to. So, **the client does a validation that it connected to the right server**, using the URL hostname against the names and IPs of the server on its certificate.
+
+This means that if both certificates are issued by the same certificate authority, only the client can potentially reject the server.
+
+## Self-signed certificates
+
+To simplify the process of creating and managing self-signed certificates, we have created [this bash script](https://github.com/netdata/netdata/blob/master/collectors/systemd-journal.plugin/systemd-journal-self-signed-certs.sh).
+
+This also helps to automate the distribution of the certificates to your servers (it generates a new bash script for each of your servers, which includes everything required, including the certificates).
+
+We suggest keeping this script and all the involved certificates at the journals centralization server, in the directory `/etc/ssl/systemd-journal`, so that you can make future changes as required. If you prefer to keep the certificate authority and all the certificates at a more secure location, just use the script at that location.
+
+On the server that will issue the certificates (usually the centralization server), do the following:
+
+```bash
+# install systemd-journal-remote to add the users and groups required and openssl for the certs
+# change this according to your distro
+sudo apt-get install systemd-journal-remote openssl
+
+# download the script and make it executable
+curl >systemd-journal-self-signed-certs.sh "https://raw.githubusercontent.com/netdata/netdata/master/collectors/systemd-journal.plugin/systemd-journal-self-signed-certs.sh"
+chmod 750 systemd-journal-self-signed-certs.sh
+```
+
+To create certificates for your servers, run this:
+
+```bash
+sudo ./systemd-journal-self-signed-certs.sh "server1" "DNS:hostname1" "IP:10.0.0.1"
+```
+
+Where:
+
+ - `server1` is the canonical name of the server. On newer systemd versions, this name will be used by `systemd-journal-remote` and Netdata when you view the logs on the dashboard.
+ - `DNS:hostname1` is a DNS name that the server is reachable at. Add `"DNS:xyz"` multiple times to define multiple DNS names for the server.
+ - `IP:10.0.0.1` is an IP that the server is reachable at. Add `"IP:xyz"` multiple times to define multiple IPs for the server.
+
+Repeat this process to create the certificates for all your servers. You can add servers as required, at any time in the future.
+
+Existing certificates are never re-generated. Normally, certificates would need to be revoked and new ones issued, but the `systemd-journal-remote` tools do not support handling revocations. So, the only option you have to re-issue a certificate is to delete its files in `/etc/ssl/systemd-journal` and run the script again to create a new one.
+
+Once you run the script for each of your servers, you will find in `/etc/ssl/systemd-journal` shell scripts named `runme-on-XXX.sh`, where `XXX` are the canonical names of your servers.
+
+These `runme-on-XXX.sh` scripts include everything needed to install the certificates, fix their file permissions to be accessible by `systemd-journal-remote` and `systemd-journal-upload`, and update `/etc/systemd/journal-remote.conf` and `/etc/systemd/journal-upload.conf`.
+
+You can copy and paste (or `scp`) these scripts to your server and each of your clients:
+
+```bash
+sudo scp /etc/ssl/systemd-journal/runme-on-XXX.sh XXX:/tmp/
+```
+
+For the rest of this guide, we assume that you have copied the right `runme-on-XXX.sh` to the `/tmp` of all the servers for which you issued certificates.
+
+### Note about certificate file permissions
+
+It is worth noting that `systemd-journal` certificates need to be owned by `systemd-journal-remote:systemd-journal`.
+
+Both the user `systemd-journal-remote` and the group `systemd-journal` are automatically added by the `systemd-journal-remote` package. However, `systemd-journal-upload` (and `systemd-journal-gatewayd`, which is not used in this guide) use dynamic users. Thankfully, they are added to the `systemd-journal` group.
+
+So, having the certificates owned by `systemd-journal-remote:systemd-journal` satisfies both `systemd-journal-remote`, which is not in the `systemd-journal` group, and `systemd-journal-upload` (and `systemd-journal-gatewayd`), which use dynamic users.
+
+You don't need to do anything about it (the scripts take care of everything), but it is worth noting how this works.
+
+## Server configuration
+
+On the centralization server install `systemd-journal-remote`:
+
+```bash
+# change this according to your distro
+sudo apt-get install systemd-journal-remote
+```
+
+Make sure the journal transfer protocol is `https`:
+
+```bash
+sudo cp /lib/systemd/system/systemd-journal-remote.service /etc/systemd/system/
+
+# edit it to make sure it says:
+# --listen-https=-3
+# not:
+# --listen-http=-3
+sudo nano /etc/systemd/system/systemd-journal-remote.service
+
+# reload systemd
+sudo systemctl daemon-reload
+```
+
+Optionally, if you want to change the port (the default is `19532`), edit `systemd-journal-remote.socket`
+
+```bash
+# edit the socket file
+sudo systemctl edit systemd-journal-remote.socket
+```
+
+and add the following lines into the instructed place, and choose your desired port; save and exit.
+
+```bash
+[Socket]
+ListenStream=
+```
+
+Next, run the `runme-on-XXX.sh` script on the server:
+
+```bash
+# if you run the certificate authority on the server:
+sudo /etc/ssl/systemd-journal/runme-on-XXX.sh
+
+# if you run the certificate authority elsewhere,
+# assuming you have copied the runme-on-XXX.sh script (as described above):
+sudo bash /tmp/runme-on-XXX.sh
+```
+
+This will install the certificates in `/etc/ssl/systemd-journal`, set the right file permissions, and update `/etc/systemd/journal-remote.conf` and `/etc/systemd/journal-upload.conf` to use the right certificate files.
+
+Finally, enable it, so that it will start automatically upon receiving a connection:
+
+```bash
+# enable systemd-journal-remote
+sudo systemctl enable --now systemd-journal-remote.socket
+sudo systemctl enable systemd-journal-remote.service
+```
+
+`systemd-journal-remote` is now listening for incoming journals from remote hosts.
+
+> When done, remember to `rm /tmp/runme-on-*.sh` to make sure your certificates are secure.
+
+## Client configuration
+
+On the clients, install `systemd-journal-remote` (it includes `systemd-journal-upload`):
+
+```bash
+# change this according to your distro
+sudo apt-get install systemd-journal-remote
+```
+
+Edit `/etc/systemd/journal-upload.conf` and set the IP address and the port of the server, like so:
+
+```conf
+[Upload]
+URL=https://centralization.server.ip:19532
+```
+
+Make sure that `centralization.server.ip` is one of the `DNS:` or `IP:` parameters you defined when you created the centralization server certificates. If it is not, the client may refuse to connect.
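+
+If you are unsure which names and IPs the server certificate actually carries, you can list its Subject Alternative
+Names with `openssl`. The path below is an assumption based on where the `runme-on-XXX.sh` scripts install the
+certificates; the exact filename depends on the canonical name you used:
+
+```bash
+# "server1.pem" is a placeholder - use the certificate file created for your server
+sudo openssl x509 -in /etc/ssl/systemd-journal/server1.pem -noout -text | grep -A1 "Subject Alternative Name"
+```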
+ +Next, edit `systemd-journal-upload.service`, and add `Restart=always` to make sure the client will keep trying to push logs, even if the server is temporarily not there, like this: + +```bash +sudo systemctl edit systemd-journal-upload.service +``` + +At the top, add: + +```conf +[Service] +Restart=always +``` + +Enable `systemd-journal-upload.service`, like this: + +```bash +sudo systemctl enable systemd-journal-upload.service +``` + +Assuming that you have in `/tmp` the relevant `runme-on-XXX.sh` script for this client, run: + +```bash +sudo bash /tmp/runme-on-XXX.sh +``` + +This will install the certificates in `/etc/ssl/systemd-journal`, set the right file permissions, and update `/etc/systemd/journal-remote.conf` and `/etc/systemd/journal-upload.conf` to use the right certificate files. + +Finally, restart `systemd-journal-upload.service`: + +```bash +sudo systemctl restart systemd-journal-upload.service +``` + +The client should now be pushing logs to the central server. + +> When done, remember to `rm /tmp/runme-on-*.sh` to make sure your certificates are secure. + +Here it is in action, in Netdata: + +![2023-10-18 16-23-05](https://github.com/netdata/netdata/assets/2662304/83bec232-4770-455b-8f1c-46b5de5f93a2) + + +## Verify it works + +To verify the central server is receiving logs, run this on the central server: + +```bash +sudo ls -l /var/log/journal/remote/ +``` + +Depending on the `systemd` version you use, you should see new files from the clients' canonical names (as defined at their certificates) or IPs. + +Also, `systemctl status systemd-journal-remote` should show something like this: + +```bash +systemd-journal-remote.service - Journal Remote Sink Service + Loaded: loaded (/etc/systemd/system/systemd-journal-remote.service; indirect; preset: disabled) + Active: active (running) since Sun 2023-10-15 14:29:46 EEST; 2h 24min ago +TriggeredBy: ● systemd-journal-remote.socket + Docs: man:systemd-journal-remote(8) + man:journal-remote.conf(5) + Main PID: 2118153 (systemd-journal) + Status: "Processing requests..." + Tasks: 1 (limit: 154152) + Memory: 2.2M + CPU: 71ms + CGroup: /system.slice/systemd-journal-remote.service + └─2118153 /usr/lib/systemd/systemd-journal-remote --listen-https=-3 --output=/var/log/journal/remote/ +``` + +Note the `status: "Processing requests..."` and the PID under `CGroup`. + +On the client `systemctl status systemd-journal-upload` should show something like this: + +```bash +● systemd-journal-upload.service - Journal Remote Upload Service + Loaded: loaded (/lib/systemd/system/systemd-journal-upload.service; enabled; vendor preset: disabled) + Drop-In: /etc/systemd/system/systemd-journal-upload.service.d + └─override.conf + Active: active (running) since Sun 2023-10-15 10:39:04 UTC; 3h 17min ago + Docs: man:systemd-journal-upload(8) + Main PID: 4169 (systemd-journal) + Status: "Processing input..." + Tasks: 1 (limit: 13868) + Memory: 3.5M + CPU: 1.081s + CGroup: /system.slice/systemd-journal-upload.service + └─4169 /lib/systemd/systemd-journal-upload --save-state +``` + +Note the `Status: "Processing input..."` and the PID under `CGroup`. 
diff --git a/collectors/systemd-journal.plugin/systemd-internals.h b/collectors/systemd-journal.plugin/systemd-internals.h new file mode 100644 index 00000000000000..e1ae44d4f1b058 --- /dev/null +++ b/collectors/systemd-journal.plugin/systemd-internals.h @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_COLLECTORS_SYSTEMD_INTERNALS_H +#define NETDATA_COLLECTORS_SYSTEMD_INTERNALS_H + +#include "collectors/all.h" +#include "libnetdata/libnetdata.h" + +#include +#include +#include + +#define SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION "View, search and analyze systemd journal entries." +#define SYSTEMD_JOURNAL_FUNCTION_NAME "systemd-journal" +#define SYSTEMD_JOURNAL_DEFAULT_TIMEOUT 60 +#define SYSTEMD_JOURNAL_ENABLE_ESTIMATIONS_FILE_PERCENTAGE 0.01 +#define SYSTEMD_JOURNAL_EXECUTE_WATCHER_PENDING_EVERY_MS 250 +#define SYSTEMD_JOURNAL_ALL_FILES_SCAN_EVERY_USEC (5 * 60 * USEC_PER_SEC) + +#define SYSTEMD_UNITS_FUNCTION_DESCRIPTION "View the status of systemd units" +#define SYSTEMD_UNITS_FUNCTION_NAME "systemd-list-units" +#define SYSTEMD_UNITS_DEFAULT_TIMEOUT 30 + +extern __thread size_t fstat_thread_calls; +extern __thread size_t fstat_thread_cached_responses; +void fstat_cache_enable_on_thread(void); +void fstat_cache_disable_on_thread(void); + +extern netdata_mutex_t stdout_mutex; + +typedef enum { + ND_SD_JOURNAL_NO_FILE_MATCHED, + ND_SD_JOURNAL_FAILED_TO_OPEN, + ND_SD_JOURNAL_FAILED_TO_SEEK, + ND_SD_JOURNAL_TIMED_OUT, + ND_SD_JOURNAL_OK, + ND_SD_JOURNAL_NOT_MODIFIED, + ND_SD_JOURNAL_CANCELLED, +} ND_SD_JOURNAL_STATUS; + +typedef enum { + SDJF_NONE = 0, + SDJF_ALL = (1 << 0), + SDJF_LOCAL_ALL = (1 << 1), + SDJF_REMOTE_ALL = (1 << 2), + SDJF_LOCAL_SYSTEM = (1 << 3), + SDJF_LOCAL_USER = (1 << 4), + SDJF_LOCAL_NAMESPACE = (1 << 5), + SDJF_LOCAL_OTHER = (1 << 6), +} SD_JOURNAL_FILE_SOURCE_TYPE; + +struct journal_file { + const char *filename; + size_t filename_len; + STRING *source; + SD_JOURNAL_FILE_SOURCE_TYPE source_type; + usec_t file_last_modified_ut; + usec_t msg_first_ut; + usec_t msg_last_ut; + size_t size; + bool logged_failure; + bool logged_journalctl_failure; + usec_t max_journal_vs_realtime_delta_ut; + + usec_t last_scan_monotonic_ut; + usec_t last_scan_header_vs_last_modified_ut; + + uint64_t first_seqnum; + uint64_t last_seqnum; + sd_id128_t first_writer_id; + sd_id128_t last_writer_id; + + uint64_t messages_in_file; +}; + +#define SDJF_SOURCE_ALL_NAME "all" +#define SDJF_SOURCE_LOCAL_NAME "all-local-logs" +#define SDJF_SOURCE_LOCAL_SYSTEM_NAME "all-local-system-logs" +#define SDJF_SOURCE_LOCAL_USERS_NAME "all-local-user-logs" +#define SDJF_SOURCE_LOCAL_OTHER_NAME "all-uncategorized" +#define SDJF_SOURCE_NAMESPACES_NAME "all-local-namespaces" +#define SDJF_SOURCE_REMOTES_NAME "all-remote-systems" + +#define ND_SD_JOURNAL_OPEN_FLAGS (0) + +#define JOURNAL_VS_REALTIME_DELTA_DEFAULT_UT (5 * USEC_PER_SEC) // assume always 5 seconds latency +#define JOURNAL_VS_REALTIME_DELTA_MAX_UT (2 * 60 * USEC_PER_SEC) // up to 2 minutes latency + +extern DICTIONARY *journal_files_registry; +extern DICTIONARY *used_hashes_registry; +extern DICTIONARY *function_query_status_dict; +extern DICTIONARY *boot_ids_to_first_ut; + +int journal_file_dict_items_backward_compar(const void *a, const void *b); +int journal_file_dict_items_forward_compar(const void *a, const void *b); +void buffer_json_journal_versions(BUFFER *wb); +void available_journal_file_sources_to_json_array(BUFFER *wb); +bool journal_files_completed_once(void); +void journal_files_registry_update(void); +void 
journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, const char *dirname, int depth); + +FACET_ROW_SEVERITY syslog_priority_to_facet_severity(FACETS *facets, FACET_ROW *row, void *data); + +void netdata_systemd_journal_dynamic_row_id(FACETS *facets, BUFFER *json_array, FACET_ROW_KEY_VALUE *rkv, FACET_ROW *row, void *data); +void netdata_systemd_journal_transform_priority(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data); +void netdata_systemd_journal_transform_syslog_facility(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data); +void netdata_systemd_journal_transform_errno(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data); +void netdata_systemd_journal_transform_boot_id(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data); +void netdata_systemd_journal_transform_uid(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data); +void netdata_systemd_journal_transform_gid(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data); +void netdata_systemd_journal_transform_cap_effective(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data); +void netdata_systemd_journal_transform_timestamp_usec(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data); + +usec_t journal_file_update_annotation_boot_id(sd_journal *j, struct journal_file *jf, const char *boot_id); + +#define MAX_JOURNAL_DIRECTORIES 100 +struct journal_directory { + char *path; +}; +extern struct journal_directory journal_directories[MAX_JOURNAL_DIRECTORIES]; + +void journal_init_files_and_directories(void); +void journal_init_query_status(void); +void function_systemd_journal(const char *transaction, char *function, int timeout, bool *cancelled); +void journal_file_update_header(const char *filename, struct journal_file *jf); + +void netdata_systemd_journal_message_ids_init(void); +void netdata_systemd_journal_transform_message_id(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused); + +void *journal_watcher_main(void *arg); + +#ifdef ENABLE_SYSTEMD_DBUS +void function_systemd_units(const char *transaction, char *function, int timeout, bool *cancelled); +#endif + +static inline void send_newline_and_flush(void) { + netdata_mutex_lock(&stdout_mutex); + fprintf(stdout, "\n"); + fflush(stdout); + netdata_mutex_unlock(&stdout_mutex); +} + +static inline bool parse_journal_field(const char *data, size_t data_length, const char **key, size_t *key_length, const char **value, size_t *value_length) { + const char *k = data; + const char *equal = strchr(k, '='); + if(unlikely(!equal)) + return false; + + size_t kl = equal - k; + + const char *v = ++equal; + size_t vl = data_length - kl - 1; + + *key = k; + *key_length = kl; + *value = v; + *value_length = vl; + + return true; +} + +#endif //NETDATA_COLLECTORS_SYSTEMD_INTERNALS_H diff --git a/collectors/systemd-journal.plugin/systemd-journal-annotations.c b/collectors/systemd-journal.plugin/systemd-journal-annotations.c new file mode 100644 index 00000000000000..b12356110c90b9 --- /dev/null +++ b/collectors/systemd-journal.plugin/systemd-journal-annotations.c @@ -0,0 +1,719 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "systemd-internals.h" + +const char *errno_map[] = { + [1] = "1 (EPERM)", // "Operation not permitted", + [2] = "2 (ENOENT)", // "No such file or directory", + [3] = "3 (ESRCH)", // "No such process", + [4] = "4 
(EINTR)", // "Interrupted system call", + [5] = "5 (EIO)", // "Input/output error", + [6] = "6 (ENXIO)", // "No such device or address", + [7] = "7 (E2BIG)", // "Argument list too long", + [8] = "8 (ENOEXEC)", // "Exec format error", + [9] = "9 (EBADF)", // "Bad file descriptor", + [10] = "10 (ECHILD)", // "No child processes", + [11] = "11 (EAGAIN)", // "Resource temporarily unavailable", + [12] = "12 (ENOMEM)", // "Cannot allocate memory", + [13] = "13 (EACCES)", // "Permission denied", + [14] = "14 (EFAULT)", // "Bad address", + [15] = "15 (ENOTBLK)", // "Block device required", + [16] = "16 (EBUSY)", // "Device or resource busy", + [17] = "17 (EEXIST)", // "File exists", + [18] = "18 (EXDEV)", // "Invalid cross-device link", + [19] = "19 (ENODEV)", // "No such device", + [20] = "20 (ENOTDIR)", // "Not a directory", + [21] = "21 (EISDIR)", // "Is a directory", + [22] = "22 (EINVAL)", // "Invalid argument", + [23] = "23 (ENFILE)", // "Too many open files in system", + [24] = "24 (EMFILE)", // "Too many open files", + [25] = "25 (ENOTTY)", // "Inappropriate ioctl for device", + [26] = "26 (ETXTBSY)", // "Text file busy", + [27] = "27 (EFBIG)", // "File too large", + [28] = "28 (ENOSPC)", // "No space left on device", + [29] = "29 (ESPIPE)", // "Illegal seek", + [30] = "30 (EROFS)", // "Read-only file system", + [31] = "31 (EMLINK)", // "Too many links", + [32] = "32 (EPIPE)", // "Broken pipe", + [33] = "33 (EDOM)", // "Numerical argument out of domain", + [34] = "34 (ERANGE)", // "Numerical result out of range", + [35] = "35 (EDEADLK)", // "Resource deadlock avoided", + [36] = "36 (ENAMETOOLONG)", // "File name too long", + [37] = "37 (ENOLCK)", // "No locks available", + [38] = "38 (ENOSYS)", // "Function not implemented", + [39] = "39 (ENOTEMPTY)", // "Directory not empty", + [40] = "40 (ELOOP)", // "Too many levels of symbolic links", + [42] = "42 (ENOMSG)", // "No message of desired type", + [43] = "43 (EIDRM)", // "Identifier removed", + [44] = "44 (ECHRNG)", // "Channel number out of range", + [45] = "45 (EL2NSYNC)", // "Level 2 not synchronized", + [46] = "46 (EL3HLT)", // "Level 3 halted", + [47] = "47 (EL3RST)", // "Level 3 reset", + [48] = "48 (ELNRNG)", // "Link number out of range", + [49] = "49 (EUNATCH)", // "Protocol driver not attached", + [50] = "50 (ENOCSI)", // "No CSI structure available", + [51] = "51 (EL2HLT)", // "Level 2 halted", + [52] = "52 (EBADE)", // "Invalid exchange", + [53] = "53 (EBADR)", // "Invalid request descriptor", + [54] = "54 (EXFULL)", // "Exchange full", + [55] = "55 (ENOANO)", // "No anode", + [56] = "56 (EBADRQC)", // "Invalid request code", + [57] = "57 (EBADSLT)", // "Invalid slot", + [59] = "59 (EBFONT)", // "Bad font file format", + [60] = "60 (ENOSTR)", // "Device not a stream", + [61] = "61 (ENODATA)", // "No data available", + [62] = "62 (ETIME)", // "Timer expired", + [63] = "63 (ENOSR)", // "Out of streams resources", + [64] = "64 (ENONET)", // "Machine is not on the network", + [65] = "65 (ENOPKG)", // "Package not installed", + [66] = "66 (EREMOTE)", // "Object is remote", + [67] = "67 (ENOLINK)", // "Link has been severed", + [68] = "68 (EADV)", // "Advertise error", + [69] = "69 (ESRMNT)", // "Srmount error", + [70] = "70 (ECOMM)", // "Communication error on send", + [71] = "71 (EPROTO)", // "Protocol error", + [72] = "72 (EMULTIHOP)", // "Multihop attempted", + [73] = "73 (EDOTDOT)", // "RFS specific error", + [74] = "74 (EBADMSG)", // "Bad message", + [75] = "75 (EOVERFLOW)", // "Value too large for defined data type", + [76] = 
"76 (ENOTUNIQ)", // "Name not unique on network", + [77] = "77 (EBADFD)", // "File descriptor in bad state", + [78] = "78 (EREMCHG)", // "Remote address changed", + [79] = "79 (ELIBACC)", // "Can not access a needed shared library", + [80] = "80 (ELIBBAD)", // "Accessing a corrupted shared library", + [81] = "81 (ELIBSCN)", // ".lib section in a.out corrupted", + [82] = "82 (ELIBMAX)", // "Attempting to link in too many shared libraries", + [83] = "83 (ELIBEXEC)", // "Cannot exec a shared library directly", + [84] = "84 (EILSEQ)", // "Invalid or incomplete multibyte or wide character", + [85] = "85 (ERESTART)", // "Interrupted system call should be restarted", + [86] = "86 (ESTRPIPE)", // "Streams pipe error", + [87] = "87 (EUSERS)", // "Too many users", + [88] = "88 (ENOTSOCK)", // "Socket operation on non-socket", + [89] = "89 (EDESTADDRREQ)", // "Destination address required", + [90] = "90 (EMSGSIZE)", // "Message too long", + [91] = "91 (EPROTOTYPE)", // "Protocol wrong type for socket", + [92] = "92 (ENOPROTOOPT)", // "Protocol not available", + [93] = "93 (EPROTONOSUPPORT)", // "Protocol not supported", + [94] = "94 (ESOCKTNOSUPPORT)", // "Socket type not supported", + [95] = "95 (ENOTSUP)", // "Operation not supported", + [96] = "96 (EPFNOSUPPORT)", // "Protocol family not supported", + [97] = "97 (EAFNOSUPPORT)", // "Address family not supported by protocol", + [98] = "98 (EADDRINUSE)", // "Address already in use", + [99] = "99 (EADDRNOTAVAIL)", // "Cannot assign requested address", + [100] = "100 (ENETDOWN)", // "Network is down", + [101] = "101 (ENETUNREACH)", // "Network is unreachable", + [102] = "102 (ENETRESET)", // "Network dropped connection on reset", + [103] = "103 (ECONNABORTED)", // "Software caused connection abort", + [104] = "104 (ECONNRESET)", // "Connection reset by peer", + [105] = "105 (ENOBUFS)", // "No buffer space available", + [106] = "106 (EISCONN)", // "Transport endpoint is already connected", + [107] = "107 (ENOTCONN)", // "Transport endpoint is not connected", + [108] = "108 (ESHUTDOWN)", // "Cannot send after transport endpoint shutdown", + [109] = "109 (ETOOMANYREFS)", // "Too many references: cannot splice", + [110] = "110 (ETIMEDOUT)", // "Connection timed out", + [111] = "111 (ECONNREFUSED)", // "Connection refused", + [112] = "112 (EHOSTDOWN)", // "Host is down", + [113] = "113 (EHOSTUNREACH)", // "No route to host", + [114] = "114 (EALREADY)", // "Operation already in progress", + [115] = "115 (EINPROGRESS)", // "Operation now in progress", + [116] = "116 (ESTALE)", // "Stale file handle", + [117] = "117 (EUCLEAN)", // "Structure needs cleaning", + [118] = "118 (ENOTNAM)", // "Not a XENIX named type file", + [119] = "119 (ENAVAIL)", // "No XENIX semaphores available", + [120] = "120 (EISNAM)", // "Is a named type file", + [121] = "121 (EREMOTEIO)", // "Remote I/O error", + [122] = "122 (EDQUOT)", // "Disk quota exceeded", + [123] = "123 (ENOMEDIUM)", // "No medium found", + [124] = "124 (EMEDIUMTYPE)", // "Wrong medium type", + [125] = "125 (ECANCELED)", // "Operation canceled", + [126] = "126 (ENOKEY)", // "Required key not available", + [127] = "127 (EKEYEXPIRED)", // "Key has expired", + [128] = "128 (EKEYREVOKED)", // "Key has been revoked", + [129] = "129 (EKEYREJECTED)", // "Key was rejected by service", + [130] = "130 (EOWNERDEAD)", // "Owner died", + [131] = "131 (ENOTRECOVERABLE)", // "State not recoverable", + [132] = "132 (ERFKILL)", // "Operation not possible due to RF-kill", + [133] = "133 (EHWPOISON)", // "Memory page has hardware 
error", +}; + +const char *linux_capabilities[] = { + [CAP_CHOWN] = "CHOWN", + [CAP_DAC_OVERRIDE] = "DAC_OVERRIDE", + [CAP_DAC_READ_SEARCH] = "DAC_READ_SEARCH", + [CAP_FOWNER] = "FOWNER", + [CAP_FSETID] = "FSETID", + [CAP_KILL] = "KILL", + [CAP_SETGID] = "SETGID", + [CAP_SETUID] = "SETUID", + [CAP_SETPCAP] = "SETPCAP", + [CAP_LINUX_IMMUTABLE] = "LINUX_IMMUTABLE", + [CAP_NET_BIND_SERVICE] = "NET_BIND_SERVICE", + [CAP_NET_BROADCAST] = "NET_BROADCAST", + [CAP_NET_ADMIN] = "NET_ADMIN", + [CAP_NET_RAW] = "NET_RAW", + [CAP_IPC_LOCK] = "IPC_LOCK", + [CAP_IPC_OWNER] = "IPC_OWNER", + [CAP_SYS_MODULE] = "SYS_MODULE", + [CAP_SYS_RAWIO] = "SYS_RAWIO", + [CAP_SYS_CHROOT] = "SYS_CHROOT", + [CAP_SYS_PTRACE] = "SYS_PTRACE", + [CAP_SYS_PACCT] = "SYS_PACCT", + [CAP_SYS_ADMIN] = "SYS_ADMIN", + [CAP_SYS_BOOT] = "SYS_BOOT", + [CAP_SYS_NICE] = "SYS_NICE", + [CAP_SYS_RESOURCE] = "SYS_RESOURCE", + [CAP_SYS_TIME] = "SYS_TIME", + [CAP_SYS_TTY_CONFIG] = "SYS_TTY_CONFIG", + [CAP_MKNOD] = "MKNOD", + [CAP_LEASE] = "LEASE", + [CAP_AUDIT_WRITE] = "AUDIT_WRITE", + [CAP_AUDIT_CONTROL] = "AUDIT_CONTROL", + [CAP_SETFCAP] = "SETFCAP", + [CAP_MAC_OVERRIDE] = "MAC_OVERRIDE", + [CAP_MAC_ADMIN] = "MAC_ADMIN", + [CAP_SYSLOG] = "SYSLOG", + [CAP_WAKE_ALARM] = "WAKE_ALARM", + [CAP_BLOCK_SUSPEND] = "BLOCK_SUSPEND", + [37 /*CAP_AUDIT_READ*/] = "AUDIT_READ", + [38 /*CAP_PERFMON*/] = "PERFMON", + [39 /*CAP_BPF*/] = "BPF", + [40 /* CAP_CHECKPOINT_RESTORE */] = "CHECKPOINT_RESTORE", +}; + +static const char *syslog_facility_to_name(int facility) { + switch (facility) { + case LOG_FAC(LOG_KERN): return "kern"; + case LOG_FAC(LOG_USER): return "user"; + case LOG_FAC(LOG_MAIL): return "mail"; + case LOG_FAC(LOG_DAEMON): return "daemon"; + case LOG_FAC(LOG_AUTH): return "auth"; + case LOG_FAC(LOG_SYSLOG): return "syslog"; + case LOG_FAC(LOG_LPR): return "lpr"; + case LOG_FAC(LOG_NEWS): return "news"; + case LOG_FAC(LOG_UUCP): return "uucp"; + case LOG_FAC(LOG_CRON): return "cron"; + case LOG_FAC(LOG_AUTHPRIV): return "authpriv"; + case LOG_FAC(LOG_FTP): return "ftp"; + case LOG_FAC(LOG_LOCAL0): return "local0"; + case LOG_FAC(LOG_LOCAL1): return "local1"; + case LOG_FAC(LOG_LOCAL2): return "local2"; + case LOG_FAC(LOG_LOCAL3): return "local3"; + case LOG_FAC(LOG_LOCAL4): return "local4"; + case LOG_FAC(LOG_LOCAL5): return "local5"; + case LOG_FAC(LOG_LOCAL6): return "local6"; + case LOG_FAC(LOG_LOCAL7): return "local7"; + default: return NULL; + } +} + +static const char *syslog_priority_to_name(int priority) { + switch (priority) { + case LOG_ALERT: return "alert"; + case LOG_CRIT: return "critical"; + case LOG_DEBUG: return "debug"; + case LOG_EMERG: return "panic"; + case LOG_ERR: return "error"; + case LOG_INFO: return "info"; + case LOG_NOTICE: return "notice"; + case LOG_WARNING: return "warning"; + default: return NULL; + } +} + +FACET_ROW_SEVERITY syslog_priority_to_facet_severity(FACETS *facets __maybe_unused, FACET_ROW *row, void *data __maybe_unused) { + // same to + // https://github.com/systemd/systemd/blob/aab9e4b2b86905a15944a1ac81e471b5b7075932/src/basic/terminal-util.c#L1501 + // function get_log_colors() + + FACET_ROW_KEY_VALUE *priority_rkv = dictionary_get(row->dict, "PRIORITY"); + if(!priority_rkv || priority_rkv->empty) + return FACET_ROW_SEVERITY_NORMAL; + + int priority = str2i(buffer_tostring(priority_rkv->wb)); + + if(priority <= LOG_ERR) + return FACET_ROW_SEVERITY_CRITICAL; + + else if (priority <= LOG_WARNING) + return FACET_ROW_SEVERITY_WARNING; + + else if(priority <= LOG_NOTICE) + return 
FACET_ROW_SEVERITY_NOTICE; + + else if(priority >= LOG_DEBUG) + return FACET_ROW_SEVERITY_DEBUG; + + return FACET_ROW_SEVERITY_NORMAL; +} + +static char *uid_to_username(uid_t uid, char *buffer, size_t buffer_size) { + static __thread char tmp[1024 + 1]; + struct passwd pw, *result = NULL; + + if (getpwuid_r(uid, &pw, tmp, sizeof(tmp), &result) != 0 || !result || !pw.pw_name || !(*pw.pw_name)) + snprintfz(buffer, buffer_size - 1, "%u", uid); + else + snprintfz(buffer, buffer_size - 1, "%u (%s)", uid, pw.pw_name); + + return buffer; +} + +static char *gid_to_groupname(gid_t gid, char* buffer, size_t buffer_size) { + static __thread char tmp[1024]; + struct group grp, *result = NULL; + + if (getgrgid_r(gid, &grp, tmp, sizeof(tmp), &result) != 0 || !result || !grp.gr_name || !(*grp.gr_name)) + snprintfz(buffer, buffer_size - 1, "%u", gid); + else + snprintfz(buffer, buffer_size - 1, "%u (%s)", gid, grp.gr_name); + + return buffer; +} + +void netdata_systemd_journal_transform_syslog_facility(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) { + const char *v = buffer_tostring(wb); + if(*v && isdigit(*v)) { + int facility = str2i(buffer_tostring(wb)); + const char *name = syslog_facility_to_name(facility); + if (name) { + buffer_flush(wb); + buffer_strcat(wb, name); + } + } +} + +void netdata_systemd_journal_transform_priority(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) { + if(scope == FACETS_TRANSFORM_FACET_SORT) + return; + + const char *v = buffer_tostring(wb); + if(*v && isdigit(*v)) { + int priority = str2i(buffer_tostring(wb)); + const char *name = syslog_priority_to_name(priority); + if (name) { + buffer_flush(wb); + buffer_strcat(wb, name); + } + } +} + +void netdata_systemd_journal_transform_errno(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) { + if(scope == FACETS_TRANSFORM_FACET_SORT) + return; + + const char *v = buffer_tostring(wb); + if(*v && isdigit(*v)) { + unsigned err_no = str2u(buffer_tostring(wb)); + if(err_no > 0 && err_no < sizeof(errno_map) / sizeof(*errno_map)) { + const char *name = errno_map[err_no]; + if(name) { + buffer_flush(wb); + buffer_strcat(wb, name); + } + } + } +} + +// ---------------------------------------------------------------------------- +// UID and GID transformation + +#define UID_GID_HASHTABLE_SIZE 10000 + +struct word_t2str_hashtable_entry { + struct word_t2str_hashtable_entry *next; + Word_t hash; + size_t len; + char str[]; +}; + +struct word_t2str_hashtable { + SPINLOCK spinlock; + size_t size; + struct word_t2str_hashtable_entry *hashtable[UID_GID_HASHTABLE_SIZE]; +}; + +struct word_t2str_hashtable uid_hashtable = { + .size = UID_GID_HASHTABLE_SIZE, +}; + +struct word_t2str_hashtable gid_hashtable = { + .size = UID_GID_HASHTABLE_SIZE, +}; + +struct word_t2str_hashtable_entry **word_t2str_hashtable_slot(struct word_t2str_hashtable *ht, Word_t hash) { + size_t slot = hash % ht->size; + struct word_t2str_hashtable_entry **e = &ht->hashtable[slot]; + + while(*e && (*e)->hash != hash) + e = &((*e)->next); + + return e; +} + +const char *uid_to_username_cached(uid_t uid, size_t *length) { + spinlock_lock(&uid_hashtable.spinlock); + + struct word_t2str_hashtable_entry **e = word_t2str_hashtable_slot(&uid_hashtable, uid); + if(!(*e)) { + static __thread char buf[1024]; + const char *name = uid_to_username(uid, buf, sizeof(buf)); + 
size_t size = strlen(name) + 1; + + *e = callocz(1, sizeof(struct word_t2str_hashtable_entry) + size); + (*e)->len = size - 1; + (*e)->hash = uid; + memcpy((*e)->str, name, size); + } + + spinlock_unlock(&uid_hashtable.spinlock); + + *length = (*e)->len; + return (*e)->str; +} + +const char *gid_to_groupname_cached(gid_t gid, size_t *length) { + spinlock_lock(&gid_hashtable.spinlock); + + struct word_t2str_hashtable_entry **e = word_t2str_hashtable_slot(&gid_hashtable, gid); + if(!(*e)) { + static __thread char buf[1024]; + const char *name = gid_to_groupname(gid, buf, sizeof(buf)); + size_t size = strlen(name) + 1; + + *e = callocz(1, sizeof(struct word_t2str_hashtable_entry) + size); + (*e)->len = size - 1; + (*e)->hash = gid; + memcpy((*e)->str, name, size); + } + + spinlock_unlock(&gid_hashtable.spinlock); + + *length = (*e)->len; + return (*e)->str; +} + +DICTIONARY *boot_ids_to_first_ut = NULL; + +void netdata_systemd_journal_transform_boot_id(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) { + const char *boot_id = buffer_tostring(wb); + if(*boot_id && isxdigit(*boot_id)) { + usec_t ut = UINT64_MAX; + usec_t *p_ut = dictionary_get(boot_ids_to_first_ut, boot_id); + if(!p_ut) { +#ifndef HAVE_SD_JOURNAL_RESTART_FIELDS + struct journal_file *jf; + dfe_start_read(journal_files_registry, jf) { + const char *files[2] = { + [0] = jf_dfe.name, + [1] = NULL, + }; + + sd_journal *j = NULL; + int r = sd_journal_open_files(&j, files, ND_SD_JOURNAL_OPEN_FLAGS); + if(r < 0 || !j) { + internal_error(true, "JOURNAL: while looking for the first timestamp of boot_id '%s', " + "sd_journal_open_files('%s') returned %d", + boot_id, jf_dfe.name, r); + continue; + } + + ut = journal_file_update_annotation_boot_id(j, jf, boot_id); + sd_journal_close(j); + } + dfe_done(jf); +#endif + } + else + ut = *p_ut; + + if(ut && ut != UINT64_MAX) { + char buffer[RFC3339_MAX_LENGTH]; + rfc3339_datetime_ut(buffer, sizeof(buffer), ut, 0, true); + + switch(scope) { + default: + case FACETS_TRANSFORM_DATA: + case FACETS_TRANSFORM_VALUE: + buffer_sprintf(wb, " (%s) ", buffer); + break; + + case FACETS_TRANSFORM_FACET: + case FACETS_TRANSFORM_FACET_SORT: + case FACETS_TRANSFORM_HISTOGRAM: + buffer_flush(wb); + buffer_sprintf(wb, "%s", buffer); + break; + } + } + } +} + +void netdata_systemd_journal_transform_uid(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) { + if(scope == FACETS_TRANSFORM_FACET_SORT) + return; + + const char *v = buffer_tostring(wb); + if(*v && isdigit(*v)) { + uid_t uid = str2i(buffer_tostring(wb)); + size_t len; + const char *name = uid_to_username_cached(uid, &len); + buffer_contents_replace(wb, name, len); + } +} + +void netdata_systemd_journal_transform_gid(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) { + if(scope == FACETS_TRANSFORM_FACET_SORT) + return; + + const char *v = buffer_tostring(wb); + if(*v && isdigit(*v)) { + gid_t gid = str2i(buffer_tostring(wb)); + size_t len; + const char *name = gid_to_groupname_cached(gid, &len); + buffer_contents_replace(wb, name, len); + } +} + +void netdata_systemd_journal_transform_cap_effective(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) { + if(scope == FACETS_TRANSFORM_FACET_SORT) + return; + + const char *v = buffer_tostring(wb); + if(*v && isdigit(*v)) { + uint64_t 
cap = strtoul(buffer_tostring(wb), NULL, 16); + if(cap) { + buffer_fast_strcat(wb, " (", 2); + for (size_t i = 0, added = 0; i < sizeof(linux_capabilities) / sizeof(linux_capabilities[0]); i++) { + if (linux_capabilities[i] && (cap & (1ULL << i))) { + + if (added) + buffer_fast_strcat(wb, " | ", 3); + + buffer_strcat(wb, linux_capabilities[i]); + added++; + } + } + buffer_fast_strcat(wb, ")", 1); + } + } +} + +void netdata_systemd_journal_transform_timestamp_usec(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) { + if(scope == FACETS_TRANSFORM_FACET_SORT) + return; + + const char *v = buffer_tostring(wb); + if(*v && isdigit(*v)) { + uint64_t ut = str2ull(buffer_tostring(wb), NULL); + if(ut) { + char buffer[RFC3339_MAX_LENGTH]; + rfc3339_datetime_ut(buffer, sizeof(buffer), ut, 6, true); + buffer_sprintf(wb, " (%s)", buffer); + } + } +} + +// ---------------------------------------------------------------------------- + +void netdata_systemd_journal_dynamic_row_id(FACETS *facets __maybe_unused, BUFFER *json_array, FACET_ROW_KEY_VALUE *rkv, FACET_ROW *row, void *data __maybe_unused) { + FACET_ROW_KEY_VALUE *pid_rkv = dictionary_get(row->dict, "_PID"); + const char *pid = pid_rkv ? buffer_tostring(pid_rkv->wb) : FACET_VALUE_UNSET; + + const char *identifier = NULL; + FACET_ROW_KEY_VALUE *container_name_rkv = dictionary_get(row->dict, "CONTAINER_NAME"); + if(container_name_rkv && !container_name_rkv->empty) + identifier = buffer_tostring(container_name_rkv->wb); + + if(!identifier) { + FACET_ROW_KEY_VALUE *syslog_identifier_rkv = dictionary_get(row->dict, "SYSLOG_IDENTIFIER"); + if(syslog_identifier_rkv && !syslog_identifier_rkv->empty) + identifier = buffer_tostring(syslog_identifier_rkv->wb); + + if(!identifier) { + FACET_ROW_KEY_VALUE *comm_rkv = dictionary_get(row->dict, "_COMM"); + if(comm_rkv && !comm_rkv->empty) + identifier = buffer_tostring(comm_rkv->wb); + } + } + + buffer_flush(rkv->wb); + + if(!identifier || !*identifier) + buffer_strcat(rkv->wb, FACET_VALUE_UNSET); + else if(!pid || !*pid) + buffer_sprintf(rkv->wb, "%s", identifier); + else + buffer_sprintf(rkv->wb, "%s[%s]", identifier, pid); + + buffer_json_add_array_item_string(json_array, buffer_tostring(rkv->wb)); +} + + +// ---------------------------------------------------------------------------- + +struct message_id_info { + const char *msg; +}; + +static DICTIONARY *known_journal_messages_ids = NULL; + +void netdata_systemd_journal_message_ids_init(void) { + known_journal_messages_ids = dictionary_create(DICT_OPTION_DONT_OVERWRITE_VALUE); + + struct message_id_info i = { 0 }; + i.msg = "Journal start"; dictionary_set(known_journal_messages_ids, "f77379a8490b408bbe5f6940505a777b", &i, sizeof(i)); + i.msg = "Journal stop"; dictionary_set(known_journal_messages_ids, "d93fb3c9c24d451a97cea615ce59c00b", &i, sizeof(i)); + i.msg = "Journal dropped"; dictionary_set(known_journal_messages_ids, "a596d6fe7bfa4994828e72309e95d61e", &i, sizeof(i)); + i.msg = "Journal missed"; dictionary_set(known_journal_messages_ids, "e9bf28e6e834481bb6f48f548ad13606", &i, sizeof(i)); + i.msg = "Journal usage"; dictionary_set(known_journal_messages_ids, "ec387f577b844b8fa948f33cad9a75e6", &i, sizeof(i)); + i.msg = "Coredump"; dictionary_set(known_journal_messages_ids, "fc2e22bc6ee647b6b90729ab34a250b1", &i, sizeof(i)); + i.msg = "Truncated core"; dictionary_set(known_journal_messages_ids, "5aadd8e954dc4b1a8c954d63fd9e1137", &i, sizeof(i)); + i.msg = "Backtrace"; 
dictionary_set(known_journal_messages_ids, "1f4e0a44a88649939aaea34fc6da8c95", &i, sizeof(i)); + i.msg = "Session start"; dictionary_set(known_journal_messages_ids, "8d45620c1a4348dbb17410da57c60c66", &i, sizeof(i)); + i.msg = "Session stop"; dictionary_set(known_journal_messages_ids, "3354939424b4456d9802ca8333ed424a", &i, sizeof(i)); + i.msg = "Seat start"; dictionary_set(known_journal_messages_ids, "fcbefc5da23d428093f97c82a9290f7b", &i, sizeof(i)); + i.msg = "Seat stop"; dictionary_set(known_journal_messages_ids, "e7852bfe46784ed0accde04bc864c2d5", &i, sizeof(i)); + i.msg = "Machine start"; dictionary_set(known_journal_messages_ids, "24d8d4452573402496068381a6312df2", &i, sizeof(i)); + i.msg = "Machine stop"; dictionary_set(known_journal_messages_ids, "58432bd3bace477cb514b56381b8a758", &i, sizeof(i)); + i.msg = "Time change"; dictionary_set(known_journal_messages_ids, "c7a787079b354eaaa9e77b371893cd27", &i, sizeof(i)); + i.msg = "Timezone change"; dictionary_set(known_journal_messages_ids, "45f82f4aef7a4bbf942ce861d1f20990", &i, sizeof(i)); + i.msg = "Tainted"; dictionary_set(known_journal_messages_ids, "50876a9db00f4c40bde1a2ad381c3a1b", &i, sizeof(i)); + i.msg = "Startup finished"; dictionary_set(known_journal_messages_ids, "b07a249cd024414a82dd00cd181378ff", &i, sizeof(i)); + i.msg = "User startup finished"; dictionary_set(known_journal_messages_ids, "eed00a68ffd84e31882105fd973abdd1", &i, sizeof(i)); + i.msg = "Sleep start"; dictionary_set(known_journal_messages_ids, "6bbd95ee977941e497c48be27c254128", &i, sizeof(i)); + i.msg = "Sleep stop"; dictionary_set(known_journal_messages_ids, "8811e6df2a8e40f58a94cea26f8ebf14", &i, sizeof(i)); + i.msg = "Shutdown"; dictionary_set(known_journal_messages_ids, "98268866d1d54a499c4e98921d93bc40", &i, sizeof(i)); + i.msg = "Factory reset"; dictionary_set(known_journal_messages_ids, "c14aaf76ec284a5fa1f105f88dfb061c", &i, sizeof(i)); + i.msg = "Crash exit"; dictionary_set(known_journal_messages_ids, "d9ec5e95e4b646aaaea2fd05214edbda", &i, sizeof(i)); + i.msg = "Crash failed"; dictionary_set(known_journal_messages_ids, "3ed0163e868a4417ab8b9e210407a96c", &i, sizeof(i)); + i.msg = "Crash freeze"; dictionary_set(known_journal_messages_ids, "645c735537634ae0a32b15a7c6cba7d4", &i, sizeof(i)); + i.msg = "Crash no coredump"; dictionary_set(known_journal_messages_ids, "5addb3a06a734d3396b794bf98fb2d01", &i, sizeof(i)); + i.msg = "Crash no fork"; dictionary_set(known_journal_messages_ids, "5c9e98de4ab94c6a9d04d0ad793bd903", &i, sizeof(i)); + i.msg = "Crash unknown signal"; dictionary_set(known_journal_messages_ids, "5e6f1f5e4db64a0eaee3368249d20b94", &i, sizeof(i)); + i.msg = "Crash systemd signal"; dictionary_set(known_journal_messages_ids, "83f84b35ee264f74a3896a9717af34cb", &i, sizeof(i)); + i.msg = "Crash process signal"; dictionary_set(known_journal_messages_ids, "3a73a98baf5b4b199929e3226c0be783", &i, sizeof(i)); + i.msg = "Crash waitpid failed"; dictionary_set(known_journal_messages_ids, "2ed18d4f78ca47f0a9bc25271c26adb4", &i, sizeof(i)); + i.msg = "Crash coredump failed"; dictionary_set(known_journal_messages_ids, "56b1cd96f24246c5b607666fda952356", &i, sizeof(i)); + i.msg = "Crash coredump pid"; dictionary_set(known_journal_messages_ids, "4ac7566d4d7548f4981f629a28f0f829", &i, sizeof(i)); + i.msg = "Crash shell fork failed"; dictionary_set(known_journal_messages_ids, "38e8b1e039ad469291b18b44c553a5b7", &i, sizeof(i)); + i.msg = "Crash execle failed"; dictionary_set(known_journal_messages_ids, "872729b47dbe473eb768ccecd477beda", &i, sizeof(i)); + 
i.msg = "Selinux failed"; dictionary_set(known_journal_messages_ids, "658a67adc1c940b3b3316e7e8628834a", &i, sizeof(i)); + i.msg = "Battery low warning"; dictionary_set(known_journal_messages_ids, "e6f456bd92004d9580160b2207555186", &i, sizeof(i)); + i.msg = "Battery low poweroff"; dictionary_set(known_journal_messages_ids, "267437d33fdd41099ad76221cc24a335", &i, sizeof(i)); + i.msg = "Core mainloop failed"; dictionary_set(known_journal_messages_ids, "79e05b67bc4545d1922fe47107ee60c5", &i, sizeof(i)); + i.msg = "Core no xdgdir path"; dictionary_set(known_journal_messages_ids, "dbb136b10ef4457ba47a795d62f108c9", &i, sizeof(i)); + i.msg = "Core capability bounding user"; dictionary_set(known_journal_messages_ids, "ed158c2df8884fa584eead2d902c1032", &i, sizeof(i)); + i.msg = "Core capability bounding"; dictionary_set(known_journal_messages_ids, "42695b500df048298bee37159caa9f2e", &i, sizeof(i)); + i.msg = "Core disable privileges"; dictionary_set(known_journal_messages_ids, "bfc2430724ab44499735b4f94cca9295", &i, sizeof(i)); + i.msg = "Core start target failed"; dictionary_set(known_journal_messages_ids, "59288af523be43a28d494e41e26e4510", &i, sizeof(i)); + i.msg = "Core isolate target failed"; dictionary_set(known_journal_messages_ids, "689b4fcc97b4486ea5da92db69c9e314", &i, sizeof(i)); + i.msg = "Core fd set failed"; dictionary_set(known_journal_messages_ids, "5ed836f1766f4a8a9fc5da45aae23b29", &i, sizeof(i)); + i.msg = "Core pid1 environment"; dictionary_set(known_journal_messages_ids, "6a40fbfbd2ba4b8db02fb40c9cd090d7", &i, sizeof(i)); + i.msg = "Core manager allocate"; dictionary_set(known_journal_messages_ids, "0e54470984ac419689743d957a119e2e", &i, sizeof(i)); + i.msg = "Smack failed write"; dictionary_set(known_journal_messages_ids, "d67fa9f847aa4b048a2ae33535331adb", &i, sizeof(i)); + i.msg = "Shutdown error"; dictionary_set(known_journal_messages_ids, "af55a6f75b544431b72649f36ff6d62c", &i, sizeof(i)); + i.msg = "Valgrind helper fork"; dictionary_set(known_journal_messages_ids, "d18e0339efb24a068d9c1060221048c2", &i, sizeof(i)); + i.msg = "Unit starting"; dictionary_set(known_journal_messages_ids, "7d4958e842da4a758f6c1cdc7b36dcc5", &i, sizeof(i)); + i.msg = "Unit started"; dictionary_set(known_journal_messages_ids, "39f53479d3a045ac8e11786248231fbf", &i, sizeof(i)); + i.msg = "Unit failed"; dictionary_set(known_journal_messages_ids, "be02cf6855d2428ba40df7e9d022f03d", &i, sizeof(i)); + i.msg = "Unit stopping"; dictionary_set(known_journal_messages_ids, "de5b426a63be47a7b6ac3eaac82e2f6f", &i, sizeof(i)); + i.msg = "Unit stopped"; dictionary_set(known_journal_messages_ids, "9d1aaa27d60140bd96365438aad20286", &i, sizeof(i)); + i.msg = "Unit reloading"; dictionary_set(known_journal_messages_ids, "d34d037fff1847e6ae669a370e694725", &i, sizeof(i)); + i.msg = "Unit reloaded"; dictionary_set(known_journal_messages_ids, "7b05ebc668384222baa8881179cfda54", &i, sizeof(i)); + i.msg = "Unit restart scheduled"; dictionary_set(known_journal_messages_ids, "5eb03494b6584870a536b337290809b3", &i, sizeof(i)); + i.msg = "Unit resources"; dictionary_set(known_journal_messages_ids, "ae8f7b866b0347b9af31fe1c80b127c0", &i, sizeof(i)); + i.msg = "Unit success"; dictionary_set(known_journal_messages_ids, "7ad2d189f7e94e70a38c781354912448", &i, sizeof(i)); + i.msg = "Unit skipped"; dictionary_set(known_journal_messages_ids, "0e4284a0caca4bfc81c0bb6786972673", &i, sizeof(i)); + i.msg = "Unit failure result"; dictionary_set(known_journal_messages_ids, "d9b373ed55a64feb8242e02dbe79a49c", &i, sizeof(i)); + i.msg 
= "Spawn failed"; dictionary_set(known_journal_messages_ids, "641257651c1b4ec9a8624d7a40a9e1e7", &i, sizeof(i)); + i.msg = "Unit process exit"; dictionary_set(known_journal_messages_ids, "98e322203f7a4ed290d09fe03c09fe15", &i, sizeof(i)); + i.msg = "Forward syslog missed"; dictionary_set(known_journal_messages_ids, "0027229ca0644181a76c4e92458afa2e", &i, sizeof(i)); + i.msg = "Overmounting"; dictionary_set(known_journal_messages_ids, "1dee0369c7fc4736b7099b38ecb46ee7", &i, sizeof(i)); + i.msg = "Unit oomd kill"; dictionary_set(known_journal_messages_ids, "d989611b15e44c9dbf31e3c81256e4ed", &i, sizeof(i)); + i.msg = "Unit out of memory"; dictionary_set(known_journal_messages_ids, "fe6faa94e7774663a0da52717891d8ef", &i, sizeof(i)); + i.msg = "Lid opened"; dictionary_set(known_journal_messages_ids, "b72ea4a2881545a0b50e200e55b9b06f", &i, sizeof(i)); + i.msg = "Lid closed"; dictionary_set(known_journal_messages_ids, "b72ea4a2881545a0b50e200e55b9b070", &i, sizeof(i)); + i.msg = "System docked"; dictionary_set(known_journal_messages_ids, "f5f416b862074b28927a48c3ba7d51ff", &i, sizeof(i)); + i.msg = "System undocked"; dictionary_set(known_journal_messages_ids, "51e171bd585248568110144c517cca53", &i, sizeof(i)); + i.msg = "Power key"; dictionary_set(known_journal_messages_ids, "b72ea4a2881545a0b50e200e55b9b071", &i, sizeof(i)); + i.msg = "Power key long press"; dictionary_set(known_journal_messages_ids, "3e0117101eb243c1b9a50db3494ab10b", &i, sizeof(i)); + i.msg = "Reboot key"; dictionary_set(known_journal_messages_ids, "9fa9d2c012134ec385451ffe316f97d0", &i, sizeof(i)); + i.msg = "Reboot key long press"; dictionary_set(known_journal_messages_ids, "f1c59a58c9d943668965c337caec5975", &i, sizeof(i)); + i.msg = "Suspend key"; dictionary_set(known_journal_messages_ids, "b72ea4a2881545a0b50e200e55b9b072", &i, sizeof(i)); + i.msg = "Suspend key long press"; dictionary_set(known_journal_messages_ids, "bfdaf6d312ab4007bc1fe40a15df78e8", &i, sizeof(i)); + i.msg = "Hibernate key"; dictionary_set(known_journal_messages_ids, "b72ea4a2881545a0b50e200e55b9b073", &i, sizeof(i)); + i.msg = "Hibernate key long press"; dictionary_set(known_journal_messages_ids, "167836df6f7f428e98147227b2dc8945", &i, sizeof(i)); + i.msg = "Invalid configuration"; dictionary_set(known_journal_messages_ids, "c772d24e9a884cbeb9ea12625c306c01", &i, sizeof(i)); + i.msg = "Dnssec failure"; dictionary_set(known_journal_messages_ids, "1675d7f172174098b1108bf8c7dc8f5d", &i, sizeof(i)); + i.msg = "Dnssec trust anchor revoked"; dictionary_set(known_journal_messages_ids, "4d4408cfd0d144859184d1e65d7c8a65", &i, sizeof(i)); + i.msg = "Dnssec downgrade"; dictionary_set(known_journal_messages_ids, "36db2dfa5a9045e1bd4af5f93e1cf057", &i, sizeof(i)); + i.msg = "Unsafe user name"; dictionary_set(known_journal_messages_ids, "b61fdac612e94b9182285b998843061f", &i, sizeof(i)); + i.msg = "Mount point path not suitable"; dictionary_set(known_journal_messages_ids, "1b3bb94037f04bbf81028e135a12d293", &i, sizeof(i)); + i.msg = "Device path not suitable"; dictionary_set(known_journal_messages_ids, "010190138f494e29a0ef6669749531aa", &i, sizeof(i)); + i.msg = "Nobody user unsuitable"; dictionary_set(known_journal_messages_ids, "b480325f9c394a7b802c231e51a2752c", &i, sizeof(i)); + i.msg = "Systemd udev settle deprecated"; dictionary_set(known_journal_messages_ids, "1c0454c1bd2241e0ac6fefb4bc631433", &i, sizeof(i)); + i.msg = "Time sync"; dictionary_set(known_journal_messages_ids, "7c8a41f37b764941a0e1780b1be2f037", &i, sizeof(i)); + i.msg = "Time bump"; 
dictionary_set(known_journal_messages_ids, "7db73c8af0d94eeb822ae04323fe6ab6", &i, sizeof(i)); + i.msg = "Shutdown scheduled"; dictionary_set(known_journal_messages_ids, "9e7066279dc8403da79ce4b1a69064b2", &i, sizeof(i)); + i.msg = "Shutdown canceled"; dictionary_set(known_journal_messages_ids, "249f6fb9e6e2428c96f3f0875681ffa3", &i, sizeof(i)); + i.msg = "TPM pcr extend"; dictionary_set(known_journal_messages_ids, "3f7d5ef3e54f4302b4f0b143bb270cab", &i, sizeof(i)); + i.msg = "Memory trim"; dictionary_set(known_journal_messages_ids, "f9b0be465ad540d0850ad32172d57c21", &i, sizeof(i)); + i.msg = "Sysv generator deprecated"; dictionary_set(known_journal_messages_ids, "a8fa8dacdb1d443e9503b8be367a6adb", &i, sizeof(i)); + + // gnome + // https://gitlab.gnome.org/GNOME/gnome-session/-/blob/main/gnome-session/gsm-manager.c + i.msg = "Gnome SM startup succeeded"; dictionary_set(known_journal_messages_ids, "0ce153587afa4095832d233c17a88001", &i, sizeof(i)); + i.msg = "Gnome SM unrecoverable failure"; dictionary_set(known_journal_messages_ids, "10dd2dc188b54a5e98970f56499d1f73", &i, sizeof(i)); + + // gnome-shell + // https://gitlab.gnome.org/GNOME/gnome-shell/-/blob/main/js/ui/main.js#L56 + i.msg = "Gnome shell started";dictionary_set(known_journal_messages_ids, "f3ea493c22934e26811cd62abe8e203a", &i, sizeof(i)); + + // flathub + // https://docs.flatpak.org/de/latest/flatpak-command-reference.html + i.msg = "Flatpak cache"; dictionary_set(known_journal_messages_ids, "c7b39b1e006b464599465e105b361485", &i, sizeof(i)); + + // ??? + i.msg = "Flathub pulls"; dictionary_set(known_journal_messages_ids, "75ba3deb0af041a9a46272ff85d9e73e", &i, sizeof(i)); + i.msg = "Flathub pull errors"; dictionary_set(known_journal_messages_ids, "f02bce89a54e4efab3a94a797d26204a", &i, sizeof(i)); + + // ?? 
+ i.msg = "Boltd starting"; dictionary_set(known_journal_messages_ids, "dd11929c788e48bdbb6276fb5f26b08a", &i, sizeof(i)); + + // Netdata + i.msg = "Netdata connection from child"; dictionary_set(known_journal_messages_ids, "ed4cdb8f1beb4ad3b57cb3cae2d162fa", &i, sizeof(i)); + i.msg = "Netdata connection to parent"; dictionary_set(known_journal_messages_ids, "6e2e3839067648968b646045dbf28d66", &i, sizeof(i)); + i.msg = "Netdata alert transition"; dictionary_set(known_journal_messages_ids, "9ce0cb58ab8b44df82c4bf1ad9ee22de", &i, sizeof(i)); + i.msg = "Netdata alert notification"; dictionary_set(known_journal_messages_ids, "6db0018e83e34320ae2a659d78019fb7", &i, sizeof(i)); +} + +void netdata_systemd_journal_transform_message_id(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) { + const char *message_id = buffer_tostring(wb); + struct message_id_info *i = dictionary_get(known_journal_messages_ids, message_id); + + if(!i) + return; + + switch(scope) { + default: + case FACETS_TRANSFORM_DATA: + case FACETS_TRANSFORM_VALUE: + buffer_sprintf(wb, " (%s)", i->msg); + break; + + case FACETS_TRANSFORM_FACET: + case FACETS_TRANSFORM_FACET_SORT: + case FACETS_TRANSFORM_HISTOGRAM: + buffer_flush(wb); + buffer_strcat(wb, i->msg); + break; + } +} + +// ---------------------------------------------------------------------------- + +static void netdata_systemd_journal_rich_message(FACETS *facets __maybe_unused, BUFFER *json_array, FACET_ROW_KEY_VALUE *rkv, FACET_ROW *row __maybe_unused, void *data __maybe_unused) { + buffer_json_add_array_item_object(json_array); + buffer_json_member_add_string(json_array, "value", buffer_tostring(rkv->wb)); + buffer_json_object_close(json_array); +} diff --git a/collectors/systemd-journal.plugin/systemd-journal-files.c b/collectors/systemd-journal.plugin/systemd-journal-files.c new file mode 100644 index 00000000000000..56496df2258b3d --- /dev/null +++ b/collectors/systemd-journal.plugin/systemd-journal-files.c @@ -0,0 +1,857 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "systemd-internals.h" + +#define SYSTEMD_JOURNAL_MAX_SOURCE_LEN 64 +#define VAR_LOG_JOURNAL_MAX_DEPTH 10 + +struct journal_directory journal_directories[MAX_JOURNAL_DIRECTORIES] = { 0 }; +DICTIONARY *journal_files_registry = NULL; +DICTIONARY *used_hashes_registry = NULL; + +static usec_t systemd_journal_session = 0; + +void buffer_json_journal_versions(BUFFER *wb) { + buffer_json_member_add_object(wb, "versions"); + { + buffer_json_member_add_uint64(wb, "sources", + systemd_journal_session + dictionary_version(journal_files_registry)); + } + buffer_json_object_close(wb); +} + +static bool journal_sd_id128_parse(const char *in, sd_id128_t *ret) { + while(isspace(*in)) + in++; + + char uuid[33]; + strncpyz(uuid, in, 32); + uuid[32] = '\0'; + + if(strlen(uuid) == 32) { + sd_id128_t read; + if(sd_id128_from_string(uuid, &read) == 0) { + *ret = read; + return true; + } + } + + return false; +} + +static void journal_file_get_header_from_journalctl(const char *filename, struct journal_file *jf) { + // unfortunately, our capabilities are not inheritted by journalctl + // so, it fails to give us the information we need. 
+ + bool read_writer = false, read_head = false, read_tail = false; + + char cmd[FILENAME_MAX * 2]; + snprintfz(cmd, sizeof(cmd), "journalctl --header --file '%s'", filename); + CLEAN_BUFFER *wb = run_command_and_get_output_to_buffer(cmd, 1024); + if(wb) { + const char *s = buffer_tostring(wb); + + const char *sequential_id_header = "Sequential Number ID:"; + const char *sequential_id_data = strcasestr(s, sequential_id_header); + if(sequential_id_data) { + sequential_id_data += strlen(sequential_id_header); + if(journal_sd_id128_parse(sequential_id_data, &jf->first_writer_id)) + read_writer = true; + } + + const char *head_sequential_number_header = "Head sequential number:"; + const char *head_sequential_number_data = strcasestr(s, head_sequential_number_header); + if(head_sequential_number_data) { + head_sequential_number_data += strlen(head_sequential_number_header); + + while(isspace(*head_sequential_number_data)) + head_sequential_number_data++; + + if(isdigit(*head_sequential_number_data)) { + jf->first_seqnum = strtoul(head_sequential_number_data, NULL, 10); + if(jf->first_seqnum) + read_head = true; + } + } + + const char *tail_sequential_number_header = "Tail sequential number:"; + const char *tail_sequential_number_data = strcasestr(s, tail_sequential_number_header); + if(tail_sequential_number_data) { + tail_sequential_number_data += strlen(tail_sequential_number_header); + + while(isspace(*tail_sequential_number_data)) + tail_sequential_number_data++; + + if(isdigit(*tail_sequential_number_data)) { + jf->last_seqnum = strtoul(tail_sequential_number_data, NULL, 10); + if(jf->last_seqnum) + read_tail = true; + } + } + + if(read_head && read_tail && jf->last_seqnum > jf->first_seqnum) + jf->messages_in_file = jf->last_seqnum - jf->first_seqnum; + } + + if(!jf->logged_journalctl_failure && (!read_writer || !read_head || !read_tail)) { + + nd_log(NDLS_COLLECTORS, NDLP_NOTICE, + "Failed to read %s%s%s from journalctl's output on filename '%s', using the command: %s", + read_writer?"":"writer id,", + read_head?"":"head id,", + read_tail?"":"tail id,", + filename, cmd); + + jf->logged_journalctl_failure = true; + } +} + +usec_t journal_file_update_annotation_boot_id(sd_journal *j, struct journal_file *jf, const char *boot_id) { + usec_t ut = UINT64_MAX; + int r; + + char m[100]; + size_t len = snprintfz(m, sizeof(m), "_BOOT_ID=%s", boot_id); + + sd_journal_flush_matches(j); + + r = sd_journal_add_match(j, m, len); + if(r < 0) { + errno = -r; + internal_error(true, + "JOURNAL: while looking for the first timestamp of boot_id '%s', " + "sd_journal_add_match('%s') on file '%s' returned %d", + boot_id, m, jf->filename, r); + return UINT64_MAX; + } + + r = sd_journal_seek_head(j); + if(r < 0) { + errno = -r; + internal_error(true, + "JOURNAL: while looking for the first timestamp of boot_id '%s', " + "sd_journal_seek_head() on file '%s' returned %d", + boot_id, jf->filename, r); + return UINT64_MAX; + } + + r = sd_journal_next(j); + if(r < 0) { + errno = -r; + internal_error(true, + "JOURNAL: while looking for the first timestamp of boot_id '%s', " + "sd_journal_next() on file '%s' returned %d", + boot_id, jf->filename, r); + return UINT64_MAX; + } + + r = sd_journal_get_realtime_usec(j, &ut); + if(r < 0 || !ut || ut == UINT64_MAX) { + errno = -r; + internal_error(r != -EADDRNOTAVAIL, + "JOURNAL: while looking for the first timestamp of boot_id '%s', " + "sd_journal_get_realtime_usec() on file '%s' returned %d", + boot_id, jf->filename, r); + return UINT64_MAX; + } + + if(ut && ut != 
UINT64_MAX) { + dictionary_set(boot_ids_to_first_ut, boot_id, &ut, sizeof(ut)); + return ut; + } + + return UINT64_MAX; +} + +static void journal_file_get_boot_id_annotations(sd_journal *j __maybe_unused, struct journal_file *jf __maybe_unused) { +#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS + sd_journal_flush_matches(j); + + int r = sd_journal_query_unique(j, "_BOOT_ID"); + if (r < 0) { + errno = -r; + internal_error(true, + "JOURNAL: while querying for the unique _BOOT_ID values, " + "sd_journal_query_unique() on file '%s' returned %d", + jf->filename, r); + errno = -r; + return; + } + + const void *data = NULL; + size_t data_length; + + DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED); + + SD_JOURNAL_FOREACH_UNIQUE(j, data, data_length) { + const char *key, *value; + size_t key_length, value_length; + + if(!parse_journal_field(data, data_length, &key, &key_length, &value, &value_length)) + continue; + + if(value_length != 32) + continue; + + char buf[33]; + memcpy(buf, value, 32); + buf[32] = '\0'; + + dictionary_set(dict, buf, NULL, 0); + } + + void *nothing; + dfe_start_read(dict, nothing){ + journal_file_update_annotation_boot_id(j, jf, nothing_dfe.name); + } + dfe_done(nothing); + + dictionary_destroy(dict); +#endif +} + +void journal_file_update_header(const char *filename, struct journal_file *jf) { + if(jf->last_scan_header_vs_last_modified_ut == jf->file_last_modified_ut) + return; + + fstat_cache_enable_on_thread(); + + const char *files[2] = { + [0] = filename, + [1] = NULL, + }; + + sd_journal *j = NULL; + if(sd_journal_open_files(&j, files, ND_SD_JOURNAL_OPEN_FLAGS) < 0 || !j) { + netdata_log_error("JOURNAL: cannot open file '%s' to update msg_ut", filename); + fstat_cache_disable_on_thread(); + + if(!jf->logged_failure) { + netdata_log_error("cannot open journal file '%s', using file timestamps to understand time-frame.", filename); + jf->logged_failure = true; + } + + jf->msg_first_ut = 0; + jf->msg_last_ut = jf->file_last_modified_ut; + jf->last_scan_header_vs_last_modified_ut = jf->file_last_modified_ut; + return; + } + + usec_t first_ut = 0, last_ut = 0; + uint64_t first_seqnum = 0, last_seqnum = 0; + sd_id128_t first_writer_id = SD_ID128_NULL, last_writer_id = SD_ID128_NULL; + + if(sd_journal_seek_head(j) < 0 || sd_journal_next(j) < 0 || sd_journal_get_realtime_usec(j, &first_ut) < 0 || !first_ut) { + internal_error(true, "cannot find the timestamp of the first message in '%s'", filename); + first_ut = 0; + } +#ifdef HAVE_SD_JOURNAL_GET_SEQNUM + else { + if(sd_journal_get_seqnum(j, &first_seqnum, &first_writer_id) < 0 || !first_seqnum) { + internal_error(true, "cannot find the first seqnums of the first message in '%s'", filename); + first_seqnum = 0; + memset(&first_writer_id, 0, sizeof(first_writer_id)); + } + } +#endif + + if(sd_journal_seek_tail(j) < 0 || sd_journal_previous(j) < 0 || sd_journal_get_realtime_usec(j, &last_ut) < 0 || !last_ut) { + internal_error(true, "cannot find the timestamp of the last message in '%s'", filename); + last_ut = jf->file_last_modified_ut; + } +#ifdef HAVE_SD_JOURNAL_GET_SEQNUM + else { + if(sd_journal_get_seqnum(j, &last_seqnum, &last_writer_id) < 0 || !last_seqnum) { + internal_error(true, "cannot find the last seqnums of the first message in '%s'", filename); + last_seqnum = 0; + memset(&last_writer_id, 0, sizeof(last_writer_id)); + } + } +#endif + + if(first_ut > last_ut) { + internal_error(true, "timestamps are flipped in file '%s'", filename); + usec_t t = first_ut; + first_ut = last_ut; + last_ut = t; + } + + 
if(!first_seqnum || !first_ut) { + // extract these from the filename - if possible + + const char *at = strchr(filename, '@'); + if(at) { + const char *dash_seqnum = strchr(at + 1, '-'); + if(dash_seqnum) { + const char *dash_first_msg_ut = strchr(dash_seqnum + 1, '-'); + if(dash_first_msg_ut) { + const char *dot_journal = strstr(dash_first_msg_ut + 1, ".journal"); + if(dot_journal) { + if(dash_seqnum - at - 1 == 32 && + dash_first_msg_ut - dash_seqnum - 1 == 16 && + dot_journal - dash_first_msg_ut - 1 == 16) { + sd_id128_t writer; + if(journal_sd_id128_parse(at + 1, &writer)) { + char *endptr = NULL; + uint64_t seqnum = strtoul(dash_seqnum + 1, &endptr, 16); + if(endptr == dash_first_msg_ut) { + uint64_t ts = strtoul(dash_first_msg_ut + 1, &endptr, 16); + if(endptr == dot_journal) { + first_seqnum = seqnum; + first_writer_id = writer; + first_ut = ts; + } + } + } + } + } + } + } + } + } + + jf->first_seqnum = first_seqnum; + jf->last_seqnum = last_seqnum; + + jf->first_writer_id = first_writer_id; + jf->last_writer_id = last_writer_id; + + jf->msg_first_ut = first_ut; + jf->msg_last_ut = last_ut; + + if(!jf->msg_last_ut) + jf->msg_last_ut = jf->file_last_modified_ut; + + if(last_seqnum > first_seqnum) { + if(!sd_id128_equal(first_writer_id, last_writer_id)) { + jf->messages_in_file = 0; + nd_log(NDLS_COLLECTORS, NDLP_NOTICE, + "The writers of the first and the last message in file '%s' differ." + , filename); + } + else + jf->messages_in_file = last_seqnum - first_seqnum + 1; + } + else + jf->messages_in_file = 0; + +// if(!jf->messages_in_file) +// journal_file_get_header_from_journalctl(filename, jf); + + journal_file_get_boot_id_annotations(j, jf); + sd_journal_close(j); + fstat_cache_disable_on_thread(); + + jf->last_scan_header_vs_last_modified_ut = jf->file_last_modified_ut; + + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, + "Journal file header updated '%s'", + jf->filename); +} + +static STRING *string_strdupz_source(const char *s, const char *e, size_t max_len, const char *prefix) { + char buf[max_len]; + size_t len; + char *dst = buf; + + if(prefix) { + len = strlen(prefix); + memcpy(buf, prefix, len); + dst = &buf[len]; + max_len -= len; + } + + len = e - s; + if(len >= max_len) + len = max_len - 1; + memcpy(dst, s, len); + dst[len] = '\0'; + buf[max_len - 1] = '\0'; + + for(size_t i = 0; buf[i] ;i++) + if(!isalnum(buf[i]) && buf[i] != '-' && buf[i] != '.' && buf[i] != ':') + buf[i] = '_'; + + return string_strdupz(buf); +} + +static void files_registry_insert_cb(const DICTIONARY_ITEM *item, void *value, void *data __maybe_unused) { + struct journal_file *jf = value; + jf->filename = dictionary_acquired_item_name(item); + jf->filename_len = strlen(jf->filename); + jf->source_type = SDJF_ALL; + + // based on the filename + // decide the source to show to the user + const char *s = strrchr(jf->filename, '/'); + if(s) { + if(strstr(jf->filename, "/remote/")) { + jf->source_type |= SDJF_REMOTE_ALL; + + if(strncmp(s, "/remote-", 8) == 0) { + s = &s[8]; // skip "/remote-" + + char *e = strchr(s, '@'); + if(!e) + e = strstr(s, ".journal"); + + if(e) { + const char *d = s; + for(; d < e && (isdigit(*d) || *d == '.' 
|| *d == ':') ; d++) ; + if(d == e) { + // a valid IP address + char ip[e - s + 1]; + memcpy(ip, s, e - s); + ip[e - s] = '\0'; + char buf[SYSTEMD_JOURNAL_MAX_SOURCE_LEN]; + if(ip_to_hostname(ip, buf, sizeof(buf))) + jf->source = string_strdupz_source(buf, &buf[strlen(buf)], SYSTEMD_JOURNAL_MAX_SOURCE_LEN, "remote-"); + else { + internal_error(true, "Cannot find the hostname for IP '%s'", ip); + jf->source = string_strdupz_source(s, e, SYSTEMD_JOURNAL_MAX_SOURCE_LEN, "remote-"); + } + } + else + jf->source = string_strdupz_source(s, e, SYSTEMD_JOURNAL_MAX_SOURCE_LEN, "remote-"); + } + } + } + else { + jf->source_type |= SDJF_LOCAL_ALL; + + const char *t = s - 1; + while(t >= jf->filename && *t != '.' && *t != '/') + t--; + + if(t >= jf->filename && *t == '.') { + jf->source_type |= SDJF_LOCAL_NAMESPACE; + jf->source = string_strdupz_source(t + 1, s, SYSTEMD_JOURNAL_MAX_SOURCE_LEN, "namespace-"); + } + else if(strncmp(s, "/system", 7) == 0) + jf->source_type |= SDJF_LOCAL_SYSTEM; + + else if(strncmp(s, "/user", 5) == 0) + jf->source_type |= SDJF_LOCAL_USER; + + else + jf->source_type |= SDJF_LOCAL_OTHER; + } + } + else + jf->source_type |= SDJF_LOCAL_ALL | SDJF_LOCAL_OTHER; + + jf->msg_last_ut = jf->file_last_modified_ut; + + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, + "Journal file added to the journal files registry: '%s'", + jf->filename); +} + +static bool files_registry_conflict_cb(const DICTIONARY_ITEM *item, void *old_value, void *new_value, void *data __maybe_unused) { + struct journal_file *jf = old_value; + struct journal_file *njf = new_value; + + if(njf->last_scan_monotonic_ut > jf->last_scan_monotonic_ut) + jf->last_scan_monotonic_ut = njf->last_scan_monotonic_ut; + + if(njf->file_last_modified_ut > jf->file_last_modified_ut) { + jf->file_last_modified_ut = njf->file_last_modified_ut; + jf->size = njf->size; + + jf->msg_last_ut = jf->file_last_modified_ut; + + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, + "Journal file updated to the journal files registry '%s'", + jf->filename); + } + + return false; +} + +struct journal_file_source { + usec_t first_ut; + usec_t last_ut; + size_t count; + uint64_t size; +}; + +static void human_readable_size_ib(uint64_t size, char *dst, size_t dst_len) { + if(size > 1024ULL * 1024 * 1024 * 1024) + snprintfz(dst, dst_len, "%0.2f TiB", (double)size / 1024.0 / 1024.0 / 1024.0 / 1024.0); + else if(size > 1024ULL * 1024 * 1024) + snprintfz(dst, dst_len, "%0.2f GiB", (double)size / 1024.0 / 1024.0 / 1024.0); + else if(size > 1024ULL * 1024) + snprintfz(dst, dst_len, "%0.2f MiB", (double)size / 1024.0 / 1024.0); + else if(size > 1024ULL) + snprintfz(dst, dst_len, "%0.2f KiB", (double)size / 1024.0); + else + snprintfz(dst, dst_len, "%"PRIu64" B", size); +} + +#define print_duration(dst, dst_len, pos, remaining, duration, one, many, printed) do { \ + if((remaining) > (duration)) { \ + uint64_t _count = (remaining) / (duration); \ + uint64_t _rem = (remaining) - (_count * (duration)); \ + (pos) += snprintfz(&(dst)[pos], (dst_len) - (pos), "%s%s%"PRIu64" %s", (printed) ? ", " : "", _rem ? "" : "and ", _count, _count > 1 ? 
(many) : (one)); \ + (remaining) = _rem; \ + (printed) = true; \ + } \ +} while(0) + +static void human_readable_duration_s(time_t duration_s, char *dst, size_t dst_len) { + if(duration_s < 0) + duration_s = -duration_s; + + size_t pos = 0; + dst[0] = 0 ; + + bool printed = false; + print_duration(dst, dst_len, pos, duration_s, 86400 * 365, "year", "years", printed); + print_duration(dst, dst_len, pos, duration_s, 86400 * 30, "month", "months", printed); + print_duration(dst, dst_len, pos, duration_s, 86400 * 1, "day", "days", printed); + print_duration(dst, dst_len, pos, duration_s, 3600 * 1, "hour", "hours", printed); + print_duration(dst, dst_len, pos, duration_s, 60 * 1, "min", "mins", printed); + print_duration(dst, dst_len, pos, duration_s, 1, "sec", "secs", printed); +} + +static int journal_file_to_json_array_cb(const DICTIONARY_ITEM *item, void *entry, void *data) { + struct journal_file_source *jfs = entry; + BUFFER *wb = data; + + const char *name = dictionary_acquired_item_name(item); + + buffer_json_add_array_item_object(wb); + { + char size_for_humans[100]; + human_readable_size_ib(jfs->size, size_for_humans, sizeof(size_for_humans)); + + char duration_for_humans[1024]; + human_readable_duration_s((time_t)((jfs->last_ut - jfs->first_ut) / USEC_PER_SEC), + duration_for_humans, sizeof(duration_for_humans)); + + char info[1024]; + snprintfz(info, sizeof(info), "%zu files, with a total size of %s, covering %s", + jfs->count, size_for_humans, duration_for_humans); + + buffer_json_member_add_string(wb, "id", name); + buffer_json_member_add_string(wb, "name", name); + buffer_json_member_add_string(wb, "pill", size_for_humans); + buffer_json_member_add_string(wb, "info", info); + } + buffer_json_object_close(wb); // options object + + return 1; +} + +static bool journal_file_merge_sizes(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value , void *data __maybe_unused) { + struct journal_file_source *jfs = old_value, *njfs = new_value; + jfs->count += njfs->count; + jfs->size += njfs->size; + + if(njfs->first_ut && njfs->first_ut < jfs->first_ut) + jfs->first_ut = njfs->first_ut; + + if(njfs->last_ut && njfs->last_ut > jfs->last_ut) + jfs->last_ut = njfs->last_ut; + + return false; +} + +void available_journal_file_sources_to_json_array(BUFFER *wb) { + DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_NAME_LINK_DONT_CLONE|DICT_OPTION_DONT_OVERWRITE_VALUE); + dictionary_register_conflict_callback(dict, journal_file_merge_sizes, NULL); + + struct journal_file_source t = { 0 }; + + struct journal_file *jf; + dfe_start_read(journal_files_registry, jf) { + t.first_ut = jf->msg_first_ut; + t.last_ut = jf->msg_last_ut; + t.count = 1; + t.size = jf->size; + + dictionary_set(dict, SDJF_SOURCE_ALL_NAME, &t, sizeof(t)); + + if(jf->source_type & SDJF_LOCAL_ALL) + dictionary_set(dict, SDJF_SOURCE_LOCAL_NAME, &t, sizeof(t)); + if(jf->source_type & SDJF_LOCAL_SYSTEM) + dictionary_set(dict, SDJF_SOURCE_LOCAL_SYSTEM_NAME, &t, sizeof(t)); + if(jf->source_type & SDJF_LOCAL_USER) + dictionary_set(dict, SDJF_SOURCE_LOCAL_USERS_NAME, &t, sizeof(t)); + if(jf->source_type & SDJF_LOCAL_OTHER) + dictionary_set(dict, SDJF_SOURCE_LOCAL_OTHER_NAME, &t, sizeof(t)); + if(jf->source_type & SDJF_LOCAL_NAMESPACE) + dictionary_set(dict, SDJF_SOURCE_NAMESPACES_NAME, &t, sizeof(t)); + if(jf->source_type & SDJF_REMOTE_ALL) + dictionary_set(dict, SDJF_SOURCE_REMOTES_NAME, &t, sizeof(t)); + if(jf->source) + dictionary_set(dict, string2str(jf->source), &t, sizeof(t)); + } + 
dfe_done(jf); + + dictionary_sorted_walkthrough_read(dict, journal_file_to_json_array_cb, wb); + + dictionary_destroy(dict); +} + +static void files_registry_delete_cb(const DICTIONARY_ITEM *item, void *value, void *data __maybe_unused) { + struct journal_file *jf = value; (void)jf; + const char *filename = dictionary_acquired_item_name(item); (void)filename; + + internal_error(true, "removed journal file '%s'", filename); + string_freez(jf->source); +} + +void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, const char *dirname, int depth) { + static const char *ext = ".journal"; + static const ssize_t ext_len = sizeof(".journal") - 1; + + if (depth > VAR_LOG_JOURNAL_MAX_DEPTH) + return; + + DIR *dir; + struct dirent *entry; + char full_path[FILENAME_MAX]; + + // Open the directory. + if ((dir = opendir(dirname)) == NULL) { + if(errno != ENOENT && errno != ENOTDIR) + netdata_log_error("Cannot opendir() '%s'", dirname); + return; + } + + bool existing = false; + bool *found = dictionary_set(dirs, dirname, &existing, sizeof(existing)); + if(*found) return; + *found = true; + + // Read each entry in the directory. + while ((entry = readdir(dir)) != NULL) { + if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) + continue; + + ssize_t len = snprintfz(full_path, sizeof(full_path), "%s/%s", dirname, entry->d_name); + + if (entry->d_type == DT_DIR) { + journal_directory_scan_recursively(files, dirs, full_path, depth++); + } + else if (entry->d_type == DT_REG && len > ext_len && strcmp(full_path + len - ext_len, ext) == 0) { + if(files) + dictionary_set(files, full_path, NULL, 0); + + send_newline_and_flush(); + } + else if (entry->d_type == DT_LNK) { + struct stat info; + if (stat(full_path, &info) == -1) + continue; + + if (S_ISDIR(info.st_mode)) { + // The symbolic link points to a directory + char resolved_path[FILENAME_MAX + 1]; + if (realpath(full_path, resolved_path) != NULL) { + journal_directory_scan_recursively(files, dirs, resolved_path, depth++); + } + } + else if(S_ISREG(info.st_mode) && len > ext_len && strcmp(full_path + len - ext_len, ext) == 0) { + if(files) + dictionary_set(files, full_path, NULL, 0); + + send_newline_and_flush(); + } + } + } + + closedir(dir); +} + +static size_t journal_files_scans = 0; +bool journal_files_completed_once(void) { + return journal_files_scans > 0; +} + +int filenames_compar(const void *a, const void *b) { + const char *p1 = *(const char **)a; + const char *p2 = *(const char **)b; + + const char *at1 = strchr(p1, '@'); + const char *at2 = strchr(p2, '@'); + + if(!at1 && at2) + return -1; + + if(at1 && !at2) + return 1; + + if(!at1 && !at2) + return strcmp(p1, p2); + + const char *dash1 = strrchr(at1, '-'); + const char *dash2 = strrchr(at2, '-'); + + if(!dash1 || !dash2) + return strcmp(p1, p2); + + uint64_t ts1 = strtoul(dash1 + 1, NULL, 16); + uint64_t ts2 = strtoul(dash2 + 1, NULL, 16); + + if(ts1 > ts2) + return -1; + + if(ts1 < ts2) + return 1; + + return -strcmp(p1, p2); +} + +void journal_files_registry_update(void) { + static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; + + if(spinlock_trylock(&spinlock)) { + usec_t scan_monotonic_ut = now_monotonic_usec(); + + DICTIONARY *files = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE); + DICTIONARY *dirs = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE); + + for(unsigned i = 0; i < MAX_JOURNAL_DIRECTORIES; i++) { + if(!journal_directories[i].path) break; + 
journal_directory_scan_recursively(files, dirs, journal_directories[i].path, 0); + } + + const char **array = mallocz(sizeof(const char *) * dictionary_entries(files)); + size_t used = 0; + + void *x; + dfe_start_read(files, x) { + if(used >= dictionary_entries(files)) continue; + array[used++] = x_dfe.name; + } + dfe_done(x); + + qsort(array, used, sizeof(const char *), filenames_compar); + + for(size_t i = 0; i < used ;i++) { + const char *full_path = array[i]; + + struct stat info; + if (stat(full_path, &info) == -1) + continue; + + struct journal_file t = { + .file_last_modified_ut = info.st_mtim.tv_sec * USEC_PER_SEC + info.st_mtim.tv_nsec / NSEC_PER_USEC, + .last_scan_monotonic_ut = scan_monotonic_ut, + .size = info.st_size, + .max_journal_vs_realtime_delta_ut = JOURNAL_VS_REALTIME_DELTA_DEFAULT_UT, + }; + struct journal_file *jf = dictionary_set(journal_files_registry, full_path, &t, sizeof(t)); + journal_file_update_header(jf->filename, jf); + } + freez(array); + dictionary_destroy(files); + dictionary_destroy(dirs); + + struct journal_file *jf; + dfe_start_write(journal_files_registry, jf){ + if(jf->last_scan_monotonic_ut < scan_monotonic_ut) + dictionary_del(journal_files_registry, jf_dfe.name); + } + dfe_done(jf); + + journal_files_scans++; + spinlock_unlock(&spinlock); + + internal_error(true, + "Journal library scan completed in %.3f ms", + (double)(now_monotonic_usec() - scan_monotonic_ut) / (double)USEC_PER_MS); + } +} + +// ---------------------------------------------------------------------------- + +int journal_file_dict_items_backward_compar(const void *a, const void *b) { + const DICTIONARY_ITEM **ad = (const DICTIONARY_ITEM **)a, **bd = (const DICTIONARY_ITEM **)b; + struct journal_file *jfa = dictionary_acquired_item_value(*ad); + struct journal_file *jfb = dictionary_acquired_item_value(*bd); + + // compare the last message timestamps + if(jfa->msg_last_ut < jfb->msg_last_ut) + return 1; + + if(jfa->msg_last_ut > jfb->msg_last_ut) + return -1; + + // compare the file last modification timestamps + if(jfa->file_last_modified_ut < jfb->file_last_modified_ut) + return 1; + + if(jfa->file_last_modified_ut > jfb->file_last_modified_ut) + return -1; + + // compare the first message timestamps + if(jfa->msg_first_ut < jfb->msg_first_ut) + return 1; + + if(jfa->msg_first_ut > jfb->msg_first_ut) + return -1; + + return 0; +} + +int journal_file_dict_items_forward_compar(const void *a, const void *b) { + return -journal_file_dict_items_backward_compar(a, b); +} + +static bool boot_id_conflict_cb(const DICTIONARY_ITEM *item, void *old_value, void *new_value, void *data __maybe_unused) { + usec_t *old_usec = old_value; + usec_t *new_usec = new_value; + + if(*new_usec < *old_usec) { + *old_usec = *new_usec; + return true; + } + + return false; +} + +void journal_init_files_and_directories(void) { + unsigned d = 0; + + // ------------------------------------------------------------------------ + // setup the journal directories + + journal_directories[d++].path = strdupz("/run/log/journal"); + journal_directories[d++].path = strdupz("/var/log/journal"); + + if(*netdata_configured_host_prefix) { + char path[PATH_MAX]; + snprintfz(path, sizeof(path), "%s/var/log/journal", netdata_configured_host_prefix); + journal_directories[d++].path = strdupz(path); + snprintfz(path, sizeof(path), "%s/run/log/journal", netdata_configured_host_prefix); + journal_directories[d++].path = strdupz(path); + } + + // terminate the list + journal_directories[d].path = NULL; + + // 
------------------------------------------------------------------------ + // initialize the used hashes files registry + + used_hashes_registry = dictionary_create(DICT_OPTION_DONT_OVERWRITE_VALUE); + + systemd_journal_session = (now_realtime_usec() / USEC_PER_SEC) * USEC_PER_SEC; + + journal_files_registry = dictionary_create_advanced( + DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, sizeof(struct journal_file)); + + dictionary_register_insert_callback(journal_files_registry, files_registry_insert_cb, NULL); + dictionary_register_delete_callback(journal_files_registry, files_registry_delete_cb, NULL); + dictionary_register_conflict_callback(journal_files_registry, files_registry_conflict_cb, NULL); + + boot_ids_to_first_ut = dictionary_create_advanced( + DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, sizeof(usec_t)); + + dictionary_register_conflict_callback(boot_ids_to_first_ut, boot_id_conflict_cb, NULL); + +} diff --git a/collectors/systemd-journal.plugin/systemd-journal-fstat.c b/collectors/systemd-journal.plugin/systemd-journal-fstat.c new file mode 100644 index 00000000000000..45ea78174c9be7 --- /dev/null +++ b/collectors/systemd-journal.plugin/systemd-journal-fstat.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "systemd-internals.h" + + +// ---------------------------------------------------------------------------- +// fstat64 overloading to speed up libsystemd +// https://github.com/systemd/systemd/pull/29261 + +#include <dlfcn.h> +#include <sys/stat.h> + +#define FSTAT_CACHE_MAX 1024 +struct fdstat64_cache_entry { + bool enabled; + bool updated; + int err_no; + struct stat64 stat; + int ret; + size_t cached_count; + size_t session; +}; + +struct fdstat64_cache_entry fstat64_cache[FSTAT_CACHE_MAX] = { 0 }; +__thread size_t fstat_thread_calls = 0; +__thread size_t fstat_thread_cached_responses = 0; +static __thread bool enable_thread_fstat = false; +static __thread size_t fstat_caching_thread_session = 0; +static size_t fstat_caching_global_session = 0; + +void fstat_cache_enable_on_thread(void) { + fstat_caching_thread_session = __atomic_add_fetch(&fstat_caching_global_session, 1, __ATOMIC_ACQUIRE); + enable_thread_fstat = true; +} + +void fstat_cache_disable_on_thread(void) { + fstat_caching_thread_session = __atomic_add_fetch(&fstat_caching_global_session, 1, __ATOMIC_RELEASE); + enable_thread_fstat = false; +} + +int fstat64(int fd, struct stat64 *buf) { + static int (*real_fstat)(int, struct stat64 *) = NULL; + if (!real_fstat) + real_fstat = dlsym(RTLD_NEXT, "fstat64"); + + fstat_thread_calls++; + + if(fd >= 0 && fd < FSTAT_CACHE_MAX) { + if(enable_thread_fstat && fstat64_cache[fd].session != fstat_caching_thread_session) { + fstat64_cache[fd].session = fstat_caching_thread_session; + fstat64_cache[fd].enabled = true; + fstat64_cache[fd].updated = false; + } + + if(fstat64_cache[fd].enabled && fstat64_cache[fd].updated && fstat64_cache[fd].session == fstat_caching_thread_session) { + fstat_thread_cached_responses++; + errno = fstat64_cache[fd].err_no; + *buf = fstat64_cache[fd].stat; + fstat64_cache[fd].cached_count++; + return fstat64_cache[fd].ret; + } + } + + int ret = real_fstat(fd, buf); + + if(fd >= 0 && fd < FSTAT_CACHE_MAX && fstat64_cache[fd].enabled && fstat64_cache[fd].session == fstat_caching_thread_session) { + fstat64_cache[fd].ret = ret; + fstat64_cache[fd].updated = true; + fstat64_cache[fd].err_no = errno; + fstat64_cache[fd].stat = *buf; + } + + return ret; +} diff --git 
a/collectors/systemd-journal.plugin/systemd-journal-self-signed-certs.sh b/collectors/systemd-journal.plugin/systemd-journal-self-signed-certs.sh new file mode 100755 index 00000000000000..ada735f1fbc215 --- /dev/null +++ b/collectors/systemd-journal.plugin/systemd-journal-self-signed-certs.sh @@ -0,0 +1,267 @@ +#!/usr/bin/env bash + +me="${0}" +dst="/etc/ssl/systemd-journal" + +show_usage() { + cat <&2 "directory set to: ${dst}" + shift + ;; + + *) + break 2 + ;; + esac + + shift +done + +if [ -z "${1}" ]; then + show_usage + exit 1 +fi + + +# Define a regular expression pattern for a valid canonical name +valid_canonical_name_pattern="^[a-zA-Z0-9][a-zA-Z0-9.-]+$" + +# Check if ${1} matches the pattern +if [[ ! "${1}" =~ ${valid_canonical_name_pattern} ]]; then + echo "Certificate name '${1}' is not valid." + exit 1 +fi + +# ----------------------------------------------------------------------------- +# Create the CA + +# stop on all errors +set -e + +if [ $UID -ne 0 ] +then + echo >&2 "Hey! sudo me: sudo ${me}" + exit 1 +fi + +if ! getent group systemd-journal >/dev/null 2>&1; then + echo >&2 "Missing system group: systemd-journal. Did you install systemd-journald?" + exit 1 +fi + +if ! getent passwd systemd-journal-remote >/dev/null 2>&1; then + echo >&2 "Missing system user: systemd-journal-remote. Did you install systemd-journal-remote?" + exit 1 +fi + +if [ ! -d "${dst}" ] +then + mkdir -p "${dst}" + chown systemd-journal-remote:systemd-journal "${dst}" + chmod 750 "${dst}" +fi + +cd "${dst}" + +test ! -f ca.conf && cat >ca.conf <serial + +if [ ! -f ca.pem -o ! -f ca.key ]; then + echo >&2 "Generating ca.pem ..." + + openssl req -newkey rsa:2048 -days 3650 -x509 -nodes -out ca.pem -keyout ca.key -subj "/CN=systemd-journal-remote-ca/" + chown systemd-journal-remote:systemd-journal ca.pem + chmod 0640 ca.pem +fi + +# ----------------------------------------------------------------------------- +# Create a server certificate + +generate_server_certificate() { + local cn="${1}"; shift + + if [ ! -f "${cn}.pem" -o ! -f "${cn}.key" ]; then + if [ -z "${*}" ]; then + echo >"${cn}.conf" + else + echo "subjectAltName = $(echo "${@}" | tr " " ",")" >"${cn}.conf" + fi + + echo >&2 "Generating server: ${cn}.pem and ${cn}.key ..." + + openssl req -newkey rsa:2048 -nodes -out "${cn}.csr" -keyout "${cn}.key" -subj "/CN=${cn}/" + openssl ca -batch -config ca.conf -notext -in "${cn}.csr" -out "${cn}.pem" -extfile "${cn}.conf" + else + echo >&2 "certificates for ${cn} are already available." + fi + + chown systemd-journal-remote:systemd-journal "${cn}.pem" "${cn}.key" + chmod 0640 "${cn}.pem" "${cn}.key" +} + + +# ----------------------------------------------------------------------------- +# Create a script to install the certificate on each server + +generate_install_script() { + local cn="${1}" + local dst="/etc/ssl/systemd-journal" + + cat >"runme-on-${cn}.sh" <&2 "Hey! sudo me: sudo \${0}" + exit 1 +fi + +# make sure the systemd-journal group exists +# all certificates will be owned by this group +if ! getent group systemd-journal >/dev/null 2>&1; then + echo >&2 "Missing system group: systemd-journal. Did you install systemd-journald?" + exit 1 +fi + +if ! getent passwd systemd-journal-remote >/dev/null 2>&1; then + echo >&2 "Missing system user: systemd-journal-remote. Did you install systemd-journal-remote?" + exit 1 +fi + +if [ ! 
-d ${dst} ]; then + echo >&2 "creating directory: ${dst}" + mkdir -p "${dst}" +fi +chown systemd-journal-remote:systemd-journal "${dst}" +chmod 750 "${dst}" +cd "${dst}" + +echo >&2 "saving trusted certificate file as: ${dst}/ca.pem" +cat >ca.pem <&2 "saving server ${cn} certificate file as: ${dst}/${cn}.pem" +cat >"${cn}.pem" <&2 "saving server ${cn} key file as: ${dst}/${cn}.key" +cat >"${cn}.key" <&2 "updating the certificates in \${cfg}" + sed -i "s|^#\\?\\s*ServerKeyFile=.*$|ServerKeyFile=${dst}/${cn}.key|" \${cfg} + sed -i "s|^#\\?\\s*ServerCertificateFile=.*$|ServerCertificateFile=${dst}/${cn}.pem|" \${cfg} + sed -i "s|^#\\?\\s*TrustedCertificateFile=.*$|TrustedCertificateFile=${dst}/ca.pem|" \${cfg} + fi +done + +echo >&2 "certificates installed - you may need to restart services to active them" +echo >&2 +echo >&2 "If this is a central server:" +echo >&2 "# systemctl restart systemd-journal-remote.socket" +echo >&2 +echo >&2 "If this is a passive client:" +echo >&2 "# systemctl restart systemd-journal-upload.service" +echo >&2 +echo >&2 "If this is an active client:" +echo >&2 "# systemctl restart systemd-journal-gateway.socket" +EOFC1 + + chmod 0700 "runme-on-${cn}.sh" +} + +# ----------------------------------------------------------------------------- +# Create the client certificates + +generate_server_certificate "${@}" +generate_install_script "${1}" + + +# Set ANSI escape code for colors +yellow_color="\033[1;33m" +green_color="\033[0;32m" +# Reset ANSI color after the message +reset_color="\033[0m" + + +echo >&2 -e "use this script to install it on ${1}: ${yellow_color}$(ls ${dst}/runme-on-${1}.sh)${reset_color}" +echo >&2 "copy it to your server ${1}, like this:" +echo >&2 -e "# ${green_color}scp ${dst}/runme-on-${1}.sh ${1}:/tmp/${reset_color}" +echo >&2 "and then run it on that server to install the certificates" +echo >&2 diff --git a/collectors/systemd-journal.plugin/systemd-journal-watcher.c b/collectors/systemd-journal.plugin/systemd-journal-watcher.c new file mode 100644 index 00000000000000..ed41f624744b01 --- /dev/null +++ b/collectors/systemd-journal.plugin/systemd-journal-watcher.c @@ -0,0 +1,379 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "systemd-internals.h" +#include + +#define EVENT_SIZE (sizeof(struct inotify_event)) +#define INITIAL_WATCHES 256 + +#define WATCH_FOR (IN_CREATE | IN_MODIFY | IN_DELETE | IN_DELETE_SELF | IN_MOVED_FROM | IN_MOVED_TO | IN_UNMOUNT) + +typedef struct watch_entry { + int slot; + + int wd; // Watch descriptor + char *path; // Dynamically allocated path + + struct watch_entry *next; // for the free list +} WatchEntry; + +typedef struct { + WatchEntry *watchList; + WatchEntry *freeList; + int watchCount; + int watchListSize; + + size_t errors; + + DICTIONARY *pending; +} Watcher; + +static WatchEntry *get_slot(Watcher *watcher) { + WatchEntry *t; + + if (watcher->freeList != NULL) { + t = watcher->freeList; + watcher->freeList = t->next; + t->next = NULL; + return t; + } + + if (watcher->watchCount == watcher->watchListSize) { + watcher->watchListSize *= 2; + watcher->watchList = reallocz(watcher->watchList, watcher->watchListSize * sizeof(WatchEntry)); + } + + watcher->watchList[watcher->watchCount] = (WatchEntry){ + .slot = watcher->watchCount, + .wd = -1, + .path = NULL, + .next = NULL, + }; + t = &watcher->watchList[watcher->watchCount]; + watcher->watchCount++; + + return t; +} + +static void free_slot(Watcher *watcher, WatchEntry *t) { + t->wd = -1; + freez(t->path); + t->path = NULL; + + // link it to the 
free list + t->next = watcher->freeList; + watcher->freeList = t; +} + +static int add_watch(Watcher *watcher, int inotifyFd, const char *path) { + WatchEntry *t = get_slot(watcher); + + t->wd = inotify_add_watch(inotifyFd, path, WATCH_FOR); + if (t->wd == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "JOURNAL WATCHER: cannot watch directory: '%s'", + path); + + free_slot(watcher, t); + + struct stat info; + if(stat(path, &info) == 0 && S_ISDIR(info.st_mode)) { + // the directory exists, but we failed to add the watch + // increase errors + watcher->errors++; + } + } + else { + t->path = strdupz(path); + + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, + "JOURNAL WATCHER: watching directory: '%s'", + path); + + } + return t->wd; +} + +static void remove_watch(Watcher *watcher, int inotifyFd, int wd) { + int i; + for (i = 0; i < watcher->watchCount; ++i) { + if (watcher->watchList[i].wd == wd) { + + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, + "JOURNAL WATCHER: removing watch from directory: '%s'", + watcher->watchList[i].path); + + inotify_rm_watch(inotifyFd, watcher->watchList[i].wd); + free_slot(watcher, &watcher->watchList[i]); + return; + } + } + + nd_log(NDLS_COLLECTORS, NDLP_WARNING, + "JOURNAL WATCHER: cannot find directory watch %d to remove.", + wd); +} + +static void free_watches(Watcher *watcher, int inotifyFd) { + for (int i = 0; i < watcher->watchCount; ++i) { + if (watcher->watchList[i].wd != -1) { + inotify_rm_watch(inotifyFd, watcher->watchList[i].wd); + free_slot(watcher, &watcher->watchList[i]); + } + } + freez(watcher->watchList); + watcher->watchList = NULL; + + dictionary_destroy(watcher->pending); + watcher->pending = NULL; +} + +static char* get_path_from_wd(Watcher *watcher, int wd) { + for (int i = 0; i < watcher->watchCount; ++i) { + if (watcher->watchList[i].wd == wd) + return watcher->watchList[i].path; + } + return NULL; +} + +static bool is_directory_watched(Watcher *watcher, const char *path) { + for (int i = 0; i < watcher->watchCount; ++i) { + if (watcher->watchList[i].wd != -1 && strcmp(watcher->watchList[i].path, path) == 0) { + return true; + } + } + return false; +} + +static void watch_directory_and_subdirectories(Watcher *watcher, int inotifyFd, const char *basePath) { + DICTIONARY *dirs = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE); + + journal_directory_scan_recursively(NULL, dirs, basePath, 0); + + void *x; + dfe_start_read(dirs, x) { + const char *dirname = x_dfe.name; + // Check if this directory is already being watched + if (!is_directory_watched(watcher, dirname)) { + add_watch(watcher, inotifyFd, dirname); + } + } + dfe_done(x); + + dictionary_destroy(dirs); +} + +static bool is_subpath(const char *path, const char *subpath) { + // Use strncmp to compare the paths + if (strncmp(path, subpath, strlen(path)) == 0) { + // Ensure that the next character is a '/' or '\0' + char next_char = subpath[strlen(path)]; + return next_char == '/' || next_char == '\0'; + } + + return false; +} + +void remove_directory_watch(Watcher *watcher, int inotifyFd, const char *dirPath) { + for (int i = 0; i < watcher->watchCount; ++i) { + WatchEntry *t = &watcher->watchList[i]; + if (t->wd != -1 && is_subpath(t->path, dirPath)) { + inotify_rm_watch(inotifyFd, t->wd); + free_slot(watcher, t); + } + } + + struct journal_file *jf; + dfe_start_write(journal_files_registry, jf) { + if(is_subpath(jf->filename, dirPath)) + dictionary_del(journal_files_registry, jf->filename); + } + dfe_done(jf); + + dictionary_garbage_collect(journal_files_registry); +} + 
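remove_directory_watch() above drops every inotify watch and every journal_files_registry entry that lives under a deleted directory, and it relies on is_subpath() accepting a prefix match only when it ends on a path-component boundary; a bare strncmp() prefix test would also match sibling directories that merely share a name prefix. A small self-contained check of that boundary rule (hypothetical test paths, not taken from the patch):

    #include <assert.h>
    #include <stdbool.h>
    #include <string.h>

    // Same rule as is_subpath() above: "sub" is under "dir" only when the
    // prefix match ends at a '/' or at the end of the string.
    static bool under(const char *dir, const char *sub) {
        size_t len = strlen(dir);
        if (strncmp(dir, sub, len) != 0)
            return false;
        return sub[len] == '/' || sub[len] == '\0';
    }

    int main(void) {
        assert( under("/var/log/journal", "/var/log/journal/remote"));   // real subdirectory
        assert( under("/var/log/journal", "/var/log/journal"));          // the directory itself
        assert(!under("/var/log/journal", "/var/log/journal2/system"));  // sibling sharing a prefix
        return 0;
    }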
+void process_event(Watcher *watcher, int inotifyFd, struct inotify_event *event) { + if(!event->len) { + nd_log(NDLS_COLLECTORS, NDLP_NOTICE + , "JOURNAL WATCHER: received event with mask %u and len %u (this is zero) for path: '%s' - ignoring it." + , event->mask, event->len, event->name); + return; + } + + char *dirPath = get_path_from_wd(watcher, event->wd); + if(!dirPath) { + nd_log(NDLS_COLLECTORS, NDLP_NOTICE, + "JOURNAL WATCHER: received event with mask %u and len %u for path: '%s' - " + "but we can't find its watch descriptor - ignoring it." + , event->mask, event->len, event->name); + return; + } + + if(event->mask & IN_DELETE_SELF) { + remove_watch(watcher, inotifyFd, event->wd); + return; + } + + static __thread char fullPath[PATH_MAX]; + snprintfz(fullPath, sizeof(fullPath), "%s/%s", dirPath, event->name); + // fullPath contains the full path to the file + + size_t len = strlen(event->name); + + if(event->mask & IN_ISDIR) { + if (event->mask & (IN_DELETE | IN_MOVED_FROM)) { + // A directory is deleted or moved out + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, + "JOURNAL WATCHER: Directory deleted or moved out: '%s'", + fullPath); + + // Remove the watch - implement this function based on how you manage your watches + remove_directory_watch(watcher, inotifyFd, fullPath); + } + else if (event->mask & (IN_CREATE | IN_MOVED_TO)) { + // A new directory is created or moved in + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, + "JOURNAL WATCHER: New directory created or moved in: '%s'", + fullPath); + + // Start watching the new directory - recursive watch + watch_directory_and_subdirectories(watcher, inotifyFd, fullPath); + } + else + nd_log(NDLS_COLLECTORS, NDLP_WARNING, + "JOURNAL WATCHER: Received unhandled event with mask %u for directory '%s'", + event->mask, fullPath); + } + else if(len > sizeof(".journal") - 1 && strcmp(&event->name[len - (sizeof(".journal") - 1)], ".journal") == 0) { + // It is a file that ends in .journal + // add it to our pending list + dictionary_set(watcher->pending, fullPath, NULL, 0); + } + else + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, + "JOURNAL WATCHER: ignoring event with mask %u for file '%s'", + event->mask, fullPath); +} + +static void process_pending(Watcher *watcher) { + void *x; + dfe_start_write(watcher->pending, x) { + struct stat info; + const char *fullPath = x_dfe.name; + + if(stat(fullPath, &info) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, + "JOURNAL WATCHER: file '%s' no longer exists, removing it from the registry", + fullPath); + + dictionary_del(journal_files_registry, fullPath); + } + else if(S_ISREG(info.st_mode)) { + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, + "JOURNAL WATCHER: file '%s' has been added/updated, updating the registry", + fullPath); + + struct journal_file t = { + .file_last_modified_ut = info.st_mtim.tv_sec * USEC_PER_SEC + + info.st_mtim.tv_nsec / NSEC_PER_USEC, + .last_scan_monotonic_ut = now_monotonic_usec(), + .size = info.st_size, + .max_journal_vs_realtime_delta_ut = JOURNAL_VS_REALTIME_DELTA_DEFAULT_UT, + }; + struct journal_file *jf = dictionary_set(journal_files_registry, fullPath, &t, sizeof(t)); + journal_file_update_header(jf->filename, jf); + } + + dictionary_del(watcher->pending, fullPath); + } + dfe_done(x); + + dictionary_garbage_collect(watcher->pending); +} + +void *journal_watcher_main(void *arg __maybe_unused) { + while(1) { + Watcher watcher = { + .watchList = mallocz(INITIAL_WATCHES * sizeof(WatchEntry)), + .freeList = NULL, + .watchCount = 0, + .watchListSize = INITIAL_WATCHES, + .pending = 
dictionary_create(DICT_OPTION_DONT_OVERWRITE_VALUE|DICT_OPTION_SINGLE_THREADED), + .errors = 0, + }; + + int inotifyFd = inotify_init(); + if (inotifyFd < 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "inotify_init() failed."); + free_watches(&watcher, inotifyFd); + return NULL; + } + + for (unsigned i = 0; i < MAX_JOURNAL_DIRECTORIES; i++) { + if (!journal_directories[i].path) break; + watch_directory_and_subdirectories(&watcher, inotifyFd, journal_directories[i].path); + } + + usec_t last_headers_update_ut = now_monotonic_usec(); + struct buffered_reader reader; + while (1) { + buffered_reader_ret_t rc = buffered_reader_read_timeout( + &reader, inotifyFd, SYSTEMD_JOURNAL_EXECUTE_WATCHER_PENDING_EVERY_MS, false); + + if (rc != BUFFERED_READER_READ_OK && rc != BUFFERED_READER_READ_POLL_TIMEOUT) { + nd_log(NDLS_COLLECTORS, NDLP_CRIT, + "JOURNAL WATCHER: cannot read inotify events, buffered_reader_read_timeout() returned %d - " + "restarting the watcher.", + rc); + break; + } + + if(rc == BUFFERED_READER_READ_OK) { + bool unmount_event = false; + + ssize_t i = 0; + while (i < reader.read_len) { + struct inotify_event *event = (struct inotify_event *) &reader.read_buffer[i]; + + if(event->mask & IN_UNMOUNT) { + unmount_event = true; + break; + } + + process_event(&watcher, inotifyFd, event); + i += (ssize_t)EVENT_SIZE + event->len; + } + + reader.read_buffer[0] = '\0'; + reader.read_len = 0; + reader.pos = 0; + + if(unmount_event) + break; + } + + usec_t ut = now_monotonic_usec(); + if (dictionary_entries(watcher.pending) && (rc == BUFFERED_READER_READ_POLL_TIMEOUT || + last_headers_update_ut + (SYSTEMD_JOURNAL_EXECUTE_WATCHER_PENDING_EVERY_MS * USEC_PER_MS) <= ut)) { + process_pending(&watcher); + last_headers_update_ut = ut; + } + + if(watcher.errors) { + nd_log(NDLS_COLLECTORS, NDLP_NOTICE, + "JOURNAL WATCHER: there were errors in setting up inotify watches - restarting the watcher."); + } + } + + close(inotifyFd); + free_watches(&watcher, inotifyFd); + + // this will scan the directories and cleanup the registry + journal_files_registry_update(); + + sleep_usec(5 * USEC_PER_SEC); + } + + return NULL; +} diff --git a/collectors/systemd-journal.plugin/systemd-journal.c b/collectors/systemd-journal.plugin/systemd-journal.c index 304ff244ada4ed..f812b2161ecdad 100644 --- a/collectors/systemd-journal.plugin/systemd-journal.c +++ b/collectors/systemd-journal.plugin/systemd-journal.c @@ -5,400 +5,1903 @@ * GPL v3+ */ -// TODO - 1) MARKDOC +#include "systemd-internals.h" -#include "collectors/all.h" -#include "libnetdata/libnetdata.h" -#include "libnetdata/required_dummies.h" +/* + * TODO + * + * _UDEV_DEVLINK is frequently set more than once per field - support multi-value faces + * + */ -#ifndef SD_JOURNAL_ALL_NAMESPACES -#define JOURNAL_NAMESPACE SD_JOURNAL_LOCAL_ONLY -#else -#define JOURNAL_NAMESPACE SD_JOURNAL_ALL_NAMESPACES +#define FACET_MAX_VALUE_LENGTH 8192 + +#define SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION "View, search and analyze systemd journal entries." 
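The journal_watcher_main() loop above advances through the read buffer with i += EVENT_SIZE + event->len because inotify delivers variable-length records: a fixed struct inotify_event header followed by event->len bytes of NUL-padded name. A minimal standalone reader showing the same iteration (hypothetical: blocking read, a single hard-coded directory, no timeout or restart handling as in the plugin):

    #include <stdio.h>
    #include <sys/inotify.h>
    #include <unistd.h>

    int main(void) {
        // buffer aligned for struct inotify_event, as recommended by inotify(7)
        char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));

        int fd = inotify_init();
        if (fd < 0)
            return 1;

        if (inotify_add_watch(fd, "/var/log/journal", IN_CREATE | IN_MODIFY | IN_DELETE) < 0)
            return 1;

        ssize_t len = read(fd, buf, sizeof(buf));            // one batch of events
        for (ssize_t i = 0; i < len; ) {
            struct inotify_event *ev = (struct inotify_event *)&buf[i];
            if (ev->len)                                     // ev->name exists only when len > 0
                printf("mask=0x%x name=%s\n", (unsigned)ev->mask, ev->name);
            i += (ssize_t)(sizeof(struct inotify_event) + ev->len);   // skip header + name
        }

        close(fd);
        return 0;
    }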
+#define SYSTEMD_JOURNAL_FUNCTION_NAME "systemd-journal" +#define SYSTEMD_JOURNAL_DEFAULT_TIMEOUT 60 +#define SYSTEMD_JOURNAL_MAX_PARAMS 1000 +#define SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION (1 * 3600) +#define SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY 200 +#define SYSTEMD_JOURNAL_DEFAULT_ITEMS_SAMPLING 1000000 +#define SYSTEMD_JOURNAL_SAMPLING_SLOTS 1000 +#define SYSTEMD_JOURNAL_SAMPLING_RECALIBRATE 10000 + +#define JOURNAL_PARAMETER_HELP "help" +#define JOURNAL_PARAMETER_AFTER "after" +#define JOURNAL_PARAMETER_BEFORE "before" +#define JOURNAL_PARAMETER_ANCHOR "anchor" +#define JOURNAL_PARAMETER_LAST "last" +#define JOURNAL_PARAMETER_QUERY "query" +#define JOURNAL_PARAMETER_FACETS "facets" +#define JOURNAL_PARAMETER_HISTOGRAM "histogram" +#define JOURNAL_PARAMETER_DIRECTION "direction" +#define JOURNAL_PARAMETER_IF_MODIFIED_SINCE "if_modified_since" +#define JOURNAL_PARAMETER_DATA_ONLY "data_only" +#define JOURNAL_PARAMETER_SOURCE "source" +#define JOURNAL_PARAMETER_INFO "info" +#define JOURNAL_PARAMETER_ID "id" +#define JOURNAL_PARAMETER_PROGRESS "progress" +#define JOURNAL_PARAMETER_SLICE "slice" +#define JOURNAL_PARAMETER_DELTA "delta" +#define JOURNAL_PARAMETER_TAIL "tail" +#define JOURNAL_PARAMETER_SAMPLING "sampling" + +#define JOURNAL_KEY_ND_JOURNAL_FILE "ND_JOURNAL_FILE" +#define JOURNAL_KEY_ND_JOURNAL_PROCESS "ND_JOURNAL_PROCESS" + +#define JOURNAL_DEFAULT_SLICE_MODE true +#define JOURNAL_DEFAULT_DIRECTION FACETS_ANCHOR_DIRECTION_BACKWARD + +#define SYSTEMD_ALWAYS_VISIBLE_KEYS NULL + +#define SYSTEMD_KEYS_EXCLUDED_FROM_FACETS \ + "!MESSAGE_ID" \ + "|*MESSAGE*" \ + "|*_RAW" \ + "|*_USEC" \ + "|*_NSEC" \ + "|*TIMESTAMP*" \ + "|*_ID" \ + "|*_ID_*" \ + "|__*" \ + "" + +#define SYSTEMD_KEYS_INCLUDED_IN_FACETS \ + \ + /* --- USER JOURNAL FIELDS --- */ \ + \ + /* "|MESSAGE" */ \ + "|MESSAGE_ID" \ + "|PRIORITY" \ + "|CODE_FILE" \ + /* "|CODE_LINE" */ \ + "|CODE_FUNC" \ + "|ERRNO" \ + /* "|INVOCATION_ID" */ \ + /* "|USER_INVOCATION_ID" */ \ + "|SYSLOG_FACILITY" \ + "|SYSLOG_IDENTIFIER" \ + /* "|SYSLOG_PID" */ \ + /* "|SYSLOG_TIMESTAMP" */ \ + /* "|SYSLOG_RAW" */ \ + /* "!DOCUMENTATION" */ \ + /* "|TID" */ \ + "|UNIT" \ + "|USER_UNIT" \ + "|UNIT_RESULT" /* undocumented */ \ + \ + \ + /* --- TRUSTED JOURNAL FIELDS --- */ \ + \ + /* "|_PID" */ \ + "|_UID" \ + "|_GID" \ + "|_COMM" \ + "|_EXE" \ + /* "|_CMDLINE" */ \ + "|_CAP_EFFECTIVE" \ + /* "|_AUDIT_SESSION" */ \ + "|_AUDIT_LOGINUID" \ + "|_SYSTEMD_CGROUP" \ + "|_SYSTEMD_SLICE" \ + "|_SYSTEMD_UNIT" \ + "|_SYSTEMD_USER_UNIT" \ + "|_SYSTEMD_USER_SLICE" \ + "|_SYSTEMD_SESSION" \ + "|_SYSTEMD_OWNER_UID" \ + "|_SELINUX_CONTEXT" \ + /* "|_SOURCE_REALTIME_TIMESTAMP" */ \ + "|_BOOT_ID" \ + "|_MACHINE_ID" \ + /* "|_SYSTEMD_INVOCATION_ID" */ \ + "|_HOSTNAME" \ + "|_TRANSPORT" \ + "|_STREAM_ID" \ + /* "|LINE_BREAK" */ \ + "|_NAMESPACE" \ + "|_RUNTIME_SCOPE" \ + \ + \ + /* --- KERNEL JOURNAL FIELDS --- */ \ + \ + /* "|_KERNEL_DEVICE" */ \ + "|_KERNEL_SUBSYSTEM" \ + /* "|_UDEV_SYSNAME" */ \ + "|_UDEV_DEVNODE" \ + /* "|_UDEV_DEVLINK" */ \ + \ + \ + /* --- LOGGING ON BEHALF --- */ \ + \ + "|OBJECT_UID" \ + "|OBJECT_GID" \ + "|OBJECT_COMM" \ + "|OBJECT_EXE" \ + /* "|OBJECT_CMDLINE" */ \ + /* "|OBJECT_AUDIT_SESSION" */ \ + "|OBJECT_AUDIT_LOGINUID" \ + "|OBJECT_SYSTEMD_CGROUP" \ + "|OBJECT_SYSTEMD_SESSION" \ + "|OBJECT_SYSTEMD_OWNER_UID" \ + "|OBJECT_SYSTEMD_UNIT" \ + "|OBJECT_SYSTEMD_USER_UNIT" \ + \ + \ + /* --- CORE DUMPS --- */ \ + \ + "|COREDUMP_COMM" \ + "|COREDUMP_UNIT" \ + "|COREDUMP_USER_UNIT" \ + "|COREDUMP_SIGNAL_NAME" \ + "|COREDUMP_CGROUP" \ + \ + 
\ + /* --- DOCKER --- */ \ + \ + "|CONTAINER_ID" \ + /* "|CONTAINER_ID_FULL" */ \ + "|CONTAINER_NAME" \ + "|CONTAINER_TAG" \ + "|IMAGE_NAME" /* undocumented */ \ + /* "|CONTAINER_PARTIAL_MESSAGE" */ \ + \ + \ + /* --- NETDATA --- */ \ + \ + "|ND_NIDL_NODE" \ + "|ND_NIDL_CONTEXT" \ + "|ND_LOG_SOURCE" \ + /*"|ND_MODULE" */ \ + "|ND_ALERT_NAME" \ + "|ND_ALERT_CLASS" \ + "|ND_ALERT_COMPONENT" \ + "|ND_ALERT_TYPE" \ + \ + "" + +// ---------------------------------------------------------------------------- + +typedef struct function_query_status { + bool *cancelled; // a pointer to the cancelling boolean + usec_t stop_monotonic_ut; + + usec_t started_monotonic_ut; + + // request + SD_JOURNAL_FILE_SOURCE_TYPE source_type; + SIMPLE_PATTERN *sources; + usec_t after_ut; + usec_t before_ut; + + struct { + usec_t start_ut; + usec_t stop_ut; + } anchor; + + FACETS_ANCHOR_DIRECTION direction; + size_t entries; + usec_t if_modified_since; + bool delta; + bool tail; + bool data_only; + bool slice; + size_t sampling; + size_t filters; + usec_t last_modified; + const char *query; + const char *histogram; + + struct { + usec_t start_ut; // the starting time of the query - we start from this + usec_t stop_ut; // the ending time of the query - we stop at this + usec_t first_msg_ut; + + sd_id128_t first_msg_writer; + uint64_t first_msg_seqnum; + } query_file; + + struct { + uint32_t enable_after_samples; + uint32_t slots; + uint32_t sampled; + uint32_t unsampled; + uint32_t estimated; + } samples; + + struct { + uint32_t enable_after_samples; + uint32_t every; + uint32_t skipped; + uint32_t recalibrate; + uint32_t sampled; + uint32_t unsampled; + uint32_t estimated; + } samples_per_file; + + struct { + usec_t start_ut; + usec_t end_ut; + usec_t step_ut; + uint32_t enable_after_samples; + uint32_t sampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS]; + uint32_t unsampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS]; + } samples_per_time_slot; + + // per file progress info + // size_t cached_count; + + // progress statistics + usec_t matches_setup_ut; + size_t rows_useful; + size_t rows_read; + size_t bytes_read; + size_t files_matched; + size_t file_working; +} FUNCTION_QUERY_STATUS; + +static void log_fqs(FUNCTION_QUERY_STATUS *fqs, const char *msg) { + netdata_log_error("ERROR: %s, on query " + "timeframe [%"PRIu64" - %"PRIu64"], " + "anchor [%"PRIu64" - %"PRIu64"], " + "if_modified_since %"PRIu64", " + "data_only:%s, delta:%s, tail:%s, direction:%s" + , msg + , fqs->after_ut, fqs->before_ut + , fqs->anchor.start_ut, fqs->anchor.stop_ut + , fqs->if_modified_since + , fqs->data_only ? "true" : "false" + , fqs->delta ? "true" : "false" + , fqs->tail ? "tail" : "false" + , fqs->direction == FACETS_ANCHOR_DIRECTION_FORWARD ? 
"forward" : "backward"); +} + +static inline bool netdata_systemd_journal_seek_to(sd_journal *j, usec_t timestamp) { + if(sd_journal_seek_realtime_usec(j, timestamp) < 0) { + netdata_log_error("SYSTEMD-JOURNAL: Failed to seek to %" PRIu64, timestamp); + if(sd_journal_seek_tail(j) < 0) { + netdata_log_error("SYSTEMD-JOURNAL: Failed to seek to journal's tail"); + return false; + } + } + + return true; +} + +#define JD_SOURCE_REALTIME_TIMESTAMP "_SOURCE_REALTIME_TIMESTAMP" + +// ---------------------------------------------------------------------------- +// sampling support + +static void sampling_query_init(FUNCTION_QUERY_STATUS *fqs, FACETS *facets) { + if(!fqs->sampling) + return; + + if(!fqs->slice) { + // the user is doing a full data query + // disable sampling + fqs->sampling = 0; + return; + } + + if(fqs->data_only) { + // the user is doing a data query + // disable sampling + fqs->sampling = 0; + return; + } + + if(!fqs->files_matched) { + // no files have been matched + // disable sampling + fqs->sampling = 0; + return; + } + + fqs->samples.slots = facets_histogram_slots(facets); + if(fqs->samples.slots < 2) fqs->samples.slots = 2; + if(fqs->samples.slots > SYSTEMD_JOURNAL_SAMPLING_SLOTS) + fqs->samples.slots = SYSTEMD_JOURNAL_SAMPLING_SLOTS; + + if(!fqs->after_ut || !fqs->before_ut || fqs->after_ut >= fqs->before_ut) { + // we don't have enough information for sampling + fqs->sampling = 0; + return; + } + + usec_t delta = fqs->before_ut - fqs->after_ut; + usec_t step = delta / facets_histogram_slots(facets) - 1; + if(step < 1) step = 1; + + fqs->samples_per_time_slot.start_ut = fqs->after_ut; + fqs->samples_per_time_slot.end_ut = fqs->before_ut; + fqs->samples_per_time_slot.step_ut = step; + + // the minimum number of rows to enable sampling + fqs->samples.enable_after_samples = fqs->sampling / 2; + + size_t files_matched = fqs->files_matched; + if(!files_matched) + files_matched = 1; + + // the minimum number of rows per file to enable sampling + fqs->samples_per_file.enable_after_samples = (fqs->sampling / 4) / files_matched; + if(fqs->samples_per_file.enable_after_samples < fqs->entries) + fqs->samples_per_file.enable_after_samples = fqs->entries; + + // the minimum number of rows per time slot to enable sampling + fqs->samples_per_time_slot.enable_after_samples = (fqs->sampling / 4) / fqs->samples.slots; + if(fqs->samples_per_time_slot.enable_after_samples < fqs->entries) + fqs->samples_per_time_slot.enable_after_samples = fqs->entries; +} + +static void sampling_file_init(FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf __maybe_unused) { + fqs->samples_per_file.sampled = 0; + fqs->samples_per_file.unsampled = 0; + fqs->samples_per_file.estimated = 0; + fqs->samples_per_file.every = 0; + fqs->samples_per_file.skipped = 0; + fqs->samples_per_file.recalibrate = 0; +} + +static size_t sampling_file_lines_scanned_so_far(FUNCTION_QUERY_STATUS *fqs) { + size_t sampled = fqs->samples_per_file.sampled + fqs->samples_per_file.unsampled; + if(!sampled) sampled = 1; + return sampled; +} + +static void sampling_running_file_query_overlapping_timeframe_ut( + FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction, + usec_t msg_ut, usec_t *after_ut, usec_t *before_ut) { + + // find the overlap of the query and file timeframes + // taking into account the first message we encountered + + usec_t oldest_ut, newest_ut; + if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) { + // the first message we know (oldest) + oldest_ut = fqs->query_file.first_msg_ut ? 
fqs->query_file.first_msg_ut : jf->msg_first_ut; + if(!oldest_ut) oldest_ut = fqs->query_file.start_ut; + + if(jf->msg_last_ut) + newest_ut = MIN(fqs->query_file.stop_ut, jf->msg_last_ut); + else if(jf->file_last_modified_ut) + newest_ut = MIN(fqs->query_file.stop_ut, jf->file_last_modified_ut); + else + newest_ut = fqs->query_file.stop_ut; + + if(msg_ut < oldest_ut) + oldest_ut = msg_ut - 1; + } + else /* BACKWARD */ { + // the latest message we know (newest) + newest_ut = fqs->query_file.first_msg_ut ? fqs->query_file.first_msg_ut : jf->msg_last_ut; + if(!newest_ut) newest_ut = fqs->query_file.start_ut; + + if(jf->msg_first_ut) + oldest_ut = MAX(fqs->query_file.stop_ut, jf->msg_first_ut); + else + oldest_ut = fqs->query_file.stop_ut; + + if(newest_ut < msg_ut) + newest_ut = msg_ut + 1; + } + + *after_ut = oldest_ut; + *before_ut = newest_ut; +} + +static double sampling_running_file_query_progress_by_time(FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, + FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) { + + usec_t after_ut, before_ut, elapsed_ut; + sampling_running_file_query_overlapping_timeframe_ut(fqs, jf, direction, msg_ut, &after_ut, &before_ut); + + if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) + elapsed_ut = msg_ut - after_ut; + else + elapsed_ut = before_ut - msg_ut; + + usec_t total_ut = before_ut - after_ut; + double progress = (double)elapsed_ut / (double)total_ut; + + return progress; +} + +static usec_t sampling_running_file_query_remaining_time(FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, + FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut, + usec_t *total_time_ut, usec_t *remaining_start_ut, + usec_t *remaining_end_ut) { + usec_t after_ut, before_ut; + sampling_running_file_query_overlapping_timeframe_ut(fqs, jf, direction, msg_ut, &after_ut, &before_ut); + + // since we have a timestamp in msg_ut + // this timestamp can extend the overlap + if(msg_ut <= after_ut) + after_ut = msg_ut - 1; + + if(msg_ut >= before_ut) + before_ut = msg_ut + 1; + + // return the remaining duration + usec_t remaining_from_ut, remaining_to_ut; + if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) { + remaining_from_ut = msg_ut; + remaining_to_ut = before_ut; + } + else { + remaining_from_ut = after_ut; + remaining_to_ut = msg_ut; + } + + usec_t remaining_ut = remaining_to_ut - remaining_from_ut; + + if(total_time_ut) + *total_time_ut = (before_ut > after_ut) ? 
before_ut - after_ut : 1; + + if(remaining_start_ut) + *remaining_start_ut = remaining_from_ut; + + if(remaining_end_ut) + *remaining_end_ut = remaining_to_ut; + + return remaining_ut; +} + +static size_t sampling_running_file_query_estimate_remaining_lines_by_time(FUNCTION_QUERY_STATUS *fqs, + struct journal_file *jf, + FACETS_ANCHOR_DIRECTION direction, + usec_t msg_ut) { + size_t scanned_lines = sampling_file_lines_scanned_so_far(fqs); + + // Calculate the proportion of time covered + usec_t total_time_ut, remaining_start_ut, remaining_end_ut; + usec_t remaining_time_ut = sampling_running_file_query_remaining_time(fqs, jf, direction, msg_ut, &total_time_ut, + &remaining_start_ut, &remaining_end_ut); + if (total_time_ut == 0) total_time_ut = 1; + + double proportion_by_time = (double) (total_time_ut - remaining_time_ut) / (double) total_time_ut; + + if (proportion_by_time == 0 || proportion_by_time > 1.0 || !isfinite(proportion_by_time)) + proportion_by_time = 1.0; + + // Estimate the total number of lines in the file + size_t expected_matching_logs_by_time = (size_t)((double)scanned_lines / proportion_by_time); + + if(jf->messages_in_file && expected_matching_logs_by_time > jf->messages_in_file) + expected_matching_logs_by_time = jf->messages_in_file; + + // Calculate the estimated number of remaining lines + size_t remaining_logs_by_time = expected_matching_logs_by_time - scanned_lines; + if (remaining_logs_by_time < 1) remaining_logs_by_time = 1; + +// nd_log(NDLS_COLLECTORS, NDLP_INFO, +// "JOURNAL ESTIMATION: '%s' " +// "scanned_lines=%zu [sampled=%zu, unsampled=%zu, estimated=%zu], " +// "file [%"PRIu64" - %"PRIu64", duration %"PRId64", known lines in file %zu], " +// "query [%"PRIu64" - %"PRIu64", duration %"PRId64"], " +// "first message read from the file at %"PRIu64", current message at %"PRIu64", " +// "proportion of time %.2f %%, " +// "expected total lines in file %zu, " +// "remaining lines %zu, " +// "remaining time %"PRIu64" [%"PRIu64" - %"PRIu64", duration %"PRId64"]" +// , jf->filename +// , scanned_lines, fqs->samples_per_file.sampled, fqs->samples_per_file.unsampled, fqs->samples_per_file.estimated +// , jf->msg_first_ut, jf->msg_last_ut, jf->msg_last_ut - jf->msg_first_ut, jf->messages_in_file +// , fqs->query_file.start_ut, fqs->query_file.stop_ut, fqs->query_file.stop_ut - fqs->query_file.start_ut +// , fqs->query_file.first_msg_ut, msg_ut +// , proportion_by_time * 100.0 +// , expected_matching_logs_by_time +// , remaining_logs_by_time +// , remaining_time_ut, remaining_start_ut, remaining_end_ut, remaining_end_ut - remaining_start_ut +// ); + + return remaining_logs_by_time; +} + +static size_t sampling_running_file_query_estimate_remaining_lines(sd_journal *j, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) { + size_t expected_matching_logs_by_seqnum = 0; + double proportion_by_seqnum = 0.0; + size_t remaining_logs_by_seqnum = 0; + +#ifdef HAVE_SD_JOURNAL_GET_SEQNUM + uint64_t current_msg_seqnum; + sd_id128_t current_msg_writer; + if(!fqs->query_file.first_msg_seqnum || sd_journal_get_seqnum(j, ¤t_msg_seqnum, ¤t_msg_writer) < 0) { + fqs->query_file.first_msg_seqnum = 0; + fqs->query_file.first_msg_writer = SD_ID128_NULL; + } + else if(jf->messages_in_file) { + size_t scanned_lines = sampling_file_lines_scanned_so_far(fqs); + + double proportion_of_all_lines_so_far; + if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) + proportion_of_all_lines_so_far = (double)scanned_lines / (double)(current_msg_seqnum - 
jf->first_seqnum); + else + proportion_of_all_lines_so_far = (double)scanned_lines / (double)(jf->last_seqnum - current_msg_seqnum); + + if(proportion_of_all_lines_so_far > 1.0) + proportion_of_all_lines_so_far = 1.0; + + expected_matching_logs_by_seqnum = (size_t)(proportion_of_all_lines_so_far * (double)jf->messages_in_file); + + proportion_by_seqnum = (double)scanned_lines / (double)expected_matching_logs_by_seqnum; + + if (proportion_by_seqnum == 0 || proportion_by_seqnum > 1.0 || !isfinite(proportion_by_seqnum)) + proportion_by_seqnum = 1.0; + + remaining_logs_by_seqnum = expected_matching_logs_by_seqnum - scanned_lines; + if(!remaining_logs_by_seqnum) remaining_logs_by_seqnum = 1; + } #endif -#include -#include + if(remaining_logs_by_seqnum) + return remaining_logs_by_seqnum; -#define FACET_MAX_VALUE_LENGTH 8192 + return sampling_running_file_query_estimate_remaining_lines_by_time(fqs, jf, direction, msg_ut); +} + +static void sampling_decide_file_sampling_every(sd_journal *j, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) { + size_t files_matched = fqs->files_matched; + if(!files_matched) files_matched = 1; + + size_t remaining_lines = sampling_running_file_query_estimate_remaining_lines(j, fqs, jf, direction, msg_ut); + size_t wanted_samples = (fqs->sampling / 2) / files_matched; + if(!wanted_samples) wanted_samples = 1; + + fqs->samples_per_file.every = remaining_lines / wanted_samples; + + if(fqs->samples_per_file.every < 1) + fqs->samples_per_file.every = 1; +} + +typedef enum { + SAMPLING_STOP_AND_ESTIMATE = -1, + SAMPLING_FULL = 0, + SAMPLING_SKIP_FIELDS = 1, +} sampling_t; + +static inline sampling_t is_row_in_sample(sd_journal *j, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, usec_t msg_ut, FACETS_ANCHOR_DIRECTION direction, bool candidate_to_keep) { + if(!fqs->sampling || candidate_to_keep) + return SAMPLING_FULL; + + if(unlikely(msg_ut < fqs->samples_per_time_slot.start_ut)) + msg_ut = fqs->samples_per_time_slot.start_ut; + if(unlikely(msg_ut > fqs->samples_per_time_slot.end_ut)) + msg_ut = fqs->samples_per_time_slot.end_ut; + + size_t slot = (msg_ut - fqs->samples_per_time_slot.start_ut) / fqs->samples_per_time_slot.step_ut; + if(slot >= fqs->samples.slots) + slot = fqs->samples.slots - 1; + + bool should_sample = false; + + if(fqs->samples.sampled < fqs->samples.enable_after_samples || + fqs->samples_per_file.sampled < fqs->samples_per_file.enable_after_samples || + fqs->samples_per_time_slot.sampled[slot] < fqs->samples_per_time_slot.enable_after_samples) + should_sample = true; + + else if(fqs->samples_per_file.recalibrate >= SYSTEMD_JOURNAL_SAMPLING_RECALIBRATE || !fqs->samples_per_file.every) { + // this is the first to be unsampled for this file + sampling_decide_file_sampling_every(j, fqs, jf, direction, msg_ut); + fqs->samples_per_file.recalibrate = 0; + should_sample = true; + } + else { + // we sample 1 every fqs->samples_per_file.every + if(fqs->samples_per_file.skipped >= fqs->samples_per_file.every) { + fqs->samples_per_file.skipped = 0; + should_sample = true; + } + else + fqs->samples_per_file.skipped++; + } + + if(should_sample) { + fqs->samples.sampled++; + fqs->samples_per_file.sampled++; + fqs->samples_per_time_slot.sampled[slot]++; + + return SAMPLING_FULL; + } + + fqs->samples_per_file.recalibrate++; + + fqs->samples.unsampled++; + fqs->samples_per_file.unsampled++; + fqs->samples_per_time_slot.unsampled[slot]++; + + if(fqs->samples_per_file.unsampled > fqs->samples_per_file.sampled) { + 
double progress_by_time = sampling_running_file_query_progress_by_time(fqs, jf, direction, msg_ut); + + if(progress_by_time > SYSTEMD_JOURNAL_ENABLE_ESTIMATIONS_FILE_PERCENTAGE) + return SAMPLING_STOP_AND_ESTIMATE; + } + + return SAMPLING_SKIP_FIELDS; +} + +static void sampling_update_running_query_file_estimates(FACETS *facets, sd_journal *j, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, usec_t msg_ut, FACETS_ANCHOR_DIRECTION direction) { + usec_t total_time_ut, remaining_start_ut, remaining_end_ut; + sampling_running_file_query_remaining_time(fqs, jf, direction, msg_ut, &total_time_ut, &remaining_start_ut, + &remaining_end_ut); + size_t remaining_lines = sampling_running_file_query_estimate_remaining_lines(j, fqs, jf, direction, msg_ut); + facets_update_estimations(facets, remaining_start_ut, remaining_end_ut, remaining_lines); + fqs->samples.estimated += remaining_lines; + fqs->samples_per_file.estimated += remaining_lines; +} + +// ---------------------------------------------------------------------------- + +static inline size_t netdata_systemd_journal_process_row(sd_journal *j, FACETS *facets, struct journal_file *jf, usec_t *msg_ut) { + const void *data; + size_t length, bytes = 0; + + facets_add_key_value_length(facets, JOURNAL_KEY_ND_JOURNAL_FILE, sizeof(JOURNAL_KEY_ND_JOURNAL_FILE) - 1, jf->filename, jf->filename_len); + + SD_JOURNAL_FOREACH_DATA(j, data, length) { + const char *key, *value; + size_t key_length, value_length; + + if(!parse_journal_field(data, length, &key, &key_length, &value, &value_length)) + continue; + +#ifdef NETDATA_INTERNAL_CHECKS + usec_t origin_journal_ut = *msg_ut; +#endif + if(unlikely(key_length == sizeof(JD_SOURCE_REALTIME_TIMESTAMP) - 1 && + memcmp(key, JD_SOURCE_REALTIME_TIMESTAMP, sizeof(JD_SOURCE_REALTIME_TIMESTAMP) - 1) == 0)) { + usec_t ut = str2ull(value, NULL); + if(ut && ut < *msg_ut) { + usec_t delta = *msg_ut - ut; + *msg_ut = ut; + + if(delta > JOURNAL_VS_REALTIME_DELTA_MAX_UT) + delta = JOURNAL_VS_REALTIME_DELTA_MAX_UT; + + // update max_journal_vs_realtime_delta_ut if the delta increased + usec_t expected = jf->max_journal_vs_realtime_delta_ut; + do { + if(delta <= expected) + break; + } while(!__atomic_compare_exchange_n(&jf->max_journal_vs_realtime_delta_ut, &expected, delta, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)); + + internal_error(delta > expected, + "increased max_journal_vs_realtime_delta_ut from %"PRIu64" to %"PRIu64", " + "journal %"PRIu64", actual %"PRIu64" (delta %"PRIu64")" + , expected, delta, origin_journal_ut, *msg_ut, origin_journal_ut - (*msg_ut)); + } + } + + bytes += length; + facets_add_key_value_length(facets, key, key_length, value, value_length <= FACET_MAX_VALUE_LENGTH ? 
value_length : FACET_MAX_VALUE_LENGTH); + } + + return bytes; +} + +#define FUNCTION_PROGRESS_UPDATE_ROWS(rows_read, rows) __atomic_fetch_add(&(rows_read), rows, __ATOMIC_RELAXED) +#define FUNCTION_PROGRESS_UPDATE_BYTES(bytes_read, bytes) __atomic_fetch_add(&(bytes_read), bytes, __ATOMIC_RELAXED) +#define FUNCTION_PROGRESS_EVERY_ROWS (1ULL << 13) +#define FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS (1ULL << 7) + +static inline ND_SD_JOURNAL_STATUS check_stop(const bool *cancelled, const usec_t *stop_monotonic_ut) { + if(cancelled && __atomic_load_n(cancelled, __ATOMIC_RELAXED)) { + internal_error(true, "Function has been cancelled"); + return ND_SD_JOURNAL_CANCELLED; + } + + if(now_monotonic_usec() > __atomic_load_n(stop_monotonic_ut, __ATOMIC_RELAXED)) { + internal_error(true, "Function timed out"); + return ND_SD_JOURNAL_TIMED_OUT; + } + + return ND_SD_JOURNAL_OK; +} + +ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_backward( + sd_journal *j, BUFFER *wb __maybe_unused, FACETS *facets, + struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) { + + usec_t anchor_delta = __atomic_load_n(&jf->max_journal_vs_realtime_delta_ut, __ATOMIC_RELAXED); + + usec_t start_ut = ((fqs->data_only && fqs->anchor.start_ut) ? fqs->anchor.start_ut : fqs->before_ut) + anchor_delta; + usec_t stop_ut = (fqs->data_only && fqs->anchor.stop_ut) ? fqs->anchor.stop_ut : fqs->after_ut; + bool stop_when_full = (fqs->data_only && !fqs->anchor.stop_ut); + + fqs->query_file.start_ut = start_ut; + fqs->query_file.stop_ut = stop_ut; + + if(!netdata_systemd_journal_seek_to(j, start_ut)) + return ND_SD_JOURNAL_FAILED_TO_SEEK; + + size_t errors_no_timestamp = 0; + usec_t latest_msg_ut = 0; // the biggest timestamp we have seen so far + usec_t first_msg_ut = 0; // the first message we got from the db + size_t row_counter = 0, last_row_counter = 0, rows_useful = 0; + size_t bytes = 0, last_bytes = 0; + + usec_t last_usec_from = 0; + usec_t last_usec_to = 0; + + ND_SD_JOURNAL_STATUS status = ND_SD_JOURNAL_OK; + + facets_rows_begin(facets); + while (status == ND_SD_JOURNAL_OK && sd_journal_previous(j) > 0) { + usec_t msg_ut = 0; + if(sd_journal_get_realtime_usec(j, &msg_ut) < 0 || !msg_ut) { + errors_no_timestamp++; + continue; + } + + if (unlikely(msg_ut > start_ut)) + continue; + + if (unlikely(msg_ut < stop_ut)) + break; + + if(unlikely(msg_ut > latest_msg_ut)) + latest_msg_ut = msg_ut; + + if(unlikely(!first_msg_ut)) { + first_msg_ut = msg_ut; + fqs->query_file.first_msg_ut = msg_ut; + +#ifdef HAVE_SD_JOURNAL_GET_SEQNUM + if(sd_journal_get_seqnum(j, &fqs->query_file.first_msg_seqnum, &fqs->query_file.first_msg_writer) < 0) { + fqs->query_file.first_msg_seqnum = 0; + fqs->query_file.first_msg_writer = SD_ID128_NULL; + } +#endif + } + + sampling_t sample = is_row_in_sample(j, fqs, jf, msg_ut, + FACETS_ANCHOR_DIRECTION_BACKWARD, + facets_row_candidate_to_keep(facets, msg_ut)); + + if(sample == SAMPLING_FULL) { + bytes += netdata_systemd_journal_process_row(j, facets, jf, &msg_ut); + + // make sure each line gets a unique timestamp + if(unlikely(msg_ut >= last_usec_from && msg_ut <= last_usec_to)) + msg_ut = --last_usec_from; + else + last_usec_from = last_usec_to = msg_ut; + + if(facets_row_finished(facets, msg_ut)) + rows_useful++; + + row_counter++; + if(unlikely((row_counter % FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS) == 0 && + stop_when_full && + facets_rows(facets) >= fqs->entries)) { + // stop the data only query + usec_t oldest = facets_row_oldest_ut(facets); + if(oldest && msg_ut < (oldest - anchor_delta)) + break; + } + + 
if(unlikely(row_counter % FUNCTION_PROGRESS_EVERY_ROWS == 0)) { + FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter); + last_row_counter = row_counter; + + FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes); + last_bytes = bytes; + + status = check_stop(fqs->cancelled, &fqs->stop_monotonic_ut); + } + } + else if(sample == SAMPLING_SKIP_FIELDS) + facets_row_finished_unsampled(facets, msg_ut); + else { + sampling_update_running_query_file_estimates(facets, j, fqs, jf, msg_ut, FACETS_ANCHOR_DIRECTION_BACKWARD); + break; + } + } + + FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter); + FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes); + + fqs->rows_useful += rows_useful; + + if(errors_no_timestamp) + netdata_log_error("SYSTEMD-JOURNAL: %zu lines did not have timestamps", errors_no_timestamp); + + if(latest_msg_ut > fqs->last_modified) + fqs->last_modified = latest_msg_ut; + + return status; +} + +ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_forward( + sd_journal *j, BUFFER *wb __maybe_unused, FACETS *facets, + struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) { + + usec_t anchor_delta = __atomic_load_n(&jf->max_journal_vs_realtime_delta_ut, __ATOMIC_RELAXED); + + usec_t start_ut = (fqs->data_only && fqs->anchor.start_ut) ? fqs->anchor.start_ut : fqs->after_ut; + usec_t stop_ut = ((fqs->data_only && fqs->anchor.stop_ut) ? fqs->anchor.stop_ut : fqs->before_ut) + anchor_delta; + bool stop_when_full = (fqs->data_only && !fqs->anchor.stop_ut); + + fqs->query_file.start_ut = start_ut; + fqs->query_file.stop_ut = stop_ut; + + if(!netdata_systemd_journal_seek_to(j, start_ut)) + return ND_SD_JOURNAL_FAILED_TO_SEEK; + + size_t errors_no_timestamp = 0; + usec_t latest_msg_ut = 0; // the biggest timestamp we have seen so far + usec_t first_msg_ut = 0; // the first message we got from the db + size_t row_counter = 0, last_row_counter = 0, rows_useful = 0; + size_t bytes = 0, last_bytes = 0; + + usec_t last_usec_from = 0; + usec_t last_usec_to = 0; + + ND_SD_JOURNAL_STATUS status = ND_SD_JOURNAL_OK; + + facets_rows_begin(facets); + while (status == ND_SD_JOURNAL_OK && sd_journal_next(j) > 0) { + usec_t msg_ut = 0; + if(sd_journal_get_realtime_usec(j, &msg_ut) < 0 || !msg_ut) { + errors_no_timestamp++; + continue; + } + + if (unlikely(msg_ut < start_ut)) + continue; + + if (unlikely(msg_ut > stop_ut)) + break; + + if(likely(msg_ut > latest_msg_ut)) + latest_msg_ut = msg_ut; + + if(unlikely(!first_msg_ut)) { + first_msg_ut = msg_ut; + fqs->query_file.first_msg_ut = msg_ut; + } + + sampling_t sample = is_row_in_sample(j, fqs, jf, msg_ut, + FACETS_ANCHOR_DIRECTION_FORWARD, + facets_row_candidate_to_keep(facets, msg_ut)); + + if(sample == SAMPLING_FULL) { + bytes += netdata_systemd_journal_process_row(j, facets, jf, &msg_ut); + + // make sure each line gets a unique timestamp + if(unlikely(msg_ut >= last_usec_from && msg_ut <= last_usec_to)) + msg_ut = ++last_usec_to; + else + last_usec_from = last_usec_to = msg_ut; + + if(facets_row_finished(facets, msg_ut)) + rows_useful++; + + row_counter++; + if(unlikely((row_counter % FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS) == 0 && + stop_when_full && + facets_rows(facets) >= fqs->entries)) { + // stop the data only query + usec_t newest = facets_row_newest_ut(facets); + if(newest && msg_ut > (newest + anchor_delta)) + break; + } + + if(unlikely(row_counter % FUNCTION_PROGRESS_EVERY_ROWS == 0)) { + FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter); + 
last_row_counter = row_counter; + + FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes); + last_bytes = bytes; + + status = check_stop(fqs->cancelled, &fqs->stop_monotonic_ut); + } + } + else if(sample == SAMPLING_SKIP_FIELDS) + facets_row_finished_unsampled(facets, msg_ut); + else { + sampling_update_running_query_file_estimates(facets, j, fqs, jf, msg_ut, FACETS_ANCHOR_DIRECTION_FORWARD); + break; + } + } + + FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter); + FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes); + + fqs->rows_useful += rows_useful; + + if(errors_no_timestamp) + netdata_log_error("SYSTEMD-JOURNAL: %zu lines did not have timestamps", errors_no_timestamp); + + if(latest_msg_ut > fqs->last_modified) + fqs->last_modified = latest_msg_ut; + + return status; +} + +bool netdata_systemd_journal_check_if_modified_since(sd_journal *j, usec_t seek_to, usec_t last_modified) { + // return true, if data have been modified since the timestamp + + if(!last_modified || !seek_to) + return false; + + if(!netdata_systemd_journal_seek_to(j, seek_to)) + return false; + + usec_t first_msg_ut = 0; + while (sd_journal_previous(j) > 0) { + usec_t msg_ut; + if(sd_journal_get_realtime_usec(j, &msg_ut) < 0) + continue; + + first_msg_ut = msg_ut; + break; + } + + return first_msg_ut != last_modified; +} + +#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS +static bool netdata_systemd_filtering_by_journal(sd_journal *j, FACETS *facets, FUNCTION_QUERY_STATUS *fqs) { + const char *field = NULL; + const void *data = NULL; + size_t data_length; + size_t added_keys = 0; + size_t failures = 0; + size_t filters_added = 0; + + SD_JOURNAL_FOREACH_FIELD(j, field) { // for each key + bool interesting; + + if(fqs->data_only) + interesting = facets_key_name_is_filter(facets, field); + else + interesting = facets_key_name_is_facet(facets, field); + + if(interesting) { + if(sd_journal_query_unique(j, field) >= 0) { + bool added_this_key = false; + size_t added_values = 0; + + SD_JOURNAL_FOREACH_UNIQUE(j, data, data_length) { // for each value of the key + const char *key, *value; + size_t key_length, value_length; + + if(!parse_journal_field(data, data_length, &key, &key_length, &value, &value_length)) + continue; + + facets_add_possible_value_name_to_key(facets, key, key_length, value, value_length); + + if(!facets_key_name_value_length_is_selected(facets, key, key_length, value, value_length)) + continue; + + if(added_keys && !added_this_key) { + if(sd_journal_add_conjunction(j) < 0) // key AND key AND key + failures++; + + added_this_key = true; + added_keys++; + } + else if(added_values) + if(sd_journal_add_disjunction(j) < 0) // value OR value OR value + failures++; + + if(sd_journal_add_match(j, data, data_length) < 0) + failures++; + + if(!added_keys) { + added_keys++; + added_this_key = true; + } + + added_values++; + filters_added++; + } + } + } + } + + if(failures) { + log_fqs(fqs, "failed to setup journal filter, will run the full query."); + sd_journal_flush_matches(j); + return true; + } + + return filters_added ? 
true : false; +} +#endif // HAVE_SD_JOURNAL_RESTART_FIELDS + +static ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_one_file( + const char *filename, BUFFER *wb, FACETS *facets, + struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) { + + sd_journal *j = NULL; + errno = 0; + + fstat_cache_enable_on_thread(); + + const char *paths[2] = { + [0] = filename, + [1] = NULL, + }; + + if(sd_journal_open_files(&j, paths, ND_SD_JOURNAL_OPEN_FLAGS) < 0 || !j) { + netdata_log_error("JOURNAL: cannot open file '%s' for query", filename); + fstat_cache_disable_on_thread(); + return ND_SD_JOURNAL_FAILED_TO_OPEN; + } + + ND_SD_JOURNAL_STATUS status; + bool matches_filters = true; + +#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS + if(fqs->slice) { + usec_t started = now_monotonic_usec(); + + matches_filters = netdata_systemd_filtering_by_journal(j, facets, fqs) || !fqs->filters; + usec_t ended = now_monotonic_usec(); + + fqs->matches_setup_ut += (ended - started); + } +#endif // HAVE_SD_JOURNAL_RESTART_FIELDS + + if(matches_filters) { + if(fqs->direction == FACETS_ANCHOR_DIRECTION_FORWARD) + status = netdata_systemd_journal_query_forward(j, wb, facets, jf, fqs); + else + status = netdata_systemd_journal_query_backward(j, wb, facets, jf, fqs); + } + else + status = ND_SD_JOURNAL_NO_FILE_MATCHED; + + sd_journal_close(j); + fstat_cache_disable_on_thread(); + + return status; +} + +static bool jf_is_mine(struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) { + + if((fqs->source_type == SDJF_NONE && !fqs->sources) || (jf->source_type & fqs->source_type) || + (fqs->sources && simple_pattern_matches(fqs->sources, string2str(jf->source)))) { + + if(!jf->msg_last_ut || !jf->msg_last_ut) + // the file is not scanned yet, or the timestamps have not been updated, + // so we don't know if it can contribute or not - let's add it. + return true; + + usec_t anchor_delta = JOURNAL_VS_REALTIME_DELTA_MAX_UT; + usec_t first_ut = jf->msg_first_ut - anchor_delta; + usec_t last_ut = jf->msg_last_ut + anchor_delta; + + if(last_ut >= fqs->after_ut && first_ut <= fqs->before_ut) + return true; + } + + return false; +} + +static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QUERY_STATUS *fqs) { + ND_SD_JOURNAL_STATUS status = ND_SD_JOURNAL_NO_FILE_MATCHED; + struct journal_file *jf; + + fqs->files_matched = 0; + fqs->file_working = 0; + fqs->rows_useful = 0; + fqs->rows_read = 0; + fqs->bytes_read = 0; + + size_t files_used = 0; + size_t files_max = dictionary_entries(journal_files_registry); + const DICTIONARY_ITEM *file_items[files_max]; + + // count the files + bool files_are_newer = false; + dfe_start_read(journal_files_registry, jf) { + if(!jf_is_mine(jf, fqs)) + continue; + + file_items[files_used++] = dictionary_acquired_item_dup(journal_files_registry, jf_dfe.item); + + if(jf->msg_last_ut > fqs->if_modified_since) + files_are_newer = true; + } + dfe_done(jf); + + fqs->files_matched = files_used; + + if(fqs->if_modified_since && !files_are_newer) { + buffer_flush(wb); + return HTTP_RESP_NOT_MODIFIED; + } -#define SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION "View, search and analyze systemd journal entries." 
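jf_is_mine() above prunes journal files before querying: a file must match the requested source (by type or by pattern), and its [msg_first_ut, msg_last_ut] range, widened by JOURNAL_VS_REALTIME_DELTA_MAX_UT, must overlap the query window [after_ut, before_ut]; files whose timestamps are not known yet are kept. (The guard `if(!jf->msg_last_ut || !jf->msg_last_ut)` tests the same field twice; the first operand was presumably meant to be msg_first_ut.) The overlap condition is the usual closed-interval test; a tiny self-contained version with hypothetical values, not taken from the patch:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t usec_t;

    // Two closed intervals overlap iff each one starts no later than the other ends.
    static bool overlaps(usec_t first_ut, usec_t last_ut, usec_t after_ut, usec_t before_ut) {
        return last_ut >= after_ut && first_ut <= before_ut;
    }

    int main(void) {
        // query window: 100..200 (all values in microseconds)
        assert( overlaps(150, 250, 100, 200));   // file starts inside the window
        assert( overlaps( 50, 120, 100, 200));   // file ends inside the window
        assert( overlaps( 50, 300, 100, 200));   // file fully covers the window
        assert(!overlaps(250, 300, 100, 200));   // file entirely after the window
        return 0;
    }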
-#define SYSTEMD_JOURNAL_FUNCTION_NAME "systemd-journal" -#define SYSTEMD_JOURNAL_DEFAULT_TIMEOUT 30 -#define SYSTEMD_JOURNAL_MAX_PARAMS 100 -#define SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION (3 * 3600) -#define SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY 200 + // sort the files, so that they are optimal for facets + if(files_used >= 2) { + if (fqs->direction == FACETS_ANCHOR_DIRECTION_BACKWARD) + qsort(file_items, files_used, sizeof(const DICTIONARY_ITEM *), + journal_file_dict_items_backward_compar); + else + qsort(file_items, files_used, sizeof(const DICTIONARY_ITEM *), + journal_file_dict_items_forward_compar); + } -#define JOURNAL_PARAMETER_HELP "help" -#define JOURNAL_PARAMETER_AFTER "after" -#define JOURNAL_PARAMETER_BEFORE "before" -#define JOURNAL_PARAMETER_ANCHOR "anchor" -#define JOURNAL_PARAMETER_LAST "last" -#define JOURNAL_PARAMETER_QUERY "query" + bool partial = false; + usec_t query_started_ut = now_monotonic_usec(); + usec_t started_ut = query_started_ut; + usec_t ended_ut = started_ut; + usec_t duration_ut = 0, max_duration_ut = 0; -#define SYSTEMD_ALWAYS_VISIBLE_KEYS NULL -#define SYSTEMD_KEYS_EXCLUDED_FROM_FACETS NULL -#define SYSTEMD_KEYS_INCLUDED_IN_FACETS \ - "_TRANSPORT" \ - "|SYSLOG_IDENTIFIER" \ - "|SYSLOG_FACILITY" \ - "|PRIORITY" \ - "|_HOSTNAME" \ - "|_RUNTIME_SCOPE" \ - "|_PID" \ - "|_UID" \ - "|_GID" \ - "|_SYSTEMD_UNIT" \ - "|_SYSTEMD_SLICE" \ - "|_SYSTEMD_USER_SLICE" \ - "|_COMM" \ - "|_EXE" \ - "|_SYSTEMD_CGROUP" \ - "|_SYSTEMD_USER_UNIT" \ - "|USER_UNIT" \ - "|UNIT" \ - "" + sampling_query_init(fqs, facets); -static netdata_mutex_t mutex = NETDATA_MUTEX_INITIALIZER; -static bool plugin_should_exit = false; + buffer_json_member_add_array(wb, "_journal_files"); + for(size_t f = 0; f < files_used ;f++) { + const char *filename = dictionary_acquired_item_name(file_items[f]); + jf = dictionary_acquired_item_value(file_items[f]); -DICTIONARY *uids = NULL; -DICTIONARY *gids = NULL; + if(!jf_is_mine(jf, fqs)) + continue; + started_ut = ended_ut; -// ---------------------------------------------------------------------------- + // do not even try to do the query if we expect it to pass the timeout + if(ended_ut > (query_started_ut + (fqs->stop_monotonic_ut - query_started_ut) * 3 / 4) && + ended_ut + max_duration_ut * 2 >= fqs->stop_monotonic_ut) { -int systemd_journal_query(BUFFER *wb, FACETS *facets, usec_t after_ut, usec_t before_ut, usec_t stop_monotonic_ut) { - sd_journal *j; - int r; + partial = true; + status = ND_SD_JOURNAL_TIMED_OUT; + break; + } - // Open the system journal for reading - r = sd_journal_open(&j, JOURNAL_NAMESPACE); - if (r < 0) - return HTTP_RESP_INTERNAL_SERVER_ERROR; + fqs->file_working++; + // fqs->cached_count = 0; + + size_t fs_calls = fstat_thread_calls; + size_t fs_cached = fstat_thread_cached_responses; + size_t rows_useful = fqs->rows_useful; + size_t rows_read = fqs->rows_read; + size_t bytes_read = fqs->bytes_read; + size_t matches_setup_ut = fqs->matches_setup_ut; + + sampling_file_init(fqs, jf); + + ND_SD_JOURNAL_STATUS tmp_status = netdata_systemd_journal_query_one_file(filename, wb, facets, jf, fqs); + +// nd_log(NDLS_COLLECTORS, NDLP_INFO, +// "JOURNAL ESTIMATION FINAL: '%s' " +// "total lines %zu [sampled=%zu, unsampled=%zu, estimated=%zu], " +// "file [%"PRIu64" - %"PRIu64", duration %"PRId64", known lines in file %zu], " +// "query [%"PRIu64" - %"PRIu64", duration %"PRId64"], " +// , jf->filename +// , fqs->samples_per_file.sampled + fqs->samples_per_file.unsampled + fqs->samples_per_file.estimated +// , 
fqs->samples_per_file.sampled, fqs->samples_per_file.unsampled, fqs->samples_per_file.estimated +// , jf->msg_first_ut, jf->msg_last_ut, jf->msg_last_ut - jf->msg_first_ut, jf->messages_in_file +// , fqs->query_file.start_ut, fqs->query_file.stop_ut, fqs->query_file.stop_ut - fqs->query_file.start_ut +// ); + + rows_useful = fqs->rows_useful - rows_useful; + rows_read = fqs->rows_read - rows_read; + bytes_read = fqs->bytes_read - bytes_read; + matches_setup_ut = fqs->matches_setup_ut - matches_setup_ut; + fs_calls = fstat_thread_calls - fs_calls; + fs_cached = fstat_thread_cached_responses - fs_cached; + + ended_ut = now_monotonic_usec(); + duration_ut = ended_ut - started_ut; + + if(duration_ut > max_duration_ut) + max_duration_ut = duration_ut; + + buffer_json_add_array_item_object(wb); // journal file + { + // information about the file + buffer_json_member_add_string(wb, "_filename", filename); + buffer_json_member_add_uint64(wb, "_source_type", jf->source_type); + buffer_json_member_add_string(wb, "_source", string2str(jf->source)); + buffer_json_member_add_uint64(wb, "_last_modified_ut", jf->file_last_modified_ut); + buffer_json_member_add_uint64(wb, "_msg_first_ut", jf->msg_first_ut); + buffer_json_member_add_uint64(wb, "_msg_last_ut", jf->msg_last_ut); + buffer_json_member_add_uint64(wb, "_journal_vs_realtime_delta_ut", jf->max_journal_vs_realtime_delta_ut); + + // information about the current use of the file + buffer_json_member_add_uint64(wb, "duration_ut", ended_ut - started_ut); + buffer_json_member_add_uint64(wb, "rows_read", rows_read); + buffer_json_member_add_uint64(wb, "rows_useful", rows_useful); + buffer_json_member_add_double(wb, "rows_per_second", (double) rows_read / (double) duration_ut * (double) USEC_PER_SEC); + buffer_json_member_add_uint64(wb, "bytes_read", bytes_read); + buffer_json_member_add_double(wb, "bytes_per_second", (double) bytes_read / (double) duration_ut * (double) USEC_PER_SEC); + buffer_json_member_add_uint64(wb, "duration_matches_ut", matches_setup_ut); + buffer_json_member_add_uint64(wb, "fstat_query_calls", fs_calls); + buffer_json_member_add_uint64(wb, "fstat_query_cached_responses", fs_cached); + + if(fqs->sampling) { + buffer_json_member_add_object(wb, "_sampling"); + { + buffer_json_member_add_uint64(wb, "sampled", fqs->samples_per_file.sampled); + buffer_json_member_add_uint64(wb, "unsampled", fqs->samples_per_file.unsampled); + buffer_json_member_add_uint64(wb, "estimated", fqs->samples_per_file.estimated); + } + buffer_json_object_close(wb); // _sampling + } + } + buffer_json_object_close(wb); // journal file - facets_rows_begin(facets); + bool stop = false; + switch(tmp_status) { + case ND_SD_JOURNAL_OK: + case ND_SD_JOURNAL_NO_FILE_MATCHED: + status = (status == ND_SD_JOURNAL_OK) ? 
ND_SD_JOURNAL_OK : tmp_status; + break; - bool timed_out = false; - size_t row_counter = 0; - sd_journal_seek_realtime_usec(j, before_ut); - SD_JOURNAL_FOREACH_BACKWARDS(j) { - row_counter++; + case ND_SD_JOURNAL_FAILED_TO_OPEN: + case ND_SD_JOURNAL_FAILED_TO_SEEK: + partial = true; + if(status == ND_SD_JOURNAL_NO_FILE_MATCHED) + status = tmp_status; + break; - uint64_t msg_ut; - sd_journal_get_realtime_usec(j, &msg_ut); - if (msg_ut < after_ut) + case ND_SD_JOURNAL_CANCELLED: + case ND_SD_JOURNAL_TIMED_OUT: + partial = true; + stop = true; + status = tmp_status; break; - const void *data; - size_t length; - SD_JOURNAL_FOREACH_DATA(j, data, length) { - const char *key = data; - const char *equal = strchr(key, '='); - if(unlikely(!equal)) - continue; + case ND_SD_JOURNAL_NOT_MODIFIED: + internal_fatal(true, "this should never be returned here"); + break; + } - const char *value = ++equal; - size_t key_length = value - key; // including '\0' + if(stop) + break; + } + buffer_json_array_close(wb); // _journal_files - char key_copy[key_length]; - memcpy(key_copy, key, key_length - 1); - key_copy[key_length - 1] = '\0'; + // release the files + for(size_t f = 0; f < files_used ;f++) + dictionary_acquired_item_release(journal_files_registry, file_items[f]); - size_t value_length = length - key_length; // without '\0' - facets_add_key_value_length(facets, key_copy, value, value_length <= FACET_MAX_VALUE_LENGTH ? value_length : FACET_MAX_VALUE_LENGTH); + switch (status) { + case ND_SD_JOURNAL_OK: + if(fqs->if_modified_since && !fqs->rows_useful) { + buffer_flush(wb); + return HTTP_RESP_NOT_MODIFIED; } + break; - facets_row_finished(facets, msg_ut); + case ND_SD_JOURNAL_TIMED_OUT: + case ND_SD_JOURNAL_NO_FILE_MATCHED: + break; - if((row_counter % 100) == 0 && now_monotonic_usec() > stop_monotonic_ut) { - timed_out = true; - break; - } - } + case ND_SD_JOURNAL_CANCELLED: + buffer_flush(wb); + return HTTP_RESP_CLIENT_CLOSED_REQUEST; - sd_journal_close(j); + case ND_SD_JOURNAL_NOT_MODIFIED: + buffer_flush(wb); + return HTTP_RESP_NOT_MODIFIED; + + default: + case ND_SD_JOURNAL_FAILED_TO_OPEN: + case ND_SD_JOURNAL_FAILED_TO_SEEK: + buffer_flush(wb); + return HTTP_RESP_INTERNAL_SERVER_ERROR; + } buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); - buffer_json_member_add_boolean(wb, "partial", timed_out); + buffer_json_member_add_boolean(wb, "partial", partial); buffer_json_member_add_string(wb, "type", "table"); - buffer_json_member_add_time_t(wb, "update_every", 1); - buffer_json_member_add_string(wb, "help", SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION); - facets_report(facets, wb); + // build a message for the query + if(!fqs->data_only) { + CLEAN_BUFFER *msg = buffer_create(0, NULL); + CLEAN_BUFFER *msg_description = buffer_create(0, NULL); + ND_LOG_FIELD_PRIORITY msg_priority = NDLP_INFO; + + if(!journal_files_completed_once()) { + buffer_strcat(msg, "Journals are still being scanned. "); + buffer_strcat(msg_description + , "LIBRARY SCAN: The journal files are still being scanned, you are probably viewing incomplete data. "); + msg_priority = NDLP_WARNING; + } + + if(partial) { + buffer_strcat(msg, "Query timed-out, incomplete data. "); + buffer_strcat(msg_description + , "QUERY TIMEOUT: The query timed out and may not include all the data of the selected window. 
"); + msg_priority = NDLP_WARNING; + } + + if(fqs->samples.estimated || fqs->samples.unsampled) { + double percent = (double) (fqs->samples.sampled * 100.0 / + (fqs->samples.estimated + fqs->samples.unsampled + fqs->samples.sampled)); + buffer_sprintf(msg, "%.2f%% real data", percent); + buffer_sprintf(msg_description, "ACTUAL DATA: The filters counters reflect %0.2f%% of the data. ", percent); + msg_priority = MIN(msg_priority, NDLP_NOTICE); + } + + if(fqs->samples.unsampled) { + double percent = (double) (fqs->samples.unsampled * 100.0 / + (fqs->samples.estimated + fqs->samples.unsampled + fqs->samples.sampled)); + buffer_sprintf(msg, ", %.2f%% unsampled", percent); + buffer_sprintf(msg_description + , "UNSAMPLED DATA: %0.2f%% of the events exist and have been counted, but their values have not been evaluated, so they are not included in the filters counters. " + , percent); + msg_priority = MIN(msg_priority, NDLP_NOTICE); + } + + if(fqs->samples.estimated) { + double percent = (double) (fqs->samples.estimated * 100.0 / + (fqs->samples.estimated + fqs->samples.unsampled + fqs->samples.sampled)); + buffer_sprintf(msg, ", %.2f%% estimated", percent); + buffer_sprintf(msg_description + , "ESTIMATED DATA: The query selected a large amount of data, so to avoid delaying too much, the presented data are estimated by %0.2f%%. " + , percent); + msg_priority = MIN(msg_priority, NDLP_NOTICE); + } + + buffer_json_member_add_object(wb, "message"); + if(buffer_tostring(msg)) { + buffer_json_member_add_string(wb, "title", buffer_tostring(msg)); + buffer_json_member_add_string(wb, "description", buffer_tostring(msg_description)); + buffer_json_member_add_string(wb, "status", nd_log_id2priority(msg_priority)); + } + // else send an empty object if there is nothing to tell + buffer_json_object_close(wb); // message + } + + if(!fqs->data_only) { + buffer_json_member_add_time_t(wb, "update_every", 1); + buffer_json_member_add_string(wb, "help", SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION); + } + + if(!fqs->data_only || fqs->tail) + buffer_json_member_add_uint64(wb, "last_modified", fqs->last_modified); + + facets_sort_and_reorder_keys(facets); + facets_report(facets, wb, used_hashes_registry); + + buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + (fqs->data_only ? 
3600 : 0)); + + buffer_json_member_add_object(wb, "_fstat_caching"); + { + buffer_json_member_add_uint64(wb, "calls", fstat_thread_calls); + buffer_json_member_add_uint64(wb, "cached", fstat_thread_cached_responses); + } + buffer_json_object_close(wb); // _fstat_caching + + if(fqs->sampling) { + buffer_json_member_add_object(wb, "_sampling"); + { + buffer_json_member_add_uint64(wb, "sampled", fqs->samples.sampled); + buffer_json_member_add_uint64(wb, "unsampled", fqs->samples.unsampled); + buffer_json_member_add_uint64(wb, "estimated", fqs->samples.estimated); + } + buffer_json_object_close(wb); // _sampling + } - buffer_json_member_add_time_t(wb, "expires", now_realtime_sec()); buffer_json_finalize(wb); return HTTP_RESP_OK; } -static void systemd_journal_function_help(const char *transaction) { - pluginsd_function_result_begin_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600); - fprintf(stdout, +static void netdata_systemd_journal_function_help(const char *transaction) { + BUFFER *wb = buffer_create(0, NULL); + buffer_sprintf(wb, "%s / %s\n" "\n" "%s\n" "\n" - "The following filters are supported:\n" + "The following parameters are supported:\n" "\n" - " help\n" + " "JOURNAL_PARAMETER_HELP"\n" " Shows this help message.\n" "\n" - " before:TIMESTAMP\n" + " "JOURNAL_PARAMETER_INFO"\n" + " Request initial configuration information about the plugin.\n" + " The key entity returned is the required_params array, which includes\n" + " all the available systemd journal sources.\n" + " When `"JOURNAL_PARAMETER_INFO"` is requested, all other parameters are ignored.\n" + "\n" + " "JOURNAL_PARAMETER_ID":STRING\n" + " Caller supplied unique ID of the request.\n" + " This can be used later to request a progress report of the query.\n" + " Optional, but if omitted no `"JOURNAL_PARAMETER_PROGRESS"` can be requested.\n" + "\n" + " "JOURNAL_PARAMETER_PROGRESS"\n" + " Request a progress report (the `id` of a running query is required).\n" + " When `"JOURNAL_PARAMETER_PROGRESS"` is requested, only parameter `"JOURNAL_PARAMETER_ID"` is used.\n" + "\n" + " "JOURNAL_PARAMETER_DATA_ONLY":true or "JOURNAL_PARAMETER_DATA_ONLY":false\n" + " Quickly respond with data requested, without generating a\n" + " `histogram`, `facets` counters and `items`.\n" + "\n" + " "JOURNAL_PARAMETER_DELTA":true or "JOURNAL_PARAMETER_DELTA":false\n" + " When doing data only queries, include deltas for histogram, facets and items.\n" + "\n" + " "JOURNAL_PARAMETER_TAIL":true or "JOURNAL_PARAMETER_TAIL":false\n" + " When doing data only queries, respond with the newest messages,\n" + " and up to the anchor, but calculate deltas (if requested) for\n" + " the duration [anchor - before].\n" + "\n" + " "JOURNAL_PARAMETER_SLICE":true or "JOURNAL_PARAMETER_SLICE":false\n" + " When it is turned on, the plugin is executing filtering via libsystemd,\n" + " utilizing all the available indexes of the journal files.\n" + " When it is off, only the time constraint is handled by libsystemd and\n" + " all filtering is done by the plugin.\n" + " The default is: %s\n" + "\n" + " "JOURNAL_PARAMETER_SOURCE":SOURCE\n" + " Query only the specified journal sources.\n" + " Do an `"JOURNAL_PARAMETER_INFO"` query to find the sources.\n" + "\n" + " "JOURNAL_PARAMETER_BEFORE":TIMESTAMP_IN_SECONDS\n" " Absolute or relative (to now) timestamp in seconds, to start the query.\n" " The query is always executed from the most recent to the oldest log entry.\n" " If not given the default is: now.\n" "\n" - " after:TIMESTAMP\n" + " 
"JOURNAL_PARAMETER_AFTER":TIMESTAMP_IN_SECONDS\n" " Absolute or relative (to `before`) timestamp in seconds, to end the query.\n" " If not given, the default is %d.\n" "\n" - " last:ITEMS\n" + " "JOURNAL_PARAMETER_LAST":ITEMS\n" " The number of items to return.\n" " The default is %d.\n" "\n" - " anchor:NUMBER\n" - " The `timestamp` of the item last received, to return log entries after that.\n" - " If not given, the query will return the top `ITEMS` from the most recent.\n" + " "JOURNAL_PARAMETER_SAMPLING":ITEMS\n" + " The number of log entries to sample to estimate facets counters and histogram.\n" + " The default is %d.\n" + "\n" + " "JOURNAL_PARAMETER_ANCHOR":TIMESTAMP_IN_MICROSECONDS\n" + " Return items relative to this timestamp.\n" + " The exact items to be returned depend on the query `"JOURNAL_PARAMETER_DIRECTION"`.\n" + "\n" + " "JOURNAL_PARAMETER_DIRECTION":forward or "JOURNAL_PARAMETER_DIRECTION":backward\n" + " When set to `backward` (default) the items returned are the newest before the\n" + " `"JOURNAL_PARAMETER_ANCHOR"`, (or `"JOURNAL_PARAMETER_BEFORE"` if `"JOURNAL_PARAMETER_ANCHOR"` is not set)\n" + " When set to `forward` the items returned are the oldest after the\n" + " `"JOURNAL_PARAMETER_ANCHOR"`, (or `"JOURNAL_PARAMETER_AFTER"` if `"JOURNAL_PARAMETER_ANCHOR"` is not set)\n" + " The default is: %s\n" + "\n" + " "JOURNAL_PARAMETER_QUERY":SIMPLE_PATTERN\n" + " Do a full text search to find the log entries matching the pattern given.\n" + " The plugin is searching for matches on all fields of the database.\n" + "\n" + " "JOURNAL_PARAMETER_IF_MODIFIED_SINCE":TIMESTAMP_IN_MICROSECONDS\n" + " Each successful response, includes a `last_modified` field.\n" + " By providing the timestamp to the `"JOURNAL_PARAMETER_IF_MODIFIED_SINCE"` parameter,\n" + " the plugin will return 200 with a successful response, or 304 if the source has not\n" + " been modified since that timestamp.\n" + "\n" + " "JOURNAL_PARAMETER_HISTOGRAM":facet_id\n" + " Use the given `facet_id` for the histogram.\n" + " This parameter is ignored in `"JOURNAL_PARAMETER_DATA_ONLY"` mode.\n" + "\n" + " "JOURNAL_PARAMETER_FACETS":facet_id1,facet_id2,facet_id3,...\n" + " Add the given facets to the list of fields for which analysis is required.\n" + " The plugin will offer both a histogram and facet value counters for its values.\n" + " This parameter is ignored in `"JOURNAL_PARAMETER_DATA_ONLY"` mode.\n" "\n" " facet_id:value_id1,value_id2,value_id3,...\n" " Apply filters to the query, based on the facet IDs returned.\n" " Each `facet_id` can be given once, but multiple `facet_ids` can be given.\n" "\n" - "Filters can be combined. Each filter can be given only one time.\n" , program_name , SYSTEMD_JOURNAL_FUNCTION_NAME , SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION + , JOURNAL_DEFAULT_SLICE_MODE ? "true" : "false" // slice , -SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION , SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY + , SYSTEMD_JOURNAL_DEFAULT_ITEMS_SAMPLING + , JOURNAL_DEFAULT_DIRECTION == FACETS_ANCHOR_DIRECTION_BACKWARD ? 
"backward" : "forward" ); - pluginsd_function_result_end_to_stdout(); -} - -static const char *syslog_facility_to_name(int facility) { - switch (facility) { - case LOG_FAC(LOG_KERN): return "kern"; - case LOG_FAC(LOG_USER): return "user"; - case LOG_FAC(LOG_MAIL): return "mail"; - case LOG_FAC(LOG_DAEMON): return "daemon"; - case LOG_FAC(LOG_AUTH): return "auth"; - case LOG_FAC(LOG_SYSLOG): return "syslog"; - case LOG_FAC(LOG_LPR): return "lpr"; - case LOG_FAC(LOG_NEWS): return "news"; - case LOG_FAC(LOG_UUCP): return "uucp"; - case LOG_FAC(LOG_CRON): return "cron"; - case LOG_FAC(LOG_AUTHPRIV): return "authpriv"; - case LOG_FAC(LOG_FTP): return "ftp"; - case LOG_FAC(LOG_LOCAL0): return "local0"; - case LOG_FAC(LOG_LOCAL1): return "local1"; - case LOG_FAC(LOG_LOCAL2): return "local2"; - case LOG_FAC(LOG_LOCAL3): return "local3"; - case LOG_FAC(LOG_LOCAL4): return "local4"; - case LOG_FAC(LOG_LOCAL5): return "local5"; - case LOG_FAC(LOG_LOCAL6): return "local6"; - case LOG_FAC(LOG_LOCAL7): return "local7"; - default: return NULL; - } -} - -static const char *syslog_priority_to_name(int priority) { - switch (priority) { - case LOG_ALERT: return "alert"; - case LOG_CRIT: return "critical"; - case LOG_DEBUG: return "debug"; - case LOG_EMERG: return "panic"; - case LOG_ERR: return "error"; - case LOG_INFO: return "info"; - case LOG_NOTICE: return "notice"; - case LOG_WARNING: return "warning"; - default: return NULL; - } -} - -static char *uid_to_username(uid_t uid, char *buffer, size_t buffer_size) { - struct passwd pw, *result; - char tmp[1024 + 1]; - - if (getpwuid_r(uid, &pw, tmp, 1024, &result) != 0 || result == NULL) - return NULL; - - strncpy(buffer, pw.pw_name, buffer_size - 1); - buffer[buffer_size - 1] = '\0'; // Null-terminate just in case - return buffer; -} -static char *gid_to_groupname(gid_t gid, char* buffer, size_t buffer_size) { - struct group grp, *result; - char tmp[1024 + 1]; + netdata_mutex_lock(&stdout_mutex); + pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb); + netdata_mutex_unlock(&stdout_mutex); - if (getgrgid_r(gid, &grp, tmp, 1024, &result) != 0 || result == NULL) - return NULL; - - strncpy(buffer, grp.gr_name, buffer_size - 1); - buffer[buffer_size - 1] = '\0'; // Null-terminate just in case - return buffer; + buffer_free(wb); } -static void systemd_journal_transform_syslog_facility(FACETS *facets __maybe_unused, BUFFER *wb, void *data __maybe_unused) { - const char *v = buffer_tostring(wb); - if(*v && isdigit(*v)) { - int facility = str2i(buffer_tostring(wb)); - const char *name = syslog_facility_to_name(facility); - if (name) { - buffer_flush(wb); - buffer_strcat(wb, name); - } - } -} +DICTIONARY *function_query_status_dict = NULL; -static void systemd_journal_transform_priority(FACETS *facets __maybe_unused, BUFFER *wb, void *data __maybe_unused) { - const char *v = buffer_tostring(wb); - if(*v && isdigit(*v)) { - int priority = str2i(buffer_tostring(wb)); - const char *name = syslog_priority_to_name(priority); - if (name) { - buffer_flush(wb); - buffer_strcat(wb, name); - } +static void function_systemd_journal_progress(BUFFER *wb, const char *transaction, const char *progress_id) { + if(!progress_id || !(*progress_id)) { + netdata_mutex_lock(&stdout_mutex); + pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_BAD_REQUEST, "missing progress id"); + netdata_mutex_unlock(&stdout_mutex); + return; } -} - -static void systemd_journal_transform_uid(FACETS *facets __maybe_unused, BUFFER *wb, void 
*data) { - DICTIONARY *cache = data; - const char *v = buffer_tostring(wb); - if(*v && isdigit(*v)) { - const char *sv = dictionary_get(cache, v); - if(!sv) { - char buf[1024 + 1]; - int uid = str2i(buffer_tostring(wb)); - const char *name = uid_to_username(uid, buf, 1024); - if (!name) - name = v; - sv = dictionary_set(cache, v, (void *)name, strlen(name) + 1); - } + const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(function_query_status_dict, progress_id); - buffer_flush(wb); - buffer_strcat(wb, sv); + if(!item) { + netdata_mutex_lock(&stdout_mutex); + pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_NOT_FOUND, "progress id is not found here"); + netdata_mutex_unlock(&stdout_mutex); + return; } -} - -static void systemd_journal_transform_gid(FACETS *facets __maybe_unused, BUFFER *wb, void *data) { - DICTIONARY *cache = data; - const char *v = buffer_tostring(wb); - if(*v && isdigit(*v)) { - const char *sv = dictionary_get(cache, v); - if(!sv) { - char buf[1024 + 1]; - int gid = str2i(buffer_tostring(wb)); - const char *name = gid_to_groupname(gid, buf, 1024); - if (!name) - name = v; - sv = dictionary_set(cache, v, (void *)name, strlen(name) + 1); - } + FUNCTION_QUERY_STATUS *fqs = dictionary_acquired_item_value(item); - buffer_flush(wb); - buffer_strcat(wb, sv); - } -} + usec_t now_monotonic_ut = now_monotonic_usec(); + if(now_monotonic_ut + 10 * USEC_PER_SEC > fqs->stop_monotonic_ut) + fqs->stop_monotonic_ut = now_monotonic_ut + 10 * USEC_PER_SEC; -static void systemd_journal_dynamic_row_id(FACETS *facets __maybe_unused, BUFFER *json_array, FACET_ROW_KEY_VALUE *rkv, FACET_ROW *row, void *data __maybe_unused) { - FACET_ROW_KEY_VALUE *pid_rkv = dictionary_get(row->dict, "_PID"); - const char *pid = pid_rkv ? buffer_tostring(pid_rkv->wb) : FACET_VALUE_UNSET; + usec_t duration_ut = now_monotonic_ut - fqs->started_monotonic_ut; - FACET_ROW_KEY_VALUE *syslog_identifier_rkv = dictionary_get(row->dict, "SYSLOG_IDENTIFIER"); - const char *identifier = syslog_identifier_rkv ? buffer_tostring(syslog_identifier_rkv->wb) : FACET_VALUE_UNSET; + size_t files_matched = fqs->files_matched; + size_t file_working = fqs->file_working; + if(file_working > files_matched) + files_matched = file_working; - if(strcmp(identifier, FACET_VALUE_UNSET) == 0) { - FACET_ROW_KEY_VALUE *comm_rkv = dictionary_get(row->dict, "_COMM"); - identifier = comm_rkv ? 
buffer_tostring(comm_rkv->wb) : FACET_VALUE_UNSET; - } + size_t rows_read = __atomic_load_n(&fqs->rows_read, __ATOMIC_RELAXED); + size_t bytes_read = __atomic_load_n(&fqs->bytes_read, __ATOMIC_RELAXED); - buffer_flush(rkv->wb); + buffer_flush(wb); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_string(wb, "type", "table"); + buffer_json_member_add_uint64(wb, "running_duration_usec", duration_ut); + buffer_json_member_add_double(wb, "progress", (double)file_working * 100.0 / (double)files_matched); + char msg[1024 + 1]; + snprintfz(msg, sizeof(msg) - 1, + "Read %zu rows (%0.0f rows/s), " + "data %0.1f MB (%0.1f MB/s), " + "file %zu of %zu", + rows_read, (double)rows_read / (double)duration_ut * (double)USEC_PER_SEC, + (double)bytes_read / 1024.0 / 1024.0, ((double)bytes_read / (double)duration_ut * (double)USEC_PER_SEC) / 1024.0 / 1024.0, + file_working, files_matched + ); + buffer_json_member_add_string(wb, "message", msg); + buffer_json_finalize(wb); - if(strcmp(pid, FACET_VALUE_UNSET) == 0) - buffer_strcat(rkv->wb, identifier); - else - buffer_sprintf(rkv->wb, "%s[%s]", identifier, pid); + netdata_mutex_lock(&stdout_mutex); + pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", now_realtime_sec() + 1, wb); + netdata_mutex_unlock(&stdout_mutex); - buffer_json_add_array_item_string(json_array, buffer_tostring(rkv->wb)); + dictionary_acquired_item_release(function_query_status_dict, item); } -static void function_systemd_journal(const char *transaction, char *function, char *line_buffer __maybe_unused, int line_max __maybe_unused, int timeout __maybe_unused) { - char *words[SYSTEMD_JOURNAL_MAX_PARAMS] = { NULL }; - size_t num_words = quoted_strings_splitter_pluginsd(function, words, SYSTEMD_JOURNAL_MAX_PARAMS); +void function_systemd_journal(const char *transaction, char *function, int timeout, bool *cancelled) { + fstat_thread_calls = 0; + fstat_thread_cached_responses = 0; BUFFER *wb = buffer_create(0, NULL); buffer_flush(wb); - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS); - - FACETS *facets = facets_create(50, 0, FACETS_OPTION_ALL_KEYS_FTS, + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + + usec_t now_monotonic_ut = now_monotonic_usec(); + FUNCTION_QUERY_STATUS tmp_fqs = { + .cancelled = cancelled, + .started_monotonic_ut = now_monotonic_ut, + .stop_monotonic_ut = now_monotonic_ut + (timeout * USEC_PER_SEC), + }; + FUNCTION_QUERY_STATUS *fqs = NULL; + const DICTIONARY_ITEM *fqs_item = NULL; + + FACETS *facets = facets_create(50, FACETS_OPTION_ALL_KEYS_FTS, SYSTEMD_ALWAYS_VISIBLE_KEYS, SYSTEMD_KEYS_INCLUDED_IN_FACETS, SYSTEMD_KEYS_EXCLUDED_FROM_FACETS); + facets_accepted_param(facets, JOURNAL_PARAMETER_INFO); + facets_accepted_param(facets, JOURNAL_PARAMETER_SOURCE); facets_accepted_param(facets, JOURNAL_PARAMETER_AFTER); facets_accepted_param(facets, JOURNAL_PARAMETER_BEFORE); facets_accepted_param(facets, JOURNAL_PARAMETER_ANCHOR); + facets_accepted_param(facets, JOURNAL_PARAMETER_DIRECTION); facets_accepted_param(facets, JOURNAL_PARAMETER_LAST); facets_accepted_param(facets, JOURNAL_PARAMETER_QUERY); + facets_accepted_param(facets, JOURNAL_PARAMETER_FACETS); + facets_accepted_param(facets, JOURNAL_PARAMETER_HISTOGRAM); + facets_accepted_param(facets, JOURNAL_PARAMETER_IF_MODIFIED_SINCE); + facets_accepted_param(facets, JOURNAL_PARAMETER_DATA_ONLY); + 
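/*
 * Editor's sketch: the progress report assembled a little earlier in this hunk
 * expresses completion as file_working out of files_matched and turns the
 * monotonic-usec counters into per-second rates before formatting the message.
 * The same arithmetic, standalone and with example values; every name below is
 * illustrative and duration_ut is assumed to be non-zero.
 */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_USEC_PER_SEC 1000000ULL

static void report_progress(uint64_t rows_read, uint64_t bytes_read,
                            uint64_t duration_ut, size_t file_working, size_t files_matched) {
    double progress   = (double)file_working * 100.0 / (double)files_matched;
    double rows_per_s = (double)rows_read  / (double)duration_ut * (double)EXAMPLE_USEC_PER_SEC;
    double mb_per_s   = ((double)bytes_read / (double)duration_ut * (double)EXAMPLE_USEC_PER_SEC) / 1024.0 / 1024.0;

    printf("Read %llu rows (%0.0f rows/s), data %0.1f MB (%0.1f MB/s), file %zu of %zu\n",
           (unsigned long long)rows_read, rows_per_s,
           (double)bytes_read / 1024.0 / 1024.0, mb_per_s,
           file_working, files_matched);
    printf("progress: %0.1f%%\n", progress);
}

/* report_progress(250000, 64ULL << 20, 2ULL * EXAMPLE_USEC_PER_SEC, 3, 12)
 * prints 125000 rows/s, 32.0 MB/s and 25.0% progress. */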
facets_accepted_param(facets, JOURNAL_PARAMETER_ID); + facets_accepted_param(facets, JOURNAL_PARAMETER_PROGRESS); + facets_accepted_param(facets, JOURNAL_PARAMETER_DELTA); + facets_accepted_param(facets, JOURNAL_PARAMETER_TAIL); + facets_accepted_param(facets, JOURNAL_PARAMETER_SAMPLING); + +#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS + facets_accepted_param(facets, JOURNAL_PARAMETER_SLICE); +#endif // HAVE_SD_JOURNAL_RESTART_FIELDS // register the fields in the order you want them on the dashboard - facets_register_dynamic_key(facets, "ND_JOURNAL_PROCESS", FACET_KEY_OPTION_NO_FACET|FACET_KEY_OPTION_VISIBLE|FACET_KEY_OPTION_FTS, - systemd_journal_dynamic_row_id, NULL); + facets_register_row_severity(facets, syslog_priority_to_facet_severity, NULL); + + facets_register_key_name(facets, "_HOSTNAME", + FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_VISIBLE); + + facets_register_dynamic_key_name(facets, JOURNAL_KEY_ND_JOURNAL_PROCESS, + FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_VISIBLE, + netdata_systemd_journal_dynamic_row_id, NULL); + + facets_register_key_name(facets, "MESSAGE", + FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | + FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS); + +// facets_register_dynamic_key_name(facets, "MESSAGE", +// FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | FACET_KEY_OPTION_RICH_TEXT | +// FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS, +// netdata_systemd_journal_rich_message, NULL); + + facets_register_key_name_transformation(facets, "PRIORITY", + FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW | + FACET_KEY_OPTION_EXPANDED_FILTER, + netdata_systemd_journal_transform_priority, NULL); + + facets_register_key_name_transformation(facets, "SYSLOG_FACILITY", + FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW | + FACET_KEY_OPTION_EXPANDED_FILTER, + netdata_systemd_journal_transform_syslog_facility, NULL); + + facets_register_key_name_transformation(facets, "ERRNO", + FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_errno, NULL); + + facets_register_key_name(facets, JOURNAL_KEY_ND_JOURNAL_FILE, + FACET_KEY_OPTION_NEVER_FACET); + + facets_register_key_name(facets, "SYSLOG_IDENTIFIER", + FACET_KEY_OPTION_FACET); + + facets_register_key_name(facets, "UNIT", + FACET_KEY_OPTION_FACET); + + facets_register_key_name(facets, "USER_UNIT", + FACET_KEY_OPTION_FACET); + + facets_register_key_name_transformation(facets, "MESSAGE_ID", + FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW | + FACET_KEY_OPTION_EXPANDED_FILTER, + netdata_systemd_journal_transform_message_id, NULL); + + facets_register_key_name_transformation(facets, "_BOOT_ID", + FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_boot_id, NULL); - facets_register_key(facets, "MESSAGE", - FACET_KEY_OPTION_NO_FACET|FACET_KEY_OPTION_MAIN_TEXT|FACET_KEY_OPTION_VISIBLE|FACET_KEY_OPTION_FTS); + facets_register_key_name_transformation(facets, "_SYSTEMD_OWNER_UID", + FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); - facets_register_key_transformation(facets, "PRIORITY", FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS, - systemd_journal_transform_priority, NULL); + facets_register_key_name_transformation(facets, "_UID", + FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); - facets_register_key_transformation(facets, "SYSLOG_FACILITY", FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS, - 
systemd_journal_transform_syslog_facility, NULL); + facets_register_key_name_transformation(facets, "OBJECT_SYSTEMD_OWNER_UID", + FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); - facets_register_key(facets, "SYSLOG_IDENTIFIER", FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS); - facets_register_key(facets, "UNIT", FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS); - facets_register_key(facets, "USER_UNIT", FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS); + facets_register_key_name_transformation(facets, "OBJECT_UID", + FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); - facets_register_key_transformation(facets, "_UID", FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS, - systemd_journal_transform_uid, uids); + facets_register_key_name_transformation(facets, "_GID", + FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_gid, NULL); - facets_register_key_transformation(facets, "_GID", FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS, - systemd_journal_transform_gid, gids); + facets_register_key_name_transformation(facets, "OBJECT_GID", + FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_gid, NULL); + facets_register_key_name_transformation(facets, "_CAP_EFFECTIVE", + FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_cap_effective, NULL); + + facets_register_key_name_transformation(facets, "_AUDIT_LOGINUID", + FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); + + facets_register_key_name_transformation(facets, "OBJECT_AUDIT_LOGINUID", + FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); + + facets_register_key_name_transformation(facets, "_SOURCE_REALTIME_TIMESTAMP", + FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_timestamp_usec, NULL); + + // ------------------------------------------------------------------------ + // parse the parameters + + bool info = false, data_only = false, progress = false, slice = JOURNAL_DEFAULT_SLICE_MODE, delta = false, tail = false; time_t after_s = 0, before_s = 0; usec_t anchor = 0; + usec_t if_modified_since = 0; size_t last = 0; + FACETS_ANCHOR_DIRECTION direction = JOURNAL_DEFAULT_DIRECTION; const char *query = NULL; + const char *chart = NULL; + SIMPLE_PATTERN *sources = NULL; + const char *progress_id = NULL; + SD_JOURNAL_FILE_SOURCE_TYPE source_type = SDJF_ALL; + size_t filters = 0; + size_t sampling = SYSTEMD_JOURNAL_DEFAULT_ITEMS_SAMPLING; - buffer_json_member_add_object(wb, "request"); - buffer_json_member_add_object(wb, "filters"); + buffer_json_member_add_object(wb, "_request"); + char *words[SYSTEMD_JOURNAL_MAX_PARAMS] = { NULL }; + size_t num_words = quoted_strings_splitter_pluginsd(function, words, SYSTEMD_JOURNAL_MAX_PARAMS); for(int i = 1; i < SYSTEMD_JOURNAL_MAX_PARAMS ;i++) { - const char *keyword = get_word(words, num_words, i); + char *keyword = get_word(words, num_words, i); if(!keyword) break; if(strcmp(keyword, JOURNAL_PARAMETER_HELP) == 0) { - systemd_journal_function_help(transaction); + netdata_systemd_journal_function_help(transaction); goto cleanup; } - else if(strncmp(keyword, JOURNAL_PARAMETER_AFTER ":", strlen(JOURNAL_PARAMETER_AFTER ":")) == 0) { - after_s = str2l(&keyword[strlen(JOURNAL_PARAMETER_AFTER ":")]); + else if(strcmp(keyword, JOURNAL_PARAMETER_INFO) == 0) { + info = true; + } + else if(strcmp(keyword, JOURNAL_PARAMETER_PROGRESS) == 0) { + 
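/*
 * Editor's note: the parameter parser in this function matches "name:value"
 * keywords with strncmp() and a prefix length of sizeof("name:") - 1 (sizeof of a
 * string literal counts the terminating NUL, so subtracting one gives the prefix
 * length at compile time), and it treats any boolean value other than "false",
 * "no" or "0" as true. A minimal standalone illustration of the same idiom;
 * PARAM_DELTA, parse_bool() and handle_keyword() are illustrative names, not part
 * of the plugin.
 */
#include <stdbool.h>
#include <string.h>

#define PARAM_DELTA "delta"

static bool parse_bool(const char *v) {
    /* same convention as the plugin: only false/no/0 are false */
    return !(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0);
}

static bool handle_keyword(const char *keyword, bool *delta) {
    if(strncmp(keyword, PARAM_DELTA ":", sizeof(PARAM_DELTA ":") - 1) == 0) {
        *delta = parse_bool(&keyword[sizeof(PARAM_DELTA ":") - 1]);
        return true;    /* handled, e.g. "delta:true" or "delta:no" */
    }
    return false;       /* not a delta:... keyword */
}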
progress = true; + } + else if(strncmp(keyword, JOURNAL_PARAMETER_DELTA ":", sizeof(JOURNAL_PARAMETER_DELTA ":") - 1) == 0) { + char *v = &keyword[sizeof(JOURNAL_PARAMETER_DELTA ":") - 1]; + + if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) + delta = false; + else + delta = true; + } + else if(strncmp(keyword, JOURNAL_PARAMETER_TAIL ":", sizeof(JOURNAL_PARAMETER_TAIL ":") - 1) == 0) { + char *v = &keyword[sizeof(JOURNAL_PARAMETER_TAIL ":") - 1]; + + if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) + tail = false; + else + tail = true; + } + else if(strncmp(keyword, JOURNAL_PARAMETER_SAMPLING ":", sizeof(JOURNAL_PARAMETER_SAMPLING ":") - 1) == 0) { + sampling = str2ul(&keyword[sizeof(JOURNAL_PARAMETER_SAMPLING ":") - 1]); + } + else if(strncmp(keyword, JOURNAL_PARAMETER_DATA_ONLY ":", sizeof(JOURNAL_PARAMETER_DATA_ONLY ":") - 1) == 0) { + char *v = &keyword[sizeof(JOURNAL_PARAMETER_DATA_ONLY ":") - 1]; + + if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) + data_only = false; + else + data_only = true; + } + else if(strncmp(keyword, JOURNAL_PARAMETER_SLICE ":", sizeof(JOURNAL_PARAMETER_SLICE ":") - 1) == 0) { + char *v = &keyword[sizeof(JOURNAL_PARAMETER_SLICE ":") - 1]; + + if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) + slice = false; + else + slice = true; + } + else if(strncmp(keyword, JOURNAL_PARAMETER_ID ":", sizeof(JOURNAL_PARAMETER_ID ":") - 1) == 0) { + char *id = &keyword[sizeof(JOURNAL_PARAMETER_ID ":") - 1]; + + if(*id) + progress_id = id; + } + else if(strncmp(keyword, JOURNAL_PARAMETER_SOURCE ":", sizeof(JOURNAL_PARAMETER_SOURCE ":") - 1) == 0) { + const char *value = &keyword[sizeof(JOURNAL_PARAMETER_SOURCE ":") - 1]; + + buffer_json_member_add_array(wb, JOURNAL_PARAMETER_SOURCE); + + BUFFER *sources_list = buffer_create(0, NULL); + + source_type = SDJF_NONE; + while(value) { + char *sep = strchr(value, ','); + if(sep) + *sep++ = '\0'; + + buffer_json_add_array_item_string(wb, value); + + if(strcmp(value, SDJF_SOURCE_ALL_NAME) == 0) { + source_type |= SDJF_ALL; + value = NULL; + } + else if(strcmp(value, SDJF_SOURCE_LOCAL_NAME) == 0) { + source_type |= SDJF_LOCAL_ALL; + value = NULL; + } + else if(strcmp(value, SDJF_SOURCE_REMOTES_NAME) == 0) { + source_type |= SDJF_REMOTE_ALL; + value = NULL; + } + else if(strcmp(value, SDJF_SOURCE_NAMESPACES_NAME) == 0) { + source_type |= SDJF_LOCAL_NAMESPACE; + value = NULL; + } + else if(strcmp(value, SDJF_SOURCE_LOCAL_SYSTEM_NAME) == 0) { + source_type |= SDJF_LOCAL_SYSTEM; + value = NULL; + } + else if(strcmp(value, SDJF_SOURCE_LOCAL_USERS_NAME) == 0) { + source_type |= SDJF_LOCAL_USER; + value = NULL; + } + else if(strcmp(value, SDJF_SOURCE_LOCAL_OTHER_NAME) == 0) { + source_type |= SDJF_LOCAL_OTHER; + value = NULL; + } + else { + // else, match the source, whatever it is + if(buffer_strlen(sources_list)) + buffer_strcat(sources_list, ","); + + buffer_strcat(sources_list, value); + } + + value = sep; + } + + if(buffer_strlen(sources_list)) { + simple_pattern_free(sources); + sources = simple_pattern_create(buffer_tostring(sources_list), ",", SIMPLE_PATTERN_EXACT, false); + } + + buffer_free(sources_list); + + buffer_json_array_close(wb); // source + } + else if(strncmp(keyword, JOURNAL_PARAMETER_AFTER ":", sizeof(JOURNAL_PARAMETER_AFTER ":") - 1) == 0) { + after_s = str2l(&keyword[sizeof(JOURNAL_PARAMETER_AFTER ":") - 1]); + } + else if(strncmp(keyword, JOURNAL_PARAMETER_BEFORE ":", sizeof(JOURNAL_PARAMETER_BEFORE ":") - 1) == 0) 
{ + before_s = str2l(&keyword[sizeof(JOURNAL_PARAMETER_BEFORE ":") - 1]); + } + else if(strncmp(keyword, JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":", sizeof(JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":") - 1) == 0) { + if_modified_since = str2ull(&keyword[sizeof(JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":") - 1], NULL); } - else if(strncmp(keyword, JOURNAL_PARAMETER_BEFORE ":", strlen(JOURNAL_PARAMETER_BEFORE ":")) == 0) { - before_s = str2l(&keyword[strlen(JOURNAL_PARAMETER_BEFORE ":")]); + else if(strncmp(keyword, JOURNAL_PARAMETER_ANCHOR ":", sizeof(JOURNAL_PARAMETER_ANCHOR ":") - 1) == 0) { + anchor = str2ull(&keyword[sizeof(JOURNAL_PARAMETER_ANCHOR ":") - 1], NULL); } - else if(strncmp(keyword, JOURNAL_PARAMETER_ANCHOR ":", strlen(JOURNAL_PARAMETER_ANCHOR ":")) == 0) { - anchor = str2ull(&keyword[strlen(JOURNAL_PARAMETER_ANCHOR ":")], NULL); + else if(strncmp(keyword, JOURNAL_PARAMETER_DIRECTION ":", sizeof(JOURNAL_PARAMETER_DIRECTION ":") - 1) == 0) { + direction = strcasecmp(&keyword[sizeof(JOURNAL_PARAMETER_DIRECTION ":") - 1], "forward") == 0 ? FACETS_ANCHOR_DIRECTION_FORWARD : FACETS_ANCHOR_DIRECTION_BACKWARD; } - else if(strncmp(keyword, JOURNAL_PARAMETER_LAST ":", strlen(JOURNAL_PARAMETER_LAST ":")) == 0) { - last = str2ul(&keyword[strlen(JOURNAL_PARAMETER_LAST ":")]); + else if(strncmp(keyword, JOURNAL_PARAMETER_LAST ":", sizeof(JOURNAL_PARAMETER_LAST ":") - 1) == 0) { + last = str2ul(&keyword[sizeof(JOURNAL_PARAMETER_LAST ":") - 1]); } - else if(strncmp(keyword, JOURNAL_PARAMETER_QUERY ":", strlen(JOURNAL_PARAMETER_QUERY ":")) == 0) { - query= &keyword[strlen(JOURNAL_PARAMETER_QUERY ":")]; + else if(strncmp(keyword, JOURNAL_PARAMETER_QUERY ":", sizeof(JOURNAL_PARAMETER_QUERY ":") - 1) == 0) { + query= &keyword[sizeof(JOURNAL_PARAMETER_QUERY ":") - 1]; + } + else if(strncmp(keyword, JOURNAL_PARAMETER_HISTOGRAM ":", sizeof(JOURNAL_PARAMETER_HISTOGRAM ":") - 1) == 0) { + chart = &keyword[sizeof(JOURNAL_PARAMETER_HISTOGRAM ":") - 1]; + } + else if(strncmp(keyword, JOURNAL_PARAMETER_FACETS ":", sizeof(JOURNAL_PARAMETER_FACETS ":") - 1) == 0) { + char *value = &keyword[sizeof(JOURNAL_PARAMETER_FACETS ":") - 1]; + if(*value) { + buffer_json_member_add_array(wb, JOURNAL_PARAMETER_FACETS); + + while(value) { + char *sep = strchr(value, ','); + if(sep) + *sep++ = '\0'; + + facets_register_facet_id(facets, value, FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS|FACET_KEY_OPTION_REORDER); + buffer_json_add_array_item_string(wb, value); + + value = sep; + } + + buffer_json_array_close(wb); // JOURNAL_PARAMETER_FACETS + } } else { char *value = strchr(keyword, ':'); @@ -412,8 +1915,9 @@ static void function_systemd_journal(const char *transaction, char *function, ch if(sep) *sep++ = '\0'; - facets_register_facet_filter(facets, keyword, value, FACET_KEY_OPTION_REORDER); + facets_register_facet_id_filter(facets, keyword, value, FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS|FACET_KEY_OPTION_REORDER); buffer_json_add_array_item_string(wb, value); + filters++; value = sep; } @@ -423,18 +1927,31 @@ static void function_systemd_journal(const char *transaction, char *function, ch } } - buffer_json_object_close(wb); // filters + // ------------------------------------------------------------------------ + // put this request into the progress db + + if(progress_id && *progress_id) { + fqs_item = dictionary_set_and_acquire_item(function_query_status_dict, progress_id, &tmp_fqs, sizeof(tmp_fqs)); + fqs = dictionary_acquired_item_value(fqs_item); + } + else { + // no progress id given, proceed without registering our 
progress in the dictionary + fqs = &tmp_fqs; + fqs_item = NULL; + } + + // ------------------------------------------------------------------------ + // validate parameters - time_t expires = now_realtime_sec() + 1; - time_t now_s; + time_t now_s = now_realtime_sec(); + time_t expires = now_s + 1; if(!after_s && !before_s) { - now_s = now_realtime_sec(); before_s = now_s; after_s = before_s - SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION; } else - rrdr_relative_window_to_absolute(&after_s, &before_s, &now_s, false); + rrdr_relative_window_to_absolute(&after_s, &before_s, now_s); if(after_s > before_s) { time_t tmp = after_s; @@ -448,149 +1965,175 @@ static void function_systemd_journal(const char *transaction, char *function, ch if(!last) last = SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY; - buffer_json_member_add_time_t(wb, "after", after_s); - buffer_json_member_add_time_t(wb, "before", before_s); - buffer_json_member_add_uint64(wb, "anchor", anchor); - buffer_json_member_add_uint64(wb, "last", last); - buffer_json_member_add_string(wb, "query", query); - buffer_json_member_add_time_t(wb, "timeout", timeout); - buffer_json_object_close(wb); // request - - facets_set_items(facets, last); - facets_set_anchor(facets, anchor); - facets_set_query(facets, query); - int response = systemd_journal_query(wb, facets, after_s * USEC_PER_SEC, before_s * USEC_PER_SEC, - now_monotonic_usec() + (timeout - 1) * USEC_PER_SEC); - if(response != HTTP_RESP_OK) { - pluginsd_function_json_error(transaction, response, "failed"); - goto cleanup; + // ------------------------------------------------------------------------ + // set query time-frame, anchors and direction + + fqs->after_ut = after_s * USEC_PER_SEC; + fqs->before_ut = (before_s * USEC_PER_SEC) + USEC_PER_SEC - 1; + fqs->if_modified_since = if_modified_since; + fqs->data_only = data_only; + fqs->delta = (fqs->data_only) ? delta : false; + fqs->tail = (fqs->data_only && fqs->if_modified_since) ? tail : false; + fqs->sources = sources; + fqs->source_type = source_type; + fqs->entries = last; + fqs->last_modified = 0; + fqs->filters = filters; + fqs->query = (query && *query) ? query : NULL; + fqs->histogram = (chart && *chart) ? 
chart : NULL; + fqs->direction = direction; + fqs->anchor.start_ut = anchor; + fqs->anchor.stop_ut = 0; + fqs->sampling = sampling; + + if(fqs->anchor.start_ut && fqs->tail) { + // a tail request + // we need the top X entries from BEFORE + // but, we need to calculate the facets and the + // histogram up to the anchor + fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD; + fqs->anchor.start_ut = 0; + fqs->anchor.stop_ut = anchor; } - pluginsd_function_result_begin_to_stdout(transaction, HTTP_RESP_OK, "application/json", expires); - fwrite(buffer_tostring(wb), buffer_strlen(wb), 1, stdout); + if(anchor && anchor < fqs->after_ut) { + log_fqs(fqs, "received anchor is too small for query timeframe, ignoring anchor"); + anchor = 0; + fqs->anchor.start_ut = 0; + fqs->anchor.stop_ut = 0; + fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD; + } + else if(anchor > fqs->before_ut) { + log_fqs(fqs, "received anchor is too big for query timeframe, ignoring anchor"); + anchor = 0; + fqs->anchor.start_ut = 0; + fqs->anchor.stop_ut = 0; + fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD; + } - pluginsd_function_result_end_to_stdout(); + facets_set_anchor(facets, fqs->anchor.start_ut, fqs->anchor.stop_ut, fqs->direction); -cleanup: - facets_destroy(facets); - buffer_free(wb); -} + facets_set_additional_options(facets, + ((fqs->data_only) ? FACETS_OPTION_DATA_ONLY : 0) | + ((fqs->delta) ? FACETS_OPTION_SHOW_DELTAS : 0)); -static void *reader_main(void *arg __maybe_unused) { - char buffer[PLUGINSD_LINE_MAX + 1]; + // ------------------------------------------------------------------------ + // set the rest of the query parameters - char *s = NULL; - while(!plugin_should_exit && (s = fgets(buffer, PLUGINSD_LINE_MAX, stdin))) { - char *words[PLUGINSD_MAX_WORDS] = { NULL }; - size_t num_words = quoted_strings_splitter_pluginsd(buffer, words, PLUGINSD_MAX_WORDS); + facets_set_items(facets, fqs->entries); + facets_set_query(facets, fqs->query); - const char *keyword = get_word(words, num_words, 0); +#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS + fqs->slice = slice; + if(slice) + facets_enable_slice_mode(facets); +#else + fqs->slice = false; +#endif - if(keyword && strcmp(keyword, PLUGINSD_KEYWORD_FUNCTION) == 0) { - char *transaction = get_word(words, num_words, 1); - char *timeout_s = get_word(words, num_words, 2); - char *function = get_word(words, num_words, 3); + if(fqs->histogram) + facets_set_timeframe_and_histogram_by_id(facets, fqs->histogram, fqs->after_ut, fqs->before_ut); + else + facets_set_timeframe_and_histogram_by_name(facets, "PRIORITY", fqs->after_ut, fqs->before_ut); - if(!transaction || !*transaction || !timeout_s || !*timeout_s || !function || !*function) { - netdata_log_error("Received incomplete %s (transaction = '%s', timeout = '%s', function = '%s'). 
Ignoring it.", - keyword, - transaction?transaction:"(unset)", - timeout_s?timeout_s:"(unset)", - function?function:"(unset)"); - } - else { - int timeout = str2i(timeout_s); - if(timeout <= 0) timeout = SYSTEMD_JOURNAL_DEFAULT_TIMEOUT; - netdata_mutex_lock(&mutex); + // ------------------------------------------------------------------------ + // complete the request object + + buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_INFO, false); + buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_SLICE, fqs->slice); + buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_DATA_ONLY, fqs->data_only); + buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_PROGRESS, false); + buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_DELTA, fqs->delta); + buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_TAIL, fqs->tail); + buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_SAMPLING, fqs->sampling); + buffer_json_member_add_string(wb, JOURNAL_PARAMETER_ID, progress_id); + buffer_json_member_add_uint64(wb, "source_type", fqs->source_type); + buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_AFTER, fqs->after_ut / USEC_PER_SEC); + buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_BEFORE, fqs->before_ut / USEC_PER_SEC); + buffer_json_member_add_uint64(wb, "if_modified_since", fqs->if_modified_since); + buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_ANCHOR, anchor); + buffer_json_member_add_string(wb, JOURNAL_PARAMETER_DIRECTION, fqs->direction == FACETS_ANCHOR_DIRECTION_FORWARD ? "forward" : "backward"); + buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_LAST, fqs->entries); + buffer_json_member_add_string(wb, JOURNAL_PARAMETER_QUERY, fqs->query); + buffer_json_member_add_string(wb, JOURNAL_PARAMETER_HISTOGRAM, fqs->histogram); + buffer_json_object_close(wb); // request - if(strncmp(function, SYSTEMD_JOURNAL_FUNCTION_NAME, strlen(SYSTEMD_JOURNAL_FUNCTION_NAME)) == 0) - function_systemd_journal(transaction, function, buffer, PLUGINSD_LINE_MAX + 1, timeout); - else - pluginsd_function_json_error(transaction, HTTP_RESP_NOT_FOUND, "No function with this name found in systemd-journal.plugin."); + buffer_json_journal_versions(wb); - fflush(stdout); - netdata_mutex_unlock(&mutex); + // ------------------------------------------------------------------------ + // run the request + + int response; + + if(info) { + facets_accepted_parameters_to_json_array(facets, wb, false); + buffer_json_member_add_array(wb, "required_params"); + { + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_string(wb, "id", "source"); + buffer_json_member_add_string(wb, "name", "source"); + buffer_json_member_add_string(wb, "help", "Select the SystemD Journal source to query"); + buffer_json_member_add_string(wb, "type", "multiselect"); + buffer_json_member_add_array(wb, "options"); + { + available_journal_file_sources_to_json_array(wb); + } + buffer_json_array_close(wb); // options array } + buffer_json_object_close(wb); // required params object } - else - netdata_log_error("Received unknown command: %s", keyword?keyword:"(unset)"); - } - - if(!s || feof(stdin) || ferror(stdin)) { - plugin_should_exit = true; - netdata_log_error("Received error on stdin."); - } - - exit(1); -} - -int main(int argc __maybe_unused, char **argv __maybe_unused) { - stderror = stderr; - clocks_init(); - - program_name = "systemd-journal.plugin"; - - // disable syslog - error_log_syslog = 0; - - // set errors flood protection to 100 logs per hour - error_log_errors_per_period = 100; - error_log_throttle_period = 3600; - - 
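/*
 * Editor's sketch: as validated earlier in this function, "before" may be given
 * as an absolute timestamp or relative to now, and "after" as absolute or
 * relative to "before" (e.g. after:-86400 before:0 selects the last 24 hours),
 * with the window swapped if it arrives reversed. The conversion below mirrors
 * that documented behaviour under the simplifying assumption that values <= 0
 * are relative offsets; it is an illustration, not the body of
 * rrdr_relative_window_to_absolute().
 */
#include <time.h>

static void example_relative_window_to_absolute(time_t *after, time_t *before, time_t now) {
    if(*before <= 0) *before = now + *before;       /* before:0     -> now            */
    if(*after  <= 0) *after  = *before + *after;    /* after:-86400 -> before - 86400 */

    if(*after > *before) {                          /* keep the window ordered */
        time_t tmp = *after;
        *after = *before;
        *before = tmp;
    }
}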
// initialize the threads - netdata_threads_init_for_external_plugins(0); // set the default threads stack size here - - uids = dictionary_create(0); - gids = dictionary_create(0); + buffer_json_array_close(wb); // required_params array - // ------------------------------------------------------------------------ - // debug + facets_table_config(wb); - if(argc == 2 && strcmp(argv[1], "debug") == 0) { - char buf[] = "systemd-journal after:-86400 before:0 last:500"; - function_systemd_journal("123", buf, "", 0, 30); - exit(1); + buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_string(wb, "type", "table"); + buffer_json_member_add_string(wb, "help", SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION); + buffer_json_finalize(wb); + response = HTTP_RESP_OK; + goto output; } - // ------------------------------------------------------------------------ + if(progress) { + function_systemd_journal_progress(wb, transaction, progress_id); + goto cleanup; + } - netdata_thread_t reader_thread; - netdata_thread_create(&reader_thread, "SDJ_READER", NETDATA_THREAD_OPTION_DONT_LOG, reader_main, NULL); + response = netdata_systemd_journal_query(wb, facets, fqs); // ------------------------------------------------------------------------ + // handle error response - time_t started_t = now_monotonic_sec(); - - size_t iteration; - usec_t step = 1000 * USEC_PER_MS; - bool tty = isatty(fileno(stderr)) == 1; - - netdata_mutex_lock(&mutex); - fprintf(stdout, PLUGINSD_KEYWORD_FUNCTION " GLOBAL \"%s\" %d \"%s\"\n", - SYSTEMD_JOURNAL_FUNCTION_NAME, SYSTEMD_JOURNAL_DEFAULT_TIMEOUT, SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION); - - heartbeat_t hb; - heartbeat_init(&hb); - for(iteration = 0; 1 ; iteration++) { - netdata_mutex_unlock(&mutex); - heartbeat_next(&hb, step); - netdata_mutex_lock(&mutex); + if(response != HTTP_RESP_OK) { + netdata_mutex_lock(&stdout_mutex); + pluginsd_function_json_error_to_stdout(transaction, response, "failed"); + netdata_mutex_unlock(&stdout_mutex); + goto cleanup; + } - if(!tty) - fprintf(stdout, "\n"); +output: + netdata_mutex_lock(&stdout_mutex); + pluginsd_function_result_to_stdout(transaction, response, "application/json", expires, wb); + netdata_mutex_unlock(&stdout_mutex); - fflush(stdout); +cleanup: + simple_pattern_free(sources); + facets_destroy(facets); + buffer_free(wb); - time_t now = now_monotonic_sec(); - if(now - started_t > 86400) - break; + if(fqs_item) { + dictionary_del(function_query_status_dict, dictionary_acquired_item_name(fqs_item)); + dictionary_acquired_item_release(function_query_status_dict, fqs_item); + dictionary_garbage_collect(function_query_status_dict); } +} - dictionary_destroy(uids); - dictionary_destroy(gids); - - exit(0); +void journal_init_query_status(void) { + function_query_status_dict = dictionary_create_advanced( + DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, sizeof(FUNCTION_QUERY_STATUS)); } diff --git a/collectors/systemd-journal.plugin/systemd-main.c b/collectors/systemd-journal.plugin/systemd-main.c new file mode 100644 index 00000000000000..d335fd82befb3c --- /dev/null +++ b/collectors/systemd-journal.plugin/systemd-main.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "systemd-internals.h" +#include "libnetdata/required_dummies.h" + +#define SYSTEMD_JOURNAL_WORKER_THREADS 5 + +netdata_mutex_t stdout_mutex = NETDATA_MUTEX_INITIALIZER; +static bool plugin_should_exit = false; + +int main(int argc __maybe_unused, char **argv __maybe_unused) { + clocks_init(); + 
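/*
 * Editor's sketch: a request to function_systemd_journal() arrives as one line of
 * space-separated "name:value" words; the debug entry point just below builds such
 * a line (e.g. "systemd-journal after:-8640000 before:0 direction:backward
 * last:200 data_only:false slice:true source:all") and the plugin splits it with
 * quoted_strings_splitter_pluginsd(). The splitter below is a simplified,
 * standalone analogue that ignores quoting; split_words() is an illustrative
 * name, not the plugin's helper.
 */
#include <stddef.h>
#include <string.h>

static size_t split_words(char *line, char **words, size_t max_words) {
    size_t n = 0;
    for(char *tok = strtok(line, " \t"); tok && n < max_words; tok = strtok(NULL, " \t"))
        words[n++] = tok;
    return n;
}

/* usage:
 *   char buf[] = "systemd-journal after:-86400 before:0 last:200";
 *   char *words[16] = { NULL };
 *   size_t n = split_words(buf, words, 16);   // words[0] == "systemd-journal", words[1] == "after:-86400"
 */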
netdata_thread_set_tag("SDMAIN"); + nd_log_initialize_for_external_plugins("systemd-journal.plugin"); + + netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX"); + if(verify_netdata_host_prefix() == -1) exit(1); + + // ------------------------------------------------------------------------ + // initialization + + netdata_systemd_journal_message_ids_init(); + journal_init_query_status(); + journal_init_files_and_directories(); + + // ------------------------------------------------------------------------ + // debug + + if(argc == 2 && strcmp(argv[1], "debug") == 0) { + journal_files_registry_update(); + + bool cancelled = false; + char buf[] = "systemd-journal after:-8640000 before:0 direction:backward last:200 data_only:false slice:true source:all"; + // char buf[] = "systemd-journal after:1695332964 before:1695937764 direction:backward last:100 slice:true source:all DHKucpqUoe1:PtVoyIuX.MU"; + // char buf[] = "systemd-journal after:1694511062 before:1694514662 anchor:1694514122024403"; + function_systemd_journal("123", buf, 600, &cancelled); +// function_systemd_units("123", "systemd-units", 600, &cancelled); + exit(1); + } +#ifdef ENABLE_SYSTEMD_DBUS + if(argc == 2 && strcmp(argv[1], "debug-units") == 0) { + bool cancelled = false; + function_systemd_units("123", "systemd-units", 600, &cancelled); + exit(1); + } +#endif + + // ------------------------------------------------------------------------ + // watcher thread + + netdata_thread_t watcher_thread; + netdata_thread_create(&watcher_thread, "SDWATCH", + NETDATA_THREAD_OPTION_DONT_LOG, journal_watcher_main, NULL); + + // ------------------------------------------------------------------------ + // the event loop for functions + + struct functions_evloop_globals *wg = + functions_evloop_init(SYSTEMD_JOURNAL_WORKER_THREADS, "SDJ", &stdout_mutex, &plugin_should_exit); + + functions_evloop_add_function(wg, SYSTEMD_JOURNAL_FUNCTION_NAME, function_systemd_journal, + SYSTEMD_JOURNAL_DEFAULT_TIMEOUT); + +#ifdef ENABLE_SYSTEMD_DBUS + functions_evloop_add_function(wg, SYSTEMD_UNITS_FUNCTION_NAME, function_systemd_units, + SYSTEMD_UNITS_DEFAULT_TIMEOUT); +#endif + + // ------------------------------------------------------------------------ + // register functions to netdata + + netdata_mutex_lock(&stdout_mutex); + + fprintf(stdout, PLUGINSD_KEYWORD_FUNCTION " GLOBAL \"%s\" %d \"%s\"\n", + SYSTEMD_JOURNAL_FUNCTION_NAME, SYSTEMD_JOURNAL_DEFAULT_TIMEOUT, SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION); + +#ifdef ENABLE_SYSTEMD_DBUS + fprintf(stdout, PLUGINSD_KEYWORD_FUNCTION " GLOBAL \"%s\" %d \"%s\"\n", + SYSTEMD_UNITS_FUNCTION_NAME, SYSTEMD_UNITS_DEFAULT_TIMEOUT, SYSTEMD_UNITS_FUNCTION_DESCRIPTION); +#endif + + fflush(stdout); + netdata_mutex_unlock(&stdout_mutex); + + // ------------------------------------------------------------------------ + + usec_t step_ut = 100 * USEC_PER_MS; + usec_t send_newline_ut = 0; + usec_t since_last_scan_ut = SYSTEMD_JOURNAL_ALL_FILES_SCAN_EVERY_USEC * 2; // something big to trigger scanning at start + bool tty = isatty(fileno(stderr)) == 1; + + heartbeat_t hb; + heartbeat_init(&hb); + while(!plugin_should_exit) { + + if(since_last_scan_ut > SYSTEMD_JOURNAL_ALL_FILES_SCAN_EVERY_USEC) { + journal_files_registry_update(); + since_last_scan_ut = 0; + } + + usec_t dt_ut = heartbeat_next(&hb, step_ut); + since_last_scan_ut += dt_ut; + send_newline_ut += dt_ut; + + if(!tty && send_newline_ut > USEC_PER_SEC) { + send_newline_and_flush(); + send_newline_ut = 0; + } + } + + exit(0); +} diff --git 
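/*
 * Editor's sketch: the main loop above wakes on a fixed heartbeat, rescans the
 * journal file registry once SYSTEMD_JOURNAL_ALL_FILES_SCAN_EVERY_USEC has
 * accumulated, and emits a newline keep-alive every second when stderr is not
 * attached to a terminal, so netdata knows the plugin is alive. A self-contained
 * analogue of that scheduling: usleep() stands in for netdata's heartbeat
 * helpers, the constants and rescan_files() are illustrative, and the loop is
 * bounded here instead of running until shutdown.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define STEP_UT          (100ULL * 1000ULL)           /* 100ms tick       */
#define RESCAN_EVERY_UT  (60ULL * 1000ULL * 1000ULL)  /* rescan every 60s */
#define NEWLINE_EVERY_UT (1000ULL * 1000ULL)          /* keep-alive: 1s   */

static void rescan_files(void) { /* placeholder for journal_files_registry_update() */ }

int main(void) {
    uint64_t since_scan_ut = RESCAN_EVERY_UT * 2;   /* force a scan on the first tick */
    uint64_t since_newline_ut = 0;
    bool tty = isatty(fileno(stderr)) == 1;

    for(int i = 0; i < 50; i++) {
        if(since_scan_ut > RESCAN_EVERY_UT) {
            rescan_files();
            since_scan_ut = 0;
        }

        usleep((useconds_t)STEP_UT);
        since_scan_ut += STEP_UT;
        since_newline_ut += STEP_UT;

        if(!tty && since_newline_ut > NEWLINE_EVERY_UT) {
            putchar('\n');               /* keep-alive newline, as the plugin does */
            fflush(stdout);
            since_newline_ut = 0;
        }
    }
    return 0;
}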
a/collectors/systemd-journal.plugin/systemd-units.c b/collectors/systemd-journal.plugin/systemd-units.c new file mode 100644 index 00000000000000..dac15881748ab3 --- /dev/null +++ b/collectors/systemd-journal.plugin/systemd-units.c @@ -0,0 +1,1965 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "systemd-internals.h" + +#ifdef ENABLE_SYSTEMD_DBUS +#include + +#define SYSTEMD_UNITS_MAX_PARAMS 10 +#define SYSTEMD_UNITS_DBUS_TYPES "(ssssssouso)" + +// ---------------------------------------------------------------------------- +// copied from systemd: string-table.h + +typedef char sd_char; +#define XCONCATENATE(x, y) x ## y +#define CONCATENATE(x, y) XCONCATENATE(x, y) + +#ifndef __COVERITY__ +# define VOID_0 ((void)0) +#else +# define VOID_0 ((void*)0) +#endif + +#define ELEMENTSOF(x) \ + (__builtin_choose_expr( \ + !__builtin_types_compatible_p(typeof(x), typeof(&*(x))), \ + sizeof(x)/sizeof((x)[0]), \ + VOID_0)) + +#define UNIQ_T(x, uniq) CONCATENATE(__unique_prefix_, CONCATENATE(x, uniq)) +#define UNIQ __COUNTER__ +#define __CMP(aq, a, bq, b) \ + ({ \ + const typeof(a) UNIQ_T(A, aq) = (a); \ + const typeof(b) UNIQ_T(B, bq) = (b); \ + UNIQ_T(A, aq) < UNIQ_T(B, bq) ? -1 : \ + UNIQ_T(A, aq) > UNIQ_T(B, bq) ? 1 : 0; \ + }) +#define CMP(a, b) __CMP(UNIQ, (a), UNIQ, (b)) + +static inline int strcmp_ptr(const sd_char *a, const sd_char *b) { + if (a && b) + return strcmp(a, b); + + return CMP(a, b); +} + +static inline bool streq_ptr(const sd_char *a, const sd_char *b) { + return strcmp_ptr(a, b) == 0; +} + +ssize_t string_table_lookup(const char * const *table, size_t len, const char *key) { + if (!key || !*key) + return -EINVAL; + + for (size_t i = 0; i < len; ++i) + if (streq_ptr(table[i], key)) + return (ssize_t) i; + + return -EINVAL; +} + +/* For basic lookup tables with strictly enumerated entries */ +#define _DEFINE_STRING_TABLE_LOOKUP_TO_STRING(name,type,scope) \ + scope const char *name##_to_string(type i) { \ + if (i < 0 || i >= (type) ELEMENTSOF(name##_table)) \ + return NULL; \ + return name##_table[i]; \ + } + +#define _DEFINE_STRING_TABLE_LOOKUP_FROM_STRING(name,type,scope) \ + scope type name##_from_string(const char *s) { \ + return (type) string_table_lookup(name##_table, ELEMENTSOF(name##_table), s); \ + } + +#define _DEFINE_STRING_TABLE_LOOKUP(name,type,scope) \ + _DEFINE_STRING_TABLE_LOOKUP_TO_STRING(name,type,scope) \ + _DEFINE_STRING_TABLE_LOOKUP_FROM_STRING(name,type,scope) + +#define DEFINE_STRING_TABLE_LOOKUP(name,type) _DEFINE_STRING_TABLE_LOOKUP(name,type,) + +// ---------------------------------------------------------------------------- +// copied from systemd: unit-def.h + +typedef enum UnitType { + UNIT_SERVICE, + UNIT_MOUNT, + UNIT_SWAP, + UNIT_SOCKET, + UNIT_TARGET, + UNIT_DEVICE, + UNIT_AUTOMOUNT, + UNIT_TIMER, + UNIT_PATH, + UNIT_SLICE, + UNIT_SCOPE, + _UNIT_TYPE_MAX, + _UNIT_TYPE_INVALID = -EINVAL, +} UnitType; + +typedef enum UnitLoadState { + UNIT_STUB, + UNIT_LOADED, + UNIT_NOT_FOUND, /* error condition #1: unit file not found */ + UNIT_BAD_SETTING, /* error condition #2: we couldn't parse some essential unit file setting */ + UNIT_ERROR, /* error condition #3: other "system" error, catchall for the rest */ + UNIT_MERGED, + UNIT_MASKED, + _UNIT_LOAD_STATE_MAX, + _UNIT_LOAD_STATE_INVALID = -EINVAL, +} UnitLoadState; + +typedef enum UnitActiveState { + UNIT_ACTIVE, + UNIT_RELOADING, + UNIT_INACTIVE, + UNIT_FAILED, + UNIT_ACTIVATING, + UNIT_DEACTIVATING, + UNIT_MAINTENANCE, + _UNIT_ACTIVE_STATE_MAX, + _UNIT_ACTIVE_STATE_INVALID = -EINVAL, +} 
UnitActiveState; + +typedef enum AutomountState { + AUTOMOUNT_DEAD, + AUTOMOUNT_WAITING, + AUTOMOUNT_RUNNING, + AUTOMOUNT_FAILED, + _AUTOMOUNT_STATE_MAX, + _AUTOMOUNT_STATE_INVALID = -EINVAL, +} AutomountState; + +typedef enum DeviceState { + DEVICE_DEAD, + DEVICE_TENTATIVE, /* mounted or swapped, but not (yet) announced by udev */ + DEVICE_PLUGGED, /* announced by udev */ + _DEVICE_STATE_MAX, + _DEVICE_STATE_INVALID = -EINVAL, +} DeviceState; + +typedef enum MountState { + MOUNT_DEAD, + MOUNT_MOUNTING, /* /usr/bin/mount is running, but the mount is not done yet. */ + MOUNT_MOUNTING_DONE, /* /usr/bin/mount is running, and the mount is done. */ + MOUNT_MOUNTED, + MOUNT_REMOUNTING, + MOUNT_UNMOUNTING, + MOUNT_REMOUNTING_SIGTERM, + MOUNT_REMOUNTING_SIGKILL, + MOUNT_UNMOUNTING_SIGTERM, + MOUNT_UNMOUNTING_SIGKILL, + MOUNT_FAILED, + MOUNT_CLEANING, + _MOUNT_STATE_MAX, + _MOUNT_STATE_INVALID = -EINVAL, +} MountState; + +typedef enum PathState { + PATH_DEAD, + PATH_WAITING, + PATH_RUNNING, + PATH_FAILED, + _PATH_STATE_MAX, + _PATH_STATE_INVALID = -EINVAL, +} PathState; + +typedef enum ScopeState { + SCOPE_DEAD, + SCOPE_START_CHOWN, + SCOPE_RUNNING, + SCOPE_ABANDONED, + SCOPE_STOP_SIGTERM, + SCOPE_STOP_SIGKILL, + SCOPE_FAILED, + _SCOPE_STATE_MAX, + _SCOPE_STATE_INVALID = -EINVAL, +} ScopeState; + +typedef enum ServiceState { + SERVICE_DEAD, + SERVICE_CONDITION, + SERVICE_START_PRE, + SERVICE_START, + SERVICE_START_POST, + SERVICE_RUNNING, + SERVICE_EXITED, /* Nothing is running anymore, but RemainAfterExit is true hence this is OK */ + SERVICE_RELOAD, /* Reloading via ExecReload= */ + SERVICE_RELOAD_SIGNAL, /* Reloading via SIGHUP requested */ + SERVICE_RELOAD_NOTIFY, /* Waiting for READY=1 after RELOADING=1 notify */ + SERVICE_STOP, /* No STOP_PRE state, instead just register multiple STOP executables */ + SERVICE_STOP_WATCHDOG, + SERVICE_STOP_SIGTERM, + SERVICE_STOP_SIGKILL, + SERVICE_STOP_POST, + SERVICE_FINAL_WATCHDOG, /* In case the STOP_POST executable needs to be aborted. */ + SERVICE_FINAL_SIGTERM, /* In case the STOP_POST executable hangs, we shoot that down, too */ + SERVICE_FINAL_SIGKILL, + SERVICE_FAILED, + SERVICE_DEAD_BEFORE_AUTO_RESTART, + SERVICE_FAILED_BEFORE_AUTO_RESTART, + SERVICE_DEAD_RESOURCES_PINNED, /* Like SERVICE_DEAD, but with pinned resources */ + SERVICE_AUTO_RESTART, + SERVICE_AUTO_RESTART_QUEUED, + SERVICE_CLEANING, + _SERVICE_STATE_MAX, + _SERVICE_STATE_INVALID = -EINVAL, +} ServiceState; + +typedef enum SliceState { + SLICE_DEAD, + SLICE_ACTIVE, + _SLICE_STATE_MAX, + _SLICE_STATE_INVALID = -EINVAL, +} SliceState; + +typedef enum SocketState { + SOCKET_DEAD, + SOCKET_START_PRE, + SOCKET_START_CHOWN, + SOCKET_START_POST, + SOCKET_LISTENING, + SOCKET_RUNNING, + SOCKET_STOP_PRE, + SOCKET_STOP_PRE_SIGTERM, + SOCKET_STOP_PRE_SIGKILL, + SOCKET_STOP_POST, + SOCKET_FINAL_SIGTERM, + SOCKET_FINAL_SIGKILL, + SOCKET_FAILED, + SOCKET_CLEANING, + _SOCKET_STATE_MAX, + _SOCKET_STATE_INVALID = -EINVAL, +} SocketState; + +typedef enum SwapState { + SWAP_DEAD, + SWAP_ACTIVATING, /* /sbin/swapon is running, but the swap not yet enabled. */ + SWAP_ACTIVATING_DONE, /* /sbin/swapon is running, and the swap is done. 
*/ + SWAP_ACTIVE, + SWAP_DEACTIVATING, + SWAP_DEACTIVATING_SIGTERM, + SWAP_DEACTIVATING_SIGKILL, + SWAP_FAILED, + SWAP_CLEANING, + _SWAP_STATE_MAX, + _SWAP_STATE_INVALID = -EINVAL, +} SwapState; + +typedef enum TargetState { + TARGET_DEAD, + TARGET_ACTIVE, + _TARGET_STATE_MAX, + _TARGET_STATE_INVALID = -EINVAL, +} TargetState; + +typedef enum TimerState { + TIMER_DEAD, + TIMER_WAITING, + TIMER_RUNNING, + TIMER_ELAPSED, + TIMER_FAILED, + _TIMER_STATE_MAX, + _TIMER_STATE_INVALID = -EINVAL, +} TimerState; + +typedef enum FreezerState { + FREEZER_RUNNING, + FREEZER_FREEZING, + FREEZER_FROZEN, + FREEZER_THAWING, + _FREEZER_STATE_MAX, + _FREEZER_STATE_INVALID = -EINVAL, +} FreezerState; + +// ---------------------------------------------------------------------------- +// copied from systemd: unit-def.c + +static const char* const unit_type_table[_UNIT_TYPE_MAX] = { + [UNIT_SERVICE] = "service", + [UNIT_SOCKET] = "socket", + [UNIT_TARGET] = "target", + [UNIT_DEVICE] = "device", + [UNIT_MOUNT] = "mount", + [UNIT_AUTOMOUNT] = "automount", + [UNIT_SWAP] = "swap", + [UNIT_TIMER] = "timer", + [UNIT_PATH] = "path", + [UNIT_SLICE] = "slice", + [UNIT_SCOPE] = "scope", +}; + +DEFINE_STRING_TABLE_LOOKUP(unit_type, UnitType); + +static const char* const unit_load_state_table[_UNIT_LOAD_STATE_MAX] = { + [UNIT_STUB] = "stub", + [UNIT_LOADED] = "loaded", + [UNIT_NOT_FOUND] = "not-found", + [UNIT_BAD_SETTING] = "bad-setting", + [UNIT_ERROR] = "error", + [UNIT_MERGED] = "merged", + [UNIT_MASKED] = "masked" +}; + +DEFINE_STRING_TABLE_LOOKUP(unit_load_state, UnitLoadState); + +static const char* const unit_active_state_table[_UNIT_ACTIVE_STATE_MAX] = { + [UNIT_ACTIVE] = "active", + [UNIT_RELOADING] = "reloading", + [UNIT_INACTIVE] = "inactive", + [UNIT_FAILED] = "failed", + [UNIT_ACTIVATING] = "activating", + [UNIT_DEACTIVATING] = "deactivating", + [UNIT_MAINTENANCE] = "maintenance", +}; + +DEFINE_STRING_TABLE_LOOKUP(unit_active_state, UnitActiveState); + +static const char* const automount_state_table[_AUTOMOUNT_STATE_MAX] = { + [AUTOMOUNT_DEAD] = "dead", + [AUTOMOUNT_WAITING] = "waiting", + [AUTOMOUNT_RUNNING] = "running", + [AUTOMOUNT_FAILED] = "failed" +}; + +DEFINE_STRING_TABLE_LOOKUP(automount_state, AutomountState); + +static const char* const device_state_table[_DEVICE_STATE_MAX] = { + [DEVICE_DEAD] = "dead", + [DEVICE_TENTATIVE] = "tentative", + [DEVICE_PLUGGED] = "plugged", +}; + +DEFINE_STRING_TABLE_LOOKUP(device_state, DeviceState); + +static const char* const mount_state_table[_MOUNT_STATE_MAX] = { + [MOUNT_DEAD] = "dead", + [MOUNT_MOUNTING] = "mounting", + [MOUNT_MOUNTING_DONE] = "mounting-done", + [MOUNT_MOUNTED] = "mounted", + [MOUNT_REMOUNTING] = "remounting", + [MOUNT_UNMOUNTING] = "unmounting", + [MOUNT_REMOUNTING_SIGTERM] = "remounting-sigterm", + [MOUNT_REMOUNTING_SIGKILL] = "remounting-sigkill", + [MOUNT_UNMOUNTING_SIGTERM] = "unmounting-sigterm", + [MOUNT_UNMOUNTING_SIGKILL] = "unmounting-sigkill", + [MOUNT_FAILED] = "failed", + [MOUNT_CLEANING] = "cleaning", +}; + +DEFINE_STRING_TABLE_LOOKUP(mount_state, MountState); + +static const char* const path_state_table[_PATH_STATE_MAX] = { + [PATH_DEAD] = "dead", + [PATH_WAITING] = "waiting", + [PATH_RUNNING] = "running", + [PATH_FAILED] = "failed" +}; + +DEFINE_STRING_TABLE_LOOKUP(path_state, PathState); + +static const char* const scope_state_table[_SCOPE_STATE_MAX] = { + [SCOPE_DEAD] = "dead", + [SCOPE_START_CHOWN] = "start-chown", + [SCOPE_RUNNING] = "running", + [SCOPE_ABANDONED] = "abandoned", + [SCOPE_STOP_SIGTERM] = "stop-sigterm", + 
[SCOPE_STOP_SIGKILL] = "stop-sigkill", + [SCOPE_FAILED] = "failed", +}; + +DEFINE_STRING_TABLE_LOOKUP(scope_state, ScopeState); + +static const char* const service_state_table[_SERVICE_STATE_MAX] = { + [SERVICE_DEAD] = "dead", + [SERVICE_CONDITION] = "condition", + [SERVICE_START_PRE] = "start-pre", + [SERVICE_START] = "start", + [SERVICE_START_POST] = "start-post", + [SERVICE_RUNNING] = "running", + [SERVICE_EXITED] = "exited", + [SERVICE_RELOAD] = "reload", + [SERVICE_RELOAD_SIGNAL] = "reload-signal", + [SERVICE_RELOAD_NOTIFY] = "reload-notify", + [SERVICE_STOP] = "stop", + [SERVICE_STOP_WATCHDOG] = "stop-watchdog", + [SERVICE_STOP_SIGTERM] = "stop-sigterm", + [SERVICE_STOP_SIGKILL] = "stop-sigkill", + [SERVICE_STOP_POST] = "stop-post", + [SERVICE_FINAL_WATCHDOG] = "final-watchdog", + [SERVICE_FINAL_SIGTERM] = "final-sigterm", + [SERVICE_FINAL_SIGKILL] = "final-sigkill", + [SERVICE_FAILED] = "failed", + [SERVICE_DEAD_BEFORE_AUTO_RESTART] = "dead-before-auto-restart", + [SERVICE_FAILED_BEFORE_AUTO_RESTART] = "failed-before-auto-restart", + [SERVICE_DEAD_RESOURCES_PINNED] = "dead-resources-pinned", + [SERVICE_AUTO_RESTART] = "auto-restart", + [SERVICE_AUTO_RESTART_QUEUED] = "auto-restart-queued", + [SERVICE_CLEANING] = "cleaning", +}; + +DEFINE_STRING_TABLE_LOOKUP(service_state, ServiceState); + +static const char* const slice_state_table[_SLICE_STATE_MAX] = { + [SLICE_DEAD] = "dead", + [SLICE_ACTIVE] = "active" +}; + +DEFINE_STRING_TABLE_LOOKUP(slice_state, SliceState); + +static const char* const socket_state_table[_SOCKET_STATE_MAX] = { + [SOCKET_DEAD] = "dead", + [SOCKET_START_PRE] = "start-pre", + [SOCKET_START_CHOWN] = "start-chown", + [SOCKET_START_POST] = "start-post", + [SOCKET_LISTENING] = "listening", + [SOCKET_RUNNING] = "running", + [SOCKET_STOP_PRE] = "stop-pre", + [SOCKET_STOP_PRE_SIGTERM] = "stop-pre-sigterm", + [SOCKET_STOP_PRE_SIGKILL] = "stop-pre-sigkill", + [SOCKET_STOP_POST] = "stop-post", + [SOCKET_FINAL_SIGTERM] = "final-sigterm", + [SOCKET_FINAL_SIGKILL] = "final-sigkill", + [SOCKET_FAILED] = "failed", + [SOCKET_CLEANING] = "cleaning", +}; + +DEFINE_STRING_TABLE_LOOKUP(socket_state, SocketState); + +static const char* const swap_state_table[_SWAP_STATE_MAX] = { + [SWAP_DEAD] = "dead", + [SWAP_ACTIVATING] = "activating", + [SWAP_ACTIVATING_DONE] = "activating-done", + [SWAP_ACTIVE] = "active", + [SWAP_DEACTIVATING] = "deactivating", + [SWAP_DEACTIVATING_SIGTERM] = "deactivating-sigterm", + [SWAP_DEACTIVATING_SIGKILL] = "deactivating-sigkill", + [SWAP_FAILED] = "failed", + [SWAP_CLEANING] = "cleaning", +}; + +DEFINE_STRING_TABLE_LOOKUP(swap_state, SwapState); + +static const char* const target_state_table[_TARGET_STATE_MAX] = { + [TARGET_DEAD] = "dead", + [TARGET_ACTIVE] = "active" +}; + +DEFINE_STRING_TABLE_LOOKUP(target_state, TargetState); + +static const char* const timer_state_table[_TIMER_STATE_MAX] = { + [TIMER_DEAD] = "dead", + [TIMER_WAITING] = "waiting", + [TIMER_RUNNING] = "running", + [TIMER_ELAPSED] = "elapsed", + [TIMER_FAILED] = "failed" +}; + +DEFINE_STRING_TABLE_LOOKUP(timer_state, TimerState); + +static const char* const freezer_state_table[_FREEZER_STATE_MAX] = { + [FREEZER_RUNNING] = "running", + [FREEZER_FREEZING] = "freezing", + [FREEZER_FROZEN] = "frozen", + [FREEZER_THAWING] = "thawing", +}; + +DEFINE_STRING_TABLE_LOOKUP(freezer_state, FreezerState); + +// ---------------------------------------------------------------------------- +// our code + +typedef struct UnitAttribute { + union { + int boolean; + char *str; + uint64_t uint64; + int64_t 
int64; + uint32_t uint32; + int32_t int32; + double dbl; + }; +} UnitAttribute; + +struct UnitInfo; +typedef void (*attribute_handler_t)(struct UnitInfo *u, UnitAttribute *ua); + +static void update_freezer_state(struct UnitInfo *u, UnitAttribute *ua); + +struct { + const char *member; + char value_type; + + const char *show_as; + const char *info; + RRDF_FIELD_OPTIONS options; + RRDF_FIELD_FILTER filter; + + attribute_handler_t handler; +} unit_attributes[] = { + { + .member = "Type", + .value_type = SD_BUS_TYPE_STRING, + .show_as = "ServiceType", + .info = "Service Type", + .options = RRDF_FIELD_OPTS_VISIBLE, + .filter = RRDF_FIELD_FILTER_MULTISELECT, + }, { + .member = "Result", + .value_type = SD_BUS_TYPE_STRING, + .show_as = "Result", + .info = "Result", + .options = RRDF_FIELD_OPTS_VISIBLE, + .filter = RRDF_FIELD_FILTER_MULTISELECT, + }, { + .member = "UnitFileState", + .value_type = SD_BUS_TYPE_STRING, + .show_as = "Enabled", + .info = "Unit File State", + .options = RRDF_FIELD_OPTS_NONE, + .filter = RRDF_FIELD_FILTER_MULTISELECT, + }, { + .member = "UnitFilePreset", + .value_type = SD_BUS_TYPE_STRING, + .show_as = "Preset", + .info = "Unit File Preset", + .options = RRDF_FIELD_OPTS_NONE, + .filter = RRDF_FIELD_FILTER_MULTISELECT, + }, { + .member = "FreezerState", + .value_type = SD_BUS_TYPE_STRING, + .show_as = "FreezerState", + .info = "Freezer State", + .options = RRDF_FIELD_OPTS_NONE, + .filter = RRDF_FIELD_FILTER_MULTISELECT, + .handler = update_freezer_state, + }, +// { .member = "Id", .signature = "s", }, +// { .member = "LoadState", .signature = "s", }, +// { .member = "ActiveState", .signature = "s", }, +// { .member = "SubState", .signature = "s", }, +// { .member = "Description", .signature = "s", }, +// { .member = "Following", .signature = "s", }, +// { .member = "Documentation", .signature = "as", }, +// { .member = "FragmentPath", .signature = "s", }, +// { .member = "SourcePath", .signature = "s", }, +// { .member = "ControlGroup", .signature = "s", }, +// { .member = "DropInPaths", .signature = "as", }, +// { .member = "LoadError", .signature = "(ss)", }, +// { .member = "TriggeredBy", .signature = "as", }, +// { .member = "Triggers", .signature = "as", }, +// { .member = "InactiveExitTimestamp", .signature = "t", }, +// { .member = "InactiveExitTimestampMonotonic", .signature = "t", }, +// { .member = "ActiveEnterTimestamp", .signature = "t", }, +// { .member = "ActiveExitTimestamp", .signature = "t", }, +// { .member = "RuntimeMaxUSec", .signature = "t", }, +// { .member = "InactiveEnterTimestamp", .signature = "t", }, +// { .member = "NeedDaemonReload", .signature = "b", }, +// { .member = "Transient", .signature = "b", }, +// { .member = "ExecMainPID", .signature = "u", }, +// { .member = "MainPID", .signature = "u", }, +// { .member = "ControlPID", .signature = "u", }, +// { .member = "StatusText", .signature = "s", }, +// { .member = "PIDFile", .signature = "s", }, +// { .member = "StatusErrno", .signature = "i", }, +// { .member = "FileDescriptorStoreMax", .signature = "u", }, +// { .member = "NFileDescriptorStore", .signature = "u", }, +// { .member = "ExecMainStartTimestamp", .signature = "t", }, +// { .member = "ExecMainExitTimestamp", .signature = "t", }, +// { .member = "ExecMainCode", .signature = "i", }, +// { .member = "ExecMainStatus", .signature = "i", }, +// { .member = "LogNamespace", .signature = "s", }, +// { .member = "ConditionTimestamp", .signature = "t", }, +// { .member = "ConditionResult", .signature = "b", }, +// { .member = 
"Conditions", .signature = "a(sbbsi)", }, +// { .member = "AssertTimestamp", .signature = "t", }, +// { .member = "AssertResult", .signature = "b", }, +// { .member = "Asserts", .signature = "a(sbbsi)", }, +// { .member = "NextElapseUSecRealtime", .signature = "t", }, +// { .member = "NextElapseUSecMonotonic", .signature = "t", }, +// { .member = "NAccepted", .signature = "u", }, +// { .member = "NConnections", .signature = "u", }, +// { .member = "NRefused", .signature = "u", }, +// { .member = "Accept", .signature = "b", }, +// { .member = "Listen", .signature = "a(ss)", }, +// { .member = "SysFSPath", .signature = "s", }, +// { .member = "Where", .signature = "s", }, +// { .member = "What", .signature = "s", }, +// { .member = "MemoryCurrent", .signature = "t", }, +// { .member = "MemoryAvailable", .signature = "t", }, +// { .member = "DefaultMemoryMin", .signature = "t", }, +// { .member = "DefaultMemoryLow", .signature = "t", }, +// { .member = "DefaultStartupMemoryLow", .signature = "t", }, +// { .member = "MemoryMin", .signature = "t", }, +// { .member = "MemoryLow", .signature = "t", }, +// { .member = "StartupMemoryLow", .signature = "t", }, +// { .member = "MemoryHigh", .signature = "t", }, +// { .member = "StartupMemoryHigh", .signature = "t", }, +// { .member = "MemoryMax", .signature = "t", }, +// { .member = "StartupMemoryMax", .signature = "t", }, +// { .member = "MemorySwapMax", .signature = "t", }, +// { .member = "StartupMemorySwapMax", .signature = "t", }, +// { .member = "MemoryZSwapMax", .signature = "t", }, +// { .member = "StartupMemoryZSwapMax", .signature = "t", }, +// { .member = "MemoryLimit", .signature = "t", }, +// { .member = "CPUUsageNSec", .signature = "t", }, +// { .member = "TasksCurrent", .signature = "t", }, +// { .member = "TasksMax", .signature = "t", }, +// { .member = "IPIngressBytes", .signature = "t", }, +// { .member = "IPEgressBytes", .signature = "t", }, +// { .member = "IOReadBytes", .signature = "t", }, +// { .member = "IOWriteBytes", .signature = "t", }, +// { .member = "ExecCondition", .signature = "a(sasbttttuii)", }, +// { .member = "ExecConditionEx", .signature = "a(sasasttttuii)", }, +// { .member = "ExecStartPre", .signature = "a(sasbttttuii)", }, +// { .member = "ExecStartPreEx", .signature = "a(sasasttttuii)", }, +// { .member = "ExecStart", .signature = "a(sasbttttuii)", }, +// { .member = "ExecStartEx", .signature = "a(sasasttttuii)", }, +// { .member = "ExecStartPost", .signature = "a(sasbttttuii)", }, +// { .member = "ExecStartPostEx", .signature = "a(sasasttttuii)", }, +// { .member = "ExecReload", .signature = "a(sasbttttuii)", }, +// { .member = "ExecReloadEx", .signature = "a(sasasttttuii)", }, +// { .member = "ExecStopPre", .signature = "a(sasbttttuii)", }, +// { .member = "ExecStop", .signature = "a(sasbttttuii)", }, +// { .member = "ExecStopEx", .signature = "a(sasasttttuii)", }, +// { .member = "ExecStopPost", .signature = "a(sasbttttuii)", }, +// { .member = "ExecStopPostEx", .signature = "a(sasasttttuii)", }, +}; + +#define _UNIT_ATTRIBUTE_MAX (sizeof(unit_attributes) / sizeof(unit_attributes[0])) + +typedef struct UnitInfo { + char *id; + char *type; + char *description; + char *load_state; + char *active_state; + char *sub_state; + char *following; + char *unit_path; + uint32_t job_id; + char *job_type; + char *job_path; + + UnitType UnitType; + UnitLoadState UnitLoadState; + UnitActiveState UnitActiveState; + FreezerState FreezerState; + + union { + AutomountState AutomountState; + DeviceState DeviceState; + 
MountState MountState; + PathState PathState; + ScopeState ScopeState; + ServiceState ServiceState; + SliceState SliceState; + SocketState SocketState; + SwapState SwapState; + TargetState TargetState; + TimerState TimerState; + }; + + struct UnitAttribute attributes[_UNIT_ATTRIBUTE_MAX]; + + FACET_ROW_SEVERITY severity; + uint32_t prio; + + struct UnitInfo *prev, *next; +} UnitInfo; + +static void update_freezer_state(UnitInfo *u, UnitAttribute *ua) { + u->FreezerState = freezer_state_from_string(ua->str); +} + +// ---------------------------------------------------------------------------- +// common helpers + +static void log_dbus_error(int r, const char *msg) { + netdata_log_error("SYSTEMD_UNITS: %s failed with error %d (%s)", msg, r, strerror(-r)); +} + +// ---------------------------------------------------------------------------- +// attributes management + +static inline ssize_t unit_property_slot_from_string(const char *s) { + if(!s || !*s) + return -EINVAL; + + for(size_t i = 0; i < _UNIT_ATTRIBUTE_MAX ;i++) + if(streq_ptr(unit_attributes[i].member, s)) + return (ssize_t)i; + + return -EINVAL; +} + +static inline const char *unit_property_name_to_string_from_slot(ssize_t i) { + if(i >= 0 && i < (ssize_t)_UNIT_ATTRIBUTE_MAX) + return unit_attributes[i].member; + + return NULL; +} + +static inline void systemd_unit_free_property(char type, struct UnitAttribute *at) { + switch(type) { + case SD_BUS_TYPE_STRING: + case SD_BUS_TYPE_OBJECT_PATH: + freez(at->str); + at->str = NULL; + break; + + default: + break; + } +} + +static int systemd_unit_get_property(sd_bus_message *m, UnitInfo *u, const char *name) { + int r; + char type; + + r = sd_bus_message_peek_type(m, &type, NULL); + if(r < 0) { + log_dbus_error(r, "sd_bus_message_peek_type()"); + return r; + } + + ssize_t slot = unit_property_slot_from_string(name); + if(slot < 0) { + // internal_error(true, "unused attribute '%s' for unit '%s'", name, u->id); + sd_bus_message_skip(m, NULL); + return 0; + } + + systemd_unit_free_property(unit_attributes[slot].value_type, &u->attributes[slot]); + + if(unit_attributes[slot].value_type != type) { + netdata_log_error("Type of field '%s' expected to be '%c' but found '%c'. 
Ignoring field.", + unit_attributes[slot].member, unit_attributes[slot].value_type, type); + sd_bus_message_skip(m, NULL); + return 0; + } + + switch (type) { + case SD_BUS_TYPE_OBJECT_PATH: + case SD_BUS_TYPE_STRING: { + char *s; + + r = sd_bus_message_read_basic(m, type, &s); + if(r < 0) { + log_dbus_error(r, "sd_bus_message_read_basic()"); + return r; + } + + if(s && *s) + u->attributes[slot].str = strdupz(s); + } + break; + + case SD_BUS_TYPE_BOOLEAN: { + r = sd_bus_message_read_basic(m, type, &u->attributes[slot].boolean); + if(r < 0) { + log_dbus_error(r, "sd_bus_message_read_basic()"); + return r; + } + } + break; + + case SD_BUS_TYPE_UINT64: { + r = sd_bus_message_read_basic(m, type, &u->attributes[slot].uint64); + if(r < 0) { + log_dbus_error(r, "sd_bus_message_read_basic()"); + return r; + } + } + break; + + case SD_BUS_TYPE_INT64: { + r = sd_bus_message_read_basic(m, type, &u->attributes[slot].int64); + if(r < 0) { + log_dbus_error(r, "sd_bus_message_read_basic()"); + return r; + } + } + break; + + case SD_BUS_TYPE_UINT32: { + r = sd_bus_message_read_basic(m, type, &u->attributes[slot].uint32); + if(r < 0) { + log_dbus_error(r, "sd_bus_message_read_basic()"); + return r; + } + } + break; + + case SD_BUS_TYPE_INT32: { + r = sd_bus_message_read_basic(m, type, &u->attributes[slot].int32); + if(r < 0) { + log_dbus_error(r, "sd_bus_message_read_basic()"); + return r; + } + } + break; + + case SD_BUS_TYPE_DOUBLE: { + r = sd_bus_message_read_basic(m, type, &u->attributes[slot].dbl); + if(r < 0) { + log_dbus_error(r, "sd_bus_message_read_basic()"); + return r; + } + } + break; + + case SD_BUS_TYPE_ARRAY: { + internal_error(true, "member '%s' is an array", name); + sd_bus_message_skip(m, NULL); + return 0; + } + break; + + default: { + internal_error(true, "unknown field type '%c' for key '%s'", type, name); + sd_bus_message_skip(m, NULL); + return 0; + } + break; + } + + if(unit_attributes[slot].handler) + unit_attributes[slot].handler(u, &u->attributes[slot]); + + return 0; +} + +static int systemd_unit_get_all_properties(sd_bus *bus, UnitInfo *u) { + _cleanup_(sd_bus_message_unrefp) sd_bus_message *m = NULL; + _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL; + int r; + + r = sd_bus_call_method(bus, + "org.freedesktop.systemd1", + u->unit_path, + "org.freedesktop.DBus.Properties", + "GetAll", + &error, + &m, + "s", ""); + if (r < 0) { + log_dbus_error(r, "sd_bus_call_method(p1)"); + return r; + } + + r = sd_bus_message_enter_container(m, SD_BUS_TYPE_ARRAY, "{sv}"); + if (r < 0) { + log_dbus_error(r, "sd_bus_message_enter_container(p2)"); + return r; + } + + int c = 0; + while ((r = sd_bus_message_enter_container(m, SD_BUS_TYPE_DICT_ENTRY, "sv")) > 0) { + const char *member, *contents; + c++; + + r = sd_bus_message_read_basic(m, SD_BUS_TYPE_STRING, &member); + if (r < 0) { + log_dbus_error(r, "sd_bus_message_read_basic(p3)"); + return r; + } + + r = sd_bus_message_peek_type(m, NULL, &contents); + if (r < 0) { + log_dbus_error(r, "sd_bus_message_peek_type(p4)"); + return r; + } + + r = sd_bus_message_enter_container(m, SD_BUS_TYPE_VARIANT, contents); + if (r < 0) { + log_dbus_error(r, "sd_bus_message_enter_container(p5)"); + return r; + } + + systemd_unit_get_property(m, u, member); + + r = sd_bus_message_exit_container(m); + if(r < 0) { + log_dbus_error(r, "sd_bus_message_exit_container(p6)"); + return r; + } + + r = sd_bus_message_exit_container(m); + if(r < 0) { + log_dbus_error(r, "sd_bus_message_exit_container(p7)"); + return r; + } + } + if(r < 0) { + 
log_dbus_error(r, "sd_bus_message_enter_container(p8)"); + return r; + } + + r = sd_bus_message_exit_container(m); + if(r < 0) { + log_dbus_error(r, "sd_bus_message_exit_container(p9)"); + return r; + } + + return 0; +} + +static void systemd_units_get_all_properties(sd_bus *bus, UnitInfo *base) { + for(UnitInfo *u = base ; u ;u = u->next) + systemd_unit_get_all_properties(bus, u); +} + + + +// ---------------------------------------------------------------------------- +// main unit info + +int bus_parse_unit_info(sd_bus_message *message, UnitInfo *u) { + assert(message); + assert(u); + + u->type = NULL; + + int r = sd_bus_message_read( + message, + SYSTEMD_UNITS_DBUS_TYPES, + &u->id, + &u->description, + &u->load_state, + &u->active_state, + &u->sub_state, + &u->following, + &u->unit_path, + &u->job_id, + &u->job_type, + &u->job_path); + + if(r <= 0) + return r; + + char *dot; + if(u->id && (dot = strrchr(u->id, '.')) != NULL) + u->type = &dot[1]; + else + u->type = "unknown"; + + u->UnitType = unit_type_from_string(u->type); + u->UnitLoadState = unit_load_state_from_string(u->load_state); + u->UnitActiveState = unit_active_state_from_string(u->active_state); + + switch(u->UnitType) { + case UNIT_SERVICE: + u->ServiceState = service_state_from_string(u->sub_state); + break; + + case UNIT_MOUNT: + u->MountState = mount_state_from_string(u->sub_state); + break; + + case UNIT_SWAP: + u->SwapState = swap_state_from_string(u->sub_state); + break; + + case UNIT_SOCKET: + u->SocketState = socket_state_from_string(u->sub_state); + break; + + case UNIT_TARGET: + u->TargetState = target_state_from_string(u->sub_state); + break; + + case UNIT_DEVICE: + u->DeviceState = device_state_from_string(u->sub_state); + break; + + case UNIT_AUTOMOUNT: + u->AutomountState = automount_state_from_string(u->sub_state); + break; + + case UNIT_TIMER: + u->TimerState = timer_state_from_string(u->sub_state); + break; + + case UNIT_PATH: + u->PathState = path_state_from_string(u->sub_state); + break; + + case UNIT_SLICE: + u->SliceState = slice_state_from_string(u->sub_state); + break; + + case UNIT_SCOPE: + u->ScopeState = scope_state_from_string(u->sub_state); + break; + + default: + break; + } + + return r; +} + +static int hex_to_int(char c) { + if (c >= '0' && c <= '9') return c - '0'; + if (c >= 'a' && c <= 'f') return c - 'a' + 10; + if (c >= 'A' && c <= 'F') return c - 'A' + 10; + return 0; +} + +// un-escape hex sequences (\xNN) in id +static void txt_decode(char *txt) { + if(!txt || !*txt) + return; + + char *src = txt, *dst = txt; + + size_t id_len = strlen(src); + size_t s = 0, d = 0; + for(; s < id_len ; s++) { + if(src[s] == '\\' && src[s + 1] == 'x' && isxdigit(src[s + 2]) && isxdigit(src[s + 3])) { + int value = (hex_to_int(src[s + 2]) << 4) + hex_to_int(src[s + 3]); + dst[d++] = (char)value; + s += 3; + } + else + dst[d++] = src[s]; + } + dst[d] = '\0'; +} + +static UnitInfo *systemd_units_get_all(void) { + _cleanup_(sd_bus_unrefp) sd_bus *bus = NULL; + _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL; + _cleanup_(sd_bus_message_unrefp) sd_bus_message *reply = NULL; + + UnitInfo *base = NULL; + int r; + + r = sd_bus_default_system(&bus); + if (r < 0) { + log_dbus_error(r, "sd_bus_default_system()"); + return base; + } + + // This calls the ListUnits method of the org.freedesktop.systemd1.Manager interface + // Replace "ListUnits" with "ListUnitsFiltered" to get specific units based on filters + r = sd_bus_call_method(bus, + "org.freedesktop.systemd1", /* service to contact */ + 
"/org/freedesktop/systemd1", /* object path */ + "org.freedesktop.systemd1.Manager", /* interface name */ + "ListUnits", /* method name */ + &error, /* object to return error in */ + &reply, /* return message on success */ + NULL); /* input signature */ + if (r < 0) { + log_dbus_error(r, "sd_bus_call_method()"); + return base; + } + + r = sd_bus_message_enter_container(reply, SD_BUS_TYPE_ARRAY, SYSTEMD_UNITS_DBUS_TYPES); + if (r < 0) { + log_dbus_error(r, "sd_bus_message_enter_container()"); + return base; + } + + UnitInfo u; + memset(&u, 0, sizeof(u)); + while ((r = bus_parse_unit_info(reply, &u)) > 0) { + UnitInfo *i = callocz(1, sizeof(u)); + *i = u; + + i->id = strdupz(u.id && *u.id ? u.id : "-"); + txt_decode(i->id); + + i->type = strdupz(u.type && *u.type ? u.type : "-"); + i->description = strdupz(u.description && *u.description ? u.description : "-"); + txt_decode(i->description); + + i->load_state = strdupz(u.load_state && *u.load_state ? u.load_state : "-"); + i->active_state = strdupz(u.active_state && *u.active_state ? u.active_state : "-"); + i->sub_state = strdupz(u.sub_state && *u.sub_state ? u.sub_state : "-"); + i->following = strdupz(u.following && *u.following ? u.following : "-"); + i->unit_path = strdupz(u.unit_path && *u.unit_path ? u.unit_path : "-"); + i->job_type = strdupz(u.job_type && *u.job_type ? u.job_type : "-"); + i->job_path = strdupz(u.job_path && *u.job_path ? u.job_path : "-"); + i->job_id = u.job_id; + + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(base, i, prev, next); + memset(&u, 0, sizeof(u)); + } + if (r < 0) { + log_dbus_error(r, "sd_bus_message_read()"); + return base; + } + + r = sd_bus_message_exit_container(reply); + if (r < 0) { + log_dbus_error(r, "sd_bus_message_exit_container()"); + return base; + } + + systemd_units_get_all_properties(bus, base); + + return base; +} + +void systemd_units_free_all(UnitInfo *base) { + while(base) { + UnitInfo *u = base; + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(base, u, prev, next); + freez((void *)u->id); + freez((void *)u->type); + freez((void *)u->description); + freez((void *)u->load_state); + freez((void *)u->active_state); + freez((void *)u->sub_state); + freez((void *)u->following); + freez((void *)u->unit_path); + freez((void *)u->job_type); + freez((void *)u->job_path); + + for(int i = 0; i < (ssize_t)_UNIT_ATTRIBUTE_MAX ;i++) + systemd_unit_free_property(unit_attributes[i].value_type, &u->attributes[i]); + + freez(u); + } +} + +// ---------------------------------------------------------------------------- + +static void netdata_systemd_units_function_help(const char *transaction) { + BUFFER *wb = buffer_create(0, NULL); + buffer_sprintf(wb, + "%s / %s\n" + "\n" + "%s\n" + "\n" + "The following parameters are supported:\n" + "\n" + " help\n" + " Shows this help message.\n" + "\n" + " info\n" + " Request initial configuration information about the plugin.\n" + " The key entity returned is the required_params array, which includes\n" + " all the available systemd journal sources.\n" + " When `info` is requested, all other parameters are ignored.\n" + "\n" + , program_name + , SYSTEMD_UNITS_FUNCTION_NAME + , SYSTEMD_UNITS_FUNCTION_DESCRIPTION + ); + + netdata_mutex_lock(&stdout_mutex); + pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb); + netdata_mutex_unlock(&stdout_mutex); + + buffer_free(wb); +} + +static void netdata_systemd_units_function_info(const char *transaction) { + BUFFER *wb = buffer_create(0, NULL); + buffer_json_initialize(wb, "\"", 
"\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + + buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_string(wb, "type", "table"); + buffer_json_member_add_string(wb, "help", SYSTEMD_UNITS_FUNCTION_DESCRIPTION); + + buffer_json_finalize(wb); + netdata_mutex_lock(&stdout_mutex); + pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb); + netdata_mutex_unlock(&stdout_mutex); + + buffer_free(wb); +} + +// ---------------------------------------------------------------------------- + +static void systemd_unit_priority(UnitInfo *u, size_t units) { + uint32_t prio; + + switch(u->severity) { + case FACET_ROW_SEVERITY_CRITICAL: + prio = 0; + break; + + default: + case FACET_ROW_SEVERITY_WARNING: + prio = 1; + break; + + case FACET_ROW_SEVERITY_NOTICE: + prio = 2; + break; + + case FACET_ROW_SEVERITY_NORMAL: + prio = 3; + break; + + case FACET_ROW_SEVERITY_DEBUG: + prio = 4; + break; + } + + prio = prio * (uint32_t)(_UNIT_TYPE_MAX + 1) + (uint32_t)u->UnitType; + u->prio = (prio * units) + u->prio; +} + +#define if_less(current, max, target) ({ \ + typeof(current) _wanted = (current); \ + if((current) < (target)) \ + _wanted = (target) > (max) ? (max) : (target); \ + _wanted; \ +}) + +#define if_normal(current, max, target) ({ \ + typeof(current) _wanted = (current); \ + if((current) == FACET_ROW_SEVERITY_NORMAL) \ + _wanted = (target) > (max) ? (max) : (target); \ + _wanted; \ +}) + +FACET_ROW_SEVERITY system_unit_severity(UnitInfo *u) { + FACET_ROW_SEVERITY severity, max_severity; + + switch(u->UnitLoadState) { + case UNIT_ERROR: + case UNIT_BAD_SETTING: + severity = FACET_ROW_SEVERITY_CRITICAL; + max_severity = FACET_ROW_SEVERITY_CRITICAL; + break; + + default: + severity = FACET_ROW_SEVERITY_WARNING; + max_severity = FACET_ROW_SEVERITY_CRITICAL; + break; + + case UNIT_NOT_FOUND: + severity = FACET_ROW_SEVERITY_NOTICE; + max_severity = FACET_ROW_SEVERITY_NOTICE; + break; + + case UNIT_LOADED: + severity = FACET_ROW_SEVERITY_NORMAL; + max_severity = FACET_ROW_SEVERITY_CRITICAL; + break; + + case UNIT_MERGED: + case UNIT_MASKED: + case UNIT_STUB: + severity = FACET_ROW_SEVERITY_DEBUG; + max_severity = FACET_ROW_SEVERITY_DEBUG; + break; + } + + switch(u->UnitActiveState) { + case UNIT_FAILED: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL); + break; + + default: + case UNIT_RELOADING: + case UNIT_ACTIVATING: + case UNIT_DEACTIVATING: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING); + break; + + case UNIT_MAINTENANCE: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_NOTICE); + break; + + case UNIT_ACTIVE: + break; + + case UNIT_INACTIVE: + severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG); + break; + } + + switch(u->FreezerState) { + default: + case FREEZER_FROZEN: + case FREEZER_FREEZING: + case FREEZER_THAWING: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING); + break; + + case FREEZER_RUNNING: + break; + } + + switch(u->UnitType) { + case UNIT_SERVICE: + switch(u->ServiceState) { + case SERVICE_FAILED: + case SERVICE_FAILED_BEFORE_AUTO_RESTART: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL); + break; + + default: + case SERVICE_STOP: + case SERVICE_STOP_WATCHDOG: + case SERVICE_STOP_SIGTERM: + case SERVICE_STOP_SIGKILL: + case SERVICE_STOP_POST: + case SERVICE_FINAL_WATCHDOG: + case SERVICE_FINAL_SIGTERM: + case SERVICE_FINAL_SIGKILL: + case SERVICE_AUTO_RESTART: + case 
SERVICE_AUTO_RESTART_QUEUED: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING); + break; + + case SERVICE_CONDITION: + case SERVICE_START_PRE: + case SERVICE_START: + case SERVICE_START_POST: + case SERVICE_RELOAD: + case SERVICE_RELOAD_SIGNAL: + case SERVICE_RELOAD_NOTIFY: + case SERVICE_DEAD_RESOURCES_PINNED: + case SERVICE_CLEANING: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_NOTICE); + break; + + case SERVICE_EXITED: + case SERVICE_RUNNING: + break; + + case SERVICE_DEAD: + case SERVICE_DEAD_BEFORE_AUTO_RESTART: + severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG); + break; + } + break; + + case UNIT_MOUNT: + switch(u->MountState) { + case MOUNT_FAILED: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL); + break; + + default: + case MOUNT_REMOUNTING_SIGTERM: + case MOUNT_REMOUNTING_SIGKILL: + case MOUNT_UNMOUNTING_SIGTERM: + case MOUNT_UNMOUNTING_SIGKILL: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING); + break; + + case MOUNT_MOUNTING: + case MOUNT_MOUNTING_DONE: + case MOUNT_REMOUNTING: + case MOUNT_UNMOUNTING: + case MOUNT_CLEANING: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_NOTICE); + break; + + case MOUNT_MOUNTED: + break; + + case MOUNT_DEAD: + severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG); + break; + } + break; + + case UNIT_SWAP: + switch(u->SwapState) { + case SWAP_FAILED: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL); + break; + + default: + case SWAP_DEACTIVATING_SIGTERM: + case SWAP_DEACTIVATING_SIGKILL: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING); + break; + + case SWAP_ACTIVATING: + case SWAP_ACTIVATING_DONE: + case SWAP_DEACTIVATING: + case SWAP_CLEANING: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_NOTICE); + break; + + case SWAP_ACTIVE: + break; + + case SWAP_DEAD: + severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG); + break; + } + break; + + case UNIT_SOCKET: + switch(u->SocketState) { + case SOCKET_FAILED: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL); + break; + + default: + case SOCKET_STOP_PRE_SIGTERM: + case SOCKET_STOP_PRE_SIGKILL: + case SOCKET_FINAL_SIGTERM: + case SOCKET_FINAL_SIGKILL: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING); + break; + + case SOCKET_START_PRE: + case SOCKET_START_CHOWN: + case SOCKET_START_POST: + case SOCKET_STOP_PRE: + case SOCKET_STOP_POST: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_NOTICE); + break; + + case SOCKET_RUNNING: + case SOCKET_LISTENING: + break; + + case SOCKET_DEAD: + severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG); + break; + } + break; + + case UNIT_TARGET: + switch(u->TargetState) { + default: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING); + break; + + case TARGET_ACTIVE: + break; + + case TARGET_DEAD: + severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG); + break; + } + break; + + case UNIT_DEVICE: + switch(u->DeviceState) { + default: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING); + break; + + case DEVICE_TENTATIVE: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_NOTICE); + break; + + case DEVICE_PLUGGED: + break; + + case DEVICE_DEAD: + severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG); + break; + } + break; + + case UNIT_AUTOMOUNT: + 
switch(u->AutomountState) { + case AUTOMOUNT_FAILED: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL); + break; + + default: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING); + break; + + case AUTOMOUNT_WAITING: + case AUTOMOUNT_RUNNING: + break; + + case AUTOMOUNT_DEAD: + severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG); + break; + } + break; + + case UNIT_TIMER: + switch(u->TimerState) { + case TIMER_FAILED: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL); + break; + + default: + case TIMER_ELAPSED: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING); + break; + + case TIMER_WAITING: + case TIMER_RUNNING: + break; + + case TIMER_DEAD: + severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG); + break; + } + break; + + case UNIT_PATH: + switch(u->PathState) { + case PATH_FAILED: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL); + break; + + default: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING); + break; + + case PATH_WAITING: + case PATH_RUNNING: + break; + + case PATH_DEAD: + severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG); + break; + } + break; + + case UNIT_SLICE: + switch(u->SliceState) { + default: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING); + break; + + case SLICE_ACTIVE: + break; + + case SLICE_DEAD: + severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG); + break; + } + break; + + case UNIT_SCOPE: + switch(u->ScopeState) { + case SCOPE_FAILED: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL); + break; + + default: + case SCOPE_STOP_SIGTERM: + case SCOPE_STOP_SIGKILL: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING); + break; + + case SCOPE_ABANDONED: + case SCOPE_START_CHOWN: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_NOTICE); + break; + + case SCOPE_RUNNING: + break; + + case SCOPE_DEAD: + severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG); + break; + } + break; + + default: + severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING); + break; + } + + u->severity = severity; + return severity; +} + +int unit_info_compar(const void *a, const void *b) { + UnitInfo *u1 = *((UnitInfo **)a); + UnitInfo *u2 = *((UnitInfo **)b); + + return strcasecmp(u1->id, u2->id); +} + +void systemd_units_assign_priority(UnitInfo *base) { + size_t units = 0, c = 0, prio = 0; + for(UnitInfo *u = base; u ; u = u->next) + units++; + + UnitInfo *array[units]; + for(UnitInfo *u = base; u ; u = u->next) + array[c++] = u; + + qsort(array, units, sizeof(UnitInfo *), unit_info_compar); + + for(c = 0; c < units ; c++) { + array[c]->prio = prio++; + system_unit_severity(array[c]); + systemd_unit_priority(array[c], units); + } +} + +void function_systemd_units(const char *transaction, char *function, int timeout, bool *cancelled) { + char *words[SYSTEMD_UNITS_MAX_PARAMS] = { NULL }; + size_t num_words = quoted_strings_splitter_pluginsd(function, words, SYSTEMD_UNITS_MAX_PARAMS); + for(int i = 1; i < SYSTEMD_UNITS_MAX_PARAMS ;i++) { + char *keyword = get_word(words, num_words, i); + if(!keyword) break; + + if(strcmp(keyword, "info") == 0) { + netdata_systemd_units_function_info(transaction); + return; + } + else if(strcmp(keyword, "help") == 0) { + netdata_systemd_units_function_help(transaction); + return; + } + } + + UnitInfo *base = systemd_units_get_all(); 
+ systemd_units_assign_priority(base); + + BUFFER *wb = buffer_create(0, NULL); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + + buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_string(wb, "type", "table"); + buffer_json_member_add_time_t(wb, "update_every", 10); + buffer_json_member_add_string(wb, "help", SYSTEMD_UNITS_FUNCTION_DESCRIPTION); + buffer_json_member_add_array(wb, "data"); + + size_t count[_UNIT_ATTRIBUTE_MAX] = { 0 }; + struct UnitAttribute max[_UNIT_ATTRIBUTE_MAX]; + + for(UnitInfo *u = base; u ;u = u->next) { + buffer_json_add_array_item_array(wb); + { + buffer_json_add_array_item_string(wb, u->id); + + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_string(wb, "severity", facets_severity_to_string(u->severity)); + } + buffer_json_object_close(wb); + + buffer_json_add_array_item_string(wb, u->type); + buffer_json_add_array_item_string(wb, u->description); + buffer_json_add_array_item_string(wb, u->load_state); + buffer_json_add_array_item_string(wb, u->active_state); + buffer_json_add_array_item_string(wb, u->sub_state); + buffer_json_add_array_item_string(wb, u->following); + buffer_json_add_array_item_string(wb, u->unit_path); + buffer_json_add_array_item_uint64(wb, u->job_id); + buffer_json_add_array_item_string(wb, u->job_type); + buffer_json_add_array_item_string(wb, u->job_path); + + for(ssize_t i = 0; i < (ssize_t)_UNIT_ATTRIBUTE_MAX ;i++) { + switch(unit_attributes[i].value_type) { + case SD_BUS_TYPE_OBJECT_PATH: + case SD_BUS_TYPE_STRING: + buffer_json_add_array_item_string(wb, u->attributes[i].str && *u->attributes[i].str ? u->attributes[i].str : "-"); + break; + + case SD_BUS_TYPE_UINT64: + buffer_json_add_array_item_uint64(wb, u->attributes[i].uint64); + if(!count[i]++) max[i].uint64 = 0; + max[i].uint64 = MAX(max[i].uint64, u->attributes[i].uint64); + break; + + case SD_BUS_TYPE_UINT32: + buffer_json_add_array_item_uint64(wb, u->attributes[i].uint32); + if(!count[i]++) max[i].uint32 = 0; + max[i].uint32 = MAX(max[i].uint32, u->attributes[i].uint32); + break; + + case SD_BUS_TYPE_INT64: + buffer_json_add_array_item_uint64(wb, u->attributes[i].int64); + if(!count[i]++) max[i].uint64 = 0; + max[i].int64 = MAX(max[i].int64, u->attributes[i].int64); + break; + + case SD_BUS_TYPE_INT32: + buffer_json_add_array_item_uint64(wb, u->attributes[i].int32); + if(!count[i]++) max[i].int32 = 0; + max[i].int32 = MAX(max[i].int32, u->attributes[i].int32); + break; + + case SD_BUS_TYPE_DOUBLE: + buffer_json_add_array_item_double(wb, u->attributes[i].dbl); + if(!count[i]++) max[i].dbl = 0.0; + max[i].dbl = MAX(max[i].dbl, u->attributes[i].dbl); + break; + + case SD_BUS_TYPE_BOOLEAN: + buffer_json_add_array_item_boolean(wb, u->attributes[i].boolean); + break; + + default: + break; + } + } + + buffer_json_add_array_item_uint64(wb, u->prio); + buffer_json_add_array_item_uint64(wb, 1); // count + } + buffer_json_array_close(wb); + } + + buffer_json_array_close(wb); // data + + buffer_json_member_add_object(wb, "columns"); + { + size_t field_id = 0; + + buffer_rrdf_table_add_field(wb, field_id++, "id", "Unit ID", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_WRAP | RRDF_FIELD_OPTS_FULL_WIDTH, + NULL); + + buffer_rrdf_table_add_field( + wb, field_id++, + "rowOptions", "rowOptions", + 
RRDF_FIELD_TYPE_NONE, + RRDR_FIELD_VISUAL_ROW_OPTIONS, + RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, + RRDF_FIELD_SORT_FIXED, + NULL, + RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_DUMMY, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "type", "Unit Type", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_EXPANDED_FILTER, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "description", "Unit Description", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_WRAP | RRDF_FIELD_OPTS_FULL_WIDTH, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "loadState", "Unit Load State", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_EXPANDED_FILTER, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "activeState", "Unit Active State", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_EXPANDED_FILTER, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "subState", "Unit Sub State", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_EXPANDED_FILTER, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "following", "Unit Following", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_WRAP, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "path", "Unit Path", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_WRAP | RRDF_FIELD_OPTS_FULL_WIDTH, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "jobId", "Unit Job ID", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_NONE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "jobType", "Unit Job Type", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_NONE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "jobPath", "Unit Job Path", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_WRAP | RRDF_FIELD_OPTS_FULL_WIDTH, + NULL); + + for(ssize_t i = 0; i < (ssize_t)_UNIT_ATTRIBUTE_MAX ;i++) { + char key[256], name[256]; + + if(unit_attributes[i].show_as) + snprintfz(key, 
sizeof(key), "%s", unit_attributes[i].show_as); + else + snprintfz(key, sizeof(key), "attribute%s", unit_property_name_to_string_from_slot(i)); + + if(unit_attributes[i].info) + snprintfz(name, sizeof(name), "%s", unit_attributes[i].info); + else + snprintfz(name, sizeof(name), "Attribute %s", unit_property_name_to_string_from_slot(i)); + + RRDF_FIELD_OPTIONS options = unit_attributes[i].options; + RRDF_FIELD_FILTER filter = unit_attributes[i].filter; + + switch(unit_attributes[i].value_type) { + case SD_BUS_TYPE_OBJECT_PATH: + case SD_BUS_TYPE_STRING: + buffer_rrdf_table_add_field(wb, field_id++, key, name, + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, filter, + RRDF_FIELD_OPTS_WRAP | options, + NULL); + break; + + case SD_BUS_TYPE_INT32: + case SD_BUS_TYPE_UINT32: + case SD_BUS_TYPE_INT64: + case SD_BUS_TYPE_UINT64: { + double m; + if(unit_attributes[i].value_type == SD_BUS_TYPE_UINT64) + m = (double)max[i].uint64; + else if(unit_attributes[i].value_type == SD_BUS_TYPE_INT64) + m = (double)max[i].int64; + else if(unit_attributes[i].value_type == SD_BUS_TYPE_UINT32) + m = (double)max[i].uint32; + else if(unit_attributes[i].value_type == SD_BUS_TYPE_INT32) + m = (double)max[i].int32; + + buffer_rrdf_table_add_field(wb, field_id++, key, name, + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, m, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, filter, + RRDF_FIELD_OPTS_WRAP | options, + NULL); + } + break; + + case SD_BUS_TYPE_DOUBLE: + buffer_rrdf_table_add_field(wb, field_id++, key, name, + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 2, NULL, max[i].dbl, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, filter, + RRDF_FIELD_OPTS_WRAP | options, + NULL); + break; + + case SD_BUS_TYPE_BOOLEAN: + buffer_rrdf_table_add_field(wb, field_id++, key, name, + RRDF_FIELD_TYPE_BOOLEAN, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, filter, + RRDF_FIELD_OPTS_WRAP | options, + NULL); + break; + + default: + break; + } + + } + + buffer_rrdf_table_add_field(wb, field_id++, "priority", "Priority", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_NONE, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "count", "Count", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE, + RRDF_FIELD_OPTS_NONE, + NULL); + } + + buffer_json_object_close(wb); // columns + buffer_json_member_add_string(wb, "default_sort_column", "priority"); + + buffer_json_member_add_object(wb, "charts"); + { + buffer_json_member_add_object(wb, "count"); + { + buffer_json_member_add_string(wb, "name", "count"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "count"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); // charts + + buffer_json_member_add_array(wb, "default_charts"); + { + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "count"); + buffer_json_add_array_item_string(wb, "activeState"); 
+ buffer_json_array_close(wb); + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "count"); + buffer_json_add_array_item_string(wb, "subState"); + buffer_json_array_close(wb); + } + buffer_json_array_close(wb); + + buffer_json_member_add_object(wb, "group_by"); + { + buffer_json_member_add_object(wb, "type"); + { + buffer_json_member_add_string(wb, "name", "Top Down Tree"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "type"); + buffer_json_add_array_item_string(wb, "loadState"); + buffer_json_add_array_item_string(wb, "activeState"); + buffer_json_add_array_item_string(wb, "subState"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "subState"); + { + buffer_json_member_add_string(wb, "name", "Bottom Up Tree"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "subState"); + buffer_json_add_array_item_string(wb, "activeState"); + buffer_json_add_array_item_string(wb, "loadState"); + buffer_json_add_array_item_string(wb, "type"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); // group_by + + buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1); + buffer_json_finalize(wb); + + netdata_mutex_lock(&stdout_mutex); + pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", now_realtime_sec() + 1, wb); + netdata_mutex_unlock(&stdout_mutex); + + buffer_free(wb); + systemd_units_free_all(base); +} + +#endif // ENABLE_SYSTEMD_DBUS diff --git a/collectors/tc.plugin/README.md b/collectors/tc.plugin/README.md deleted file mode 100644 index de5fd4743ceee6..00000000000000 --- a/collectors/tc.plugin/README.md +++ /dev/null @@ -1,209 +0,0 @@ - - -# tc.plugin - -Live demo - **[see it in action here](https://registry.my-netdata.io/#menu_tc)** ! - -![qos](https://cloud.githubusercontent.com/assets/2662304/14439411/b7f36254-0033-11e6-93f0-c739bb6a1c3a.gif) - -Netdata monitors `tc` QoS classes for all interfaces. - -If you also use [FireQOS](http://firehol.org/tutorial/fireqos-new-user/) it will collect interface and class names. - -There is a [shell helper](https://raw.githubusercontent.com/netdata/netdata/master/collectors/tc.plugin/tc-qos-helper.sh.in) for this (all parsing is done by the plugin in `C` code - this shell script is just a configuration for the command to run to get `tc` output). - -The source of the tc plugin is [here](https://raw.githubusercontent.com/netdata/netdata/master/collectors/tc.plugin/plugin_tc.c). It is somewhat complex, because a state machine was needed to keep track of all the `tc` classes, including the pseudo classes tc dynamically creates. - -## Motivation - -One category of metrics missing in Linux monitoring, is bandwidth consumption for each open socket (inbound and outbound traffic). So, you cannot tell how much bandwidth your web server, your database server, your backup, your ssh sessions, etc are using. - -To solve this problem, the most *adventurous* Linux monitoring tools install kernel modules to capture all traffic, analyze it and provide reports per application. A lot of work, CPU intensive and with a great degree of risk (due to the kernel modules involved which might affect the stability of the whole system). Not to mention that such solutions are probably better suited for a core linux router in your network. 
- -Others use NFACCT, the netfilter accounting module which is already part of the Linux firewall. However, this would require configuring a firewall on every system you want to measure bandwidth (just FYI, I do install a firewall on every server - and I strongly advise you to do so too - but configuring accounting on all servers seems overkill when you don't really need it for billing purposes). - -**There is however a much simpler approach**. - -## QoS - -One of the features the Linux kernel has, but it is rarely used, is its ability to **apply QoS on traffic**. Even most interesting is that it can apply QoS to **both inbound and outbound traffic**. - -QoS is about 2 features: - -1. **Classify traffic** - - Classification is the process of organizing traffic in groups, called **classes**. Classification can evaluate every aspect of network packets, like source and destination ports, source and destination IPs, netfilter marks, etc. - - When you classify traffic, you just assign a label to it. Of course classes have some properties themselves (like queuing mechanisms), but let's say it is that simple: **a label**. For example **I call `web server` traffic, the traffic from my server's tcp/80, tcp/443 and to my server's tcp/80, tcp/443, while I call `web surfing` all other tcp/80 and tcp/443 traffic**. You can use any combinations you like. There is no limit. - -2. **Apply traffic shaping rules to these classes** - - Traffic shaping is used to control how network interface bandwidth should be shared among the classes. Normally, you need to do this, when there is not enough bandwidth to satisfy all the demand, or when you want to control the supply of bandwidth to certain services. Of course classification is sufficient for monitoring traffic, but traffic shaping is also quite important, as we will explain in the next section. - -## Why you want QoS - -1. **Monitoring the bandwidth used by services** - - Netdata provides wonderful real-time charts, like this one (wait to see the orange `rsync` part): - - ![qos3](https://cloud.githubusercontent.com/assets/2662304/14474189/713ede84-0104-11e6-8c9c-8dca5c2abd63.gif) - -2. **Ensure sensitive administrative tasks will not starve for bandwidth** - - Have you tried to ssh to a server when the network is congested? If you have, you already know it does not work very well. QoS can guarantee that services like ssh, dns, ntp, etc will always have a small supply of bandwidth. So, no matter what happens, you will be able to ssh to your server and DNS will always work. - -3. **Ensure administrative tasks will not monopolize all the bandwidth** - - Services like backups, file copies, database dumps, etc can easily monopolize all the available bandwidth. It is common for example a nightly backup, or a huge file transfer to negatively influence the end-user experience. QoS can fix that. - -4. **Ensure each end-user connection will get a fair cut of the available bandwidth.** - - Several QoS queuing disciplines in Linux do this automatically, without any configuration from you. The result is that new sockets are favored over older ones, so that users will get a snappier experience, while others are transferring large amounts of traffic. - -5. **Protect the servers from DDoS attacks.** - - When your system is under a DDoS attack, it will get a lot more bandwidth compared to the one it can handle and probably your applications will crash. 
Setting a limit on the inbound traffic using QoS, will protect your servers (throttle the requests) and depending on the size of the attack may allow your legitimate users to access the server, while the attack is taking place. - - Using QoS together with a [SYNPROXY](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md) will provide a great degree of protection against most DDoS attacks. Actually when I wrote that article, a few folks tried to DDoS the Netdata demo site to see in real-time the SYNPROXY operation. They did not do it right, but anyway a great deal of requests reached the Netdata server. What saved Netdata was QoS. The Netdata demo server has QoS installed, so the requests were throttled and the server did not even reach the point of resource starvation. Read about it [here](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md). - -On top of all these, QoS is extremely light. You will configure it once, and this is it. It will not bother you again and it will not use any noticeable CPU resources, especially on application and database servers. - -``` -- ensure administrative tasks (like ssh, dns, etc) will always have a small but guaranteed bandwidth. So, no matter what happens, I will be able to ssh to my server and DNS will work. - -- ensure other administrative tasks will not monopolize all the available bandwidth. So, my nightly backup will not hurt my users, a developer that is copying files over the net will not get all the available bandwidth, etc. - -- ensure each end-user connection will get a fair cut of the available bandwidth. -``` - -Once **traffic classification** is applied, we can use **[netdata](https://github.com/netdata/netdata)** to visualize the bandwidth consumption per class in real-time (no configuration is needed for Netdata - it will figure it out). - -QoS, is extremely light. You will configure it once, and this is it. It will not bother you again and it will not use any noticeable CPU resources, especially on application and database servers. - -This is QoS from a home linux router. Check these features: - -1. It is real-time (per second updates) -2. QoS really works in Linux - check that the `background` traffic is squeezed when `surfing` needs it. - -![test2](https://cloud.githubusercontent.com/assets/2662304/14093004/68966020-f553-11e5-98fe-ffee2086fafd.gif) - ---- - -## QoS in Linux? - -Of course, `tc` is probably **the most undocumented, complicated and unfriendly** command in Linux. - -For example, do you know that for matching a simple port range in `tc`, e.g. all the high ports, from 1025 to 65535 inclusive, you have to match these: - -``` -1025/0xffff -1026/0xfffe -1028/0xfffc -1032/0xfff8 -1040/0xfff0 -1056/0xffe0 -1088/0xffc0 -1152/0xff80 -1280/0xff00 -1536/0xfe00 -2048/0xf800 -4096/0xf000 -8192/0xe000 -16384/0xc000 -32768/0x8000 -``` - -To do it the hard way, you can go through the [tc configuration steps](#qos-configuration-with-tc). An easier way is to use **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, a tool that simplifies QoS management in Linux. - -## Qos Configuration with FireHOL - -The **[FireHOL](https://firehol.org/)** package already distributes **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**. Check the **[FireQOS tutorial](https://firehol.org/tutorial/fireqos-new-user/)** to learn how to write your own QoS configuration. 
- -With **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, it is **really simple for everyone to use QoS in Linux**. Just install the package `firehol`. It should already be available for your distribution. If not, check the **[FireHOL Installation Guide](https://firehol.org/installing/)**. After that, you will have the `fireqos` command which uses a configuration like the following `/etc/firehol/fireqos.conf`, used at the Netdata demo site: - -```sh - # configure the Netdata ports - server_netdata_ports="tcp/19999" - - interface eth0 world bidirectional ethernet balanced rate 50Mbit - class arp - match arp - - class icmp - match icmp - - class dns commit 1Mbit - server dns - client dns - - class ntp - server ntp - client ntp - - class ssh commit 2Mbit - server ssh - client ssh - - class rsync commit 2Mbit max 10Mbit - server rsync - client rsync - - class web_server commit 40Mbit - server http - server netdata - - class client - client surfing - - class nms commit 1Mbit - match input src 10.2.3.5 -``` - -Nothing more is needed. You just run `fireqos start` to apply this configuration, restart Netdata and you have real-time visualization of the bandwidth consumption of your applications. FireQOS is not a daemon. It will just convert the configuration to `tc` commands. It will run them and it will exit. - -**IMPORTANT**: If you copy this configuration to apply it to your system, please adapt the speeds - experiment in non-production environments to learn the tool, before applying it on your servers. - -And this is what you are going to get: - -![image](https://cloud.githubusercontent.com/assets/2662304/14436322/c91d90a4-0024-11e6-9fb1-57cdef1580df.png) - -## QoS Configuration with tc - -First, setup the tc rules in rc.local using commands to assign different QoS markings to different classids. You can see one such example in [github issue #4563](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973). - -Then, map the classids to names by creating `/etc/iproute2/tc_cls`. For example: - -``` -2:1 Standard -2:8 LowPriorityData -2:10 HighThroughputData -2:16 OAM -2:18 LowLatencyData -2:24 BroadcastVideo -2:26 MultimediaStreaming -2:32 RealTimeInteractive -2:34 MultimediaConferencing -2:40 Signalling -2:46 Telephony -2:48 NetworkControl -``` - -Add the following configuration option in `/etc/netdata.conf`: - -```\[plugin:tc] - enable show all classes and qdiscs for all interfaces = yes -``` - -Finally, create `/etc/netdata/tc-qos-helper.conf` with this content: -`tc_show="class"` - -Please note, that by default Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after Netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins. 
- - diff --git a/collectors/tc.plugin/README.md b/collectors/tc.plugin/README.md new file mode 120000 index 00000000000000..2a20ff2622e306 --- /dev/null +++ b/collectors/tc.plugin/README.md @@ -0,0 +1 @@ +integrations/tc_qos_classes.md \ No newline at end of file diff --git a/collectors/tc.plugin/integrations/tc_qos_classes.md b/collectors/tc.plugin/integrations/tc_qos_classes.md new file mode 100644 index 00000000000000..7a665066085aec --- /dev/null +++ b/collectors/tc.plugin/integrations/tc_qos_classes.md @@ -0,0 +1,171 @@ + + +# tc QoS classes + + + + + +Plugin: tc.plugin +Module: tc.plugin + + + +## Overview + +Examine tc metrics to gain insights into Linux traffic control operations. Study packet flow rates, queue lengths, and drop rates to optimize network traffic flow. + +The plugin uses `tc` command to collect information about Traffic control. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs to access command `tc` to get the necessary metrics. To achieve this netdata modifies permission of file `/usr/libexec/netdata/plugins.d/tc-qos-helper.sh`. + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per network device direction + +Metrics related to QoS network device directions. Each direction (in/out) produces its own set of the following metrics. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| device | The network interface. | +| device_name | The network interface name | +| group | The device family | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| tc.qos | a dimension per class | kilobits/s | +| tc.qos_packets | a dimension per class | packets/s | +| tc.qos_dropped | a dimension per class | packets/s | +| tc.qos_tokens | a dimension per class | tokens | +| tc.qos_ctokens | a dimension per class | ctokens | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Create `tc-qos-helper.conf` + +In order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content: + +```conf +tc_show="class" +``` + + + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:tc]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config option + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| script to run to get tc values | Path to script `tc-qos-helper.sh` | usr/libexec/netdata/plugins.d/tc-qos-helper.s | no | +| enable show all classes and qdiscs for all interfaces | yes/no flag to control what data is presented. | yes | no | + +
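Putting the prerequisite and the options above together, a minimal end-to-end setup could look like the sketch below. It assumes a standard install under `/etc/netdata` (static builds live under `/opt/netdata/etc/netdata`) and a systemd host for the restart step:

```sh
# 1. tell the helper script to report classes (see the prerequisite above)
echo 'tc_show="class"' | sudo tee /etc/netdata/tc-qos-helper.conf

# 2. enable the plugin option in the [plugin:tc] section of netdata.conf
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
sudo ./edit-config netdata.conf
#    [plugin:tc]
#        enable show all classes and qdiscs for all interfaces = yes

# 3. restart the agent so tc.plugin picks up the change
sudo systemctl restart netdata
```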
+ +#### Examples + +##### Basic + +A basic example configuration using classes defined in `/etc/iproute2/tc_cls`. + +An example of class IDs mapped to names in that file can be: + +```conf +2:1 Standard +2:8 LowPriorityData +2:10 HighThroughputData +2:16 OAM +2:18 LowLatencyData +2:24 BroadcastVideo +2:26 MultimediaStreaming +2:32 RealTimeInteractive +2:34 MultimediaConferencing +2:40 Signalling +2:46 Telephony +2:48 NetworkControl +``` + +You can read more about setting up the tc rules in rc.local in this [GitHub issue](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973). + + +```yaml +[plugin:tc] + script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh + enable show all classes and qdiscs for all interfaces = yes + +``` + diff --git a/collectors/tc.plugin/metadata.yaml b/collectors/tc.plugin/metadata.yaml index dcd03e470825e5..f4039a8c555f2c 100644 --- a/collectors/tc.plugin/metadata.yaml +++ b/collectors/tc.plugin/metadata.yaml @@ -36,7 +36,14 @@ modules: description: "" setup: prerequisites: - list: [] + list: + - title: Create `tc-qos-helper.conf` + description: | + In order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content: + + ```conf + tc_show="class" + ``` configuration: file: name: "netdata.conf" @@ -52,16 +59,42 @@ modules: description: Path to script `tc-qos-helper.sh` default_value: "usr/libexec/netdata/plugins.d/tc-qos-helper.s" required: false + - name: enable show all classes and qdiscs for all interfaces + description: yes/no flag to control what data is presented. + default_value: "yes" + required: false examples: folding: enabled: false title: "Config" list: - name: Basic - description: A basic example configuration. + description: | + A basic example configuration using classes defined in `/etc/iproute2/tc_cls`. + + An example of class IDs mapped to names in that file can be: + + ```conf + 2:1 Standard + 2:8 LowPriorityData + 2:10 HighThroughputData + 2:16 OAM + 2:18 LowLatencyData + 2:24 BroadcastVideo + 2:26 MultimediaStreaming + 2:32 RealTimeInteractive + 2:34 MultimediaConferencing + 2:40 Signalling + 2:46 Telephony + 2:48 NetworkControl + ``` + + You can read more about setting up the tc rules in rc.local in this [GitHub issue](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973). + config: | [plugin:tc] script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh + enable show all classes and qdiscs for all interfaces = yes troubleshooting: problems: list: [] diff --git a/collectors/tc.plugin/tc-qos-helper.sh.in b/collectors/tc.plugin/tc-qos-helper.sh.in index 97d4d016db9785..3298c39a30cf40 100755 --- a/collectors/tc.plugin/tc-qos-helper.sh.in +++ b/collectors/tc.plugin/tc-qos-helper.sh.in @@ -2,54 +2,113 @@ # netdata # real-time performance and health monitoring, done right! -# (C) 2017 Costa Tsaousis +# (C) 2023 Netdata Inc. # SPDX-License-Identifier: GPL-3.0-or-later # # This script is a helper to allow netdata collect tc data. # tc output parsing has been implemented in C, inside netdata # This script allows setting names to dimensions. 
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" +export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin:@sbindir_POST@" export LC_ALL=C +cmd_line="'${0}' $(printf "'%s' " "${@}")" + # ----------------------------------------------------------------------------- -# logging functions +# logging -PROGRAM_NAME="$(basename "$0")" +PROGRAM_NAME="$(basename "${0}")" PROGRAM_NAME="${PROGRAM_NAME/.plugin/}" -logdate() { - date "+%Y-%m-%d %H:%M:%S" +# these should be the same with syslog() priorities +NDLP_EMERG=0 # system is unusable +NDLP_ALERT=1 # action must be taken immediately +NDLP_CRIT=2 # critical conditions +NDLP_ERR=3 # error conditions +NDLP_WARN=4 # warning conditions +NDLP_NOTICE=5 # normal but significant condition +NDLP_INFO=6 # informational +NDLP_DEBUG=7 # debug-level messages + +# the max (numerically) log level we will log +LOG_LEVEL=$NDLP_INFO + +set_log_min_priority() { + case "${NETDATA_LOG_LEVEL,,}" in + "emerg" | "emergency") + LOG_LEVEL=$NDLP_EMERG + ;; + + "alert") + LOG_LEVEL=$NDLP_ALERT + ;; + + "crit" | "critical") + LOG_LEVEL=$NDLP_CRIT + ;; + + "err" | "error") + LOG_LEVEL=$NDLP_ERR + ;; + + "warn" | "warning") + LOG_LEVEL=$NDLP_WARN + ;; + + "notice") + LOG_LEVEL=$NDLP_NOTICE + ;; + + "info") + LOG_LEVEL=$NDLP_INFO + ;; + + "debug") + LOG_LEVEL=$NDLP_DEBUG + ;; + esac } -log() { - local status="${1}" - shift +set_log_min_priority - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" +log() { + local level="${1}" + shift 1 + + [[ -n "$level" && -n "$LOG_LEVEL" && "$level" -gt "$LOG_LEVEL" ]] && return + + systemd-cat-native --log-as-netdata --newline="--NEWLINE--" < - -# timex.plugin - -This plugin monitors the system kernel clock synchronization state. - -This plugin creates the following charts: - -- System clock synchronization state according to the system kernel -- System clock status which gives the value of the `time_status` variable in the kernel -- Computed time offset between local system and reference clock - -This is obtained from the information provided by the [ntp_adjtime()](https://man7.org/linux/man-pages/man2/adjtimex.2.html) system call. -An unsynchronized clock may indicate a hardware clock error, or an issue with UTC synchronization. - -## Configuration - -Edit the `netdata.conf` configuration file using [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) from the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory), which is typically at `/etc/netdata`. - -Scroll down to the `[plugin:timex]` section to find the available options: - -```ini -[plugin:timex] - # update every = 1 - # clock synchronization state = yes - # time offset = yes -``` diff --git a/collectors/timex.plugin/README.md b/collectors/timex.plugin/README.md new file mode 120000 index 00000000000000..89c1bd0d410674 --- /dev/null +++ b/collectors/timex.plugin/README.md @@ -0,0 +1 @@ +integrations/timex.md \ No newline at end of file diff --git a/collectors/timex.plugin/integrations/timex.md b/collectors/timex.plugin/integrations/timex.md new file mode 100644 index 00000000000000..754b2368ce4856 --- /dev/null +++ b/collectors/timex.plugin/integrations/timex.md @@ -0,0 +1,143 @@ + + +# Timex + + + + + +Plugin: timex.plugin +Module: timex.plugin + + + +## Overview + +Examine Timex metrics to gain insights into system clock operations. Study time sync status, clock drift, and adjustments to ensure accurate system timekeeping. 
+ +It uses system call adjtimex on Linux and ntp_adjtime on FreeBSD or Mac to monitor the system kernel clock synchronization state. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Timex instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.clock_sync_state | state | state | +| system.clock_status | unsync, clockerr | status | +| system.clock_sync_offset | offset | milliseconds | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ system_clock_sync_state ](https://github.com/netdata/netdata/blob/master/health/health.d/timex.conf) | system.clock_sync_state | when set to 0, the system kernel believes the system clock is not properly synchronized to a reliable server | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:timex]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + +At least one option ('clock synchronization state', 'time offset') needs to be enabled for this collector to run. + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 1 | no | +| clock synchronization state | Make chart showing system clock synchronization state. | yes | yes | +| time offset | Make chart showing computed time offset between local system and reference clock | yes | yes | + +
+ +#### Examples + +##### Basic + +A basic configuration example. + +
Config + +```yaml +[plugin:timex] + update every = 1 + clock synchronization state = yes + time offset = yes + +``` +
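To cross-check what this collector reports, the kernel's view of clock synchronization can also be queried from the shell. A sketch, assuming a systemd host with either `chronyd` or `ntpd` running:

```sh
# systemd's summary of the kernel sync flag and the NTP service state
timedatectl status

# the measured offset from the reference clock, depending on the NTP daemon in use
chronyc tracking 2>/dev/null || ntpq -c rv 2>/dev/null
```

If these report an unsynchronized clock, the `system.clock_sync_state` chart should show the same.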
+ + diff --git a/collectors/xenstat.plugin/README.md b/collectors/xenstat.plugin/README.md deleted file mode 100644 index 8d17a33cd7c3b4..00000000000000 --- a/collectors/xenstat.plugin/README.md +++ /dev/null @@ -1,57 +0,0 @@ - - -# xenstat.plugin - -`xenstat.plugin` collects XenServer and XCP-ng statistics. - -## Prerequisites - -1. install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system. - Note: On Cent-OS systems you will need `centos-release-xen` repository and the required package for xen is `xen-devel` - -2. re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin. - -Keep in mind that `libxenstat` requires root access, so the plugin is setuid to root. - -## Charts - -The plugin provides XenServer and XCP-ng host and domains statistics: - -Host: - -1. Number of domains. - -Domain: - -1. CPU. -2. Memory. -3. Networks. -4. VBDs. - -## Configuration - -If you need to disable xenstat for Netdata, edit /etc/netdata/netdata.conf and set: - -``` -[plugins] - xenstat = no -``` - -## Debugging - -You can run the plugin by hand: - -``` -sudo /usr/libexec/netdata/plugins.d/xenstat.plugin 1 debug -``` - -You will get verbose output on what the plugin does. - - diff --git a/collectors/xenstat.plugin/README.md b/collectors/xenstat.plugin/README.md new file mode 120000 index 00000000000000..32fe4d213ca2e1 --- /dev/null +++ b/collectors/xenstat.plugin/README.md @@ -0,0 +1 @@ +integrations/xen_xcp-ng.md \ No newline at end of file diff --git a/collectors/xenstat.plugin/integrations/xen_xcp-ng.md b/collectors/xenstat.plugin/integrations/xen_xcp-ng.md new file mode 100644 index 00000000000000..17dc8d78570470 --- /dev/null +++ b/collectors/xenstat.plugin/integrations/xen_xcp-ng.md @@ -0,0 +1,176 @@ + + +# Xen XCP-ng + + + + + +Plugin: xenstat.plugin +Module: xenstat.plugin + + + +## Overview + +This collector monitors XenServer and XCP-ng host and domains statistics. + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +The plugin needs setuid. + +### Default Behavior + +#### Auto-Detection + +This plugin requires the `xen-dom0-libs-devel` and `yajl-devel` libraries to be installed. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Xen XCP-ng instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| xenstat.mem | free, used | MiB | +| xenstat.domains | domains | domains | +| xenstat.cpus | cpus | cpus | +| xenstat.cpu_freq | frequency | MHz | + +### Per xendomain + +Metrics related to Xen domains. Each domain provides its own set of the following metrics. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| xendomain.states | running, blocked, paused, shutdown, crashed, dying | boolean | +| xendomain.cpu | used | percentage | +| xendomain.mem | maximum, current | MiB | +| xendomain.vcpu | a dimension per vcpu | percentage | + +### Per xendomain vbd + +Metrics related to Xen domain Virtual Block Device. Each VBD provides its own set of the following metrics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| xendomain.oo_req_vbd | requests | requests/s | +| xendomain.requests_vbd | read, write | requests/s | +| xendomain.sectors_vbd | read, write | sectors/s | + +### Per xendomain network + +Metrics related to Xen domain network interfaces. Each network interface provides its own set of the following metrics. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| xendomain.bytes_network | received, sent | kilobits/s | +| xendomain.packets_network | received, sent | packets/s | +| xendomain.errors_network | received, sent | errors/s | +| xendomain.drops_network | received, sent | drops/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Libraries + +1. Install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system. + + Note: On Cent-OS systems you will need `centos-release-xen` repository and the required package for xen is `xen-devel` + +2. Re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin. + + + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:xenstat]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +
Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update every | Data collection frequency. | 1 | no | + +
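For quick troubleshooting, the notes from the retired plugin README still apply; a sketch, assuming a standard install layout:

```sh
# disable the plugin entirely if Xen metrics are not wanted
# (edit netdata.conf and set, under [plugins]:  xenstat = no)
sudo /etc/netdata/edit-config netdata.conf

# or run the plugin by hand, with a 1-second interval and verbose output
sudo /usr/libexec/netdata/plugins.d/xenstat.plugin 1 debug
```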
+ +#### Examples +There are no configuration examples. + + diff --git a/collectors/xenstat.plugin/metadata.yaml b/collectors/xenstat.plugin/metadata.yaml index 493183694b556f..e5527dbb17fdba 100644 --- a/collectors/xenstat.plugin/metadata.yaml +++ b/collectors/xenstat.plugin/metadata.yaml @@ -4,7 +4,7 @@ modules: plugin_name: xenstat.plugin module_name: xenstat.plugin monitored_instance: - name: Xen/XCP-ng + name: Xen XCP-ng link: "https://xenproject.org/" categories: - data-collection.containers-and-vms diff --git a/collectors/xenstat.plugin/xenstat_plugin.c b/collectors/xenstat.plugin/xenstat_plugin.c index acd072605f28fd..319396d4359ff4 100644 --- a/collectors/xenstat.plugin/xenstat_plugin.c +++ b/collectors/xenstat.plugin/xenstat_plugin.c @@ -920,20 +920,14 @@ static void xenstat_send_domain_metrics() { } int main(int argc, char **argv) { - stderror = stderr; clocks_init(); // ------------------------------------------------------------------------ // initialization of netdata plugin - program_name = "xenstat.plugin"; + program_name = PLUGIN_XENSTAT_NAME; - // disable syslog - error_log_syslog = 0; - - // set errors flood protection to 100 logs per hour - error_log_errors_per_period = 100; - error_log_throttle_period = 3600; + nd_log_initialize_for_external_plugins(PLUGIN_XENSTAT_NAME); // ------------------------------------------------------------------------ // parse command line parameters diff --git a/configure.ac b/configure.ac index ac0d7bff6f138e..8f1e4115ce029d 100644 --- a/configure.ac +++ b/configure.ac @@ -54,6 +54,8 @@ else AC_CHECK_TOOL([AR], [ar]) fi +CFLAGS="$CFLAGS -fexceptions" + # ----------------------------------------------------------------------------- # configurable options @@ -75,6 +77,18 @@ AC_ARG_ENABLE( , [enable_plugin_systemd_journal="detect"] ) +AC_ARG_ENABLE( + [logsmanagement], + [AS_HELP_STRING([--disable-logsmanagement], [Disable logsmanagement @<:@default autodetect@:>@])], + , + [enable_logsmanagement="detect"] +) +AC_ARG_ENABLE( + [logsmanagement_tests], + [AS_HELP_STRING([--enable-logsmanagement-tests], [Enable logsmanagement tests @<:@default disabled@:>@])], + , + [enable_logsmanagement_tests="no"] +) AC_ARG_ENABLE( [plugin-cups], [AS_HELP_STRING([--enable-plugin-cups], [enable cups plugin @<:@default autodetect@:>@])], @@ -207,6 +221,12 @@ AC_ARG_ENABLE( , [enable_ml="detect"] ) +AC_ARG_ENABLE( + [gtests], + [AS_HELP_STRING([--enable-gtests], [Enable google tests @<:@default no@:>@])], + , + [enable_gtests="no"] +) AC_ARG_ENABLE( [aclk_ssl_debug], [AS_HELP_STRING([--enable-aclk-ssl-debug], [Enables possibility for SSL key logging @<:@default no@:>@])], @@ -555,16 +575,91 @@ OPTIONAL_UV_LIBS="${UV_LIBS}" AC_CHECK_LIB( [lz4], - [LZ4_initStream], + [LZ4_createStream], [LZ4_LIBS_FAST="-llz4"] ) AC_CHECK_LIB( [lz4], - [LZ4_compress_default], + [LZ4_compress_fast_continue], [LZ4_LIBS="-llz4"] ) +# ----------------------------------------------------------------------------- +# libcurl + +PKG_CHECK_MODULES( + [LIBCURL], + [libcurl], + [AC_CHECK_LIB( + [curl], + [curl_easy_init], + [have_libcurl=yes], + [have_libcurl=no] + )], + [have_libcurl=no] +) + +if test "x$have_libcurl" = "xyes"; then + AC_DEFINE([HAVE_CURL], [1], [libcurl usability]) + OPTIONAL_CURL_LIBS="-lcurl" +fi + +# ----------------------------------------------------------------------------- +# PCRE2 + +PKG_CHECK_MODULES( + [LIBPCRE2], + [libpcre2-8], + [AC_CHECK_LIB( + [pcre2-8], + [pcre2_compile_8], + [have_libpcre2=yes], + [have_libpcre2=no] + )], + [have_libpcre2=no] +) + +if test 
"x$have_libpcre2" = "xyes"; then + AC_DEFINE([HAVE_PCRE2], [1], [PCRE2 usability]) + OPTIONAL_PCRE2_LIBS="-lpcre2-8" +fi + +AM_CONDITIONAL([ENABLE_LOG2JOURNAL], [test "${have_libpcre2}" = "yes"]) + +# ----------------------------------------------------------------------------- +# zstd + +AC_CHECK_LIB([zstd], [ZSTD_createCStream, ZSTD_createDStream], + [LIBZSTD_FOUND=yes], + [LIBZSTD_FOUND=no]) + +if test "x$LIBZSTD_FOUND" = "xyes"; then + AC_DEFINE([ENABLE_ZSTD], [1], [libzstd usability]) + OPTIONAL_ZSTD_LIBS="-lzstd" +fi + +# ----------------------------------------------------------------------------- +# brotli + +AC_CHECK_LIB([brotlienc], [BrotliEncoderCreateInstance, BrotliEncoderCompressStream], + [LIBBROTLIENC_FOUND=yes], + [LIBBROTLIENC_FOUND=no]) + +if test "x$LIBBROTLIENC_FOUND" = "xyes"; then + AC_DEFINE([ENABLE_BROTLIENC], [1], [libbrotlienc usability]) + OPTIONAL_BROTLIENC_LIBS="-lbrotlienc" +fi + +AC_CHECK_LIB([brotlidec], [BrotliDecoderCreateInstance, BrotliDecoderDecompressStream], + [LIBBROTLIDEC_FOUND=yes], + [LIBBROTLIDEC_FOUND=no]) + +if test "x$LIBBROTLIDEC_FOUND" = "xyes"; then + AC_DEFINE([ENABLE_BROTLIDEC], [1], [libbrotlidec usability]) + OPTIONAL_BROTLIDEC_LIBS="-lbrotlidec" +fi + # ----------------------------------------------------------------------------- # zlib @@ -650,10 +745,10 @@ AC_C_BIGENDIAN([], [AC_MSG_ERROR([Could not find out system endiannnes])]) AC_CHECK_SIZEOF(void *) -if test "$ac_cv_sizeof_void_p" = 8; then +if test "$ac_cv_sizeof_void_p" = 8; then AC_MSG_RESULT(Detected 64-bit Build Environment) LIBJUDY_CFLAGS="$LIBJUDY_CFLAGS -DJU_64BIT" -else +else AC_MSG_RESULT(Detected 32-bit Build Environment) LIBJUDY_CFLAGS="$LIBJUDY_CFLAGS -UJU_64BIT" fi @@ -702,7 +797,7 @@ if test "${enable_lz4}" != "no"; then AC_TRY_LINK( [ #include ], [ - LZ4_stream_t* stream = LZ4_initStream(NULL, 0); + LZ4_stream_t* stream = LZ4_createStream(); ], [ enable_lz4="yes"], [ enable_lz4="no" ] @@ -769,6 +864,26 @@ fi AC_MSG_RESULT([${enable_jsonc}]) AM_CONDITIONAL([ENABLE_JSONC], [test "${enable_jsonc}" = "yes"]) +# ----------------------------------------------------------------------------- +# libyaml + +PKG_CHECK_MODULES( + [LIBYAML], + [yaml-0.1], + [AC_CHECK_LIB( + [yaml], + [yaml_parser_initialize], + [have_libyaml=yes], + [have_libyaml=no] + )], + [have_libyaml=no] +) + +if test "x$have_libyaml" = "xyes"; then + AC_DEFINE([HAVE_LIBYAML], [1], [libyaml usability]) + OPTIONAL_YAML_LIBS="-lyaml" +fi + # ----------------------------------------------------------------------------- # YAML @@ -835,6 +950,27 @@ if test "${enable_pedantic}" = "yes"; then CFLAGS="${CFLAGS} -pedantic -Wall -Wextra -Wno-long-long" fi +# ----------------------------------------------------------------------------- +# dlsym check + +AC_MSG_CHECKING(whether we can use dlsym) +OLD_LIBS="${LIBS}" +LIBS="-ldl" +AC_LINK_IFELSE([AC_LANG_SOURCE([[ + #include + static void *(*libc_malloc)(size_t); + int main() { + libc_malloc = dlsym(RTLD_NEXT, "malloc"); + } +]])], CAN_USE_DLSYM=yes, CAN_USE_DLSYM=no) +LIBS="${OLD_LIBS}" +AC_MSG_RESULT($CAN_USE_DLSYM) + +if test "x$CAN_USE_DLSYM" = xyes; then + AC_DEFINE([HAVE_DLSYM], [1], [dlsym usability]) + OPTIONAL_DL_LIBS="-ldl" +fi +AC_SUBST([OPTIONAL_DL_LIBS]) # ----------------------------------------------------------------------------- # memory allocation library @@ -903,9 +1039,6 @@ if test "${enable_h2o}" != "no"; then else can_build_h2o="no" fi - if test "${with_zlib}" != "yes"; then - can_build_h2o="no" - fi AC_MSG_RESULT([${can_build_h2o}]) if test 
"${can_build_h2o}" = "no" -a "${enable_h2o}" = "yes"; then @@ -1063,7 +1196,6 @@ fi AC_MSG_RESULT([${enable_plugin_apps}]) AM_CONDITIONAL([ENABLE_PLUGIN_APPS], [test "${enable_plugin_apps}" = "yes"]) - # ----------------------------------------------------------------------------- # freeipmi.plugin - libipmimonitoring @@ -1141,8 +1273,44 @@ fi AC_MSG_RESULT([${enable_plugin_systemd_journal}]) AM_CONDITIONAL([ENABLE_PLUGIN_SYSTEMD_JOURNAL], [test "${enable_plugin_systemd_journal}" = "yes"]) +AC_CHECK_LIB([systemd], [sd_journal_open_files_fd], [have_sd_journal_open_files_fd=yes], [have_sd_journal_open_files_fd=no]) +if test "${have_sd_journal_open_files_fd}" = "yes"; then + AC_DEFINE([HAVE_SD_JOURNAL_OPEN_FILES_FD], [1], [sd_journal_open_files_fd usability]) +fi + +AC_CHECK_LIB([systemd], [sd_journal_restart_fields], [have_sd_journal_restart_fields=yes], [have_sd_journal_restart_fields=no]) +if test "${have_sd_journal_restart_fields}" = "yes"; then + AC_DEFINE([HAVE_SD_JOURNAL_RESTART_FIELDS], [1], [sd_journal_restart_fields usability]) +fi + +AC_CHECK_LIB([systemd], [sd_journal_get_seqnum], [have_sd_journal_get_seqnum=yes], [have_sd_journal_get_seqnum=no]) +if test "${have_sd_journal_get_seqnum}" = "yes"; then + AC_DEFINE([HAVE_SD_JOURNAL_GET_SEQNUM], [1], [sd_journal_get_seqnum usability]) +fi + +AC_CHECK_LIB([systemd], [sd_bus_default_system, sd_bus_call_method, sd_bus_message_enter_container, sd_bus_message_read, sd_bus_message_exit_container], + [SYSTEMD_DBUS_FOUND=yes], + [SYSTEMD_DBUS_FOUND=no]) + +if test "x$SYSTEMD_DBUS_FOUND" = "xyes"; then + AC_DEFINE([ENABLE_SYSTEMD_DBUS], [1], [libsystemd dbus usability]) +fi + AC_MSG_NOTICE([OPTIONAL_SYSTEMD_LIBS is set to: ${OPTIONAL_SYSTEMD_LIBS}]) +if test "${enable_plugin_systemd_journal}" = "yes"; then + AC_MSG_CHECKING([for SD_JOURNAL_OS_ROOT in systemd]) + AC_COMPILE_IFELSE( + [AC_LANG_PROGRAM( + [[#include ]], + [[int x = SD_JOURNAL_OS_ROOT;]] + )], + [AC_DEFINE(HAVE_SD_JOURNAL_OS_ROOT, 1, [Define if SD_JOURNAL_OS_ROOT is available]) + AC_MSG_RESULT(yes)], + [AC_MSG_RESULT(no)] + ) +fi + LIBS="${LIBS_BAK}" # ----------------------------------------------------------------------------- @@ -1371,17 +1539,17 @@ AM_CONDITIONAL([ENABLE_PLUGIN_PERF], [test "${enable_plugin_perf}" = "yes"]) # ----------------------------------------------------------------------------- # gtest/gmock -AC_MSG_CHECKING([if gtest and gmock can be found]) +if test "${enable_gtests}" = "yes"; then + AC_MSG_CHECKING([if gtest can be found]) -PKG_CHECK_MODULES([GTEST], [gtest], [have_gtest=yes], [have_gtest=no]) -PKG_CHECK_MODULES([GMOCK], [gmock], [have_gmock=yes], [have_gmock=no]) + PKG_CHECK_MODULES([GTEST], [gtest], [have_gtest=yes], [have_gtest=no]) -if test "${have_gtest}" = "yes" -a "${have_gmock}" = "yes"; then - OPTIONAL_GTEST_CFLAGS="${GTEST_CFLAGS} ${GMOCK_CFLAGS}" - OPTIONAL_GTEST_LIBS="${GTEST_LIBS} ${GMOCK_LIBS}" - have_gtest="yes" -else - have_gtest="no" + if test "${have_gtest}" = "yes"; then + OPTIONAL_GTEST_CFLAGS="${GTEST_CFLAGS}" + OPTIONAL_GTEST_LIBS="${GTEST_LIBS}" + + AC_DEFINE([HAVE_GTEST], [1], [gtest availability]) + fi fi # ----------------------------------------------------------------------------- @@ -1426,6 +1594,56 @@ if test "${build_ml}" = "yes"; then fi +# ----------------------------------------------------------------------------- +# logsmanagement + +LIBS_BAK="${LIBS}" + +# Check if submodules have not been fetched. Fail if Logs Management was explicitly requested. 
+ +AC_MSG_CHECKING([if git submodules are present for logs management functionality]) +if test -f "fluent-bit/CMakeLists.txt"; then + AC_MSG_RESULT([yes]) + have_logsmanagement_submodules="yes" +else + AC_MSG_RESULT([no]) + have_logsmanagement_submodules="no" +fi + +if test "${enable_logsmanagement}" != "no" -a "${have_logsmanagement_submodules}" = "no"; then + AC_MSG_WARN([Logs management cannot be built because the required git submodules are missing.]) +fi + +if test "${enable_logsmanagement}" != "no" -a "x$CAN_USE_DLSYM" = xno; then + AC_MSG_WARN([Logs management cannot be built because dlsym cannot be used.]) +fi + +# Decide if we should build Logs Management +if test "${enable_logsmanagement}" != "no" -a "${have_logsmanagement_submodules}" = "yes" -a "x$CAN_USE_DLSYM" = xyes; then + build_logsmanagement="yes" +else + build_logsmanagement="no" +fi + +AM_CONDITIONAL([ENABLE_LOGSMANAGEMENT], [test "${build_logsmanagement}" = "yes"]) +if test "${build_logsmanagement}" = "yes"; then + AC_DEFINE([ENABLE_LOGSMANAGEMENT], [1], [enable logs management functionality]) +fi + +# Decide if we should build Logs Management tests. +if test "${build_logsmanagement}" = "yes" -a "${enable_logsmanagement_tests}" = "yes"; then + build_logsmanagement_tests="yes" +else + build_logsmanagement_tests="no" +fi + +AM_CONDITIONAL([ENABLE_LOGSMANAGEMENT_TESTS], [test "${build_logsmanagement_tests}" = "yes"]) +if test "${build_logsmanagement_tests}" = "yes"; then + AC_DEFINE([ENABLE_LOGSMANAGEMENT_TESTS], [1], [logs management tests]) +fi + +LIBS="${LIBS_BAK}" + # ----------------------------------------------------------------------------- # debugfs.plugin @@ -1523,18 +1741,6 @@ PKG_CHECK_MODULES( [have_libssl=no] ) -PKG_CHECK_MODULES( - [LIBCURL], - [libcurl], - [AC_CHECK_LIB( - [curl], - [curl_easy_init], - [have_libcurl=yes], - [have_libcurl=no] - )], - [have_libcurl=no] -) - PKG_CHECK_MODULES( [AWS_CPP_SDK_CORE], [aws-cpp-sdk-core], @@ -1803,27 +2009,6 @@ AC_LANG_POP([C++]) # ----------------------------------------------------------------------------- -AC_MSG_CHECKING(whether we can use dlsym) -OLD_LIBS="${LIBS}" -LIBS="-ldl" -AC_LINK_IFELSE([AC_LANG_SOURCE([[ - #include - static void *(*libc_malloc)(size_t); - int main() { - libc_malloc = dlsym(RTLD_NEXT, "malloc"); - } -]])], CAN_USE_DLSYM=yes, CAN_USE_DLSYM=no) -LIBS="${OLD_LIBS}" -AC_MSG_RESULT($CAN_USE_DLSYM) - -if test "x$CAN_USE_DLSYM" = xyes; then - AC_DEFINE([HAVE_DLSYM], [1], [dlsym usability]) - OPTIONAL_DL_LIBS="-ldl" -fi -AC_SUBST([OPTIONAL_DL_LIBS]) - -# ----------------------------------------------------------------------------- - AC_DEFINE_UNQUOTED([NETDATA_USER], ["${with_user}"], [use this user to drop privileged]) @@ -1877,6 +2062,12 @@ AC_SUBST([OPTIONAL_MATH_LIBS]) AC_SUBST([OPTIONAL_DATACHANNEL_LIBS]) AC_SUBST([OPTIONAL_UV_LIBS]) AC_SUBST([OPTIONAL_LZ4_LIBS]) +AC_SUBST([OPTIONAL_BROTLIENC_LIBS]) +AC_SUBST([OPTIONAL_BROTLIDEC_LIBS]) +AC_SUBST([OPTIONAL_YAML_LIBS]) +AC_SUBST([OPTIONAL_CURL_LIBS]) +AC_SUBST([OPTIONAL_PCRE2_LIBS]) +AC_SUBST([OPTIONAL_ZSTD_LIBS]) AC_SUBST([OPTIONAL_SSL_LIBS]) AC_SUBST([OPTIONAL_JSONC_LIBS]) AC_SUBST([OPTIONAL_YAML_LIBS]) @@ -1965,6 +2156,7 @@ AC_CONFIG_FILES([ collectors/freebsd.plugin/Makefile collectors/freeipmi.plugin/Makefile collectors/cups.plugin/Makefile + collectors/log2journal/Makefile collectors/idlejitter.plugin/Makefile collectors/macos.plugin/Makefile collectors/nfacct.plugin/Makefile @@ -2003,14 +2195,18 @@ AC_CONFIG_FILES([ libnetdata/aral/Makefile libnetdata/avl/Makefile 
libnetdata/buffer/Makefile + libnetdata/buffered_reader/Makefile libnetdata/clocks/Makefile libnetdata/completion/Makefile libnetdata/config/Makefile + libnetdata/datetime/Makefile libnetdata/dictionary/Makefile libnetdata/ebpf/Makefile libnetdata/eval/Makefile libnetdata/facets/Makefile + libnetdata/functions_evloop/Makefile libnetdata/july/Makefile + libnetdata/line_splitter/Makefile libnetdata/locks/Makefile libnetdata/log/Makefile libnetdata/onewayalloc/Makefile @@ -2024,6 +2220,7 @@ AC_CONFIG_FILES([ libnetdata/storage_number/tests/Makefile libnetdata/threads/Makefile libnetdata/url/Makefile + libnetdata/uuid/Makefile libnetdata/json/Makefile libnetdata/health/Makefile libnetdata/worker_utilization/Makefile @@ -2034,6 +2231,7 @@ AC_CONFIG_FILES([ web/Makefile web/api/Makefile web/api/badges/Makefile + web/api/ilove/Makefile web/api/exporters/Makefile web/api/exporters/shell/Makefile web/api/exporters/prometheus/Makefile @@ -2064,6 +2262,7 @@ AC_CONFIG_FILES([ web/server/static/Makefile claim/Makefile spawn/Makefile + logsmanagement/Makefile ]) AC_OUTPUT diff --git a/contrib/debian/control b/contrib/debian/control index 4f819ac1800428..4163aa1df5cf1b 100644 --- a/contrib/debian/control +++ b/contrib/debian/control @@ -4,6 +4,7 @@ Build-Depends: debhelper (>= 9.20160709), dpkg-dev (>= 1.13.19), zlib1g-dev, uuid-dev, + libcurl4-openssl-dev, libelf-dev, libuv1-dev, liblz4-dev, @@ -15,6 +16,7 @@ Build-Depends: debhelper (>= 9.20160709), libipmimonitoring-dev, libnetfilter-acct-dev, libsnappy-dev, + libpcre2-dev, libprotobuf-dev, libprotoc-dev, libsystemd-dev, @@ -24,7 +26,9 @@ Build-Depends: debhelper (>= 9.20160709), automake, pkg-config, curl, - protobuf-compiler + protobuf-compiler, + bison, + flex Section: net Priority: optional Maintainer: Netdata Builder @@ -55,7 +59,8 @@ Conflicts: netdata-core, netdata-web Suggests: netdata-plugin-cups (= ${source:Version}), netdata-plugin-freeipmi (= ${source:Version}) -Recommends: netdata-plugin-systemd-journal (= ${source:Version}) +Recommends: netdata-plugin-systemd-journal (= ${source:Version}), + netdata-plugin-logs-management (= ${source:Version}) Description: real-time charts for system monitoring Netdata is a daemon that collects data in realtime (per second) and presents a web site to view and analyze them. The presentation @@ -201,3 +206,13 @@ Conflicts: netdata (<< ${source:Version}) Description: The systemd-journal collector for the Netdata Agent This plugin allows the Netdata Agent to present logs from the systemd journal on Netdata Cloud or the local Agent dashboard. + +Package: netdata-plugin-logs-management +Architecture: any +Depends: ${shlibs:Depends}, + netdata (= ${source:Version}) +Pre-Depends: libcap2-bin, adduser +Conflicts: netdata (<< ${source:Version}) +Description: The logs-management plugin for the Netdata Agent + This plugin allows the Netdata Agent to collect logs from the system + and parse them to extract metrics. diff --git a/contrib/debian/netdata-plugin-logs-management.postinst b/contrib/debian/netdata-plugin-logs-management.postinst new file mode 100644 index 00000000000000..0565b54e86b1ef --- /dev/null +++ b/contrib/debian/netdata-plugin-logs-management.postinst @@ -0,0 +1,17 @@ +#!/bin/sh + +set -e + +case "$1" in + configure|reconfigure) + chown root:netdata /usr/libexec/netdata/plugins.d/logs-management.plugin + chmod 0750 /usr/libexec/netdata/plugins.d/logs-management.plugin + if ! 
setcap "cap_dac_read_search=eip cap_syslog=eip" /usr/libexec/netdata/plugins.d/logs-management.plugin; then + chmod -f 4750 /usr/libexec/netdata/plugins.d/logs-management.plugin + fi + ;; +esac + +#DEBHELPER# + +exit 0 diff --git a/contrib/debian/netdata-plugin-logs-management.preinst b/contrib/debian/netdata-plugin-logs-management.preinst new file mode 100644 index 00000000000000..fcabb415aaa039 --- /dev/null +++ b/contrib/debian/netdata-plugin-logs-management.preinst @@ -0,0 +1,13 @@ +#!/bin/sh + +set -e + +case "$1" in + install) + if ! getent group netdata > /dev/null; then + addgroup --quiet --system netdata + fi + ;; +esac + +#DEBHELPER# diff --git a/contrib/debian/netdata-plugin-perf.postinst b/contrib/debian/netdata-plugin-perf.postinst index 76905878ef87ff..aa4f0f8d882b0f 100644 --- a/contrib/debian/netdata-plugin-perf.postinst +++ b/contrib/debian/netdata-plugin-perf.postinst @@ -7,16 +7,10 @@ case "$1" in chown root:netdata /usr/libexec/netdata/plugins.d/perf.plugin chmod 0750 /usr/libexec/netdata/plugins.d/perf.plugin - if capsh --supports=cap_perfmon 2>/dev/null; then - setcap cap_perfmon+ep /usr/libexec/netdata/plugins.d/perf.plugin - ret="$?" - else - setcap cap_sys_admin+ep /usr/libexec/netdata/plugins.d/perf.plugin - ret="$?" - fi - - if [ "${ret}" -ne 0 ]; then - chmod -f 4750 /usr/libexec/netdata/plugins.d/perf.plugin + if ! setcap cap_perfmon+ep /usr/libexec/netdata/plugins.d/perf.plugin 2>/dev/null; then + if ! setcap cap_sys_admin+ep /usr/libexec/netdata/plugins.d/perf.plugin 2>/dev/null; then + chmod -f 4750 /usr/libexec/netdata/plugins.d/perf.plugin + fi fi ;; esac diff --git a/contrib/debian/rules b/contrib/debian/rules index c1dbb6f619499d..d0aa353943fd95 100755 --- a/contrib/debian/rules +++ b/contrib/debian/rules @@ -128,7 +128,17 @@ override_dh_install: # Add systemd-journal plugin install rules mkdir -p $(TOP)-plugin-systemd-journal/usr/libexec/netdata/plugins.d/ mv -f $(TEMPTOP)/usr/libexec/netdata/plugins.d/systemd-journal.plugin \ - $(TOP)-plugin-systemd-journal/usr/libexec/netdata/plugins.d/systemd-journal.plugin; \ + $(TOP)-plugin-systemd-journal/usr/libexec/netdata/plugins.d/systemd-journal.plugin + + # Add logs-management plugin install rules + mkdir -p $(TOP)-plugin-logs-management/usr/libexec/netdata/plugins.d/ + mv -f $(TEMPTOP)/usr/libexec/netdata/plugins.d/logs-management.plugin \ + $(TOP)-plugin-logs-management/usr/libexec/netdata/plugins.d/logs-management.plugin + mkdir -p $(TOP)-plugin-logs-management/usr/lib/netdata/conf.d/ + mv -f $(TEMPTOP)/usr/lib/netdata/conf.d/logsmanagement.d.conf \ + $(TOP)-plugin-logs-management/usr/lib/netdata/conf.d/logsmanagement.d.conf + mv -f $(TEMPTOP)/usr/lib/netdata/conf.d/logsmanagement.d/ \ + $(TOP)-plugin-logs-management/usr/lib/netdata/conf.d/logsmanagement.d/ # Set the rest of the software in the main package # @@ -221,6 +231,9 @@ override_dh_fixperms: # systemd-journal chmod 4750 $(TOP)-plugin-systemd-journal/usr/libexec/netdata/plugins.d/systemd-journal.plugin + # systemd-journal + chmod 4750 $(TOP)-plugin-logs-management/usr/libexec/netdata/plugins.d/logs-management.plugin + override_dh_installlogrotate: cp system/logrotate/netdata debian/netdata.logrotate dh_installlogrotate diff --git a/coverity-scan.sh b/coverity-scan.sh index 2050d13cdb40a2..8466b21a7f9cac 100755 --- a/coverity-scan.sh +++ b/coverity-scan.sh @@ -40,7 +40,7 @@ set -e INSTALL_DIR="/opt" # the version of coverity to use -COVERITY_BUILD_VERSION="${COVERITY_BUILD_VERSION:-cov-analysis-linux64-2022.12.2}" 
+COVERITY_BUILD_VERSION="${COVERITY_BUILD_VERSION:-cov-analysis-linux64-2023.6.2}" # TODO: For some reasons this does not fully load on Debian 10 (Haven't checked if it happens on other distros yet), it breaks source packaging/installer/functions.sh || echo "Failed to fully load the functions library" diff --git a/daemon/README.md b/daemon/README.md index 3fb33e5c74335f..0707a406c132be 100644 --- a/daemon/README.md +++ b/daemon/README.md @@ -38,7 +38,7 @@ The command line options of the Netdata 1.10.0 version are the following: Support : https://github.com/netdata/netdata/issues License : https://github.com/netdata/netdata/blob/master/LICENSE.md - Twitter : https://twitter.com/linuxnetdata + Twitter : https://twitter.com/netdatahq LinkedIn : https://linkedin.com/company/netdata-cloud/ Facebook : https://facebook.com/linuxnetdata/ @@ -143,6 +143,8 @@ For most Netdata programs (including standard external plugins shipped by netdat | `ERROR` | Something that might disable a part of netdata.
The log line includes `errno` (if it is not zero). | | `FATAL` | Something prevented a program from running.
The log line includes `errno` (if it is not zero) and the program exited. | +The `FATAL` and `ERROR` messages will always appear in the logs, and `INFO`can be filtered using [severity level](https://github.com/netdata/netdata/tree/master/daemon/config#logs-section-options) option. + So, when auto-detection of data collection fail, `ERROR` lines are logged and the relevant modules are disabled, but the program continues to run. diff --git a/daemon/analytics.c b/daemon/analytics.c index 9323c8e8a08665..b026e34f8fa6f0 100644 --- a/daemon/analytics.c +++ b/daemon/analytics.c @@ -109,6 +109,7 @@ void analytics_free_data(void) freez(analytics_data.netdata_config_use_private_registry); freez(analytics_data.netdata_config_oom_score); freez(analytics_data.netdata_prebuilt_distro); + freez(analytics_data.netdata_fail_reason); } /* @@ -127,7 +128,7 @@ void analytics_set_data(char **name, char *value) /* * Set a string data with a value */ -void analytics_set_data_str(char **name, char *value) +void analytics_set_data_str(char **name, const char *value) { size_t value_string_len; if (*name) { @@ -148,7 +149,7 @@ void analytics_log_prometheus(void) if (netdata_anonymous_statistics_enabled == 1 && likely(analytics_data.prometheus_hits < ANALYTICS_MAX_PROMETHEUS_HITS)) { analytics_data.prometheus_hits++; char b[21]; - snprintfz(b, 20, "%zu", analytics_data.prometheus_hits); + snprintfz(b, sizeof(b) - 1, "%zu", analytics_data.prometheus_hits); analytics_set_data(&analytics_data.netdata_allmetrics_prometheus_used, b); } } @@ -161,7 +162,7 @@ void analytics_log_shell(void) if (netdata_anonymous_statistics_enabled == 1 && likely(analytics_data.shell_hits < ANALYTICS_MAX_SHELL_HITS)) { analytics_data.shell_hits++; char b[21]; - snprintfz(b, 20, "%zu", analytics_data.shell_hits); + snprintfz(b, sizeof(b) - 1, "%zu", analytics_data.shell_hits); analytics_set_data(&analytics_data.netdata_allmetrics_shell_used, b); } } @@ -174,7 +175,7 @@ void analytics_log_json(void) if (netdata_anonymous_statistics_enabled == 1 && likely(analytics_data.json_hits < ANALYTICS_MAX_JSON_HITS)) { analytics_data.json_hits++; char b[21]; - snprintfz(b, 20, "%zu", analytics_data.json_hits); + snprintfz(b, sizeof(b) - 1, "%zu", analytics_data.json_hits); analytics_set_data(&analytics_data.netdata_allmetrics_json_used, b); } } @@ -187,7 +188,7 @@ void analytics_log_dashboard(void) if (netdata_anonymous_statistics_enabled == 1 && likely(analytics_data.dashboard_hits < ANALYTICS_MAX_DASHBOARD_HITS)) { analytics_data.dashboard_hits++; char b[21]; - snprintfz(b, 20, "%zu", analytics_data.dashboard_hits); + snprintfz(b, sizeof(b) - 1, "%zu", analytics_data.dashboard_hits); analytics_set_data(&analytics_data.netdata_dashboard_used, b); } } @@ -197,7 +198,7 @@ void analytics_log_dashboard(void) */ void analytics_report_oom_score(long long int score){ char b[21]; - snprintfz(b, 20, "%lld", score); + snprintfz(b, sizeof(b) - 1, "%lld", score); analytics_set_data(&analytics_data.netdata_config_oom_score, b); } @@ -221,11 +222,11 @@ void analytics_mirrored_hosts(void) } rrd_unlock(); - snprintfz(b, 20, "%zu", count); + snprintfz(b, sizeof(b) - 1, "%zu", count); analytics_set_data(&analytics_data.netdata_mirrored_host_count, b); - snprintfz(b, 20, "%zu", reachable); + snprintfz(b, sizeof(b) - 1, "%zu", reachable); analytics_set_data(&analytics_data.netdata_mirrored_hosts_reachable, b); - snprintfz(b, 20, "%zu", unreachable); + snprintfz(b, sizeof(b) - 1, "%zu", unreachable); analytics_set_data(&analytics_data.netdata_mirrored_hosts_unreachable, b); 
} @@ -280,7 +281,7 @@ void analytics_collectors(void) .plugin = rrdset_plugin_name(st), .module = rrdset_module_name(st) }; - snprintfz(name, 499, "%s:%s", col.plugin, col.module); + snprintfz(name, sizeof(name) - 1, "%s:%s", col.plugin, col.module); dictionary_set(dict, name, &col, sizeof(struct collector)); } rrdset_foreach_done(st); @@ -296,7 +297,7 @@ void analytics_collectors(void) { char b[21]; - snprintfz(b, 20, "%d", ap.c); + snprintfz(b, sizeof(b) - 1, "%d", ap.c); analytics_set_data(&analytics_data.netdata_collectors_count, b); } @@ -401,7 +402,7 @@ void analytics_charts(void) analytics_data.charts_count = c; { char b[21]; - snprintfz(b, 20, "%zu", c); + snprintfz(b, sizeof(b) - 1, "%zu", c); analytics_set_data(&analytics_data.netdata_charts_count, b); } } @@ -426,7 +427,7 @@ void analytics_metrics(void) analytics_data.metrics_count = dimensions; { char b[21]; - snprintfz(b, 20, "%zu", dimensions); + snprintfz(b, sizeof(b) - 1, "%zu", dimensions); analytics_set_data(&analytics_data.netdata_metrics_count, b); } } @@ -453,11 +454,11 @@ void analytics_alarms(void) } foreach_rrdcalc_in_rrdhost_done(rc); - snprintfz(b, 20, "%zu", alarm_normal); + snprintfz(b, sizeof(b) - 1, "%zu", alarm_normal); analytics_set_data(&analytics_data.netdata_alarms_normal, b); - snprintfz(b, 20, "%zu", alarm_warn); + snprintfz(b, sizeof(b) - 1, "%zu", alarm_warn); analytics_set_data(&analytics_data.netdata_alarms_warning, b); - snprintfz(b, 20, "%zu", alarm_crit); + snprintfz(b, sizeof(b) - 1, "%zu", alarm_crit); analytics_set_data(&analytics_data.netdata_alarms_critical, b); } @@ -539,19 +540,19 @@ void analytics_gather_mutable_meta_data(void) { char b[21]; - snprintfz(b, 20, "%zu", analytics_data.prometheus_hits); + snprintfz(b, sizeof(b) - 1, "%zu", analytics_data.prometheus_hits); analytics_set_data(&analytics_data.netdata_allmetrics_prometheus_used, b); - snprintfz(b, 20, "%zu", analytics_data.shell_hits); + snprintfz(b, sizeof(b) - 1, "%zu", analytics_data.shell_hits); analytics_set_data(&analytics_data.netdata_allmetrics_shell_used, b); - snprintfz(b, 20, "%zu", analytics_data.json_hits); + snprintfz(b, sizeof(b) - 1, "%zu", analytics_data.json_hits); analytics_set_data(&analytics_data.netdata_allmetrics_json_used, b); - snprintfz(b, 20, "%zu", analytics_data.dashboard_hits); + snprintfz(b, sizeof(b) - 1, "%zu", analytics_data.dashboard_hits); analytics_set_data(&analytics_data.netdata_dashboard_used, b); - snprintfz(b, 20, "%zu", rrdhost_hosts_available()); + snprintfz(b, sizeof(b) - 1, "%zu", rrdhost_hosts_available()); analytics_set_data(&analytics_data.netdata_config_hosts_available, b); } } @@ -663,10 +664,10 @@ void set_late_global_environment(struct rrdhost_system_info *system_info) #ifdef ENABLE_DBENGINE { char b[16]; - snprintfz(b, 15, "%d", default_rrdeng_page_cache_mb); + snprintfz(b, sizeof(b) - 1, "%d", default_rrdeng_page_cache_mb); analytics_set_data(&analytics_data.netdata_config_page_cache_size, b); - snprintfz(b, 15, "%d", default_multidb_disk_quota_mb); + snprintfz(b, sizeof(b) - 1, "%d", default_multidb_disk_quota_mb); analytics_set_data(&analytics_data.netdata_config_multidb_disk_quota, b); } #endif @@ -823,11 +824,10 @@ void get_system_timezone(void) } } -void set_global_environment() -{ +void set_global_environment() { { char b[16]; - snprintfz(b, 15, "%d", default_rrd_update_every); + snprintfz(b, sizeof(b) - 1, "%d", default_rrd_update_every); setenv("NETDATA_UPDATE_EVERY", b, 1); } @@ -842,7 +842,6 @@ void set_global_environment() setenv("NETDATA_LIB_DIR", 
verify_or_create_required_directory(netdata_configured_varlib_dir), 1); setenv("NETDATA_LOCK_DIR", verify_or_create_required_directory(netdata_configured_lock_dir), 1); setenv("NETDATA_LOG_DIR", verify_or_create_required_directory(netdata_configured_log_dir), 1); - setenv("HOME", verify_or_create_required_directory(netdata_configured_home_dir), 1); setenv("NETDATA_HOST_PREFIX", netdata_configured_host_prefix, 1); { @@ -899,6 +898,7 @@ void set_global_environment() analytics_set_data(&analytics_data.netdata_config_use_private_registry, "null"); analytics_set_data(&analytics_data.netdata_config_oom_score, "null"); analytics_set_data(&analytics_data.netdata_prebuilt_distro, "null"); + analytics_set_data(&analytics_data.netdata_fail_reason, "null"); analytics_data.prometheus_hits = 0; analytics_data.shell_hits = 0; @@ -920,16 +920,14 @@ void set_global_environment() freez(default_port); // set the path we need - char path[1024 + 1], *p = getenv("PATH"); - if (!p) - p = "/bin:/usr/bin"; - snprintfz(path, 1024, "%s:%s", p, "/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"); + char path[4096], *p = getenv("PATH"); + if (!p) p = "/bin:/usr/bin"; + snprintfz(path, sizeof(path), "%s:%s", p, "/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"); setenv("PATH", config_get(CONFIG_SECTION_ENV_VARS, "PATH", path), 1); // python options p = getenv("PYTHONPATH"); - if (!p) - p = ""; + if (!p) p = ""; setenv("PYTHONPATH", config_get(CONFIG_SECTION_ENV_VARS, "PYTHONPATH", p), 1); // disable buffering for python plugins @@ -939,41 +937,51 @@ void set_global_environment() setenv("LC_ALL", "C", 1); } -void send_statistics(const char *action, const char *action_result, const char *action_data) -{ +void send_statistics(const char *action, const char *action_result, const char *action_data) { static char *as_script; if (netdata_anonymous_statistics_enabled == -1) { char *optout_file = mallocz( sizeof(char) * (strlen(netdata_configured_user_config_dir) + strlen(".opt-out-from-anonymous-statistics") + 2)); + sprintf(optout_file, "%s/%s", netdata_configured_user_config_dir, ".opt-out-from-anonymous-statistics"); + if (likely(access(optout_file, R_OK) != 0)) { as_script = mallocz( sizeof(char) * (strlen(netdata_configured_primary_plugins_dir) + strlen("anonymous-statistics.sh") + 2)); + sprintf(as_script, "%s/%s", netdata_configured_primary_plugins_dir, "anonymous-statistics.sh"); + if (unlikely(access(as_script, R_OK) != 0)) { netdata_anonymous_statistics_enabled = 0; - netdata_log_info("Anonymous statistics script %s not found.", as_script); + + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "Statistics script '%s' not found.", + as_script); + freez(as_script); - } else { - netdata_anonymous_statistics_enabled = 1; } - } else { + else + netdata_anonymous_statistics_enabled = 1; + } + else { netdata_anonymous_statistics_enabled = 0; as_script = NULL; } + freez(optout_file); } - if (!netdata_anonymous_statistics_enabled) - return; - if (!action) + + if (!netdata_anonymous_statistics_enabled || !action) return; + if (!action_result) action_result = ""; if (!action_data) action_data = ""; + char *command_to_run = mallocz( sizeof(char) * (strlen(action) + strlen(action_result) + strlen(action_data) + strlen(as_script) + analytics_data.data_length + (ANALYTICS_NO_OF_ITEMS * 3) + 15)); @@ -981,7 +989,7 @@ void send_statistics(const char *action, const char *action_result, const char * sprintf( command_to_run, - "%s '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' 
'%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' ", + "%s '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' ", as_script, action, action_result, @@ -1024,9 +1032,12 @@ void send_statistics(const char *action, const char *action_result, const char * analytics_data.netdata_config_is_private_registry, analytics_data.netdata_config_use_private_registry, analytics_data.netdata_config_oom_score, - analytics_data.netdata_prebuilt_distro); + analytics_data.netdata_prebuilt_distro, + analytics_data.netdata_fail_reason); - netdata_log_info("%s '%s' '%s' '%s'", as_script, action, action_result, action_data); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "%s '%s' '%s' '%s'", + as_script, action, action_result, action_data); FILE *fp_child_input; FILE *fp_child_output = netdata_popen(command_to_run, &command_pid, &fp_child_input); @@ -1035,11 +1046,21 @@ void send_statistics(const char *action, const char *action_result, const char * char *s = fgets(buffer, 4, fp_child_output); int exit_code = netdata_pclose(fp_child_input, fp_child_output, command_pid); if (exit_code) - netdata_log_error("Execution of anonymous statistics script returned %d.", exit_code); - if (s && strncmp(buffer, "200", 3)) - netdata_log_error("Execution of anonymous statistics script returned http code %s.", buffer); - } else { - netdata_log_error("Failed to run anonymous statistics script %s.", as_script); + + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "Statistics script returned error: %d", + exit_code); + + if (s && strncmp(buffer, "200", 3) != 0) + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "Statistics script returned http code: %s", + buffer); + } + else + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "Failed to run statistics script: %s.", + as_script); + freez(command_to_run); } diff --git a/daemon/analytics.h b/daemon/analytics.h index 34418316fcf776..0a5cc458d75208 100644 --- a/daemon/analytics.h +++ b/daemon/analytics.h @@ -18,7 +18,7 @@ #define ANALYTICS_MAX_DASHBOARD_HITS 255 /* Needed to calculate the space needed for parameters */ -#define ANALYTICS_NO_OF_ITEMS 39 +#define ANALYTICS_NO_OF_ITEMS 40 struct analytics_data { char *netdata_config_stream_enabled; @@ -60,6 +60,7 @@ struct analytics_data { char *netdata_config_use_private_registry; char *netdata_config_oom_score; char *netdata_prebuilt_distro; + char *netdata_fail_reason; size_t data_length; diff --git a/daemon/anonymous-statistics.sh.in b/daemon/anonymous-statistics.sh.in index 6b27dfea42204c..d12e7e32ae3b1f 100755 --- a/daemon/anonymous-statistics.sh.in +++ b/daemon/anonymous-statistics.sh.in @@ -68,6 +68,7 @@ NETDATA_IS_PRIVATE_REGISTRY="${39}" NETDATA_USE_PRIVATE_REGISTRY="${40}" NETDATA_CONFIG_OOM_SCORE="${41}" NETDATA_PREBUILT_DISTRO="${42}" +NETDATA_FAIL_REASON="${43}" [ -z "$NETDATA_REGISTRY_UNIQUE_ID" ] && NETDATA_REGISTRY_UNIQUE_ID="00000000-0000-0000-0000-000000000000" @@ -175,7 +176,8 @@ REQ_BODY="$(cat << EOF "mirrored_host_count": ${NETDATA_MIRRORED_HOST_COUNT}, "mirrored_hosts_reachable": ${NETDATA_MIRRORED_HOSTS_REACHABLE}, "mirrored_hosts_unreachable": ${NETDATA_MIRRORED_HOSTS_UNREACHABLE}, - "exporting_connectors": ${NETDATA_EXPORTING_CONNECTORS} + "exporting_connectors": ${NETDATA_EXPORTING_CONNECTORS}, + "netdata_fail_reason": ${NETDATA_FAIL_REASON} } } EOF diff --git a/daemon/buildinfo.c b/daemon/buildinfo.c index 4bc1e72a4e9f87..41af56af89dbb6 100644 --- a/daemon/buildinfo.c 
+++ b/daemon/buildinfo.c @@ -48,6 +48,7 @@ typedef enum __attribute__((packed)) { BIB_FEATURE_CLOUD, BIB_FEATURE_HEALTH, BIB_FEATURE_STREAMING, + BIB_FEATURE_BACKFILLING, BIB_FEATURE_REPLICATION, BIB_FEATURE_STREAMING_COMPRESSION, BIB_FEATURE_CONTEXTS, @@ -66,6 +67,7 @@ typedef enum __attribute__((packed)) { BIB_CONNECTIVITY_NATIVE_HTTPS, BIB_CONNECTIVITY_TLS_HOST_VERIFY, BIB_LIB_LZ4, + BIB_LIB_ZSTD, BIB_LIB_ZLIB, BIB_LIB_JUDY, BIB_LIB_DLIB, @@ -99,6 +101,7 @@ typedef enum __attribute__((packed)) { BIB_PLUGIN_SLABINFO, BIB_PLUGIN_XEN, BIB_PLUGIN_XEN_VBD_ERROR, + BIB_PLUGIN_LOGS_MANAGEMENT, BIB_EXPORT_AWS_KINESIS, BIB_EXPORT_GCP_PUBSUB, BIB_EXPORT_MONGOC, @@ -340,7 +343,7 @@ static struct { .json = "cpu_frequency", .value = "unknown", }, - [BIB_HW_RAM_SIZE] = { + [BIB_HW_ARCHITECTURE] = { .category = BIC_HARDWARE, .type = BIT_STRING, .analytics = NULL, @@ -348,7 +351,7 @@ static struct { .json = "cpu_architecture", .value = "unknown", }, - [BIB_HW_DISK_SPACE] = { + [BIB_HW_RAM_SIZE] = { .category = BIC_HARDWARE, .type = BIT_STRING, .analytics = NULL, @@ -356,7 +359,7 @@ static struct { .json = "ram", .value = "unknown", }, - [BIB_HW_ARCHITECTURE] = { + [BIB_HW_DISK_SPACE] = { .category = BIC_HARDWARE, .type = BIT_STRING, .analytics = NULL, @@ -484,6 +487,14 @@ static struct { .json = "streaming", .value = NULL, }, + [BIB_FEATURE_BACKFILLING] = { + .category = BIC_FEATURE, + .type = BIT_BOOLEAN, + .analytics = NULL, + .print = "Back-filling (of higher database tiers)", + .json = "back-filling", + .value = NULL, + }, [BIB_FEATURE_REPLICATION] = { .category = BIC_FEATURE, .type = BIT_BOOLEAN, @@ -498,7 +509,7 @@ static struct { .analytics = "Stream Compression", .print = "Streaming and Replication Compression", .json = "stream-compression", - .value = "none", + .value = NULL, }, [BIB_FEATURE_CONTEXTS] = { .category = BIC_FEATURE, @@ -628,6 +639,14 @@ static struct { .json = "lz4", .value = NULL, }, + [BIB_LIB_ZSTD] = { + .category = BIC_LIBS, + .type = BIT_BOOLEAN, + .analytics = NULL, + .print = "ZSTD (fast, lossless compression algorithm)", + .json = "zstd", + .value = NULL, + }, [BIB_LIB_ZLIB] = { .category = BIC_LIBS, .type = BIT_BOOLEAN, @@ -893,6 +912,14 @@ static struct { .json = "xen-vbd-error", .value = NULL, }, + [BIB_PLUGIN_LOGS_MANAGEMENT] = { + .category = BIC_PLUGINS, + .type = BIT_BOOLEAN, + .analytics = "Logs Management", + .print = "Logs Management", + .json = "logs-management", + .value = NULL, + }, [BIB_EXPORT_MONGOC] = { .category = BIC_EXPORTERS, .type = BIT_BOOLEAN, @@ -1029,6 +1056,23 @@ static void build_info_set_value(BUILD_INFO_SLOT slot, const char *value) { BUILD_INFO[slot].value = value; } +static void build_info_append_value(BUILD_INFO_SLOT slot, const char *value) { + size_t size = BUILD_INFO[slot].value ? 
strlen(BUILD_INFO[slot].value) + 1 : 0; + size += strlen(value); + char buf[size + 1]; + + if(BUILD_INFO[slot].value) { + strcpy(buf, BUILD_INFO[slot].value); + strcat(buf, " "); + strcat(buf, value); + } + else + strcpy(buf, value); + + freez((void *)BUILD_INFO[slot].value); + BUILD_INFO[slot].value = strdupz(buf); +} + static void build_info_set_value_strdupz(BUILD_INFO_SLOT slot, const char *value) { if(!value) value = ""; build_info_set_value(slot, strdupz(value)); @@ -1075,14 +1119,21 @@ __attribute__((constructor)) void initialize_build_info(void) { build_info_set_status(BIB_FEATURE_HEALTH, true); build_info_set_status(BIB_FEATURE_STREAMING, true); + build_info_set_status(BIB_FEATURE_BACKFILLING, true); build_info_set_status(BIB_FEATURE_REPLICATION, true); -#ifdef ENABLE_RRDPUSH_COMPRESSION build_info_set_status(BIB_FEATURE_STREAMING_COMPRESSION, true); -#ifdef ENABLE_LZ4 - build_info_set_value(BIB_FEATURE_STREAMING_COMPRESSION, "lz4"); + +#ifdef ENABLE_BROTLI + build_info_append_value(BIB_FEATURE_STREAMING_COMPRESSION, "brotli"); #endif +#ifdef ENABLE_ZSTD + build_info_append_value(BIB_FEATURE_STREAMING_COMPRESSION, "zstd"); #endif +#ifdef ENABLE_LZ4 + build_info_append_value(BIB_FEATURE_STREAMING_COMPRESSION, "lz4"); +#endif + build_info_append_value(BIB_FEATURE_STREAMING_COMPRESSION, "gzip"); build_info_set_status(BIB_FEATURE_CONTEXTS, true); build_info_set_status(BIB_FEATURE_TIERING, true); @@ -1117,6 +1168,9 @@ __attribute__((constructor)) void initialize_build_info(void) { #ifdef ENABLE_LZ4 build_info_set_status(BIB_LIB_LZ4, true); #endif +#ifdef ENABLE_ZSTD + build_info_set_status(BIB_LIB_ZSTD, true); +#endif build_info_set_status(BIB_LIB_ZLIB, true); @@ -1198,6 +1252,9 @@ __attribute__((constructor)) void initialize_build_info(void) { #ifdef HAVE_XENSTAT_VBD_ERROR build_info_set_status(BIB_PLUGIN_XEN_VBD_ERROR, true); #endif +#ifdef ENABLE_LOGSMANAGEMENT + build_info_set_status(BIB_PLUGIN_LOGS_MANAGEMENT, true); +#endif build_info_set_status(BIB_EXPORT_PROMETHEUS_EXPORTER, true); build_info_set_status(BIB_EXPORT_GRAPHITE, true); @@ -1234,7 +1291,7 @@ __attribute__((constructor)) void initialize_build_info(void) { // ---------------------------------------------------------------------------- // system info -int get_system_info(struct rrdhost_system_info *system_info, bool log); +int get_system_info(struct rrdhost_system_info *system_info); static void populate_system_info(void) { static bool populated = false; static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; @@ -1257,7 +1314,7 @@ static void populate_system_info(void) { } else { system_info = callocz(1, sizeof(struct rrdhost_system_info)); - get_system_info(system_info, false); + get_system_info(system_info); free_system_info = true; } diff --git a/daemon/commands.c b/daemon/commands.c index a8afb5a0017bb7..ed544224ec91bf 100644 --- a/daemon/commands.c +++ b/daemon/commands.c @@ -142,10 +142,10 @@ static cmd_status_t cmd_reload_health_execute(char *args, char **message) (void)args; (void)message; - error_log_limit_unlimited(); + nd_log_limits_unlimited(); netdata_log_info("COMMAND: Reloading HEALTH configuration."); health_reload(); - error_log_limit_reset(); + nd_log_limits_reset(); return CMD_STATUS_SUCCESS; } @@ -155,11 +155,11 @@ static cmd_status_t cmd_save_database_execute(char *args, char **message) (void)args; (void)message; - error_log_limit_unlimited(); + nd_log_limits_unlimited(); netdata_log_info("COMMAND: Saving databases."); rrdhost_save_all(); netdata_log_info("COMMAND: Databases saved."); - 
error_log_limit_reset(); + nd_log_limits_reset(); return CMD_STATUS_SUCCESS; } @@ -169,10 +169,9 @@ static cmd_status_t cmd_reopen_logs_execute(char *args, char **message) (void)args; (void)message; - error_log_limit_unlimited(); - netdata_log_info("COMMAND: Reopening all log files."); - reopen_all_log_files(); - error_log_limit_reset(); + nd_log_limits_unlimited(); + nd_log_reopen_log_files(); + nd_log_limits_reset(); return CMD_STATUS_SUCCESS; } @@ -182,7 +181,7 @@ static cmd_status_t cmd_exit_execute(char *args, char **message) (void)args; (void)message; - error_log_limit_unlimited(); + nd_log_limits_unlimited(); netdata_log_info("COMMAND: Cleaning up to exit."); netdata_cleanup_and_exit(0); exit(0); diff --git a/daemon/common.h b/daemon/common.h index 4a3905924c17dd..b1739879f7b8f6 100644 --- a/daemon/common.h +++ b/daemon/common.h @@ -28,6 +28,7 @@ #define config_generate(buffer, only_changed) appconfig_generate(&netdata_config, buffer, only_changed) +#define config_section_destroy(section) appconfig_section_destroy_non_loaded(&netdata_config, section) #define config_section_option_destroy(section, name) appconfig_section_option_destroy_non_loaded(&netdata_config, section, name) // ---------------------------------------------------------------------------- diff --git a/daemon/config/README.md b/daemon/config/README.md index bc5a5885c10f18..11ba2a1bc7de1b 100644 --- a/daemon/config/README.md +++ b/daemon/config/README.md @@ -72,40 +72,40 @@ Please note that your data history will be lost if you have modified `history` p ### [global] section options -| setting | default | info | -|:-------------------------------------:|:-------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| process scheduling policy | `keep` | See [Netdata process scheduling policy](https://github.com/netdata/netdata/blob/master/daemon/README.md#netdata-process-scheduling-policy) | -| OOM score | `0` | | -| glibc malloc arena max for plugins | `1` | See [Virtual memory](https://github.com/netdata/netdata/blob/master/daemon/README.md#virtual-memory). | -| glibc malloc arena max for Netdata | `1` | See [Virtual memory](https://github.com/netdata/netdata/blob/master/daemon/README.md#virtual-memory). | -| hostname | auto-detected | The hostname of the computer running Netdata. | -| host access prefix | empty | This is used in docker environments where /proc, /sys, etc have to be accessed via another path. You may also have to set SYS_PTRACE capability on the docker for this work. Check [issue 43](https://github.com/netdata/netdata/issues/43). | -| timezone | auto-detected | The timezone retrieved from the environment variable | -| run as user | `netdata` | The user Netdata will run as. 
| -| pthread stack size | auto-detected | | +| setting | default | info | +|:----------------------------------:|:-------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| process scheduling policy | `keep` | See [Netdata process scheduling policy](https://github.com/netdata/netdata/blob/master/daemon/README.md#netdata-process-scheduling-policy) | +| OOM score | `0` | | +| glibc malloc arena max for plugins | `1` | See [Virtual memory](https://github.com/netdata/netdata/blob/master/daemon/README.md#virtual-memory). | +| glibc malloc arena max for Netdata | `1` | See [Virtual memory](https://github.com/netdata/netdata/blob/master/daemon/README.md#virtual-memory). | +| hostname | auto-detected | The hostname of the computer running Netdata. | +| host access prefix | empty | This is used in docker environments where /proc, /sys, etc have to be accessed via another path. You may also have to set SYS_PTRACE capability on the docker for this work. Check [issue 43](https://github.com/netdata/netdata/issues/43). | +| timezone | auto-detected | The timezone retrieved from the environment variable | +| run as user | `netdata` | The user Netdata will run as. | +| pthread stack size | auto-detected | | ### [db] section options -| setting | default | info | -|:---------------------------------------------:|:----------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| setting | default | info | +|:---------------------------------------------:|:----------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | mode | `dbengine` | `dbengine`: The default for long-term metrics storage with efficient RAM and disk usage. Can be extended with `dbengine page cache size MB` and `dbengine disk space MB`.
`save`: Netdata will save its round robin database on exit and load it on startup.
`map`: Cache files will be updated in real-time. Not ideal for systems with high load or slow disks (check `man mmap`).
`ram`: The round-robin database will be temporary and it will be lost when Netdata exits.
`alloc`: Similar to `ram`, but can significantly reduce memory usage, when combined with a low retention and does not support KSM.
`none`: Disables the database at this host, and disables health monitoring entirely, as that requires a database of metrics. Not to be used together with streaming. | -| retention | `3600` | Used with `mode = save/map/ram/alloc`, not the default `mode = dbengine`. This number reflects the number of entries the `netdata` daemon will by default keep in memory for each chart dimension. Check [Memory Requirements](https://github.com/netdata/netdata/blob/master/database/README.md) for more information. | -| storage tiers | `1` | The number of storage tiers you want to have in your dbengine. Check the tiering mechanism in the [dbengine's reference](https://github.com/netdata/netdata/blob/master/database/engine/README.md#tiering). You can have up to 5 tiers of data (including the _Tier 0_). This number ranges between 1 and 5. | -| dbengine page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated to caching for _Tier 0_ Netdata metric values. | -| dbengine tier **`N`** page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated for caching Netdata metric values of the **`N`** tier.
`N belongs to [1..4]` || - | dbengine disk space MB | `256` | Determines the amount of disk space in MiB that is dedicated to storing _Tier 0_ Netdata metric values and all related metadata describing them. This option is available **only for legacy configuration** (`Agent v1.23.2 and prior`). | -| dbengine multihost disk space MB | `256` | Same functionality as `dbengine disk space MB`, but includes support for storing metrics streamed to a parent node by its children. Can be used in single-node environments as well. This setting is only for _Tier 0_ metrics. | -| dbengine tier **`N`** multihost disk space MB | `256` | Same functionality as `dbengine multihost disk space MB`, but stores metrics of the **`N`** tier (both parent node and its children). Can be used in single-node environments as well.
`N belongs to [1..4]` | -| update every | `1` | The frequency in seconds, for data collection. For more information see the [performance guide](https://github.com/netdata/netdata/blob/master/docs/guides/configure/performance.md). These metrics stored as _Tier 0_ data. Explore the tiering mechanism in the [dbengine's reference](https://github.com/netdata/netdata/blob/master/database/engine/README.md#tiering). | -| dbengine tier **`N`** update every iterations | `60` | The down sampling value of each tier from the previous one. For each Tier, the greater by one Tier has N (equal to 60 by default) less data points of any metric it collects. This setting can take values from `2` up to `255`.
`N belongs to [1..4]` | -| dbengine tier **`N`** back fill | `New` | Specifies the strategy of recreating missing data on each Tier from the exact lower Tier.
`New`: Sees the latest point on each Tier and save new points to it only if the exact lower Tier has available points for it's observation window (`dbengine tier N update every iterations` window).
`none`: No back filling is applied.
`N belongs to [1..4]` | -| memory deduplication (ksm) | `yes` | When set to `yes`, Netdata will offer its in-memory round robin database and the dbengine page cache to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](https://github.com/netdata/netdata/blob/master/database/README.md#ksm) | -| cleanup obsolete charts after secs | `3600` | See [monitoring ephemeral containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions | -| gap when lost iterations above | `1` | | -| cleanup orphan hosts after secs | `3600` | How long to wait until automatically removing from the DB a remote Netdata host (child) that is no longer sending data. | -| delete obsolete charts files | `yes` | See [monitoring ephemeral containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also affects the deletion of files for obsolete dimensions | -| delete orphan hosts files | `yes` | Set to `no` to disable non-responsive host removal. | -| enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. | +| retention | `3600` | Used with `mode = save/map/ram/alloc`, not the default `mode = dbengine`. This number reflects the number of entries the `netdata` daemon will by default keep in memory for each chart dimension. Check [Memory Requirements](https://github.com/netdata/netdata/blob/master/database/README.md) for more information. | +| storage tiers | `1` | The number of storage tiers you want to have in your dbengine. Check the tiering mechanism in the [dbengine's reference](https://github.com/netdata/netdata/blob/master/database/engine/README.md#tiering). You can have up to 5 tiers of data (including the _Tier 0_). This number ranges between 1 and 5. | +| dbengine page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated to caching for _Tier 0_ Netdata metric values. | +| dbengine tier **`N`** page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated for caching Netdata metric values of the **`N`** tier.
`N belongs to [1..4]` | +| dbengine disk space MB | `256` | Determines the amount of disk space in MiB that is dedicated to storing _Tier 0_ Netdata metric values and all related metadata describing them. This option is available **only for legacy configuration** (`Agent v1.23.2 and prior`). | +| dbengine multihost disk space MB | `256` | Same functionality as `dbengine disk space MB`, but includes support for storing metrics streamed to a parent node by its children. Can be used in single-node environments as well. This setting is only for _Tier 0_ metrics. | +| dbengine tier **`N`** multihost disk space MB | `256` | Same functionality as `dbengine multihost disk space MB`, but stores metrics of the **`N`** tier (both parent node and its children). Can be used in single-node environments as well.
`N belongs to [1..4]` | +| update every | `1` | The frequency of data collection, in seconds. For more information see the [performance guide](https://github.com/netdata/netdata/blob/master/docs/guides/configure/performance.md). These metrics are stored as _Tier 0_ data. Explore the tiering mechanism in the [dbengine's reference](https://github.com/netdata/netdata/blob/master/database/engine/README.md#tiering). | +| dbengine tier **`N`** update every iterations | `60` | The down-sampling factor of each tier relative to the previous one. Each tier stores N (60 by default) times fewer data points than the tier below it, for any metric it collects. This setting can take values from `2` up to `255`.<br />
`N belongs to [1..4]` | +| dbengine tier **`N`** back fill | `New` | Specifies the strategy of recreating missing data on each Tier from the exact lower Tier.
`New`: Checks the latest point on each Tier and saves new points to it only if the exact lower Tier has available points for its observation window (`dbengine tier N update every iterations` window).<br />
`none`: No back filling is applied.
`N belongs to [1..4]` | +| memory deduplication (ksm) | `yes` | When set to `yes`, Netdata will offer its in-memory round robin database and the dbengine page cache to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](https://github.com/netdata/netdata/blob/master/database/README.md#ksm) | +| cleanup obsolete charts after secs | `3600` | See [monitoring ephemeral containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions | +| gap when lost iterations above | `1` | | +| cleanup orphan hosts after secs | `3600` | How long to wait until automatically removing from the DB a remote Netdata host (child) that is no longer sending data. | +| delete obsolete charts files | `yes` | See [monitoring ephemeral containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also affects the deletion of files for obsolete dimensions | +| delete orphan hosts files | `yes` | Set to `no` to disable non-responsive host removal. | +| enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. | > ### Info > @@ -113,32 +113,33 @@ Please note that your data history will be lost if you have modified `history` p ### [directories] section options -| setting | default | info | -|:-------------------:|:------------------------------------------------------------------:|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| config | `/etc/netdata` | The directory configuration files are kept. | -| stock config | `/usr/lib/netdata/conf.d` | | -| log | `/var/log/netdata` | The directory in which the [log files](https://github.com/netdata/netdata/blob/master/daemon/README.md#log-files) are kept. | -| web | `/usr/share/netdata/web` | The directory the web static files are kept. | -| cache | `/var/cache/netdata` | The directory the memory database will be stored if and when Netdata exits. Netdata will re-read the database when it will start again, to continue from the same point. | -| lib | `/var/lib/netdata` | Contains the alarm log and the Netdata instance GUID. | -| home | `/var/cache/netdata` | Contains the db files for the collected metrics. | -| lock | `/var/lib/netdata/lock` | Contains the data collectors lock files. | -| plugins | `"/usr/libexec/netdata/plugins.d" "/etc/netdata/custom-plugins.d"` | The directory plugin programs are kept. This setting supports multiple directories, space separated. If any directory path contains spaces, enclose it in single or double quotes. 
| -| health config | `/etc/netdata/health.d` | The directory containing the user alarm configuration files, to override the stock configurations | -| stock health config | `/usr/lib/netdata/conf.d/health.d` | Contains the stock alarm configuration files for each collector | -| registry | `/opt/netdata/var/lib/netdata/registry` | Contains the [registry](https://github.com/netdata/netdata/blob/master/registry/README.md) database and GUID that uniquely identifies each Netdata Agent | +| setting | default | info | +|:-------------------:|:------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| config | `/etc/netdata` | The directory configuration files are kept. | +| stock config | `/usr/lib/netdata/conf.d` | | +| log | `/var/log/netdata` | The directory in which the [log files](https://github.com/netdata/netdata/blob/master/daemon/README.md#log-files) are kept. | +| web | `/usr/share/netdata/web` | The directory the web static files are kept. | +| cache | `/var/cache/netdata` | The directory the memory database will be stored if and when Netdata exits. Netdata will re-read the database when it will start again, to continue from the same point. | +| lib | `/var/lib/netdata` | Contains the alert log and the Netdata instance GUID. | +| home | `/var/cache/netdata` | Contains the db files for the collected metrics. | +| lock | `/var/lib/netdata/lock` | Contains the data collectors lock files. | +| plugins | `"/usr/libexec/netdata/plugins.d" "/etc/netdata/custom-plugins.d"` | The directory plugin programs are kept. This setting supports multiple directories, space separated. If any directory path contains spaces, enclose it in single or double quotes. | +| health config | `/etc/netdata/health.d` | The directory containing the user alert configuration files, to override the stock configurations | +| stock health config | `/usr/lib/netdata/conf.d/health.d` | Contains the stock alert configuration files for each collector | +| registry | `/opt/netdata/var/lib/netdata/registry` | Contains the [registry](https://github.com/netdata/netdata/blob/master/registry/README.md) database and GUID that uniquely identifies each Netdata Agent | ### [logs] section options -| setting | default | info | -|:----------------------------------:|:-----------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| setting | default | info | +|:----------------------------------:|:-----------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | debug flags | `0x0000000000000000` | Bitmap of debug options to enable. For more information check [Tracing Options](https://github.com/netdata/netdata/blob/master/daemon/README.md#debugging). | | debug | `/var/log/netdata/debug.log` | The filename to save debug information. This file will not be created if debugging is not enabled. 
You can also set it to `syslog` to send the debug messages to syslog, or `none` to disable this log. For more information check [Tracing Options](https://github.com/netdata/netdata/blob/master/daemon/README.md#debugging). | -| error | `/var/log/netdata/error.log` | The filename to save error messages for Netdata daemon and all plugins (`stderr` is sent here for all Netdata programs, including the plugins). You can also set it to `syslog` to send the errors to syslog, or `none` to disable this log. | -| access | `/var/log/netdata/access.log` | The filename to save the log of web clients accessing Netdata charts. You can also set it to `syslog` to send the access log to syslog, or `none` to disable this log. | -| facility | `daemon` | A facility keyword is used to specify the type of system that is logging the message. | -| errors flood protection period | `1200` | Length of period (in sec) during which the number of errors should not exceed the `errors to trigger flood protection`. | -| errors to trigger flood protection | `200` | Number of errors written to the log in `errors flood protection period` sec before flood protection is activated. | +| error | `/var/log/netdata/error.log` | The filename to save error messages for Netdata daemon and all plugins (`stderr` is sent here for all Netdata programs, including the plugins). You can also set it to `syslog` to send the errors to syslog, or `none` to disable this log. | +| access | `/var/log/netdata/access.log` | The filename to save the log of web clients accessing Netdata charts. You can also set it to `syslog` to send the access log to syslog, or `none` to disable this log. | +| facility | `daemon` | A facility keyword is used to specify the type of system that is logging the message. | +| errors flood protection period | `1200` | Length of period (in sec) during which the number of errors should not exceed the `errors to trigger flood protection`. | +| errors to trigger flood protection | `200` | Number of errors written to the log in `errors flood protection period` sec before flood protection is activated. | +| severity level | `info` | Controls which log messages are logged, with error being the most important. Supported values: `info` and `error`. | ### [environment variables] section options @@ -163,20 +164,20 @@ Please note that your data history will be lost if you have modified `history` p This section controls the general behavior of the health monitoring capabilities of Netdata. -Specific alarms are configured in per-collector config files under the `health.d` directory. For more info, see [health +Specific alerts are configured in per-collector config files under the `health.d` directory. For more info, see [health monitoring](https://github.com/netdata/netdata/blob/master/health/README.md). -[Alarm notifications](https://github.com/netdata/netdata/blob/master/health/notifications/README.md) are configured in `health_alarm_notify.conf`. +[Alert notifications](https://github.com/netdata/netdata/blob/master/health/notifications/README.md) are configured in `health_alarm_notify.conf`. 
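To make the `[logs]` table above more concrete, a minimal `netdata.conf` fragment using those options might look like the sketch below. This is illustrative only: the option names and defaults are taken directly from the table (including the new `severity level` setting), and the `backwards_compatible_config()` change later in this patch maps `severity level` to the newer `level` key, so either name should resolve to the same setting.

```
[logs]
    # send daemon/plugin error output to a file, 'syslog', or 'none'
    error = /var/log/netdata/error.log
    # controls which log messages are logged: 'info' or 'error'
    severity level = info
    # flood protection: at most 200 error lines per 1200-second window
    errors flood protection period = 1200
    errors to trigger flood protection = 200
```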
-| setting | default | info | -|:----------------------------------------------:|:------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| enabled | `yes` | Set to `no` to disable all alarms and notifications | -| in memory max health log entries | 1000 | Size of the alarm history held in RAM | -| script to execute on alarm | `/usr/libexec/netdata/plugins.d/alarm-notify.sh` | The script that sends alarm notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`). | -| run at least every seconds | `10` | Controls how often all alarm conditions should be evaluated. | -| postpone alarms during hibernation for seconds | `60` | Prevents false alarms. May need to be increased if you get alarms during hibernation. | -| health log history | `432000` | Specifies the history of alarm events (in seconds) kept in the agent's sqlite database. | -| enabled alarms | * | Defines which alarms to load from both user and stock directories. This is a [simple pattern](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) list of alarm or template names. Can be used to disable specific alarms. For example, `enabled alarms = !oom_kill *` will load all alarms except `oom_kill`. | +| setting | default | info | +|:----------------------------------------------:|:------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| enabled | `yes` | Set to `no` to disable all alerts and notifications | +| in memory max health log entries | 1000 | Size of the alert history held in RAM | +| script to execute on alarm | `/usr/libexec/netdata/plugins.d/alarm-notify.sh` | The script that sends alert notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`). | +| run at least every seconds | `10` | Controls how often all alert conditions should be evaluated. | +| postpone alarms during hibernation for seconds | `60` | Prevents false alerts. May need to be increased if you get alerts during hibernation. | +| health log history | `432000` | Specifies the history of alert events (in seconds) kept in the agent's sqlite database. | +| enabled alarms | * | Defines which alerts to load from both user and stock directories. This is a [simple pattern](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) list of alert or template names. Can be used to disable specific alerts. For example, `enabled alarms = !oom_kill *` will load all alerts except `oom_kill`. | ### [web] section options @@ -222,10 +223,10 @@ for all internal Netdata plugins. 
External plugins will have only 2 options at `netdata.conf`: -| setting | default | info | -|:---------------:|:--------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------| +| setting | default | info | +|:---------------:|:--------------------------------------------:|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | update every | the value of `[global].update every` setting | The frequency in seconds the plugin should collect values. For more information check the [performance guide](https://github.com/netdata/netdata/blob/master/docs/guides/configure/performance.md). | -| command options | - | Additional command line options to pass to the plugin. | | +| command options | - | Additional command line options to pass to the plugin. | External plugins that need additional configuration may support a dedicated file in `/etc/netdata`. Check their documentation. diff --git a/daemon/daemon.c b/daemon/daemon.c index c7f0b51c6d5608..c82cb29edabb64 100644 --- a/daemon/daemon.c +++ b/daemon/daemon.c @@ -31,22 +31,6 @@ void get_netdata_execution_path(void) { dirname(netdata_exe_path); } -static void chown_open_file(int fd, uid_t uid, gid_t gid) { - if(fd == -1) return; - - struct stat buf; - - if(fstat(fd, &buf) == -1) { - netdata_log_error("Cannot fstat() fd %d", fd); - return; - } - - if((buf.st_uid != uid || buf.st_gid != gid) && S_ISREG(buf.st_mode)) { - if(fchown(fd, uid, gid) == -1) - netdata_log_error("Cannot fchown() fd %d.", fd); - } -} - static void fix_directory_file_permissions(const char *dirname, uid_t uid, gid_t gid, bool recursive) { char filename[FILENAME_MAX + 1]; @@ -124,9 +108,6 @@ int become_user(const char *username, int pid_fd) { uid_t uid = pw->pw_uid; gid_t gid = pw->pw_gid; - if (am_i_root) - netdata_log_info("I am root, so checking permissions"); - prepare_required_directories(uid, gid); if(pidfile[0]) { @@ -150,9 +131,9 @@ int become_user(const char *username, int pid_fd) { } } + nd_log_chown_log_files(uid, gid); chown_open_file(STDOUT_FILENO, uid, gid); chown_open_file(STDERR_FILENO, uid, gid); - chown_open_file(stdaccess_fd, uid, gid); chown_open_file(pid_fd, uid, gid); if(supplementary_groups && ngroups > 0) { @@ -229,7 +210,7 @@ static void oom_score_adj(void) { // check the environment char *s = getenv("OOMScoreAdjust"); if(!s || !*s) { - snprintfz(buf, 30, "%d", (int)wanted_score); + snprintfz(buf, sizeof(buf) - 1, "%d", (int)wanted_score); s = buf; } @@ -264,7 +245,7 @@ static void oom_score_adj(void) { int written = 0; int fd = open("/proc/self/oom_score_adj", O_WRONLY); if(fd != -1) { - snprintfz(buf, 30, "%d", (int)wanted_score); + snprintfz(buf, sizeof(buf) - 1, "%d", (int)wanted_score); ssize_t len = strlen(buf); if(len > 0 && write(fd, buf, (size_t)len) == len) written = 1; close(fd); diff --git a/daemon/event_loop.c b/daemon/event_loop.c index fb38791546d4b5..93bac97d0a6812 100644 --- a/daemon/event_loop.c +++ b/daemon/event_loop.c @@ -52,6 +52,7 @@ void register_libuv_worker_jobs() { worker_register_job_name(UV_EVENT_HOST_CONTEXT_LOAD, "metadata load host context"); worker_register_job_name(UV_EVENT_METADATA_STORE, "metadata store host"); worker_register_job_name(UV_EVENT_METADATA_CLEANUP, "metadata cleanup"); + worker_register_job_name(UV_EVENT_METADATA_ML_LOAD, 
"metadata load ml models"); // netdatacli worker_register_job_name(UV_EVENT_SCHEDULE_CMD, "schedule command"); diff --git a/daemon/event_loop.h b/daemon/event_loop.h index 1ff1c2c1cb816a..c1821c64617cc8 100644 --- a/daemon/event_loop.h +++ b/daemon/event_loop.h @@ -44,6 +44,7 @@ enum event_loop_job { UV_EVENT_HOST_CONTEXT_LOAD, UV_EVENT_METADATA_STORE, UV_EVENT_METADATA_CLEANUP, + UV_EVENT_METADATA_ML_LOAD, // netdatacli UV_EVENT_SCHEDULE_CMD, diff --git a/daemon/global_statistics.c b/daemon/global_statistics.c index ce8d41402601a2..9fb1df5f8ecffe 100644 --- a/daemon/global_statistics.c +++ b/daemon/global_statistics.c @@ -65,6 +65,11 @@ static struct global_statistics { uint64_t backfill_queries_made; uint64_t backfill_db_points_read; + uint64_t tier0_hot_gorilla_buffers; + + uint64_t tier0_disk_compressed_bytes; + uint64_t tier0_disk_uncompressed_bytes; + uint64_t db_points_stored_per_tier[RRD_STORAGE_TIERS]; } global_statistics = { @@ -80,6 +85,10 @@ static struct global_statistics { .api_data_queries_made = 0, .api_data_db_points_read = 0, .api_data_result_points_generated = 0, + + .tier0_hot_gorilla_buffers = 0, + .tier0_disk_compressed_bytes = 0, + .tier0_disk_uncompressed_bytes = 0, }; void global_statistics_rrdset_done_chart_collection_completed(size_t *points_read_per_tier_array) { @@ -108,6 +117,18 @@ void global_statistics_backfill_query_completed(size_t points_read) { __atomic_fetch_add(&global_statistics.backfill_db_points_read, points_read, __ATOMIC_RELAXED); } +void global_statistics_gorilla_buffer_add_hot() { + __atomic_fetch_add(&global_statistics.tier0_hot_gorilla_buffers, 1, __ATOMIC_RELAXED); +} + +void global_statistics_tier0_disk_compressed_bytes(uint32_t size) { + __atomic_fetch_add(&global_statistics.tier0_disk_compressed_bytes, size, __ATOMIC_RELAXED); +} + +void global_statistics_tier0_disk_uncompressed_bytes(uint32_t size) { + __atomic_fetch_add(&global_statistics.tier0_disk_uncompressed_bytes, size, __ATOMIC_RELAXED); +} + void global_statistics_rrdr_query_completed(size_t queries, uint64_t db_points_read, uint64_t result_points_generated, QUERY_SOURCE query_source) { switch(query_source) { case QUERY_SOURCE_API_DATA: @@ -210,6 +231,11 @@ static inline void global_statistics_copy(struct global_statistics *gs, uint8_t gs->backfill_queries_made = __atomic_load_n(&global_statistics.backfill_queries_made, __ATOMIC_RELAXED); gs->backfill_db_points_read = __atomic_load_n(&global_statistics.backfill_db_points_read, __ATOMIC_RELAXED); + gs->tier0_hot_gorilla_buffers = __atomic_load_n(&global_statistics.tier0_hot_gorilla_buffers, __ATOMIC_RELAXED); + + gs->tier0_disk_compressed_bytes = __atomic_load_n(&global_statistics.tier0_disk_compressed_bytes, __ATOMIC_RELAXED); + gs->tier0_disk_uncompressed_bytes = __atomic_load_n(&global_statistics.tier0_disk_uncompressed_bytes, __ATOMIC_RELAXED); + for(size_t tier = 0; tier < storage_tiers ;tier++) gs->db_points_stored_per_tier[tier] = __atomic_load_n(&global_statistics.db_points_stored_per_tier[tier], __ATOMIC_RELAXED); @@ -816,7 +842,7 @@ static void global_statistics_charts(void) { for(size_t tier = 0; tier < storage_tiers ;tier++) { char buf[30 + 1]; - snprintfz(buf, 30, "tier%zu", tier); + snprintfz(buf, sizeof(buf) - 1, "tier%zu", tier); rds[tier] = rrddim_add(st_points_stored, buf, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); } } @@ -828,6 +854,72 @@ static void global_statistics_charts(void) { } ml_update_global_statistics_charts(gs.ml_models_consulted); + + // ---------------------------------------------------------------- + 
+#ifdef ENABLE_DBENGINE + if (tier_page_type[0] == PAGE_GORILLA_METRICS) + { + static RRDSET *st_tier0_gorilla_pages = NULL; + static RRDDIM *rd_num_gorilla_pages = NULL; + + if (unlikely(!st_tier0_gorilla_pages)) { + st_tier0_gorilla_pages = rrdset_create_localhost( + "netdata" + , "tier0_gorilla_pages" + , NULL + , "tier0_gorilla_pages" + , NULL + , "Number of gorilla_pages" + , "count" + , "netdata" + , "stats" + , 131004 + , localhost->rrd_update_every + , RRDSET_TYPE_LINE + ); + + rd_num_gorilla_pages = rrddim_add(st_tier0_gorilla_pages, "count", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(st_tier0_gorilla_pages, rd_num_gorilla_pages, (collected_number)gs.tier0_hot_gorilla_buffers); + + rrdset_done(st_tier0_gorilla_pages); + } + + if (tier_page_type[0] == PAGE_GORILLA_METRICS) + { + static RRDSET *st_tier0_compression_info = NULL; + + static RRDDIM *rd_compressed_bytes = NULL; + static RRDDIM *rd_uncompressed_bytes = NULL; + + if (unlikely(!st_tier0_compression_info)) { + st_tier0_compression_info = rrdset_create_localhost( + "netdata" + , "tier0_compression_info" + , NULL + , "tier0_compression_info" + , NULL + , "Tier 0 compression info" + , "bytes" + , "netdata" + , "stats" + , 131005 + , localhost->rrd_update_every + , RRDSET_TYPE_LINE + ); + + rd_compressed_bytes = rrddim_add(st_tier0_compression_info, "compressed", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rd_uncompressed_bytes = rrddim_add(st_tier0_compression_info, "uncompressed", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(st_tier0_compression_info, rd_compressed_bytes, (collected_number)gs.tier0_disk_compressed_bytes); + rrddim_set_by_pointer(st_tier0_compression_info, rd_uncompressed_bytes, (collected_number)gs.tier0_disk_uncompressed_bytes); + + rrdset_done(st_tier0_compression_info); + } +#endif } // ---------------------------------------------------------------------------- @@ -1881,8 +1973,6 @@ static void dbengine2_statistics_charts(void) { static RRDDIM *rd_mrg_metrics = NULL; static RRDDIM *rd_mrg_acquired = NULL; static RRDDIM *rd_mrg_collected = NULL; - static RRDDIM *rd_mrg_with_retention = NULL; - static RRDDIM *rd_mrg_without_retention = NULL; static RRDDIM *rd_mrg_multiple_writers = NULL; if (unlikely(!st_mrg_metrics)) { @@ -1903,8 +1993,6 @@ static void dbengine2_statistics_charts(void) { rd_mrg_metrics = rrddim_add(st_mrg_metrics, "all", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); rd_mrg_acquired = rrddim_add(st_mrg_metrics, "acquired", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); rd_mrg_collected = rrddim_add(st_mrg_metrics, "collected", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_mrg_with_retention = rrddim_add(st_mrg_metrics, "with retention", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_mrg_without_retention = rrddim_add(st_mrg_metrics, "without retention", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); rd_mrg_multiple_writers = rrddim_add(st_mrg_metrics, "multi-collected", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); } priority++; @@ -1912,8 +2000,6 @@ static void dbengine2_statistics_charts(void) { rrddim_set_by_pointer(st_mrg_metrics, rd_mrg_metrics, (collected_number)mrg_stats.entries); rrddim_set_by_pointer(st_mrg_metrics, rd_mrg_acquired, (collected_number)mrg_stats.entries_referenced); rrddim_set_by_pointer(st_mrg_metrics, rd_mrg_collected, (collected_number)mrg_stats.writers); - rrddim_set_by_pointer(st_mrg_metrics, rd_mrg_with_retention, (collected_number)mrg_stats.entries_with_retention); - rrddim_set_by_pointer(st_mrg_metrics, rd_mrg_without_retention, 
(collected_number)mrg_stats.entries - (collected_number)mrg_stats.entries_with_retention); rrddim_set_by_pointer(st_mrg_metrics, rd_mrg_multiple_writers, (collected_number)mrg_stats.writers_conflicts); rrdset_done(st_mrg_metrics); @@ -2681,9 +2767,12 @@ static void dbengine2_statistics_charts(void) { static void update_strings_charts() { static RRDSET *st_ops = NULL, *st_entries = NULL, *st_mem = NULL; - static RRDDIM *rd_ops_inserts = NULL, *rd_ops_deletes = NULL, *rd_ops_searches = NULL, *rd_ops_duplications = NULL, *rd_ops_releases = NULL; - static RRDDIM *rd_entries_entries = NULL, *rd_entries_refs = NULL; + static RRDDIM *rd_ops_inserts = NULL, *rd_ops_deletes = NULL; + static RRDDIM *rd_entries_entries = NULL; static RRDDIM *rd_mem = NULL; +#ifdef NETDATA_INTERNAL_CHECKS + static RRDDIM *rd_entries_refs = NULL, *rd_ops_releases = NULL, *rd_ops_duplications = NULL, *rd_ops_searches = NULL; +#endif size_t inserts, deletes, searches, entries, references, memory, duplications, releases; @@ -2706,16 +2795,20 @@ static void update_strings_charts() { rd_ops_inserts = rrddim_add(st_ops, "inserts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_ops_deletes = rrddim_add(st_ops, "deletes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); +#ifdef NETDATA_INTERNAL_CHECKS rd_ops_searches = rrddim_add(st_ops, "searches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_ops_duplications = rrddim_add(st_ops, "duplications", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_ops_releases = rrddim_add(st_ops, "releases", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); +#endif } rrddim_set_by_pointer(st_ops, rd_ops_inserts, (collected_number)inserts); rrddim_set_by_pointer(st_ops, rd_ops_deletes, (collected_number)deletes); +#ifdef NETDATA_INTERNAL_CHECKS rrddim_set_by_pointer(st_ops, rd_ops_searches, (collected_number)searches); rrddim_set_by_pointer(st_ops, rd_ops_duplications, (collected_number)duplications); rrddim_set_by_pointer(st_ops, rd_ops_releases, (collected_number)releases); +#endif rrdset_done(st_ops); if (unlikely(!st_entries)) { @@ -2734,11 +2827,15 @@ static void update_strings_charts() { , RRDSET_TYPE_AREA); rd_entries_entries = rrddim_add(st_entries, "entries", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); +#ifdef NETDATA_INTERNAL_CHECKS rd_entries_refs = rrddim_add(st_entries, "references", NULL, 1, -1, RRD_ALGORITHM_ABSOLUTE); +#endif } rrddim_set_by_pointer(st_entries, rd_entries_entries, (collected_number)entries); +#ifdef NETDATA_INTERNAL_CHECKS rrddim_set_by_pointer(st_entries, rd_entries_refs, (collected_number)references); +#endif rrdset_done(st_entries); if (unlikely(!st_mem)) { @@ -2813,6 +2910,7 @@ struct dictionary_stats dictionary_stats_category_rrdhealth = { .name = "health" struct dictionary_stats dictionary_stats_category_functions = { .name = "functions" }; struct dictionary_stats dictionary_stats_category_replication = { .name = "replication" }; +#ifdef DICT_WITH_STATS struct dictionary_categories { struct dictionary_stats *stats; const char *family; @@ -3165,6 +3263,13 @@ static void update_dictionary_category_charts(struct dictionary_categories *c) { } } +static void dictionary_statistics(void) { + for(int i = 0; dictionary_categories[i].stats ;i++) { + update_dictionary_category_charts(&dictionary_categories[i]); + } +} +#endif // DICT_WITH_STATS + #ifdef NETDATA_TRACE_ALLOCATIONS struct memory_trace_data { @@ -3304,12 +3409,6 @@ static void malloc_trace_statistics(void) { } #endif -static void dictionary_statistics(void) { - for(int i = 0; dictionary_categories[i].stats ;i++) { - 
update_dictionary_category_charts(&dictionary_categories[i]); - } -} - // --------------------------------------------------------------------------------------------------------------------- // worker utilization @@ -3432,6 +3531,7 @@ static struct worker_utilization all_workers_utilization[] = { { .name = "TC", .family = "workers plugin tc", .priority = 1000000 }, { .name = "TIMEX", .family = "workers plugin timex", .priority = 1000000 }, { .name = "IDLEJITTER", .family = "workers plugin idlejitter", .priority = 1000000 }, + { .name = "LOGSMANAGPLG",.family = "workers plugin logs management", .priority = 1000000 }, { .name = "RRDCONTEXT", .family = "workers contexts", .priority = 1000000 }, { .name = "REPLICATION", .family = "workers replication sender", .priority = 1000000 }, { .name = "SERVICE", .family = "workers service", .priority = 1000000 }, @@ -4171,8 +4271,10 @@ void *global_statistics_main(void *ptr) worker_is_busy(WORKER_JOB_STRINGS); update_strings_charts(); +#ifdef DICT_WITH_STATS worker_is_busy(WORKER_JOB_DICTIONARIES); dictionary_statistics(); +#endif #ifdef NETDATA_TRACE_ALLOCATIONS worker_is_busy(WORKER_JOB_MALLOC_TRACE); diff --git a/daemon/global_statistics.h b/daemon/global_statistics.h index 7bdb153dd9692b..44717c6cf4efed 100644 --- a/daemon/global_statistics.h +++ b/daemon/global_statistics.h @@ -45,6 +45,11 @@ void global_statistics_sqlite3_query_completed(bool success, bool busy, bool loc void global_statistics_sqlite3_row_completed(void); void global_statistics_rrdset_done_chart_collection_completed(size_t *points_read_per_tier_array); +void global_statistics_gorilla_buffer_add_hot(); + +void global_statistics_tier0_disk_compressed_bytes(uint32_t size); +void global_statistics_tier0_disk_uncompressed_bytes(uint32_t size); + void global_statistics_web_request_completed(uint64_t dt, uint64_t bytes_received, uint64_t bytes_sent, diff --git a/daemon/main.c b/daemon/main.c index 6ddf57aa174bb8..7351bd65c34331 100644 --- a/daemon/main.c +++ b/daemon/main.c @@ -4,6 +4,8 @@ #include "buildinfo.h" #include "static_threads.h" +#include "database/engine/page_test.h" + #if defined(ENV32BIT) #warning COMPILING 32BIT NETDATA #endif @@ -313,7 +315,7 @@ void netdata_cleanup_and_exit(int ret) { const char *prev_msg = NULL; bool timeout = false; - error_log_limit_unlimited(); + nd_log_limits_unlimited(); netdata_log_info("NETDATA SHUTDOWN: initializing shutdown with code %d...", ret); send_statistics("EXIT", ret?"ERROR":"OK","-"); @@ -371,6 +373,10 @@ void netdata_cleanup_and_exit(int ret) { SERVICE_REPLICATION // replication has to be stopped after STREAMING, because it cleans up ARAL , 3 * USEC_PER_SEC); + delta_shutdown_time("prepare metasync shutdown"); + + metadata_sync_shutdown_prepare(); + delta_shutdown_time("disable ML detection and training threads"); ml_stop_threads(); @@ -396,10 +402,6 @@ void netdata_cleanup_and_exit(int ret) { rrdhost_cleanup_all(); - delta_shutdown_time("prepare metasync shutdown"); - - metadata_sync_shutdown_prepare(); - delta_shutdown_time("stop aclk threads"); timeout = !service_wait_exit( @@ -422,6 +424,13 @@ void netdata_cleanup_and_exit(int ret) { delta_shutdown_time("flush dbengine tiers"); for (size_t tier = 0; tier < storage_tiers; tier++) rrdeng_prepare_exit(multidb_ctx[tier]); + + for (size_t tier = 0; tier < storage_tiers; tier++) { + if (!multidb_ctx[tier]) + continue; + completion_wait_for(&multidb_ctx[tier]->quiesce.completion); + completion_destroy(&multidb_ctx[tier]->quiesce.completion); + } } #endif @@ -440,16 +449,20 @@ void 
netdata_cleanup_and_exit(int ret) { delta_shutdown_time("wait for dbengine collectors to finish"); size_t running = 1; - while(running) { + size_t count = 10; + while(running && count) { running = 0; for (size_t tier = 0; tier < storage_tiers; tier++) running += rrdeng_collectors_running(multidb_ctx[tier]); if(running) { - error_limit_static_thread_var(erl, 1, 100 * USEC_PER_MS); - error_limit(&erl, "waiting for %zu collectors to finish", running); + nd_log_limit_static_thread_var(erl, 1, 100 * USEC_PER_MS); + nd_log_limit(&erl, NDLS_DAEMON, NDLP_NOTICE, + "waiting for %zu collectors to finish", running); // sleep_usec(100 * USEC_PER_MS); + cleanup_destroyed_dictionaries(); } + count--; } delta_shutdown_time("wait for dbengine main cache to finish flushing"); @@ -613,8 +626,14 @@ int killpid(pid_t pid) { int ret; netdata_log_debug(D_EXIT, "Request to kill pid %d", pid); + int signal = SIGTERM; +//#ifdef NETDATA_INTERNAL_CHECKS +// if(service_running(SERVICE_COLLECTORS)) +// signal = SIGABRT; +//#endif + errno = 0; - ret = kill(pid, SIGTERM); + ret = kill(pid, signal); if (ret == -1) { switch(errno) { case ESRCH: @@ -661,7 +680,7 @@ static void set_nofile_limit(struct rlimit *rl) { } void cancel_main_threads() { - error_log_limit_unlimited(); + nd_log_limits_unlimited(); int i, found = 0; usec_t max = 5 * USEC_PER_SEC, step = 100000; @@ -751,7 +770,7 @@ int help(int exitcode) { " | '-' '-' '-' '-' real-time performance monitoring, done right! \n" " +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->\n" "\n" - " Copyright (C) 2016-2022, Netdata, Inc. \n" + " Copyright (C) 2016-2023, Netdata, Inc. \n" " Released under GNU General Public License v3 or later.\n" " All rights reserved.\n" "\n" @@ -761,7 +780,7 @@ int help(int exitcode) { " Support : https://github.com/netdata/netdata/issues\n" " License : https://github.com/netdata/netdata/blob/master/LICENSE.md\n" "\n" - " Twitter : https://twitter.com/linuxnetdata\n" + " Twitter : https://twitter.com/netdatahq\n" " LinkedIn : https://linkedin.com/company/netdata-cloud/\n" " Facebook : https://facebook.com/linuxnetdata/\n" "\n" @@ -787,8 +806,7 @@ int help(int exitcode) { " -W stacksize=N Set the stacksize (in bytes).\n\n" " -W debug_flags=N Set runtime tracing to debug.log.\n\n" " -W unittest Run internal unittests and exit.\n\n" - " -W sqlite-check Check metadata database integrity and exit.\n\n" - " -W sqlite-fix Check metadata database integrity, fix if needed and exit.\n\n" + " -W sqlite-meta-recover Run recovery on the metadata database and exit.\n\n" " -W sqlite-compact Reclaim metadata database unused space and exit.\n\n" #ifdef ENABLE_DBENGINE " -W createdataset=N Create a DB engine dataset of N seconds and exit.\n\n" @@ -841,40 +859,49 @@ static void security_init(){ #endif static void log_init(void) { + nd_log_set_facility(config_get(CONFIG_SECTION_LOGS, "facility", "daemon")); + + time_t period = ND_LOG_DEFAULT_THROTTLE_PERIOD; + size_t logs = ND_LOG_DEFAULT_THROTTLE_LOGS; + period = config_get_number(CONFIG_SECTION_LOGS, "logs flood protection period", period); + logs = (unsigned long)config_get_number(CONFIG_SECTION_LOGS, "logs to trigger flood protection", (long long int)logs); + nd_log_set_flood_protection(logs, period); + + nd_log_set_priority_level(config_get(CONFIG_SECTION_LOGS, "level", NDLP_INFO_STR)); + char filename[FILENAME_MAX + 1]; snprintfz(filename, FILENAME_MAX, "%s/debug.log", netdata_configured_log_dir); - stdout_filename = config_get(CONFIG_SECTION_LOGS, "debug", filename); + 
nd_log_set_user_settings(NDLS_DEBUG, config_get(CONFIG_SECTION_LOGS, "debug", filename)); - snprintfz(filename, FILENAME_MAX, "%s/error.log", netdata_configured_log_dir); - stderr_filename = config_get(CONFIG_SECTION_LOGS, "error", filename); + bool with_journal = is_stderr_connected_to_journal() /* || nd_log_journal_socket_available() */; + if(with_journal) + snprintfz(filename, FILENAME_MAX, "journal"); + else + snprintfz(filename, FILENAME_MAX, "%s/daemon.log", netdata_configured_log_dir); + nd_log_set_user_settings(NDLS_DAEMON, config_get(CONFIG_SECTION_LOGS, "daemon", filename)); - snprintfz(filename, FILENAME_MAX, "%s/collector.log", netdata_configured_log_dir); - stdcollector_filename = config_get(CONFIG_SECTION_LOGS, "collector", filename); + if(with_journal) + snprintfz(filename, FILENAME_MAX, "journal"); + else + snprintfz(filename, FILENAME_MAX, "%s/collector.log", netdata_configured_log_dir); + nd_log_set_user_settings(NDLS_COLLECTORS, config_get(CONFIG_SECTION_LOGS, "collector", filename)); snprintfz(filename, FILENAME_MAX, "%s/access.log", netdata_configured_log_dir); - stdaccess_filename = config_get(CONFIG_SECTION_LOGS, "access", filename); + nd_log_set_user_settings(NDLS_ACCESS, config_get(CONFIG_SECTION_LOGS, "access", filename)); - snprintfz(filename, FILENAME_MAX, "%s/health.log", netdata_configured_log_dir); - stdhealth_filename = config_get(CONFIG_SECTION_LOGS, "health", filename); + if(with_journal) + snprintfz(filename, FILENAME_MAX, "journal"); + else + snprintfz(filename, FILENAME_MAX, "%s/health.log", netdata_configured_log_dir); + nd_log_set_user_settings(NDLS_HEALTH, config_get(CONFIG_SECTION_LOGS, "health", filename)); #ifdef ENABLE_ACLK aclklog_enabled = config_get_boolean(CONFIG_SECTION_CLOUD, "conversation log", CONFIG_BOOLEAN_NO); if (aclklog_enabled) { snprintfz(filename, FILENAME_MAX, "%s/aclk.log", netdata_configured_log_dir); - aclklog_filename = config_get(CONFIG_SECTION_CLOUD, "conversation log file", filename); + nd_log_set_user_settings(NDLS_ACLK, config_get(CONFIG_SECTION_CLOUD, "conversation log file", filename)); } #endif - - char deffacility[8]; - snprintfz(deffacility,7,"%s","daemon"); - facility_log = config_get(CONFIG_SECTION_LOGS, "facility", deffacility); - - error_log_throttle_period = config_get_number(CONFIG_SECTION_LOGS, "errors flood protection period", error_log_throttle_period); - error_log_errors_per_period = (unsigned long)config_get_number(CONFIG_SECTION_LOGS, "errors to trigger flood protection", (long long int)error_log_errors_per_period); - error_log_errors_per_period_backup = error_log_errors_per_period; - - setenv("NETDATA_ERRORS_THROTTLE_PERIOD", config_get(CONFIG_SECTION_LOGS, "errors flood protection period" , ""), 1); - setenv("NETDATA_ERRORS_PER_PERIOD", config_get(CONFIG_SECTION_LOGS, "errors to trigger flood protection", ""), 1); } char *initialize_lock_directory_path(char *prefix) @@ -1046,6 +1073,36 @@ static void backwards_compatible_config() { config_move(CONFIG_SECTION_GLOBAL, "enable zero metrics", CONFIG_SECTION_DB, "enable zero metrics"); + config_move(CONFIG_SECTION_LOGS, "error", + CONFIG_SECTION_LOGS, "daemon"); + + config_move(CONFIG_SECTION_LOGS, "severity level", + CONFIG_SECTION_LOGS, "level"); + + config_move(CONFIG_SECTION_LOGS, "errors to trigger flood protection", + CONFIG_SECTION_LOGS, "logs to trigger flood protection"); + + config_move(CONFIG_SECTION_LOGS, "errors flood protection period", + CONFIG_SECTION_LOGS, "logs flood protection period"); + config_move(CONFIG_SECTION_HEALTH, "is ephemeral", 
+ CONFIG_SECTION_GLOBAL, "is ephemeral node"); + + config_move(CONFIG_SECTION_HEALTH, "has unstable connection", + CONFIG_SECTION_GLOBAL, "has unstable connection"); +} + +static int get_hostname(char *buf, size_t buf_size) { + if (netdata_configured_host_prefix && *netdata_configured_host_prefix) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/etc/hostname", netdata_configured_host_prefix); + + if (!read_file(filename, buf, buf_size)) { + trim(buf); + return 0; + } + } + + return gethostname(buf, buf_size); } static void get_netdata_configured_variables() { @@ -1054,10 +1111,12 @@ static void get_netdata_configured_variables() { // ------------------------------------------------------------------------ // get the hostname + netdata_configured_host_prefix = config_get(CONFIG_SECTION_GLOBAL, "host access prefix", ""); + verify_netdata_host_prefix(); + char buf[HOSTNAME_MAX + 1]; - if(gethostname(buf, HOSTNAME_MAX) == -1){ + if (get_hostname(buf, HOSTNAME_MAX)) netdata_log_error("Cannot get machine hostname."); - } netdata_configured_hostname = config_get(CONFIG_SECTION_GLOBAL, "hostname", buf); netdata_log_debug(D_OPTIONS, "hostname set to '%s'", netdata_configured_hostname); @@ -1108,8 +1167,6 @@ static void get_netdata_configured_variables() { netdata_configured_web_dir = config_get(CONFIG_SECTION_DIRECTORIES, "web", netdata_configured_web_dir); netdata_configured_cache_dir = config_get(CONFIG_SECTION_DIRECTORIES, "cache", netdata_configured_cache_dir); netdata_configured_varlib_dir = config_get(CONFIG_SECTION_DIRECTORIES, "lib", netdata_configured_varlib_dir); - char *env_home=getenv("HOME"); - netdata_configured_home_dir = config_get(CONFIG_SECTION_DIRECTORIES, "home", env_home?env_home:netdata_configured_home_dir); netdata_configured_lock_dir = initialize_lock_directory_path(netdata_configured_varlib_dir); @@ -1119,6 +1176,16 @@ static void get_netdata_configured_variables() { } #ifdef ENABLE_DBENGINE + // ------------------------------------------------------------------------ + // get default Database Engine page type + + const char *page_type = config_get(CONFIG_SECTION_DB, "dbengine page type", "raw"); + if (strcmp(page_type, "gorilla") == 0) { + tier_page_type[0] = PAGE_GORILLA_METRICS; + } else if (strcmp(page_type, "raw") != 0) { + netdata_log_error("Invalid dbengine page type ''%s' given. Defaulting to 'raw'.", page_type); + } + // ------------------------------------------------------------------------ // get default Database Engine page cache size in MiB @@ -1157,10 +1224,6 @@ static void get_netdata_configured_variables() { default_rrd_memory_mode = RRD_MEMORY_MODE_SAVE; } #endif - // ------------------------------------------------------------------------ - - netdata_configured_host_prefix = config_get(CONFIG_SECTION_GLOBAL, "host access prefix", ""); - verify_netdata_host_prefix(); // -------------------------------------------------------------------- // get KSM settings @@ -1180,6 +1243,7 @@ static void get_netdata_configured_variables() { // -------------------------------------------------------------------- rrdset_free_obsolete_time_s = config_get_number(CONFIG_SECTION_DB, "cleanup obsolete charts after secs", rrdset_free_obsolete_time_s); + rrdhost_free_ephemeral_time_s = config_get_number(CONFIG_SECTION_DB, "cleanup ephemeral hosts after secs", rrdhost_free_ephemeral_time_s); // Current chart locking and invalidation scheme doesn't prevent Netdata from segmentation faults if a short // cleanup delay is set. 
Extensive stress tests showed that 10 seconds is quite a safe delay. Look at // https://github.com/netdata/netdata/pull/11222#issuecomment-868367920 for more information. @@ -1258,7 +1322,7 @@ static inline void coverity_remove_taint(char *s) (void)s; } -int get_system_info(struct rrdhost_system_info *system_info, bool log) { +int get_system_info(struct rrdhost_system_info *system_info) { char *script; script = mallocz(sizeof(char) * (strlen(netdata_configured_primary_plugins_dir) + strlen("system-info.sh") + 2)); sprintf(script, "%s/%s", netdata_configured_primary_plugins_dir, "system-info.sh"); @@ -1290,11 +1354,7 @@ int get_system_info(struct rrdhost_system_info *system_info, bool log) { if(unlikely(rrdhost_set_system_info_variable(system_info, line, value))) { netdata_log_error("Unexpected environment variable %s=%s", line, value); - } - else { - if(log) - netdata_log_info("%s=%s", line, value); - + } else { setenv(line, value, 1); } } @@ -1333,6 +1393,8 @@ int julytest(void); int pluginsd_parser_unittest(void); void replication_initialize(void); void bearer_tokens_init(void); +int unittest_rrdpush_compressions(void); +int uuid_unittest(void); int main(int argc, char **argv) { // initialize the system clocks @@ -1342,8 +1404,6 @@ int main(int argc, char **argv) { usec_t started_ut = now_monotonic_usec(); usec_t last_ut = started_ut; const char *prev_msg = NULL; - // Initialize stderror avoiding coredump when netdata_log_info() or netdata_log_error() is called - stderror = stderr; int i; int config_loaded = 0; @@ -1435,14 +1495,14 @@ int main(int argc, char **argv) { #ifdef ENABLE_DBENGINE char* createdataset_string = "createdataset="; char* stresstest_string = "stresstest="; -#endif - if(strcmp(optarg, "sqlite-check") == 0) { - sql_init_database(DB_CHECK_INTEGRITY, 0); - return 0; + + if(strcmp(optarg, "pgd-tests") == 0) { + return pgd_test(argc, argv); } +#endif - if(strcmp(optarg, "sqlite-fix") == 0) { - sql_init_database(DB_CHECK_FIX_DB, 0); + if(strcmp(optarg, "sqlite-meta-recover") == 0) { + sql_init_database(DB_CHECK_RECOVER, 0); return 0; } @@ -1495,6 +1555,8 @@ int main(int argc, char **argv) { return 1; if (ctx_unittest()) return 1; + if (uuid_unittest()) + return 1; fprintf(stderr, "\n\nALL TESTS PASSED\n\n"); return 0; } @@ -1509,7 +1571,7 @@ int main(int argc, char **argv) { unittest_running = true; return aral_unittest(10000); } - else if(strcmp(optarg, "stringtest") == 0) { + else if(strcmp(optarg, "stringtest") == 0) { unittest_running = true; return string_unittest(10000); } @@ -1521,6 +1583,10 @@ int main(int argc, char **argv) { unittest_running = true; return buffer_unittest(); } + else if(strcmp(optarg, "uuidtest") == 0) { + unittest_running = true; + return uuid_unittest(); + } #ifdef ENABLE_DBENGINE else if(strcmp(optarg, "mctest") == 0) { unittest_running = true; @@ -1550,6 +1616,10 @@ int main(int argc, char **argv) { unittest_running = true; return pluginsd_parser_unittest(); } + else if(strcmp(optarg, "rrdpush_compressions_test") == 0) { + unittest_running = true; + return unittest_rrdpush_compressions(); + } else if(strncmp(optarg, createdataset_string, strlen(createdataset_string)) == 0) { optarg += strlen(createdataset_string); unsigned history_seconds = strtoul(optarg, NULL, 0); @@ -1851,7 +1921,7 @@ int main(int argc, char **argv) { { char buf[20 + 1]; - snprintfz(buf, 20, "%d", libuv_worker_threads); + snprintfz(buf, sizeof(buf) - 1, "%d", libuv_worker_threads); setenv("UV_THREADPOOL_SIZE", buf, 1); } @@ -1894,12 +1964,15 @@ int main(int argc, char 
**argv) { // get log filenames and settings log_init(); - error_log_limit_unlimited(); + nd_log_limits_unlimited(); // initialize the log files - open_all_log_files(); + nd_log_initialize(); + netdata_log_info("Netdata agent version \""VERSION"\" is starting"); ieee754_doubles = is_system_ieee754_double(); + if(!ieee754_doubles) + globally_disabled_capabilities |= STREAM_CAP_IEEE754; aral_judy_init(); @@ -1909,6 +1982,8 @@ int main(int argc, char **argv) { replication_initialize(); + rrd_functions_inflight_init(); + // -------------------------------------------------------------------- // get the certificate and start security @@ -1938,8 +2013,6 @@ int main(int argc, char **argv) { signals_block(); signals_init(); // setup the signals we want to use - dyn_conf_init(); - // -------------------------------------------------------------------- // check which threads are enabled and initialize them @@ -2005,6 +2078,18 @@ int main(int argc, char **argv) { if(become_daemon(dont_fork, user) == -1) fatal("Cannot daemonize myself."); + // The "HOME" env var points to the root's home dir because Netdata starts as root. Can't use "HOME". + struct passwd *pw = getpwuid(getuid()); + if (config_exists(CONFIG_SECTION_DIRECTORIES, "home") || !pw || !pw->pw_dir) { + netdata_configured_home_dir = config_get(CONFIG_SECTION_DIRECTORIES, "home", netdata_configured_home_dir); + } else { + netdata_configured_home_dir = config_get(CONFIG_SECTION_DIRECTORIES, "home", pw->pw_dir); + } + + setenv("HOME", netdata_configured_home_dir, 1); + + dyn_conf_init(); + netdata_log_info("netdata started on pid %d.", getpid()); delta_startup_time("initialize threads after fork"); @@ -2036,7 +2121,7 @@ int main(int argc, char **argv) { netdata_anonymous_statistics_enabled=-1; struct rrdhost_system_info *system_info = callocz(1, sizeof(struct rrdhost_system_info)); __atomic_sub_fetch(&netdata_buffers_statistics.rrdhost_allocations_size, sizeof(struct rrdhost_system_info), __ATOMIC_RELAXED); - get_system_info(system_info, true); + get_system_info(system_info); (void) registry_get_this_machine_guid(); system_info->hops = 0; get_install_type(&system_info->install_type, &system_info->prebuilt_arch, &system_info->prebuilt_dist); @@ -2073,7 +2158,7 @@ int main(int argc, char **argv) { // ------------------------------------------------------------------------ // enable log flood protection - error_log_limit_reset(); + nd_log_limits_reset(); // Load host labels delta_startup_time("collect host labels"); diff --git a/daemon/service.c b/daemon/service.c index a25e2a26b8da45..8a65de66c11c25 100644 --- a/daemon/service.c +++ b/daemon/service.c @@ -76,43 +76,55 @@ static void svc_rrddim_obsolete_to_archive(RRDDIM *rd) { rrddim_free(st, rd); } -static bool svc_rrdset_archive_obsolete_dimensions(RRDSET *st, bool all_dimensions) { +static inline bool svc_rrdset_archive_obsolete_dimensions(RRDSET *st, bool all_dimensions) { + if(!all_dimensions && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS)) + return true; + worker_is_busy(WORKER_JOB_ARCHIVE_CHART_DIMENSIONS); + rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS); + RRDDIM *rd; time_t now = now_realtime_sec(); - bool done_all_dimensions = true; + size_t dim_candidates = 0; + size_t dim_archives = 0; dfe_start_write(st->rrddim_root_index, rd) { - if(unlikely( - all_dimensions || - (rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE) && (rd->collector.last_collected_time.tv_sec + rrdset_free_obsolete_time_s < now)) - )) { - - if(dictionary_acquired_item_references(rd_dfe.item) == 1) { - 
netdata_log_info("Removing obsolete dimension '%s' (%s) of '%s' (%s).", rrddim_name(rd), rrddim_id(rd), rrdset_name(st), rrdset_id(st)); - svc_rrddim_obsolete_to_archive(rd); + bool candidate = (all_dimensions || rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)); + + if(candidate) { + dim_candidates++; + + if(rd->collector.last_collected_time.tv_sec + rrdset_free_obsolete_time_s < now) { + size_t references = dictionary_acquired_item_references(rd_dfe.item); + if(references == 1) { +// netdata_log_info("Removing obsolete dimension 'host:%s/chart:%s/dim:%s'", +// rrdhost_hostname(st->rrdhost), rrdset_id(st), rrddim_id(rd)); + svc_rrddim_obsolete_to_archive(rd); + dim_archives++; + } +// else +// netdata_log_info("Cannot remove obsolete dimension 'host:%s/chart:%s/dim:%s'", +// rrdhost_hostname(st->rrdhost), rrdset_id(st), rrddim_id(rd)); } - else - done_all_dimensions = false; } - else - done_all_dimensions = false; } dfe_done(rd); - return done_all_dimensions; -} + if(dim_archives != dim_candidates) { + rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS); + return false; + } -static void svc_rrdset_obsolete_to_archive(RRDSET *st) { - worker_is_busy(WORKER_JOB_ARCHIVE_CHART); + return true; +} +static void svc_rrdset_obsolete_to_free(RRDSET *st) { if(!svc_rrdset_archive_obsolete_dimensions(st, true)) return; - rrdset_flag_set(st, RRDSET_FLAG_ARCHIVED); - rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE); + worker_is_busy(WORKER_JOB_FREE_CHART); rrdcalc_unlink_all_rrdset_alerts(st); @@ -130,53 +142,83 @@ static void svc_rrdset_obsolete_to_archive(RRDSET *st) { worker_is_busy(WORKER_JOB_SAVE_CHART); rrdset_save(st); } - - worker_is_busy(WORKER_JOB_FREE_CHART); - rrdset_free(st); } + + rrdset_free(st); } -static void svc_rrdhost_cleanup_obsolete_charts(RRDHOST *host) { +static inline void svc_rrdhost_cleanup_charts_marked_obsolete(RRDHOST *host) { + if(!rrdhost_flag_check(host, RRDHOST_FLAG_PENDING_OBSOLETE_CHARTS|RRDHOST_FLAG_PENDING_OBSOLETE_DIMENSIONS)) + return; + worker_is_busy(WORKER_JOB_CLEANUP_OBSOLETE_CHARTS); + rrdhost_flag_clear(host, RRDHOST_FLAG_PENDING_OBSOLETE_CHARTS|RRDHOST_FLAG_PENDING_OBSOLETE_DIMENSIONS); + + size_t full_candidates = 0; + size_t full_archives = 0; + size_t partial_candidates = 0; + size_t partial_archives = 0; + time_t now = now_realtime_sec(); RRDSET *st; rrdset_foreach_reentrant(st, host) { if(rrdset_is_replicating(st)) continue; - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) - && st->last_accessed_time_s + rrdset_free_obsolete_time_s < now - && st->last_updated.tv_sec + rrdset_free_obsolete_time_s < now - && st->last_collected_time.tv_sec + rrdset_free_obsolete_time_s < now - )) { - svc_rrdset_obsolete_to_archive(st); + RRDSET_FLAGS flags = rrdset_flag_get(st); + bool obsolete_chart = flags & RRDSET_FLAG_OBSOLETE; + bool obsolete_dims = flags & RRDSET_FLAG_OBSOLETE_DIMENSIONS; + + if(obsolete_dims) { + partial_candidates++; + + if(svc_rrdset_archive_obsolete_dimensions(st, false)) + partial_archives++; } - else if(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS)) { - rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS); - svc_rrdset_archive_obsolete_dimensions(st, false); + + if(obsolete_chart) { + full_candidates++; + + if(unlikely( st->last_accessed_time_s + rrdset_free_obsolete_time_s < now + && st->last_updated.tv_sec + rrdset_free_obsolete_time_s < now + && st->last_collected_time.tv_sec + rrdset_free_obsolete_time_s < now + )) { + svc_rrdset_obsolete_to_free(st); + full_archives++; + } } } rrdset_foreach_done(st); + + if(partial_archives != 
partial_candidates) + rrdhost_flag_set(host, RRDHOST_FLAG_PENDING_OBSOLETE_DIMENSIONS); + + if(full_archives != full_candidates) + rrdhost_flag_set(host, RRDHOST_FLAG_PENDING_OBSOLETE_CHARTS); } -static void svc_rrdset_check_obsoletion(RRDHOST *host) { +static void svc_rrdhost_detect_obsolete_charts(RRDHOST *host) { worker_is_busy(WORKER_JOB_CHILD_CHART_OBSOLETION_CHECK); time_t now = now_realtime_sec(); time_t last_entry_t; RRDSET *st; + + time_t child_connect_time = host->child_connect_time; + rrdset_foreach_read(st, host) { if(rrdset_is_replicating(st)) continue; last_entry_t = rrdset_last_entry_s(st); - if(last_entry_t && last_entry_t < host->child_connect_time && - host->child_connect_time + TIME_TO_RUN_OBSOLETIONS_ON_CHILD_CONNECT + ITERATIONS_TO_RUN_OBSOLETIONS_ON_CHILD_CONNECT * st->update_every - < now) + if (last_entry_t && last_entry_t < child_connect_time && + child_connect_time + TIME_TO_RUN_OBSOLETIONS_ON_CHILD_CONNECT + + (ITERATIONS_TO_RUN_OBSOLETIONS_ON_CHILD_CONNECT * st->update_every) < + now) - rrdset_is_obsolete(st); + rrdset_is_obsolete___safe_from_collector_thread(st); } rrdset_foreach_done(st); } @@ -191,24 +233,24 @@ static void svc_rrd_cleanup_obsolete_charts_from_all_hosts() { if(rrdhost_receiver_replicating_charts(host) || rrdhost_sender_replicating_charts(host)) continue; - if(rrdhost_flag_check(host, RRDHOST_FLAG_PENDING_OBSOLETE_CHARTS|RRDHOST_FLAG_PENDING_OBSOLETE_DIMENSIONS)) { - rrdhost_flag_clear(host, RRDHOST_FLAG_PENDING_OBSOLETE_CHARTS|RRDHOST_FLAG_PENDING_OBSOLETE_DIMENSIONS); - svc_rrdhost_cleanup_obsolete_charts(host); - } + svc_rrdhost_cleanup_charts_marked_obsolete(host); - if(host != localhost - && host->trigger_chart_obsoletion_check - && ( - ( - host->child_last_chart_command - && host->child_last_chart_command + host->health.health_delay_up_to < now_realtime_sec() - ) - || (host->child_connect_time + TIME_TO_RUN_OBSOLETIONS_ON_CHILD_CONNECT < now_realtime_sec()) - ) - ) { - svc_rrdset_check_obsoletion(host); + if (host == localhost) + continue; + + netdata_mutex_lock(&host->receiver_lock); + + time_t now = now_realtime_sec(); + + if (host->trigger_chart_obsoletion_check && + ((host->child_last_chart_command && + host->child_last_chart_command + host->health.health_delay_up_to < now) || + (host->child_connect_time + TIME_TO_RUN_OBSOLETIONS_ON_CHILD_CONNECT < now))) { + svc_rrdhost_detect_obsolete_charts(host); host->trigger_chart_obsoletion_check = 0; } + + netdata_mutex_unlock(&host->receiver_lock); } rrd_unlock(); @@ -227,22 +269,45 @@ static void svc_rrdhost_cleanup_orphan_hosts(RRDHOST *protected_host) { if(!rrdhost_should_be_removed(host, protected_host, now)) continue; - netdata_log_info("Host '%s' with machine guid '%s' is obsolete - cleaning up.", rrdhost_hostname(host), host->machine_guid); + bool is_archived = rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED); + if (!is_archived) { + netdata_log_info("Host '%s' with machine guid '%s' is obsolete - cleaning up.", rrdhost_hostname(host), host->machine_guid); - if (rrdhost_option_check(host, RRDHOST_OPTION_DELETE_ORPHAN_HOST) - /* don't delete multi-host DB host files */ - && !(host->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE && is_storage_engine_shared(host->db[0].instance)) - ) { - worker_is_busy(WORKER_JOB_DELETE_HOST_CHARTS); - rrdhost_delete_charts(host); + if (rrdhost_option_check(host, RRDHOST_OPTION_DELETE_ORPHAN_HOST) + /* don't delete multi-host DB host files */ + && !(host->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE && is_storage_engine_shared(host->db[0].instance)) + ) { + 
worker_is_busy(WORKER_JOB_DELETE_HOST_CHARTS); + rrdhost_delete_charts(host); + } + else { + worker_is_busy(WORKER_JOB_SAVE_HOST_CHARTS); + rrdhost_save_charts(host); + } } - else { - worker_is_busy(WORKER_JOB_SAVE_HOST_CHARTS); - rrdhost_save_charts(host); + + bool force = false; + + if (rrdhost_option_check(host, RRDHOST_OPTION_EPHEMERAL_HOST) && now - host->last_connected > rrdhost_free_ephemeral_time_s) + force = true; + + if (!force && is_archived) + continue; + + if (force) { + netdata_log_info("Host '%s' with machine guid '%s' is archived, ephemeral clean up.", rrdhost_hostname(host), host->machine_guid); } worker_is_busy(WORKER_JOB_FREE_HOST); - rrdhost_free___while_having_rrd_wrlock(host, false); +#ifdef ENABLE_ACLK + // in case we have cloud connection we inform cloud + // a child disconnected + if (netdata_cloud_enabled && force) { + aclk_host_state_update(host, 0, 0); + unregister_node(host->machine_guid); + } +#endif + rrdhost_free___while_having_rrd_wrlock(host, force); goto restart_after_removal; } diff --git a/daemon/signals.c b/daemon/signals.c index ae28874cc5b37e..4f22543342bed6 100644 --- a/daemon/signals.c +++ b/daemon/signals.c @@ -42,7 +42,7 @@ static void signal_handler(int signo) { if(signals_waiting[i].action == NETDATA_SIGNAL_FATAL) { char buffer[200 + 1]; - snprintfz(buffer, 200, "\nSIGNAL HANDLER: received: %s. Oops! This is bad!\n", signals_waiting[i].name); + snprintfz(buffer, sizeof(buffer) - 1, "\nSIGNAL HANDLER: received: %s. Oops! This is bad!\n", signals_waiting[i].name); if(write(STDERR_FILENO, buffer, strlen(buffer)) == -1) { // nothing to do - we cannot write but there is no way to complain about it ; @@ -203,28 +203,28 @@ void signals_handle(void) { switch (signals_waiting[i].action) { case NETDATA_SIGNAL_RELOAD_HEALTH: - error_log_limit_unlimited(); + nd_log_limits_unlimited(); netdata_log_info("SIGNAL: Received %s. Reloading HEALTH configuration...", name); - error_log_limit_reset(); + nd_log_limits_reset(); execute_command(CMD_RELOAD_HEALTH, NULL, NULL); break; case NETDATA_SIGNAL_SAVE_DATABASE: - error_log_limit_unlimited(); + nd_log_limits_unlimited(); netdata_log_info("SIGNAL: Received %s. Saving databases...", name); - error_log_limit_reset(); + nd_log_limits_reset(); execute_command(CMD_SAVE_DATABASE, NULL, NULL); break; case NETDATA_SIGNAL_REOPEN_LOGS: - error_log_limit_unlimited(); + nd_log_limits_unlimited(); netdata_log_info("SIGNAL: Received %s. Reopening all log files...", name); - error_log_limit_reset(); + nd_log_limits_reset(); execute_command(CMD_REOPEN_LOGS, NULL, NULL); break; case NETDATA_SIGNAL_EXIT_CLEANLY: - error_log_limit_unlimited(); + nd_log_limits_unlimited(); netdata_log_info("SIGNAL: Received %s. 
Cleaning up to exit...", name); commands_exit(); netdata_cleanup_and_exit(0); diff --git a/daemon/static_threads.c b/daemon/static_threads.c index 830b854e6acd3a..0ce010d6edd0d0 100644 --- a/daemon/static_threads.c +++ b/daemon/static_threads.c @@ -61,7 +61,7 @@ const struct netdata_static_thread static_threads_common[] = { .config_name = "netdata monitoring", .env_name = "NETDATA_INTERNALS_MONITORING", .global_variable = &global_statistics_enabled, - .enabled = 1, + .enabled = 0, .thread = NULL, .init_routine = NULL, .start_routine = global_statistics_main @@ -69,10 +69,10 @@ const struct netdata_static_thread static_threads_common[] = { { .name = "STATS_WORKERS", .config_section = CONFIG_SECTION_PLUGINS, - .config_name = "netdata monitoring", + .config_name = "netdata monitoring extended", .env_name = "NETDATA_INTERNALS_MONITORING", .global_variable = &global_statistics_enabled, - .enabled = 1, + .enabled = 0, .thread = NULL, .init_routine = NULL, .start_routine = global_statistics_workers_main @@ -80,10 +80,10 @@ const struct netdata_static_thread static_threads_common[] = { { .name = "STATS_SQLITE3", .config_section = CONFIG_SECTION_PLUGINS, - .config_name = "netdata monitoring", + .config_name = "netdata monitoring extended", .env_name = "NETDATA_INTERNALS_MONITORING", .global_variable = &global_statistics_enabled, - .enabled = 1, + .enabled = 0, .thread = NULL, .init_routine = NULL, .start_routine = global_statistics_sqlite3_main diff --git a/daemon/unit_test.c b/daemon/unit_test.c index b8d229316b12bc..8f44be39bb70b4 100644 --- a/daemon/unit_test.c +++ b/daemon/unit_test.c @@ -97,7 +97,7 @@ static int check_number_printing(void) { int i, failed = 0; for(i = 0; values[i].correct ; i++) { print_netdata_double(netdata, values[i].n); - snprintfz(system, 512, "%0.12" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE)values[i].n); + snprintfz(system, sizeof(system) - 1, "%0.12" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE)values[i].n); int ok = 1; if(strcmp(netdata, values[i].correct) != 0) { @@ -319,7 +319,7 @@ void benchmark_storage_number(int loop, int multiplier) { for(i = 0; i < loop ;i++) { n *= multiplier; if(n > storage_number_positive_max) n = storage_number_positive_min; - snprintfz(buffer, 100, NETDATA_DOUBLE_FORMAT, n); + snprintfz(buffer, sizeof(buffer) - 1, NETDATA_DOUBLE_FORMAT, n); } } @@ -507,7 +507,7 @@ int unit_test_buffer() { const char *fmt = "string1: %s\nstring2: %s\nstring3: %s\nstring4: %s"; buffer_sprintf(wb, fmt, string, string, string, string); - snprintfz(final, 9000, fmt, string, string, string, string); + snprintfz(final, sizeof(final) - 1, fmt, string, string, string, string); const char *s = buffer_tostring(wb); @@ -1272,7 +1272,7 @@ int run_test(struct test *test) default_rrd_update_every = test->update_every; char name[101]; - snprintfz(name, 100, "unittest-%s", test->name); + snprintfz(name, sizeof(name) - 1, "unittest-%s", test->name); // create the chart RRDSET *st = rrdset_create_localhost("netdata", name, name, "netdata", NULL, "Unit Testing", "a value", "unittest", NULL, 1 @@ -1534,7 +1534,7 @@ int unit_test(long delay, long shift) repeat++; char name[101]; - snprintfz(name, 100, "unittest-%d-%ld-%ld", repeat, delay, shift); + snprintfz(name, sizeof(name) - 1, "unittest-%d-%ld-%ld", repeat, delay, shift); //debug_flags = 0xffffffff; default_rrd_memory_mode = RRD_MEMORY_MODE_ALLOC; @@ -1681,13 +1681,6 @@ int test_sqlite(void) { rc = sqlite3_exec_monitored(db_meta, buffer_tostring(sql), 0, 0, NULL); if (rc != SQLITE_OK) goto error; - buffer_flush(sql); - - 
buffer_sprintf(sql, INDEX_ACLK_ALERT, uuid_str, uuid_str); - rc = sqlite3_exec_monitored(db_meta, buffer_tostring(sql), 0, 0, NULL); - if (rc != SQLITE_OK) - goto error; - buffer_flush(sql); buffer_free(sql); fprintf(stderr,"SQLite is OK\n"); @@ -1831,30 +1824,29 @@ static RRDHOST *dbengine_rrdhost_find_or_create(char *name) /* We don't want to drop metrics when generating load, we prefer to block data generation itself */ return rrdhost_find_or_create( - name - , name - , name - , os_type - , netdata_configured_timezone - , netdata_configured_abbrev_timezone - , netdata_configured_utc_offset - , "" - , program_name - , program_version - , default_rrd_update_every - , default_rrd_history_entries - , RRD_MEMORY_MODE_DBENGINE - , default_health_enabled - , default_rrdpush_enabled - , default_rrdpush_destination - , default_rrdpush_api_key - , default_rrdpush_send_charts_matching - , default_rrdpush_enable_replication - , default_rrdpush_seconds_to_replicate - , default_rrdpush_replication_step - , NULL - , 0 - ); + name, + name, + name, + os_type, + netdata_configured_timezone, + netdata_configured_abbrev_timezone, + netdata_configured_utc_offset, + "", + program_name, + program_version, + default_rrd_update_every, + default_rrd_history_entries, + RRD_MEMORY_MODE_DBENGINE, + default_health_enabled, + default_rrdpush_enabled, + default_rrdpush_destination, + default_rrdpush_api_key, + default_rrdpush_send_charts_matching, + default_rrdpush_enable_replication, + default_rrdpush_seconds_to_replicate, + default_rrdpush_replication_step, + NULL, + 0); } // constants for test_dbengine @@ -1878,7 +1870,7 @@ static void test_dbengine_create_charts(RRDHOST *host, RRDSET *st[CHARTS], RRDDI char name[101]; for (i = 0 ; i < CHARTS ; ++i) { - snprintfz(name, 100, "dbengine-chart-%d", i); + snprintfz(name, sizeof(name) - 1, "dbengine-chart-%d", i); // create the chart st[i] = rrdset_create(host, "netdata", name, name, "netdata", NULL, "Unit Testing", "a value", "unittest", @@ -1886,7 +1878,7 @@ static void test_dbengine_create_charts(RRDHOST *host, RRDSET *st[CHARTS], RRDDI rrdset_flag_set(st[i], RRDSET_FLAG_DEBUG); rrdset_flag_set(st[i], RRDSET_FLAG_STORE_FIRST); for (j = 0 ; j < DIMS ; ++j) { - snprintfz(name, 100, "dim-%d", j); + snprintfz(name, sizeof(name) - 1, "dim-%d", j); rd[i][j] = rrddim_add(st[i], name, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); } @@ -2108,6 +2100,14 @@ static int test_dbengine_check_rrdr(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS] return errors + value_errors + time_errors; } +void test_dbengine_charts_and_dims_are_not_collected(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS]) { + for(int c = 0; c < CHARTS ; c++) { + st[c]->rrdcontexts.collected = false; + for(int d = 0; d < DIMS ; d++) + rd[c][d]->rrdcontexts.collected = false; + } +} + int test_dbengine(void) { fprintf(stderr, "%s() running...\n", __FUNCTION__ ); @@ -2117,7 +2117,7 @@ int test_dbengine(void) RRDDIM *rd[CHARTS][DIMS]; time_t time_start[REGIONS], time_end[REGIONS]; - error_log_limit_unlimited(); + nd_log_limits_unlimited(); fprintf(stderr, "\nRunning DB-engine test\n"); default_rrd_memory_mode = RRD_MEMORY_MODE_DBENGINE; @@ -2135,6 +2135,7 @@ int test_dbengine(void) time_end[current_region] = test_dbengine_create_metrics(st,rd, current_region, time_start[current_region]); errors += test_dbengine_check_metrics(st, rd, current_region, time_start[current_region]); + test_dbengine_charts_and_dims_are_not_collected(st, rd); current_region = 1; //this is the second region of data update_every = 
REGION_UPDATE_EVERY[current_region]; // set data collection frequency to 3 seconds @@ -2152,6 +2153,7 @@ int test_dbengine(void) time_end[current_region] = test_dbengine_create_metrics(st,rd, current_region, time_start[current_region]); errors += test_dbengine_check_metrics(st, rd, current_region, time_start[current_region]); + test_dbengine_charts_and_dims_are_not_collected(st, rd); current_region = 2; //this is the third region of data update_every = REGION_UPDATE_EVERY[current_region]; // set data collection frequency to 1 seconds @@ -2169,6 +2171,7 @@ int test_dbengine(void) time_end[current_region] = test_dbengine_create_metrics(st,rd, current_region, time_start[current_region]); errors += test_dbengine_check_metrics(st, rd, current_region, time_start[current_region]); + test_dbengine_charts_and_dims_are_not_collected(st, rd); for (current_region = 0 ; current_region < REGIONS ; ++current_region) { errors += test_dbengine_check_rrdr(st, rd, current_region, time_start[current_region], time_end[current_region]); @@ -2343,7 +2346,7 @@ void generate_dbengine_dataset(unsigned history_seconds) (1024 * 1024); default_rrdeng_disk_quota_mb -= default_rrdeng_disk_quota_mb * EXPECTED_COMPRESSION_RATIO / 100; - error_log_limit_unlimited(); + nd_log_limits_unlimited(); fprintf(stderr, "Initializing localhost with hostname 'dbengine-dataset'"); host = dbengine_rrdhost_find_or_create("dbengine-dataset"); @@ -2518,7 +2521,7 @@ void dbengine_stress_test(unsigned TEST_DURATION_SEC, unsigned DSET_CHARTS, unsi unsigned i, j; time_t time_start, test_duration; - error_log_limit_unlimited(); + nd_log_limits_unlimited(); if (!TEST_DURATION_SEC) TEST_DURATION_SEC = 10; diff --git a/database/contexts/api_v1.c b/database/contexts/api_v1.c index bc7fee496de64d..f144e6f7b881f6 100644 --- a/database/contexts/api_v1.c +++ b/database/contexts/api_v1.c @@ -213,7 +213,7 @@ static inline int rrdinstance_to_json_callback(const DICTIONARY_ITEM *item, void buffer_json_array_close(wb); } - if(options & RRDCONTEXT_OPTION_SHOW_LABELS && ri->rrdlabels && dictionary_entries(ri->rrdlabels)) { + if(options & RRDCONTEXT_OPTION_SHOW_LABELS && ri->rrdlabels && rrdlabels_entries(ri->rrdlabels)) { buffer_json_member_add_object(wb, "labels"); rrdlabels_to_buffer_json_members(ri->rrdlabels, wb); buffer_json_object_close(wb); @@ -366,7 +366,7 @@ int rrdcontext_to_json(RRDHOST *host, BUFFER *wb, time_t after, time_t before, R RRDCONTEXT *rc = rrdcontext_acquired_value(rca); if(after != 0 && before != 0) - rrdr_relative_window_to_absolute(&after, &before, NULL, false); + rrdr_relative_window_to_absolute_query(&after, &before, NULL, false); buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); struct rrdcontext_to_json t_contexts = { @@ -403,7 +403,7 @@ int rrdcontexts_to_json(RRDHOST *host, BUFFER *wb, time_t after, time_t before, uuid_unparse(*host->node_id, node_uuid); if(after != 0 && before != 0) - rrdr_relative_window_to_absolute(&after, &before, NULL, false); + rrdr_relative_window_to_absolute_query(&after, &before, NULL, false); buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(host)); diff --git a/database/contexts/api_v2.c b/database/contexts/api_v2.c index 08739160d94dad..3ca49a319522b2 100644 --- a/database/contexts/api_v2.c +++ b/database/contexts/api_v2.c @@ -184,6 +184,7 @@ struct alert_v2_entry { RRDCALC *tmp; STRING *name; + STRING *summary; size_t ati; @@ -315,6 +316,7 @@ static void 
alerts_v2_insert_callback(const DICTIONARY_ITEM *item __maybe_unused struct alert_v2_entry *t = value; RRDCALC *rc = t->tmp; t->name = rc->name; + t->summary = rc->summary; t->ati = ctl->alerts.ati++; t->nodes = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_VALUE_LINK_DONT_CLONE|DICT_OPTION_NAME_LINK_DONT_CLONE); @@ -355,6 +357,7 @@ static void alert_instances_v2_insert_callback(const DICTIONARY_ITEM *item __may t->status = rc->status; t->flags = rc->run_flags; t->info = rc->info; + t->summary = rc->summary; t->value = rc->value; t->last_updated = rc->last_updated; t->last_status_change = rc->last_status_change; @@ -418,7 +421,7 @@ static FTS_MATCH rrdcontext_to_json_v2_full_text_search(struct rrdcontext_to_jso dfe_done(rm); size_t label_searches = 0; - if(unlikely(ri->rrdlabels && dictionary_entries(ri->rrdlabels) && + if(unlikely(ri->rrdlabels && rrdlabels_entries(ri->rrdlabels) && rrdlabels_match_simple_pattern_parsed(ri->rrdlabels, q, ':', &label_searches))) { ctl->q.fts.searches += label_searches; ctl->q.fts.char_searches += label_searches; @@ -504,7 +507,7 @@ static bool rrdcontext_matches_alert(struct rrdcontext_to_json_v2_data *ctl, RRD if (ctl->options & (CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES | CONTEXT_V2_OPTION_ALERTS_WITH_VALUES)) { char key[20 + 1]; - snprintfz(key, 20, "%p", rcl); + snprintfz(key, sizeof(key) - 1, "%p", rcl); struct sql_alert_instance_v2_entry z = { .ati = ati, @@ -613,10 +616,10 @@ static void rrdhost_receiver_to_json(BUFFER *wb, RRDHOST_STATUS *s, const char * buffer_json_member_add_object(wb, "source"); { char buf[1024 + 1]; - snprintfz(buf, 1024, "[%s]:%d%s", s->ingest.peers.local.ip, s->ingest.peers.local.port, s->ingest.ssl ? ":SSL" : ""); + snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->ingest.peers.local.ip, s->ingest.peers.local.port, s->ingest.ssl ? ":SSL" : ""); buffer_json_member_add_string(wb, "local", buf); - snprintfz(buf, 1024, "[%s]:%d%s", s->ingest.peers.peer.ip, s->ingest.peers.peer.port, s->ingest.ssl ? ":SSL" : ""); + snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->ingest.peers.peer.ip, s->ingest.peers.peer.port, s->ingest.ssl ? ":SSL" : ""); buffer_json_member_add_string(wb, "remote", buf); stream_capabilities_to_json_array(wb, s->ingest.capabilities, "capabilities"); @@ -656,10 +659,10 @@ static void rrdhost_sender_to_json(BUFFER *wb, RRDHOST_STATUS *s, const char *ke buffer_json_member_add_object(wb, "destination"); { char buf[1024 + 1]; - snprintfz(buf, 1024, "[%s]:%d%s", s->stream.peers.local.ip, s->stream.peers.local.port, s->stream.ssl ? ":SSL" : ""); + snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->stream.peers.local.ip, s->stream.peers.local.port, s->stream.ssl ? ":SSL" : ""); buffer_json_member_add_string(wb, "local", buf); - snprintfz(buf, 1024, "[%s]:%d%s", s->stream.peers.peer.ip, s->stream.peers.peer.port, s->stream.ssl ? ":SSL" : ""); + snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->stream.peers.peer.ip, s->stream.peers.peer.port, s->stream.ssl ? 
":SSL" : ""); buffer_json_member_add_string(wb, "remote", buf); stream_capabilities_to_json_array(wb, s->stream.capabilities, "capabilities"); @@ -671,6 +674,7 @@ static void rrdhost_sender_to_json(BUFFER *wb, RRDHOST_STATUS *s, const char *ke buffer_json_member_add_uint64(wb, "metadata", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_METADATA]); buffer_json_member_add_uint64(wb, "functions", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_FUNCTIONS]); buffer_json_member_add_uint64(wb, "replication", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_REPLICATION]); + buffer_json_member_add_uint64(wb, "dyncfg", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DYNCFG]); } buffer_json_object_close(wb); // traffic @@ -682,7 +686,7 @@ static void rrdhost_sender_to_json(BUFFER *wb, RRDHOST_STATUS *s, const char *ke { if (d->ssl) { - snprintfz(buf, 1024, "%s:SSL", string2str(d->destination)); + snprintfz(buf, sizeof(buf) - 1, "%s:SSL", string2str(d->destination)); buffer_json_member_add_string(wb, "destination", buf); } else @@ -1009,8 +1013,8 @@ void buffer_json_agents_v2(BUFFER *wb, struct query_timings *timings, time_t now STORAGE_ENGINE *eng = localhost->db[tier].eng; if (!eng) continue; - size_t max = storage_engine_disk_space_max(eng->backend, localhost->db[tier].instance); - size_t used = storage_engine_disk_space_used(eng->backend, localhost->db[tier].instance); + uint64_t max = storage_engine_disk_space_max(eng->backend, localhost->db[tier].instance); + uint64_t used = storage_engine_disk_space_used(eng->backend, localhost->db[tier].instance); time_t first_time_s = storage_engine_global_first_time_s(eng->backend, localhost->db[tier].instance); size_t currently_collected_metrics = storage_engine_collected_metrics(eng->backend, localhost->db[tier].instance); @@ -1280,6 +1284,7 @@ static void contexts_v2_alert_config_to_json_from_sql_alert_config_data(struct s buffer_json_member_add_string(wb, "component", t->component); buffer_json_member_add_string(wb, "type", t->type); buffer_json_member_add_string(wb, "info", t->info); + buffer_json_member_add_string(wb, "summary", t->summary); // buffer_json_member_add_string(wb, "source", t->source); // moved to alert instance } @@ -1343,6 +1348,7 @@ static int contexts_v2_alert_instance_to_json_callback(const DICTIONARY_ITEM *it buffer_json_member_add_string(wb, "units", string2str(t->units)); buffer_json_member_add_string(wb, "fami", string2str(t->family)); buffer_json_member_add_string(wb, "info", string2str(t->info)); + buffer_json_member_add_string(wb, "sum", string2str(t->summary)); buffer_json_member_add_string(wb, "ctx", string2str(t->context)); buffer_json_member_add_string(wb, "st", rrdcalc_status2string(t->status)); buffer_json_member_add_uuid(wb, "tr_i", &t->last_transition_id); @@ -1397,6 +1403,7 @@ static void contexts_v2_alerts_to_json(BUFFER *wb, struct rrdcontext_to_json_v2_ { buffer_json_member_add_uint64(wb, "ati", t->ati); buffer_json_member_add_string(wb, "nm", string2str(t->name)); + buffer_json_member_add_string(wb, "sum", string2str(t->summary)); buffer_json_member_add_uint64(wb, "cr", t->critical); buffer_json_member_add_uint64(wb, "wr", t->warning); @@ -1438,6 +1445,7 @@ struct sql_alert_transition_fixed_size { char units[SQL_TRANSITION_DATA_SMALL_STRING]; char exec[SQL_TRANSITION_DATA_BIG_STRING]; char info[SQL_TRANSITION_DATA_BIG_STRING]; + char summary[SQL_TRANSITION_DATA_BIG_STRING]; char classification[SQL_TRANSITION_DATA_SMALL_STRING]; char 
type[SQL_TRANSITION_DATA_SMALL_STRING]; char component[SQL_TRANSITION_DATA_SMALL_STRING]; @@ -1477,6 +1485,7 @@ static struct sql_alert_transition_fixed_size *contexts_v2_alert_transition_dup( strncpyz(n->units, t->units ? t->units : "", sizeof(n->units) - 1); strncpyz(n->exec, t->exec ? t->exec : "", sizeof(n->exec) - 1); strncpyz(n->info, t->info ? t->info : "", sizeof(n->info) - 1); + strncpyz(n->summary, t->summary ? t->summary : "", sizeof(n->summary) - 1); strncpyz(n->classification, t->classification ? t->classification : "", sizeof(n->classification) - 1); strncpyz(n->type, t->type ? t->type : "", sizeof(n->type) - 1); strncpyz(n->component, t->component ? t->component : "", sizeof(n->component) - 1); @@ -1734,6 +1743,7 @@ static void contexts_v2_alert_transitions_to_json(BUFFER *wb, struct rrdcontext_ buffer_json_member_add_time_t(wb, "when", t->when_key); buffer_json_member_add_string(wb, "info", *t->info ? t->info : ""); + buffer_json_member_add_string(wb, "summary", *t->summary ? t->summary : ""); buffer_json_member_add_string(wb, "units", *t->units ? t->units : NULL); buffer_json_member_add_object(wb, "new"); { @@ -1934,7 +1944,9 @@ int rrdcontext_to_json_v2(BUFFER *wb, struct api_v2_contexts_request *req, CONTE } if(req->after || req->before) { - ctl.window.relative = rrdr_relative_window_to_absolute(&ctl.window.after, &ctl.window.before, &ctl.now, false); + ctl.window.relative = rrdr_relative_window_to_absolute_query(&ctl.window.after, &ctl.window.before, &ctl.now + , false + ); ctl.window.enabled = !(mode & CONTEXTS_V2_ALERT_TRANSITIONS); } else @@ -2023,7 +2035,7 @@ int rrdcontext_to_json_v2(BUFFER *wb, struct api_v2_contexts_request *req, CONTE } else { buffer_strcat(wb, "query interrupted"); - resp = HTTP_RESP_BACKEND_FETCH_FAILED; + resp = HTTP_RESP_CLIENT_CLOSED_REQUEST; } goto cleanup; } diff --git a/database/contexts/instance.c b/database/contexts/instance.c index 7e572fb80a6510..39837dbf677d67 100644 --- a/database/contexts/instance.c +++ b/database/contexts/instance.c @@ -35,7 +35,7 @@ inline STRING *rrdinstance_acquired_units_dup(RRDINSTANCE_ACQUIRED *ria) { return string_dup(ri->units); } -inline DICTIONARY *rrdinstance_acquired_labels(RRDINSTANCE_ACQUIRED *ria) { +inline RRDLABELS *rrdinstance_acquired_labels(RRDINSTANCE_ACQUIRED *ria) { RRDINSTANCE *ri = rrdinstance_acquired_value(ria); return ri->rrdlabels; } @@ -68,7 +68,7 @@ inline time_t rrdinstance_acquired_update_every(RRDINSTANCE_ACQUIRED *ria) { static void rrdinstance_free(RRDINSTANCE *ri) { if(rrd_flag_check(ri, RRD_FLAG_OWN_LABELS)) - dictionary_destroy(ri->rrdlabels); + rrdlabels_destroy(ri->rrdlabels); rrdmetrics_destroy_from_rrdinstance(ri); string_freez(ri->id); @@ -211,7 +211,7 @@ static bool rrdinstance_conflict_callback(const DICTIONARY_ITEM *item __maybe_un ri->rrdset = ri_new->rrdset; if(ri->rrdset && rrd_flag_check(ri, RRD_FLAG_OWN_LABELS)) { - DICTIONARY *old = ri->rrdlabels; + RRDLABELS *old = ri->rrdlabels; ri->rrdlabels = ri->rrdset->rrdlabels; rrd_flag_clear(ri, RRD_FLAG_OWN_LABELS); rrdlabels_destroy(old); @@ -329,11 +329,11 @@ inline void rrdinstance_from_rrdset(RRDSET *st) { RRDINSTANCE_ACQUIRED *ria = (RRDINSTANCE_ACQUIRED *)dictionary_set_and_acquire_item(rc->rrdinstances, string2str(tri.id), &tri, sizeof(tri)); - RRDCONTEXT_ACQUIRED *rca_old = st->rrdcontext; - RRDINSTANCE_ACQUIRED *ria_old = st->rrdinstance; + RRDCONTEXT_ACQUIRED *rca_old = st->rrdcontexts.rrdcontext; + RRDINSTANCE_ACQUIRED *ria_old = st->rrdcontexts.rrdinstance; - st->rrdcontext = rca; - st->rrdinstance = ria; 
+ st->rrdcontexts.rrdcontext = rca; + st->rrdcontexts.rrdinstance = ria; if(rca == rca_old) { rrdcontext_release(rca_old); @@ -354,16 +354,16 @@ inline void rrdinstance_from_rrdset(RRDSET *st) { // migrate all dimensions to the new metrics RRDDIM *rd; rrddim_foreach_read(rd, st) { - if (!rd->rrdmetric) continue; + if (!rd->rrdcontexts.rrdmetric) continue; - RRDMETRIC *rm_old = rrdmetric_acquired_value(rd->rrdmetric); + RRDMETRIC *rm_old = rrdmetric_acquired_value(rd->rrdcontexts.rrdmetric); rrd_flags_replace(rm_old, RRD_FLAG_DELETED|RRD_FLAG_UPDATED|RRD_FLAG_LIVE_RETENTION|RRD_FLAG_UPDATE_REASON_UNUSED|RRD_FLAG_UPDATE_REASON_ZERO_RETENTION); rm_old->rrddim = NULL; rm_old->first_time_s = 0; rm_old->last_time_s = 0; - rrdmetric_release(rd->rrdmetric); - rd->rrdmetric = NULL; + rrdmetric_release(rd->rrdcontexts.rrdmetric); + rd->rrdcontexts.rrdmetric = NULL; rrdmetric_from_rrddim(rd); } @@ -406,12 +406,12 @@ inline void rrdinstance_from_rrdset(RRDSET *st) { #define rrdset_get_rrdinstance(st) rrdset_get_rrdinstance_with_trace(st, __FUNCTION__); static inline RRDINSTANCE *rrdset_get_rrdinstance_with_trace(RRDSET *st, const char *function) { - if(unlikely(!st->rrdinstance)) { + if(unlikely(!st->rrdcontexts.rrdinstance)) { netdata_log_error("RRDINSTANCE: RRDSET '%s' is not linked to an RRDINSTANCE at %s()", rrdset_id(st), function); return NULL; } - RRDINSTANCE *ri = rrdinstance_acquired_value(st->rrdinstance); + RRDINSTANCE *ri = rrdinstance_acquired_value(st->rrdcontexts.rrdinstance); if(unlikely(!ri)) { netdata_log_error("RRDINSTANCE: RRDSET '%s' lost its link to an RRDINSTANCE at %s()", rrdset_id(st), function); return NULL; @@ -439,14 +439,17 @@ inline void rrdinstance_rrdset_is_freed(RRDSET *st) { rrdinstance_trigger_updates(ri, __FUNCTION__ ); - rrdinstance_release(st->rrdinstance); - st->rrdinstance = NULL; + rrdinstance_release(st->rrdcontexts.rrdinstance); + st->rrdcontexts.rrdinstance = NULL; - rrdcontext_release(st->rrdcontext); - st->rrdcontext = NULL; + rrdcontext_release(st->rrdcontexts.rrdcontext); + st->rrdcontexts.rrdcontext = NULL; + st->rrdcontexts.collected = false; } inline void rrdinstance_rrdset_has_updated_retention(RRDSET *st) { + st->rrdcontexts.collected = false; + RRDINSTANCE *ri = rrdset_get_rrdinstance(st); if(unlikely(!ri)) return; @@ -455,8 +458,10 @@ inline void rrdinstance_rrdset_has_updated_retention(RRDSET *st) { } inline void rrdinstance_updated_rrdset_name(RRDSET *st) { + st->rrdcontexts.collected = false; + // the chart may not be initialized when this is called - if(unlikely(!st->rrdinstance)) return; + if(unlikely(!st->rrdcontexts.rrdinstance)) return; RRDINSTANCE *ri = rrdset_get_rrdinstance(st); if(unlikely(!ri)) return; @@ -491,10 +496,12 @@ inline void rrdinstance_updated_rrdset_flags_no_action(RRDINSTANCE *ri, RRDSET * } inline void rrdinstance_updated_rrdset_flags(RRDSET *st) { + st->rrdcontexts.collected = false; + RRDINSTANCE *ri = rrdset_get_rrdinstance(st); if(unlikely(!ri)) return; - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED|RRDSET_FLAG_OBSOLETE))) + if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE))) rrd_flag_set_archived(ri); rrdinstance_updated_rrdset_flags_no_action(ri, st); @@ -503,6 +510,11 @@ inline void rrdinstance_updated_rrdset_flags(RRDSET *st) { } inline void rrdinstance_collected_rrdset(RRDSET *st) { + if(st->rrdcontexts.collected) + return; + + st->rrdcontexts.collected = true; + RRDINSTANCE *ri = rrdset_get_rrdinstance(st); if(unlikely(!ri)) { rrdcontext_updated_rrdset(st); diff --git 
a/database/contexts/internal.h b/database/contexts/internal.h index 04ad0883a36212..293659fdd0a00d 100644 --- a/database/contexts/internal.h +++ b/database/contexts/internal.h @@ -230,7 +230,7 @@ typedef struct rrdinstance { time_t update_every_s; // data collection frequency RRDSET *rrdset; // pointer to RRDSET when collected, or NULL - DICTIONARY *rrdlabels; // linked to RRDSET->chart_labels or own version + RRDLABELS *rrdlabels; // linked to RRDSET->chart_labels or own version struct rrdcontext *rc; DICTIONARY *rrdmetrics; diff --git a/database/contexts/metric.c b/database/contexts/metric.c index 55efde4e9f6d33..0f0785972dfe1a 100644 --- a/database/contexts/metric.c +++ b/database/contexts/metric.c @@ -239,10 +239,10 @@ void rrdmetric_from_rrddim(RRDDIM *rd) { if(unlikely(!rd->rrdset->rrdhost)) fatal("RRDMETRIC: rrdset '%s' does not have a rrdhost", rrdset_id(rd->rrdset)); - if(unlikely(!rd->rrdset->rrdinstance)) + if(unlikely(!rd->rrdset->rrdcontexts.rrdinstance)) fatal("RRDMETRIC: rrdset '%s' does not have a rrdinstance", rrdset_id(rd->rrdset)); - RRDINSTANCE *ri = rrdinstance_acquired_value(rd->rrdset->rrdinstance); + RRDINSTANCE *ri = rrdinstance_acquired_value(rd->rrdset->rrdcontexts.rrdinstance); RRDMETRIC trm = { .id = string_dup(rd->id), @@ -254,20 +254,21 @@ void rrdmetric_from_rrddim(RRDDIM *rd) { RRDMETRIC_ACQUIRED *rma = (RRDMETRIC_ACQUIRED *)dictionary_set_and_acquire_item(ri->rrdmetrics, string2str(trm.id), &trm, sizeof(trm)); - if(rd->rrdmetric) - rrdmetric_release(rd->rrdmetric); + if(rd->rrdcontexts.rrdmetric) + rrdmetric_release(rd->rrdcontexts.rrdmetric); - rd->rrdmetric = rma; + rd->rrdcontexts.rrdmetric = rma; + rd->rrdcontexts.collected = false; } #define rrddim_get_rrdmetric(rd) rrddim_get_rrdmetric_with_trace(rd, __FUNCTION__) static inline RRDMETRIC *rrddim_get_rrdmetric_with_trace(RRDDIM *rd, const char *function) { - if(unlikely(!rd->rrdmetric)) { + if(unlikely(!rd->rrdcontexts.rrdmetric)) { netdata_log_error("RRDMETRIC: RRDDIM '%s' is not linked to an RRDMETRIC at %s()", rrddim_id(rd), function); return NULL; } - RRDMETRIC *rm = rrdmetric_acquired_value(rd->rrdmetric); + RRDMETRIC *rm = rrdmetric_acquired_value(rd->rrdcontexts.rrdmetric); if(unlikely(!rm)) { netdata_log_error("RRDMETRIC: RRDDIM '%s' lost the link to its RRDMETRIC at %s()", rrddim_id(rd), function); return NULL; @@ -288,11 +289,14 @@ inline void rrdmetric_rrddim_is_freed(RRDDIM *rd) { rm->rrddim = NULL; rrdmetric_trigger_updates(rm, __FUNCTION__ ); - rrdmetric_release(rd->rrdmetric); - rd->rrdmetric = NULL; + rrdmetric_release(rd->rrdcontexts.rrdmetric); + rd->rrdcontexts.rrdmetric = NULL; + rd->rrdcontexts.collected = false; } inline void rrdmetric_updated_rrddim_flags(RRDDIM *rd) { + rd->rrdcontexts.collected = false; + RRDMETRIC *rm = rrddim_get_rrdmetric(rd); if(unlikely(!rm)) return; @@ -305,6 +309,11 @@ inline void rrdmetric_updated_rrddim_flags(RRDDIM *rd) { } inline void rrdmetric_collected_rrddim(RRDDIM *rd) { + if(rd->rrdcontexts.collected) + return; + + rd->rrdcontexts.collected = true; + RRDMETRIC *rm = rrddim_get_rrdmetric(rd); if(unlikely(!rm)) return; @@ -316,4 +325,3 @@ inline void rrdmetric_collected_rrddim(RRDDIM *rd) { rrdmetric_trigger_updates(rm, __FUNCTION__ ); } - diff --git a/database/contexts/query_target.c b/database/contexts/query_target.c index 829640b90902d5..95abc3e654ae2e 100644 --- a/database/contexts/query_target.c +++ b/database/contexts/query_target.c @@ -835,8 +835,8 @@ static ssize_t query_context_add(void *data, RRDCONTEXT_ACQUIRED *rca, bool quer 
if(query_instance_add(qtl, qn, qc, qt->request.ria, queryable_context, false)) added++; } - else if(unlikely(qtl->st && qtl->st->rrdcontext == rca && qtl->st->rrdinstance)) { - if(query_instance_add(qtl, qn, qc, qtl->st->rrdinstance, queryable_context, false)) + else if(unlikely(qtl->st && qtl->st->rrdcontexts.rrdcontext == rca && qtl->st->rrdcontexts.rrdinstance)) { + if(query_instance_add(qtl, qn, qc, qtl->st->rrdcontexts.rrdinstance, queryable_context, false)) added++; } else { @@ -894,11 +894,11 @@ static ssize_t query_node_add(void *data, RRDHOST *host, bool queryable_host) { qn->node_id[0] = '\0'; // is the chart given valid? - if(unlikely(qtl->st && (!qtl->st->rrdinstance || !qtl->st->rrdcontext))) { + if(unlikely(qtl->st && (!qtl->st->rrdcontexts.rrdinstance || !qtl->st->rrdcontexts.rrdcontext))) { netdata_log_error("QUERY TARGET: RRDSET '%s' given, but it is not linked to rrdcontext structures. Linking it now.", rrdset_name(qtl->st)); rrdinstance_from_rrdset(qtl->st); - if(unlikely(qtl->st && (!qtl->st->rrdinstance || !qtl->st->rrdcontext))) { + if(unlikely(qtl->st && (!qtl->st->rrdcontexts.rrdinstance || !qtl->st->rrdcontexts.rrdcontext))) { netdata_log_error("QUERY TARGET: RRDSET '%s' given, but failed to be linked to rrdcontext structures. Switching to context query.", rrdset_name(qtl->st)); @@ -918,7 +918,7 @@ static ssize_t query_node_add(void *data, RRDHOST *host, bool queryable_host) { } else if(unlikely(qtl->st)) { // single chart data queries - if(query_context_add(qtl, qtl->st->rrdcontext, true)) + if(query_context_add(qtl, qtl->st->rrdcontexts.rrdcontext, true)) added++; } else { @@ -1052,8 +1052,9 @@ QUERY_TARGET *query_target_create(QUERY_TARGET_REQUEST *qtr) { if(query_target_has_percentage_of_group(qt)) qt->window.options &= ~RRDR_OPTION_PERCENTAGE; - qt->internal.relative = rrdr_relative_window_to_absolute(&qt->window.after, &qt->window.before, &qt->window.now, - unittest_running); + qt->internal.relative = rrdr_relative_window_to_absolute_query(&qt->window.after, &qt->window.before + , &qt->window.now, unittest_running + ); // prepare our local variables - we need these across all these functions QUERY_TARGET_LOCALS qtl = { diff --git a/database/contexts/rrdcontext.c b/database/contexts/rrdcontext.c index 8538d17f28919b..9dee39be2a437d 100644 --- a/database/contexts/rrdcontext.c +++ b/database/contexts/rrdcontext.c @@ -224,26 +224,31 @@ void rrdcontext_hub_checkpoint_command(void *ptr) { struct ctxs_checkpoint *cmd = ptr; if(!rrdhost_check_our_claim_id(cmd->claim_id)) { - netdata_log_error("RRDCONTEXT: received checkpoint command for claim_id '%s', node id '%s', but this is not our claim id. Ours '%s', received '%s'. Ignoring command.", - cmd->claim_id, cmd->node_id, - localhost->aclk_state.claimed_id?localhost->aclk_state.claimed_id:"NOT SET", - cmd->claim_id); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "RRDCONTEXT: received checkpoint command for claim_id '%s', node id '%s', " + "but this is not our claim id. Ours '%s', received '%s'. Ignoring command.", + cmd->claim_id, cmd->node_id, + localhost->aclk_state.claimed_id?localhost->aclk_state.claimed_id:"NOT SET", + cmd->claim_id); return; } RRDHOST *host = rrdhost_find_by_node_id(cmd->node_id); if(!host) { - netdata_log_error("RRDCONTEXT: received checkpoint command for claim id '%s', node id '%s', but there is no node with such node id here. 
Ignoring command.", - cmd->claim_id, - cmd->node_id); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "RRDCONTEXT: received checkpoint command for claim id '%s', node id '%s', " + "but there is no node with such node id here. Ignoring command.", + cmd->claim_id, cmd->node_id); return; } if(rrdhost_flag_check(host, RRDHOST_FLAG_ACLK_STREAM_CONTEXTS)) { - netdata_log_info("RRDCONTEXT: received checkpoint command for claim id '%s', node id '%s', while node '%s' has an active context streaming.", - cmd->claim_id, cmd->node_id, rrdhost_hostname(host)); + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "RRDCONTEXT: received checkpoint command for claim id '%s', node id '%s', " + "while node '%s' has an active context streaming.", + cmd->claim_id, cmd->node_id, rrdhost_hostname(host)); // disable it temporarily, so that our worker will not attempt to send messages in parallel rrdhost_flag_clear(host, RRDHOST_FLAG_ACLK_STREAM_CONTEXTS); @@ -252,8 +257,10 @@ void rrdcontext_hub_checkpoint_command(void *ptr) { uint64_t our_version_hash = rrdcontext_version_hash(host); if(cmd->version_hash != our_version_hash) { - netdata_log_error("RRDCONTEXT: received version hash %"PRIu64" for host '%s', does not match our version hash %"PRIu64". Sending snapshot of all contexts.", - cmd->version_hash, rrdhost_hostname(host), our_version_hash); + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "RRDCONTEXT: received version hash %"PRIu64" for host '%s', does not match our version hash %"PRIu64". " + "Sending snapshot of all contexts.", + cmd->version_hash, rrdhost_hostname(host), our_version_hash); #ifdef ENABLE_ACLK // prepare the snapshot @@ -275,41 +282,55 @@ void rrdcontext_hub_checkpoint_command(void *ptr) { #endif } - internal_error(true, "RRDCONTEXT: host '%s' enabling streaming of contexts", rrdhost_hostname(host)); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "RRDCONTEXT: host '%s' enabling streaming of contexts", + rrdhost_hostname(host)); + rrdhost_flag_set(host, RRDHOST_FLAG_ACLK_STREAM_CONTEXTS); char node_str[UUID_STR_LEN]; uuid_unparse_lower(*host->node_id, node_str); - netdata_log_access("ACLK REQ [%s (%s)]: STREAM CONTEXTS ENABLED", node_str, rrdhost_hostname(host)); + nd_log(NDLS_ACCESS, NDLP_DEBUG, + "ACLK REQ [%s (%s)]: STREAM CONTEXTS ENABLED", + node_str, rrdhost_hostname(host)); } void rrdcontext_hub_stop_streaming_command(void *ptr) { struct stop_streaming_ctxs *cmd = ptr; if(!rrdhost_check_our_claim_id(cmd->claim_id)) { - netdata_log_error("RRDCONTEXT: received stop streaming command for claim_id '%s', node id '%s', but this is not our claim id. Ours '%s', received '%s'. Ignoring command.", - cmd->claim_id, cmd->node_id, - localhost->aclk_state.claimed_id?localhost->aclk_state.claimed_id:"NOT SET", - cmd->claim_id); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "RRDCONTEXT: received stop streaming command for claim_id '%s', node id '%s', " + "but this is not our claim id. Ours '%s', received '%s'. Ignoring command.", + cmd->claim_id, cmd->node_id, + localhost->aclk_state.claimed_id?localhost->aclk_state.claimed_id:"NOT SET", + cmd->claim_id); return; } RRDHOST *host = rrdhost_find_by_node_id(cmd->node_id); if(!host) { - netdata_log_error("RRDCONTEXT: received stop streaming command for claim id '%s', node id '%s', but there is no node with such node id here. Ignoring command.", - cmd->claim_id, cmd->node_id); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "RRDCONTEXT: received stop streaming command for claim id '%s', node id '%s', " + "but there is no node with such node id here. 
Ignoring command.", + cmd->claim_id, cmd->node_id); return; } if(!rrdhost_flag_check(host, RRDHOST_FLAG_ACLK_STREAM_CONTEXTS)) { - netdata_log_error("RRDCONTEXT: received stop streaming command for claim id '%s', node id '%s', but node '%s' does not have active context streaming. Ignoring command.", - cmd->claim_id, cmd->node_id, rrdhost_hostname(host)); + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "RRDCONTEXT: received stop streaming command for claim id '%s', node id '%s', " + "but node '%s' does not have active context streaming. Ignoring command.", + cmd->claim_id, cmd->node_id, rrdhost_hostname(host)); return; } - internal_error(true, "RRDCONTEXT: host '%s' disabling streaming of contexts", rrdhost_hostname(host)); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "RRDCONTEXT: host '%s' disabling streaming of contexts", + rrdhost_hostname(host)); + rrdhost_flag_clear(host, RRDHOST_FLAG_ACLK_STREAM_CONTEXTS); } diff --git a/database/contexts/rrdcontext.h b/database/contexts/rrdcontext.h index 0bcdb68ded4859..9c497a5a5ee7c6 100644 --- a/database/contexts/rrdcontext.h +++ b/database/contexts/rrdcontext.h @@ -40,7 +40,7 @@ const char *rrdinstance_acquired_name(RRDINSTANCE_ACQUIRED *ria); bool rrdinstance_acquired_has_name(RRDINSTANCE_ACQUIRED *ria); const char *rrdinstance_acquired_units(RRDINSTANCE_ACQUIRED *ria); STRING *rrdinstance_acquired_units_dup(RRDINSTANCE_ACQUIRED *ria); -DICTIONARY *rrdinstance_acquired_labels(RRDINSTANCE_ACQUIRED *ria); +RRDLABELS *rrdinstance_acquired_labels(RRDINSTANCE_ACQUIRED *ria); DICTIONARY *rrdinstance_acquired_functions(RRDINSTANCE_ACQUIRED *ria); RRDHOST *rrdinstance_acquired_rrdhost(RRDINSTANCE_ACQUIRED *ria); RRDSET *rrdinstance_acquired_rrdset(RRDINSTANCE_ACQUIRED *ria); @@ -432,6 +432,7 @@ struct sql_alert_transition_data { const char *units; const char *exec; const char *info; + const char *summary; const char *classification; const char *type; const char *component; @@ -472,6 +473,7 @@ struct sql_alert_config_data { const char *classification; const char *component; const char *type; + const char *summary; struct { struct { @@ -531,6 +533,7 @@ struct sql_alert_instance_v2_entry { RRDCALC_STATUS status; RRDCALC_FLAGS flags; STRING *info; + STRING *summary; NETDATA_DOUBLE value; time_t last_updated; time_t last_status_change; diff --git a/database/engine/cache.c b/database/engine/cache.c index 7a9ccf8d1bb32d..eb1c35298d560c 100644 --- a/database/engine/cache.c +++ b/database/engine/cache.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-3.0-or-later #include "cache.h" /* STATES AND TRANSITIONS @@ -1170,9 +1171,10 @@ static bool evict_pages_with_filter(PGC *cache, size_t max_skip, size_t max_evic if(all_of_them && !filter) { pgc_ll_lock(cache, &cache->clean); if(cache->clean.stats->entries) { - error_limit_static_global_var(erl, 1, 0); - error_limit(&erl, "DBENGINE CACHE: cannot free all clean pages, %zu are still in the clean queue", - cache->clean.stats->entries); + nd_log_limit_static_global_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_DAEMON, NDLP_NOTICE, + "DBENGINE CACHE: cannot free all clean pages, %zu are still in the clean queue", + cache->clean.stats->entries); } pgc_ll_unlock(cache, &cache->clean); } @@ -1801,7 +1803,7 @@ PGC *pgc_create(const char *name, cache->aral = callocz(cache->config.partitions, sizeof(ARAL *)); for(size_t part = 0; part < cache->config.partitions ; part++) { char buf[100 +1]; - snprintfz(buf, 100, "%s[%zu]", name, part); + snprintfz(buf, sizeof(buf) - 1, "%s[%zu]", name, part); cache->aral[part] = aral_create( buf, sizeof(PGC_PAGE) + 
cache->config.additional_bytes_per_page, @@ -1860,7 +1862,7 @@ void pgc_destroy(PGC *cache) { freez(cache->aral); #endif - + freez(cache->index); freez(cache); } } @@ -2517,7 +2519,7 @@ void unittest_stress_test(void) { for(size_t i = 0; i < pgc_uts.collect_threads ;i++) { collect_thread_ids[i] = i; char buffer[100 + 1]; - snprintfz(buffer, 100, "COLLECT_%zu", i); + snprintfz(buffer, sizeof(buffer) - 1, "COLLECT_%zu", i); netdata_thread_create(&collect_threads[i], buffer, NETDATA_THREAD_OPTION_JOINABLE | NETDATA_THREAD_OPTION_DONT_LOG, unittest_stress_test_collector, &collect_thread_ids[i]); @@ -2529,7 +2531,7 @@ void unittest_stress_test(void) { for(size_t i = 0; i < pgc_uts.query_threads ;i++) { query_thread_ids[i] = i; char buffer[100 + 1]; - snprintfz(buffer, 100, "QUERY_%zu", i); + snprintfz(buffer, sizeof(buffer) - 1, "QUERY_%zu", i); initstate_r(1, pgc_uts.rand_statebufs, 1024, &pgc_uts.random_data[i]); netdata_thread_create(&queries_threads[i], buffer, NETDATA_THREAD_OPTION_JOINABLE | NETDATA_THREAD_OPTION_DONT_LOG, diff --git a/database/engine/cache.h b/database/engine/cache.h index 1486fdc1667136..7cd7c0636f6fe0 100644 --- a/database/engine/cache.h +++ b/database/engine/cache.h @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-3.0-or-later #ifndef DBENGINE_CACHE_H #define DBENGINE_CACHE_H @@ -71,8 +72,8 @@ struct pgc_statistics { PGC_CACHE_LINE_PADDING(3); - size_t entries; // all the entries (includes clean, dirty, host) - size_t size; // all the entries (includes clean, dirty, host) + size_t entries; // all the entries (includes clean, dirty, hot) + size_t size; // all the entries (includes clean, dirty, hot) size_t evicting_entries; size_t evicting_size; diff --git a/database/engine/datafile.c b/database/engine/datafile.c index d5c1285be17822..7322039cd3688a 100644 --- a/database/engine/datafile.c +++ b/database/engine/datafile.c @@ -112,7 +112,7 @@ bool datafile_acquire_for_deletion(struct rrdengine_datafile *df) { "but it has %u lockers (oc:%u, pd:%u), " "%zu clean and %zu hot open cache pages " "- will be deleted shortly " - "(scanned open cache in %llu usecs)", + "(scanned open cache in %"PRIu64" usecs)", df->fileno, df->ctx->config.tier, df->users.lockers, df->users.lockers_by_reason[DATAFILE_ACQUIRE_OPEN_CACHE], @@ -129,7 +129,7 @@ bool datafile_acquire_for_deletion(struct rrdengine_datafile *df) { "but it has %u lockers (oc:%u, pd:%u), " "%zu clean and %zu hot open cache pages " "- will be deleted now " - "(scanned open cache in %llu usecs)", + "(scanned open cache in %"PRIu64" usecs)", df->fileno, df->ctx->config.tier, df->users.lockers, df->users.lockers_by_reason[DATAFILE_ACQUIRE_OPEN_CACHE], @@ -143,7 +143,7 @@ bool datafile_acquire_for_deletion(struct rrdengine_datafile *df) { internal_error(true, "DBENGINE: datafile %u of tier %d " "has %u lockers (oc:%u, pd:%u), " "%zu clean and %zu hot open cache pages " - "(scanned open cache in %llu usecs)", + "(scanned open cache in %"PRIu64" usecs)", df->fileno, df->ctx->config.tier, df->users.lockers, df->users.lockers_by_reason[DATAFILE_ACQUIRE_OPEN_CACHE], @@ -160,7 +160,7 @@ bool datafile_acquire_for_deletion(struct rrdengine_datafile *df) { void generate_datafilepath(struct rrdengine_datafile *datafile, char *str, size_t maxlen) { - (void) snprintfz(str, maxlen, "%s/" DATAFILE_PREFIX RRDENG_FILE_NUMBER_PRINT_TMPL DATAFILE_EXTENSION, + (void) snprintfz(str, maxlen - 1, "%s/" DATAFILE_PREFIX RRDENG_FILE_NUMBER_PRINT_TMPL DATAFILE_EXTENSION, datafile->ctx->config.dbfiles_path, datafile->tier, datafile->fileno); } @@ -338,7 
+338,8 @@ static int load_data_file(struct rrdengine_datafile *datafile) ctx_fs_error(ctx); return fd; } - netdata_log_info("DBENGINE: initializing data file \"%s\".", path); + + nd_log_daemon(NDLP_DEBUG, "DBENGINE: initializing data file \"%s\".", path); ret = check_file_properties(file, &file_size, sizeof(struct rrdeng_df_sb)); if (ret) @@ -354,7 +355,8 @@ static int load_data_file(struct rrdengine_datafile *datafile) datafile->file = file; datafile->pos = file_size; - netdata_log_info("DBENGINE: data file \"%s\" initialized (size:%"PRIu64").", path, file_size); + nd_log_daemon(NDLP_DEBUG, "DBENGINE: data file \"%s\" initialized (size:%" PRIu64 ").", path, file_size); + return 0; error: @@ -422,6 +424,7 @@ static int scan_data_files(struct rrdengine_instance *ctx) ctx->atomic.last_fileno = datafiles[matched_files - 1]->fileno; + netdata_log_info("DBENGINE: loading %d data/journal of tier %d...", matched_files, ctx->config.tier); for (failed_to_load = 0, i = 0 ; i < matched_files ; ++i) { uint8_t must_delete_pair = 0; @@ -479,14 +482,18 @@ int create_new_datafile_pair(struct rrdengine_instance *ctx, bool having_lock) int ret; char path[RRDENG_PATH_MAX]; - netdata_log_info("DBENGINE: creating new data and journal files in path %s", ctx->config.dbfiles_path); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "DBENGINE: creating new data and journal files in path %s", + ctx->config.dbfiles_path); + datafile = datafile_alloc_and_init(ctx, 1, fileno); ret = create_data_file(datafile); if(ret) goto error_after_datafile; generate_datafilepath(datafile, path, sizeof(path)); - netdata_log_info("DBENGINE: created data file \"%s\".", path); + nd_log(NDLS_DAEMON, NDLP_INFO, + "DBENGINE: created data file \"%s\".", path); journalfile = journalfile_alloc_and_init(datafile); ret = journalfile_create(journalfile, datafile); @@ -494,7 +501,8 @@ int create_new_datafile_pair(struct rrdengine_instance *ctx, bool having_lock) goto error_after_journalfile; journalfile_v1_generate_path(datafile, path, sizeof(path)); - netdata_log_info("DBENGINE: created journal file \"%s\".", path); + nd_log(NDLS_DAEMON, NDLP_INFO, + "DBENGINE: created journal file \"%s\".", path); ctx_current_disk_space_increase(ctx, datafile->pos + journalfile->unsafe.pos); datafile_list_insert(ctx, datafile, having_lock); diff --git a/database/engine/journalfile.c b/database/engine/journalfile.c index abb9d2eb951ab7..9005b81ca2116d 100644 --- a/database/engine/journalfile.c +++ b/database/engine/journalfile.c @@ -67,7 +67,7 @@ void journalfile_v2_generate_path(struct rrdengine_datafile *datafile, char *str void journalfile_v1_generate_path(struct rrdengine_datafile *datafile, char *str, size_t maxlen) { - (void) snprintfz(str, maxlen, "%s/" WALFILE_PREFIX RRDENG_FILE_NUMBER_PRINT_TMPL WALFILE_EXTENSION, + (void) snprintfz(str, maxlen - 1, "%s/" WALFILE_PREFIX RRDENG_FILE_NUMBER_PRINT_TMPL WALFILE_EXTENSION, datafile->ctx->config.dbfiles_path, datafile->tier, datafile->fileno); } @@ -169,7 +169,7 @@ static void njfv2idx_add(struct rrdengine_datafile *datafile) { *PValue = datafile; break; } - } while(0); + } while(1); rw_spinlock_write_unlock(&datafile->ctx->njfv2idx.spinlock); } @@ -1013,7 +1013,7 @@ void journalfile_v2_populate_retention_to_mrg(struct rrdengine_instance *ctx, st journalfile_v2_data_release(journalfile); usec_t ended_ut = now_monotonic_usec(); - netdata_log_info("DBENGINE: journal v2 of tier %d, datafile %u populated, size: %0.2f MiB, metrics: %0.2f k, %0.2f ms" + nd_log_daemon(NDLP_DEBUG, "DBENGINE: journal v2 of tier %d, datafile %u 
populated, size: %0.2f MiB, metrics: %0.2f k, %0.2f ms" , ctx->config.tier, journalfile->datafile->fileno , (double)data_size / 1024 / 1024 , (double)entries / 1000 @@ -1073,7 +1073,8 @@ int journalfile_v2_load(struct rrdengine_instance *ctx, struct rrdengine_journal return 1; } - netdata_log_info("DBENGINE: checking integrity of '%s'", path_v2); + nd_log_daemon(NDLP_DEBUG, "DBENGINE: checking integrity of '%s'", path_v2); + usec_t validation_start_ut = now_monotonic_usec(); int rc = journalfile_v2_validate(data_start, journal_v2_file_size, journal_v1_file_size); if (unlikely(rc)) { @@ -1104,7 +1105,7 @@ int journalfile_v2_load(struct rrdengine_instance *ctx, struct rrdengine_journal usec_t finished_ut = now_monotonic_usec(); - netdata_log_info("DBENGINE: journal v2 '%s' loaded, size: %0.2f MiB, metrics: %0.2f k, " + nd_log_daemon(NDLP_DEBUG, "DBENGINE: journal v2 '%s' loaded, size: %0.2f MiB, metrics: %0.2f k, " "mmap: %0.2f ms, validate: %0.2f ms" , path_v2 , (double)journal_v2_file_size / 1024 / 1024 @@ -1535,13 +1536,13 @@ int journalfile_load(struct rrdengine_instance *ctx, struct rrdengine_journalfil } ctx_io_read_op_bytes(ctx, sizeof(struct rrdeng_jf_sb)); - netdata_log_info("DBENGINE: loading journal file '%s'", path); + nd_log_daemon(NDLP_DEBUG, "DBENGINE: loading journal file '%s'", path); max_id = journalfile_iterate_transactions(ctx, journalfile); __atomic_store_n(&ctx->atomic.transaction_id, MAX(__atomic_load_n(&ctx->atomic.transaction_id, __ATOMIC_RELAXED), max_id + 1), __ATOMIC_RELAXED); - netdata_log_info("DBENGINE: journal file '%s' loaded (size:%"PRIu64").", path, file_size); + nd_log_daemon(NDLP_DEBUG, "DBENGINE: journal file '%s' loaded (size:%" PRIu64 ").", path, file_size); bool is_last_file = (ctx_last_fileno_get(ctx) == journalfile->datafile->fileno); if (is_last_file && journalfile->datafile->pos <= rrdeng_target_data_file_size(ctx) / 3) { diff --git a/database/engine/metric.c b/database/engine/metric.c index 0b248c09b55999..735ae7ace13e78 100644 --- a/database/engine/metric.c +++ b/database/engine/metric.c @@ -1,30 +1,44 @@ +// SPDX-License-Identifier: GPL-3.0-or-later #include "metric.h" typedef int32_t REFCOUNT; #define REFCOUNT_DELETING (-100) -typedef enum __attribute__ ((__packed__)) { - METRIC_FLAG_HAS_RETENTION = (1 << 0), -} METRIC_FLAGS; - struct metric { uuid_t uuid; // never changes Word_t section; // never changes - time_t first_time_s; // - time_t latest_time_s_clean; // archived pages latest time - time_t latest_time_s_hot; // latest time of the currently collected page - uint32_t latest_update_every_s; // + time_t first_time_s; // the timestamp of the oldest point in the database + time_t latest_time_s_clean; // the timestamp of the newest point in the database + time_t latest_time_s_hot; // the timestamp of the latest point that has been collected (not yet stored) + uint32_t latest_update_every_s; // the latest data collection frequency pid_t writer; uint8_t partition; - METRIC_FLAGS flags; REFCOUNT refcount; - SPINLOCK spinlock; // protects all variable members // THIS IS allocated with malloc() // YOU HAVE TO INITIALIZE IT YOURSELF ! 
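    // editorial note (not part of the change): with this patch the per-metric spinlock and
    // METRIC_FLAGS are removed; the mutable fields above (first_time_s, latest_time_s_clean,
    // latest_time_s_hot, latest_update_every_s, writer, refcount) are updated lock-free with
    // __atomic builtins, and "has retention" is derived from the timestamps instead of a flag.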
}; +#define set_metric_field_with_condition(field, value, condition) ({ \ + typeof(field) _current = __atomic_load_n(&(field), __ATOMIC_RELAXED); \ + typeof(field) _wanted = value; \ + bool did_it = true; \ + \ + do { \ + if((condition) && (_current != _wanted)) { \ + ; \ + } \ + else { \ + did_it = false; \ + break; \ + } \ + } while(!__atomic_compare_exchange_n(&(field), &_current, _wanted, \ + false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)); \ + \ + did_it; \ +}) + static struct aral_statistics mrg_aral_statistics; struct mrg { @@ -73,9 +87,6 @@ static inline void MRG_STATS_DELETE_MISS(MRG *mrg, size_t partition) { #define mrg_index_write_lock(mrg, partition) rw_spinlock_write_lock(&(mrg)->index[partition].rw_spinlock) #define mrg_index_write_unlock(mrg, partition) rw_spinlock_write_unlock(&(mrg)->index[partition].rw_spinlock) -#define metric_lock(metric) spinlock_lock(&(metric)->spinlock) -#define metric_unlock(metric) spinlock_unlock(&(metric)->spinlock) - static inline void mrg_stats_size_judyl_change(MRG *mrg, size_t mem_before_judyl, size_t mem_after_judyl, size_t partition) { if(mem_after_judyl > mem_before_judyl) __atomic_add_fetch(&mrg->index[partition].stats.size, mem_after_judyl - mem_before_judyl, __ATOMIC_RELAXED); @@ -97,40 +108,34 @@ static inline size_t uuid_partition(MRG *mrg __maybe_unused, uuid_t *uuid) { return *n % mrg->partitions; } -static inline bool metric_has_retention_unsafe(MRG *mrg __maybe_unused, METRIC *metric) { - size_t partition = metric->partition; +static inline time_t mrg_metric_get_first_time_s_smart(MRG *mrg __maybe_unused, METRIC *metric) { + time_t first_time_s = __atomic_load_n(&metric->first_time_s, __ATOMIC_RELAXED); - bool has_retention = (metric->first_time_s > 0 || metric->latest_time_s_clean > 0 || metric->latest_time_s_hot > 0); + if(first_time_s <= 0) { + first_time_s = __atomic_load_n(&metric->latest_time_s_clean, __ATOMIC_RELAXED); + if(first_time_s <= 0) + first_time_s = __atomic_load_n(&metric->latest_time_s_hot, __ATOMIC_RELAXED); - if(has_retention && !(metric->flags & METRIC_FLAG_HAS_RETENTION)) { - metric->flags |= METRIC_FLAG_HAS_RETENTION; - __atomic_add_fetch(&mrg->index[partition].stats.entries_with_retention, 1, __ATOMIC_RELAXED); - } - else if(!has_retention && (metric->flags & METRIC_FLAG_HAS_RETENTION)) { - metric->flags &= ~METRIC_FLAG_HAS_RETENTION; - __atomic_sub_fetch(&mrg->index[partition].stats.entries_with_retention, 1, __ATOMIC_RELAXED); + if(first_time_s <= 0) + first_time_s = 0; + else + __atomic_store_n(&metric->first_time_s, first_time_s, __ATOMIC_RELAXED); } - return has_retention; + return first_time_s; } -static inline REFCOUNT metric_acquire(MRG *mrg __maybe_unused, METRIC *metric, bool having_spinlock) { +static inline REFCOUNT metric_acquire(MRG *mrg __maybe_unused, METRIC *metric) { size_t partition = metric->partition; + REFCOUNT expected = metric->refcount; REFCOUNT refcount; - if(!having_spinlock) - metric_lock(metric); - - if(unlikely(metric->refcount < 0)) - fatal("METRIC: refcount is %d (negative) during acquire", metric->refcount); - - refcount = ++metric->refcount; - - // update its retention flags - metric_has_retention_unsafe(mrg, metric); + do { + if(expected < 0) + fatal("METRIC: refcount is %d (negative) during acquire", metric->refcount); - if(!having_spinlock) - metric_unlock(metric); + refcount = expected + 1; + } while(!__atomic_compare_exchange_n(&metric->refcount, &expected, refcount, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)); if(refcount == 1) 
__atomic_add_fetch(&mrg->index[partition].stats.entries_referenced, 1, __ATOMIC_RELAXED); @@ -141,28 +146,25 @@ static inline REFCOUNT metric_acquire(MRG *mrg __maybe_unused, METRIC *metric, b } static inline bool metric_release_and_can_be_deleted(MRG *mrg __maybe_unused, METRIC *metric) { - bool ret = true; size_t partition = metric->partition; + REFCOUNT expected = metric->refcount; REFCOUNT refcount; - metric_lock(metric); - - if(unlikely(metric->refcount <= 0)) - fatal("METRIC: refcount is %d (zero or negative) during release", metric->refcount); - - refcount = --metric->refcount; - - if(likely(metric_has_retention_unsafe(mrg, metric) || refcount != 0)) - ret = false; + do { + if(expected <= 0) + fatal("METRIC: refcount is %d (zero or negative) during release", metric->refcount); - metric_unlock(metric); + refcount = expected - 1; + } while(!__atomic_compare_exchange_n(&metric->refcount, &expected, refcount, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)); if(unlikely(!refcount)) __atomic_sub_fetch(&mrg->index[partition].stats.entries_referenced, 1, __ATOMIC_RELAXED); __atomic_sub_fetch(&mrg->index[partition].stats.current_references, 1, __ATOMIC_RELAXED); - return ret; + time_t first, last, ue; + mrg_metric_get_retention(mrg, metric, &first, &last, &ue); + return (!first || !last || first > last); } static inline METRIC *metric_add_and_acquire(MRG *mrg, MRG_ENTRY *entry, bool *ret) { @@ -192,7 +194,7 @@ static inline METRIC *metric_add_and_acquire(MRG *mrg, MRG_ENTRY *entry, bool *r if(unlikely(*PValue != NULL)) { METRIC *metric = *PValue; - metric_acquire(mrg, metric, false); + metric_acquire(mrg, metric); MRG_STATS_DUPLICATE_ADD(mrg, partition); @@ -215,10 +217,8 @@ static inline METRIC *metric_add_and_acquire(MRG *mrg, MRG_ENTRY *entry, bool *r metric->latest_update_every_s = entry->latest_update_every_s; metric->writer = 0; metric->refcount = 0; - metric->flags = 0; metric->partition = partition; - spinlock_init(&metric->spinlock); - metric_acquire(mrg, metric, true); // no spinlock use required here + metric_acquire(mrg, metric); *PValue = metric; MRG_STATS_ADDED_METRIC(mrg, partition); @@ -252,7 +252,7 @@ static inline METRIC *metric_get_and_acquire(MRG *mrg, uuid_t *uuid, Word_t sect METRIC *metric = *PValue; - metric_acquire(mrg, metric, false); + metric_acquire(mrg, metric); mrg_index_read_unlock(mrg, partition); @@ -363,7 +363,7 @@ inline bool mrg_metric_release_and_delete(MRG *mrg, METRIC *metric) { } inline METRIC *mrg_metric_dup(MRG *mrg, METRIC *metric) { - metric_acquire(mrg, metric, false); + metric_acquire(mrg, metric); return metric; } @@ -389,10 +389,7 @@ inline bool mrg_metric_set_first_time_s(MRG *mrg __maybe_unused, METRIC *metric, if(unlikely(first_time_s < 0)) return false; - metric_lock(metric); - metric->first_time_s = first_time_s; - metric_has_retention_unsafe(mrg, metric); - metric_unlock(metric); + __atomic_store_n(&metric->first_time_s, first_time_s, __ATOMIC_RELAXED); return true; } @@ -405,112 +402,56 @@ inline void mrg_metric_expand_retention(MRG *mrg __maybe_unused, METRIC *metric, internal_fatal(last_time_s > max_acceptable_collected_time(), "DBENGINE METRIC: metric last time is in the future"); - if(unlikely(first_time_s < 0)) - first_time_s = 0; - - if(unlikely(last_time_s < 0)) - last_time_s = 0; - - if(unlikely(update_every_s < 0)) - update_every_s = 0; - - if(unlikely(!first_time_s && !last_time_s && !update_every_s)) - return; + if(first_time_s > 0) + set_metric_field_with_condition(metric->first_time_s, first_time_s, _current <= 0 || _wanted < 
_current); - metric_lock(metric); - - if(unlikely(first_time_s && (!metric->first_time_s || first_time_s < metric->first_time_s))) - metric->first_time_s = first_time_s; - - if(likely(last_time_s && (!metric->latest_time_s_clean || last_time_s > metric->latest_time_s_clean))) { - metric->latest_time_s_clean = last_time_s; - - if(likely(update_every_s)) - metric->latest_update_every_s = (uint32_t) update_every_s; + if(last_time_s > 0) { + if(set_metric_field_with_condition(metric->latest_time_s_clean, last_time_s, _current <= 0 || _wanted > _current) && + update_every_s > 0) + // set the latest update every too + set_metric_field_with_condition(metric->latest_update_every_s, update_every_s, true); } - else if(unlikely(!metric->latest_update_every_s && update_every_s)) - metric->latest_update_every_s = (uint32_t) update_every_s; - - metric_has_retention_unsafe(mrg, metric); - metric_unlock(metric); + else if(update_every_s > 0) + // set it only if it is invalid + set_metric_field_with_condition(metric->latest_update_every_s, update_every_s, _current <= 0); } inline bool mrg_metric_set_first_time_s_if_bigger(MRG *mrg __maybe_unused, METRIC *metric, time_t first_time_s) { internal_fatal(first_time_s < 0, "DBENGINE METRIC: timestamp is negative"); - - bool ret = false; - - metric_lock(metric); - if(first_time_s > metric->first_time_s) { - metric->first_time_s = first_time_s; - ret = true; - } - metric_has_retention_unsafe(mrg, metric); - metric_unlock(metric); - - return ret; + return set_metric_field_with_condition(metric->first_time_s, first_time_s, _wanted > _current); } inline time_t mrg_metric_get_first_time_s(MRG *mrg __maybe_unused, METRIC *metric) { - time_t first_time_s; - - metric_lock(metric); - - if(unlikely(!metric->first_time_s)) { - if(metric->latest_time_s_clean) - metric->first_time_s = metric->latest_time_s_clean; - - else if(metric->latest_time_s_hot) - metric->first_time_s = metric->latest_time_s_hot; - } - - first_time_s = metric->first_time_s; - - metric_unlock(metric); - - return first_time_s; + return mrg_metric_get_first_time_s_smart(mrg, metric); } inline void mrg_metric_get_retention(MRG *mrg __maybe_unused, METRIC *metric, time_t *first_time_s, time_t *last_time_s, time_t *update_every_s) { - metric_lock(metric); - - if(unlikely(!metric->first_time_s)) { - if(metric->latest_time_s_clean) - metric->first_time_s = metric->latest_time_s_clean; - - else if(metric->latest_time_s_hot) - metric->first_time_s = metric->latest_time_s_hot; - } - - *first_time_s = metric->first_time_s; - *last_time_s = MAX(metric->latest_time_s_clean, metric->latest_time_s_hot); - *update_every_s = metric->latest_update_every_s; + time_t clean = __atomic_load_n(&metric->latest_time_s_clean, __ATOMIC_RELAXED); + time_t hot = __atomic_load_n(&metric->latest_time_s_hot, __ATOMIC_RELAXED); - metric_unlock(metric); + *last_time_s = MAX(clean, hot); + *first_time_s = mrg_metric_get_first_time_s_smart(mrg, metric); + *update_every_s = __atomic_load_n(&metric->latest_update_every_s, __ATOMIC_RELAXED); } inline bool mrg_metric_set_clean_latest_time_s(MRG *mrg __maybe_unused, METRIC *metric, time_t latest_time_s) { internal_fatal(latest_time_s < 0, "DBENGINE METRIC: timestamp is negative"); - if(unlikely(latest_time_s < 0)) - return false; - - metric_lock(metric); - // internal_fatal(latest_time_s > max_acceptable_collected_time(), // "DBENGINE METRIC: metric latest time is in the future"); // internal_fatal(metric->latest_time_s_clean > latest_time_s, // "DBENGINE METRIC: metric new clean latest time is 
older than the previous one"); - metric->latest_time_s_clean = latest_time_s; + if(latest_time_s > 0) { + if(set_metric_field_with_condition(metric->latest_time_s_clean, latest_time_s, true)) { + set_metric_field_with_condition(metric->first_time_s, latest_time_s, _current <= 0 || _wanted < _current); - if(unlikely(!metric->first_time_s)) - metric->first_time_s = latest_time_s; + return true; + } + } - metric_has_retention_unsafe(mrg, metric); - metric_unlock(metric); - return true; + return false; } // returns true when metric still has retention @@ -518,7 +459,6 @@ inline bool mrg_metric_zero_disk_retention(MRG *mrg __maybe_unused, METRIC *metr Word_t section = mrg_metric_section(mrg, metric); bool do_again = false; size_t countdown = 5; - bool ret = true; do { time_t min_first_time_s = LONG_MAX; @@ -547,22 +487,20 @@ inline bool mrg_metric_zero_disk_retention(MRG *mrg __maybe_unused, METRIC *metr if (min_first_time_s == LONG_MAX) min_first_time_s = 0; - metric_lock(metric); - if (--countdown && !min_first_time_s && metric->latest_time_s_hot) + if (--countdown && !min_first_time_s && __atomic_load_n(&metric->latest_time_s_hot, __ATOMIC_RELAXED)) do_again = true; else { internal_error(!countdown, "METRIC: giving up on updating the retention of metric without disk retention"); do_again = false; - metric->first_time_s = min_first_time_s; - metric->latest_time_s_clean = max_end_time_s; - - ret = metric_has_retention_unsafe(mrg, metric); + set_metric_field_with_condition(metric->first_time_s, min_first_time_s, true); + set_metric_field_with_condition(metric->latest_time_s_clean, max_end_time_s, true); } - metric_unlock(metric); } while(do_again); - return ret; + time_t first, last, ue; + mrg_metric_get_retention(mrg, metric, &first, &last, &ue); + return (first && last && first < last); } inline bool mrg_metric_set_hot_latest_time_s(MRG *mrg __maybe_unused, METRIC *metric, time_t latest_time_s) { @@ -571,88 +509,80 @@ inline bool mrg_metric_set_hot_latest_time_s(MRG *mrg __maybe_unused, METRIC *me // internal_fatal(latest_time_s > max_acceptable_collected_time(), // "DBENGINE METRIC: metric latest time is in the future"); - if(unlikely(latest_time_s < 0)) - return false; - - metric_lock(metric); - metric->latest_time_s_hot = latest_time_s; - - if(unlikely(!metric->first_time_s)) - metric->first_time_s = latest_time_s; + if(likely(latest_time_s > 0)) { + __atomic_store_n(&metric->latest_time_s_hot, latest_time_s, __ATOMIC_RELAXED); + return true; + } - metric_has_retention_unsafe(mrg, metric); - metric_unlock(metric); - return true; + return false; } inline time_t mrg_metric_get_latest_time_s(MRG *mrg __maybe_unused, METRIC *metric) { - time_t max; - metric_lock(metric); - max = MAX(metric->latest_time_s_clean, metric->latest_time_s_hot); - metric_unlock(metric); - return max; + time_t clean = __atomic_load_n(&metric->latest_time_s_clean, __ATOMIC_RELAXED); + time_t hot = __atomic_load_n(&metric->latest_time_s_hot, __ATOMIC_RELAXED); + + return MAX(clean, hot); } inline bool mrg_metric_set_update_every(MRG *mrg __maybe_unused, METRIC *metric, time_t update_every_s) { internal_fatal(update_every_s < 0, "DBENGINE METRIC: timestamp is negative"); - if(update_every_s <= 0) - return false; - - metric_lock(metric); - metric->latest_update_every_s = (uint32_t) update_every_s; - metric_unlock(metric); + if(update_every_s > 0) + return set_metric_field_with_condition(metric->latest_update_every_s, update_every_s, true); - return true; + return false; } inline bool 
mrg_metric_set_update_every_s_if_zero(MRG *mrg __maybe_unused, METRIC *metric, time_t update_every_s) { internal_fatal(update_every_s < 0, "DBENGINE METRIC: timestamp is negative"); - if(update_every_s <= 0) - return false; - - metric_lock(metric); - if(!metric->latest_update_every_s) - metric->latest_update_every_s = (uint32_t) update_every_s; - metric_unlock(metric); + if(update_every_s > 0) + return set_metric_field_with_condition(metric->latest_update_every_s, update_every_s, _current <= 0); - return true; + return false; } inline time_t mrg_metric_get_update_every_s(MRG *mrg __maybe_unused, METRIC *metric) { - time_t update_every_s; - - metric_lock(metric); - update_every_s = metric->latest_update_every_s; - metric_unlock(metric); - - return update_every_s; + return __atomic_load_n(&metric->latest_update_every_s, __ATOMIC_RELAXED); } inline bool mrg_metric_set_writer(MRG *mrg, METRIC *metric) { - bool done = false; - metric_lock(metric); - if(!metric->writer) { - metric->writer = gettid(); + pid_t expected = __atomic_load_n(&metric->writer, __ATOMIC_RELAXED); + pid_t wanted = gettid(); + bool done = true; + + do { + if(expected != 0) { + done = false; + break; + } + } while(!__atomic_compare_exchange_n(&metric->writer, &expected, wanted, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)); + + if(done) __atomic_add_fetch(&mrg->index[metric->partition].stats.writers, 1, __ATOMIC_RELAXED); - done = true; - } else __atomic_add_fetch(&mrg->index[metric->partition].stats.writers_conflicts, 1, __ATOMIC_RELAXED); - metric_unlock(metric); + return done; } inline bool mrg_metric_clear_writer(MRG *mrg, METRIC *metric) { - bool done = false; - metric_lock(metric); - if(metric->writer) { - metric->writer = 0; + // this function can be called from a different thread than the one than the writer + + pid_t expected = __atomic_load_n(&metric->writer, __ATOMIC_RELAXED); + pid_t wanted = 0; + bool done = true; + + do { + if(!expected) { + done = false; + break; + } + } while(!__atomic_compare_exchange_n(&metric->writer, &expected, wanted, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)); + + if(done) __atomic_sub_fetch(&mrg->index[metric->partition].stats.writers, 1, __ATOMIC_RELAXED); - done = true; - } - metric_unlock(metric); + return done; } @@ -662,27 +592,30 @@ inline void mrg_update_metric_retention_and_granularity_by_uuid( time_t update_every_s, time_t now_s) { if(unlikely(last_time_s > now_s)) { - error_limit_static_global_var(erl, 1, 0); - error_limit(&erl, "DBENGINE JV2: wrong last time on-disk (%ld - %ld, now %ld), " - "fixing last time to now", - first_time_s, last_time_s, now_s); + nd_log_limit_static_global_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_DAEMON, NDLP_WARNING, + "DBENGINE JV2: wrong last time on-disk (%ld - %ld, now %ld), " + "fixing last time to now", + first_time_s, last_time_s, now_s); last_time_s = now_s; } if (unlikely(first_time_s > last_time_s)) { - error_limit_static_global_var(erl, 1, 0); - error_limit(&erl, "DBENGINE JV2: wrong first time on-disk (%ld - %ld, now %ld), " - "fixing first time to last time", - first_time_s, last_time_s, now_s); + nd_log_limit_static_global_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_DAEMON, NDLP_WARNING, + "DBENGINE JV2: wrong first time on-disk (%ld - %ld, now %ld), " + "fixing first time to last time", + first_time_s, last_time_s, now_s); first_time_s = last_time_s; } if (unlikely(first_time_s == 0 || last_time_s == 0)) { - error_limit_static_global_var(erl, 1, 0); - error_limit(&erl, "DBENGINE JV2: zero on-disk timestamps (%ld - %ld, now %ld), " - 
"using them as-is", - first_time_s, last_time_s, now_s); + nd_log_limit_static_global_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_DAEMON, NDLP_WARNING, + "DBENGINE JV2: zero on-disk timestamps (%ld - %ld, now %ld), " + "using them as-is", + first_time_s, last_time_s, now_s); } bool added = false; @@ -710,7 +643,6 @@ inline void mrg_get_statistics(MRG *mrg, struct mrg_statistics *s) { for(size_t i = 0; i < mrg->partitions ;i++) { s->entries += __atomic_load_n(&mrg->index[i].stats.entries, __ATOMIC_RELAXED); s->entries_referenced += __atomic_load_n(&mrg->index[i].stats.entries_referenced, __ATOMIC_RELAXED); - s->entries_with_retention += __atomic_load_n(&mrg->index[i].stats.entries_with_retention, __ATOMIC_RELAXED); s->size += __atomic_load_n(&mrg->index[i].stats.size, __ATOMIC_RELAXED); s->current_references += __atomic_load_n(&mrg->index[i].stats.current_references, __ATOMIC_RELAXED); s->additions += __atomic_load_n(&mrg->index[i].stats.additions, __ATOMIC_RELAXED); @@ -900,7 +832,7 @@ int mrg_unittest(void) { pthread_t th[threads]; for(size_t i = 0; i < threads ; i++) { char buf[15 + 1]; - snprintfz(buf, 15, "TH[%zu]", i); + snprintfz(buf, sizeof(buf) - 1, "TH[%zu]", i); netdata_thread_create(&th[i], buf, NETDATA_THREAD_OPTION_JOINABLE | NETDATA_THREAD_OPTION_DONT_LOG, mrg_stress, &t); @@ -923,7 +855,7 @@ int mrg_unittest(void) { netdata_log_info("DBENGINE METRIC: did %zu additions, %zu duplicate additions, " "%zu deletions, %zu wrong deletions, " "%zu successful searches, %zu wrong searches, " - "in %llu usecs", + "in %"PRIu64" usecs", stats.additions, stats.additions_duplicate, stats.deletions, stats.delete_misses, stats.search_hits, stats.search_misses, diff --git a/database/engine/metric.h b/database/engine/metric.h index 5d5ebd7b139f6a..dbb9493019cfb1 100644 --- a/database/engine/metric.h +++ b/database/engine/metric.h @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-3.0-or-later #ifndef DBENGINE_METRIC_H #define DBENGINE_METRIC_H @@ -35,9 +36,6 @@ struct mrg_statistics { size_t entries_referenced; - MRG_CACHE_LINE_PADDING(1); - size_t entries_with_retention; - MRG_CACHE_LINE_PADDING(2); size_t current_references; diff --git a/database/engine/page.c b/database/engine/page.c new file mode 100644 index 00000000000000..b7a3934835e974 --- /dev/null +++ b/database/engine/page.c @@ -0,0 +1,679 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "page.h" + +#include "libnetdata/libnetdata.h" + +typedef enum __attribute__((packed)) { + PAGE_OPTION_ALL_VALUES_EMPTY = (1 << 0), +} PAGE_OPTIONS; + +typedef enum __attribute__((packed)) { + PGD_STATE_CREATED_FROM_COLLECTOR = (1 << 0), + PGD_STATE_CREATED_FROM_DISK = (1 << 1), + PGD_STATE_SCHEDULED_FOR_FLUSHING = (1 << 2), + PGD_STATE_FLUSHED_TO_DISK = (1 << 3), +} PGD_STATES; + +typedef struct { + uint8_t *data; + uint32_t size; +} page_raw_t; + + +typedef struct { + size_t num_buffers; + gorilla_writer_t *writer; + int aral_index; +} page_gorilla_t; + +struct pgd { + // the page type + uint8_t type; + + // options related to the page + PAGE_OPTIONS options; + + PGD_STATES states; + + // the uses number of slots in the page + uint32_t used; + + // the total number of slots available in the page + uint32_t slots; + + union { + page_raw_t raw; + page_gorilla_t gorilla; + }; +}; + +// ---------------------------------------------------------------------------- +// memory management + +struct { + ARAL *aral_pgd; + ARAL *aral_data[RRD_STORAGE_TIERS]; + ARAL *aral_gorilla_buffer[4]; + ARAL *aral_gorilla_writer[4]; +} pgd_alloc_globals = {}; + 
+static ARAL *pgd_aral_data_lookup(size_t size) +{ + for (size_t tier = 0; tier < storage_tiers; tier++) + if (size == tier_page_size[tier]) + return pgd_alloc_globals.aral_data[tier]; + + return NULL; +} + +void pgd_init_arals(void) +{ + // pgd aral + { + char buf[20 + 1]; + snprintfz(buf, sizeof(buf) - 1, "pgd"); + + // FIXME: add stats + pgd_alloc_globals.aral_pgd = aral_create( + buf, + sizeof(struct pgd), + 64, + 512 * (sizeof(struct pgd)), + pgc_aral_statistics(), + NULL, NULL, false, false); + } + + // tier page aral + { + for (size_t i = storage_tiers; i > 0 ;i--) + { + size_t tier = storage_tiers - i; + + char buf[20 + 1]; + snprintfz(buf, sizeof(buf) - 1, "tier%zu-pages", tier); + + pgd_alloc_globals.aral_data[tier] = aral_create( + buf, + tier_page_size[tier], + 64, + 512 * (tier_page_size[tier]), + pgc_aral_statistics(), + NULL, NULL, false, false); + } + } + + // gorilla buffers aral + for (size_t i = 0; i != 4; i++) { + char buf[20 + 1]; + snprintfz(buf, sizeof(buf) - 1, "gbuffer-%zu", i); + + // FIXME: add stats + pgd_alloc_globals.aral_gorilla_buffer[i] = aral_create( + buf, + GORILLA_BUFFER_SIZE, + 64, + 512 * GORILLA_BUFFER_SIZE, + pgc_aral_statistics(), + NULL, NULL, false, false); + } + + // gorilla writers aral + for (size_t i = 0; i != 4; i++) { + char buf[20 + 1]; + snprintfz(buf, sizeof(buf) - 1, "gwriter-%zu", i); + + // FIXME: add stats + pgd_alloc_globals.aral_gorilla_writer[i] = aral_create( + buf, + sizeof(gorilla_writer_t), + 64, + 512 * sizeof(gorilla_writer_t), + pgc_aral_statistics(), + NULL, NULL, false, false); + } +} + +static void *pgd_data_aral_alloc(size_t size) +{ + ARAL *ar = pgd_aral_data_lookup(size); + if (!ar) + return mallocz(size); + else + return aral_mallocz(ar); +} + +static void pgd_data_aral_free(void *page, size_t size) +{ + ARAL *ar = pgd_aral_data_lookup(size); + if (!ar) + freez(page); + else + aral_freez(ar, page); +} + +// ---------------------------------------------------------------------------- +// management api + +PGD *pgd_create(uint8_t type, uint32_t slots) +{ + PGD *pg = aral_mallocz(pgd_alloc_globals.aral_pgd); + pg->type = type; + pg->used = 0; + pg->slots = slots; + pg->options = PAGE_OPTION_ALL_VALUES_EMPTY; + pg->states = PGD_STATE_CREATED_FROM_COLLECTOR; + + switch (type) { + case PAGE_METRICS: + case PAGE_TIER: { + uint32_t size = slots * page_type_size[type]; + + internal_fatal(!size || slots == 1, + "DBENGINE: invalid number of slots (%u) or page type (%u)", slots, type); + + pg->raw.size = size; + pg->raw.data = pgd_data_aral_alloc(size); + break; + } + case PAGE_GORILLA_METRICS: { + internal_fatal(slots == 1, + "DBENGINE: invalid number of slots (%u) or page type (%u)", slots, type); + + pg->slots = 8 * GORILLA_BUFFER_SLOTS; + + // allocate new gorilla writer + pg->gorilla.aral_index = gettid() % 4; + pg->gorilla.writer = aral_mallocz(pgd_alloc_globals.aral_gorilla_writer[pg->gorilla.aral_index]); + + // allocate new gorilla buffer + gorilla_buffer_t *gbuf = aral_mallocz(pgd_alloc_globals.aral_gorilla_buffer[pg->gorilla.aral_index]); + memset(gbuf, 0, GORILLA_BUFFER_SIZE); + global_statistics_gorilla_buffer_add_hot(); + + *pg->gorilla.writer = gorilla_writer_init(gbuf, GORILLA_BUFFER_SLOTS); + pg->gorilla.num_buffers = 1; + + break; + } + default: + fatal("Unknown page type: %uc", type); + } + + return pg; +} + +PGD *pgd_create_from_disk_data(uint8_t type, void *base, uint32_t size) +{ + if (!size) + return PGD_EMPTY; + + if (size < page_type_size[type]) + return PGD_EMPTY; + + PGD *pg = 
aral_mallocz(pgd_alloc_globals.aral_pgd); + + pg->type = type; + pg->states = PGD_STATE_CREATED_FROM_DISK; + pg->options = ~PAGE_OPTION_ALL_VALUES_EMPTY; + + switch (type) + { + case PAGE_METRICS: + case PAGE_TIER: + pg->raw.size = size; + pg->used = size / page_type_size[type]; + pg->slots = pg->used; + + pg->raw.data = pgd_data_aral_alloc(size); + memcpy(pg->raw.data, base, size); + break; + case PAGE_GORILLA_METRICS: + internal_fatal(size == 0, "Asked to create page with 0 data!!!"); + internal_fatal(size % sizeof(uint32_t), "Unaligned gorilla buffer size"); + internal_fatal(size % GORILLA_BUFFER_SIZE, "Expected size to be a multiple of %zu-bytes", GORILLA_BUFFER_SIZE); + + pg->raw.data = mallocz(size); + pg->raw.size = size; + + // TODO: rm this + memset(pg->raw.data, 0, size); + memcpy(pg->raw.data, base, size); + + uint32_t total_entries = gorilla_buffer_patch((void *) pg->raw.data); + + pg->used = total_entries; + pg->slots = pg->used; + break; + default: + fatal("Unknown page type: %uc", type); + } + + return pg; +} + +void pgd_free(PGD *pg) +{ + if (!pg) + return; + + if (pg == PGD_EMPTY) + return; + + switch (pg->type) + { + case PAGE_METRICS: + case PAGE_TIER: + pgd_data_aral_free(pg->raw.data, pg->raw.size); + break; + case PAGE_GORILLA_METRICS: { + if (pg->states & PGD_STATE_CREATED_FROM_DISK) + { + internal_fatal(pg->raw.data == NULL, "Tried to free gorilla PGD loaded from disk with NULL data"); + freez(pg->raw.data); + pg->raw.data = NULL; + } + else if ((pg->states & PGD_STATE_CREATED_FROM_COLLECTOR) || + (pg->states & PGD_STATE_SCHEDULED_FOR_FLUSHING) || + (pg->states & PGD_STATE_FLUSHED_TO_DISK)) + { + internal_fatal(pg->gorilla.writer == NULL, + "PGD does not have an active gorilla writer"); + + internal_fatal(pg->gorilla.num_buffers == 0, + "PGD does not have any gorilla buffers allocated"); + + while (true) { + gorilla_buffer_t *gbuf = gorilla_writer_drop_head_buffer(pg->gorilla.writer); + if (!gbuf) + break; + aral_freez(pgd_alloc_globals.aral_gorilla_buffer[pg->gorilla.aral_index], gbuf); + pg->gorilla.num_buffers -= 1; + } + + internal_fatal(pg->gorilla.num_buffers != 0, + "Could not free all gorilla writer buffers"); + + aral_freez(pgd_alloc_globals.aral_gorilla_writer[pg->gorilla.aral_index], pg->gorilla.writer); + pg->gorilla.writer = NULL; + } else { + fatal("pgd_free() called on gorilla page with unsupported state"); + // TODO: should we support any other states? 
+ // if (!(pg->states & PGD_STATE_FLUSHED_TO_DISK)) + // fatal("pgd_free() is not supported yet for pages flushed to disk"); + } + + break; + } + default: + fatal("Unknown page type: %uc", pg->type); + } + + aral_freez(pgd_alloc_globals.aral_pgd, pg); +} + +// ---------------------------------------------------------------------------- +// utility functions + +uint32_t pgd_type(PGD *pg) +{ + return pg->type; +} + +bool pgd_is_empty(PGD *pg) +{ + if (!pg) + return true; + + if (pg == PGD_EMPTY) + return true; + + if (pg->used == 0) + return true; + + if (pg->options & PAGE_OPTION_ALL_VALUES_EMPTY) + return true; + + return false; +} + +uint32_t pgd_slots_used(PGD *pg) +{ + if (!pg) + return 0; + + if (pg == PGD_EMPTY) + return 0; + + return pg->used; +} + +uint32_t pgd_memory_footprint(PGD *pg) +{ + if (!pg) + return 0; + + if (pg == PGD_EMPTY) + return 0; + + size_t footprint = 0; + switch (pg->type) { + case PAGE_METRICS: + case PAGE_TIER: + footprint = sizeof(PGD) + pg->raw.size; + break; + case PAGE_GORILLA_METRICS: { + if (pg->states & PGD_STATE_CREATED_FROM_DISK) + footprint = sizeof(PGD) + pg->raw.size; + else + footprint = sizeof(PGD) + sizeof(gorilla_writer_t) + (pg->gorilla.num_buffers * GORILLA_BUFFER_SIZE); + + break; + } + default: + fatal("Unknown page type: %uc", pg->type); + } + + return footprint; +} + +uint32_t pgd_disk_footprint(PGD *pg) +{ + if (!pgd_slots_used(pg)) + return 0; + + size_t size = 0; + + switch (pg->type) { + case PAGE_METRICS: + case PAGE_TIER: { + uint32_t used_size = pg->used * page_type_size[pg->type]; + internal_fatal(used_size > pg->raw.size, "Wrong disk footprint page size"); + size = used_size; + + break; + } + case PAGE_GORILLA_METRICS: { + if (pg->states & PGD_STATE_CREATED_FROM_COLLECTOR || + pg->states & PGD_STATE_SCHEDULED_FOR_FLUSHING || + pg->states & PGD_STATE_FLUSHED_TO_DISK) + { + internal_fatal(!pg->gorilla.writer, + "pgd_disk_footprint() not implemented for NULL gorilla writers"); + + internal_fatal(pg->gorilla.num_buffers == 0, + "Gorilla writer does not have any buffers"); + + size = pg->gorilla.num_buffers * GORILLA_BUFFER_SIZE; + + if (pg->states & PGD_STATE_CREATED_FROM_COLLECTOR) { + global_statistics_tier0_disk_compressed_bytes(gorilla_writer_nbytes(pg->gorilla.writer)); + global_statistics_tier0_disk_uncompressed_bytes(gorilla_writer_entries(pg->gorilla.writer) * sizeof(storage_number)); + } + } else if (pg->states & PGD_STATE_CREATED_FROM_DISK) { + size = pg->raw.size; + } else { + fatal("Asked disk footprint on unknown page state"); + } + + break; + } + default: + fatal("Unknown page type: %uc", pg->type); + } + + internal_fatal(pg->states & PGD_STATE_CREATED_FROM_DISK, + "Disk footprint asked for page created from disk."); + pg->states = PGD_STATE_SCHEDULED_FOR_FLUSHING; + return size; +} + +void pgd_copy_to_extent(PGD *pg, uint8_t *dst, uint32_t dst_size) +{ + internal_fatal(pgd_disk_footprint(pg) != dst_size, "Wrong disk footprint size requested (need %u, available %u)", + pgd_disk_footprint(pg), dst_size); + + switch (pg->type) { + case PAGE_METRICS: + case PAGE_TIER: + memcpy(dst, pg->raw.data, dst_size); + break; + case PAGE_GORILLA_METRICS: { + if ((pg->states & PGD_STATE_SCHEDULED_FOR_FLUSHING) == 0) + fatal("Copying to extent is supported only for PGDs that are scheduled for flushing."); + + internal_fatal(!pg->gorilla.writer, + "pgd_copy_to_extent() not implemented for NULL gorilla writers"); + + internal_fatal(pg->gorilla.num_buffers == 0, + "pgd_copy_to_extent() gorilla writer does not have any buffers"); + + bool 
ok = gorilla_writer_serialize(pg->gorilla.writer, dst, dst_size); + UNUSED(ok); + internal_fatal(!ok, + "pgd_copy_to_extent() tried to serialize pg=%p, gw=%p (with dst_size=%u bytes, num_buffers=%zu)", + pg, pg->gorilla.writer, dst_size, pg->gorilla.num_buffers); + break; + } + default: + fatal("Unknown page type: %uc", pg->type); + } + + pg->states = PGD_STATE_FLUSHED_TO_DISK; +} + +// ---------------------------------------------------------------------------- +// data collection + +void pgd_append_point(PGD *pg, + usec_t point_in_time_ut __maybe_unused, + NETDATA_DOUBLE n, + NETDATA_DOUBLE min_value, + NETDATA_DOUBLE max_value, + uint16_t count, + uint16_t anomaly_count, + SN_FLAGS flags, + uint32_t expected_slot) +{ + if (unlikely(pg->used >= pg->slots)) + fatal("DBENGINE: attempted to write beyond page size (page type %u, slots %u, used %u)", + pg->type, pg->slots, pg->used /* FIXME:, pg->size */); + + if (unlikely(pg->used != expected_slot)) + fatal("DBENGINE: page is not aligned to expected slot (used %u, expected %u)", + pg->used, expected_slot); + + if (!(pg->states & PGD_STATE_CREATED_FROM_COLLECTOR)) + fatal("DBENGINE: collection on page not created from a collector"); + + if (pg->states & PGD_STATE_SCHEDULED_FOR_FLUSHING) + fatal("Data collection on page already scheduled for flushing"); + + switch (pg->type) { + case PAGE_METRICS: { + storage_number *tier0_metric_data = (storage_number *)pg->raw.data; + storage_number t = pack_storage_number(n, flags); + tier0_metric_data[pg->used++] = t; + + if ((pg->options & PAGE_OPTION_ALL_VALUES_EMPTY) && does_storage_number_exist(t)) + pg->options &= ~PAGE_OPTION_ALL_VALUES_EMPTY; + + break; + } + case PAGE_TIER: { + storage_number_tier1_t *tier12_metric_data = (storage_number_tier1_t *)pg->raw.data; + storage_number_tier1_t t; + t.sum_value = (float) n; + t.min_value = (float) min_value; + t.max_value = (float) max_value; + t.anomaly_count = anomaly_count; + t.count = count; + tier12_metric_data[pg->used++] = t; + + if ((pg->options & PAGE_OPTION_ALL_VALUES_EMPTY) && fpclassify(n) != FP_NAN) + pg->options &= ~PAGE_OPTION_ALL_VALUES_EMPTY; + + break; + } + case PAGE_GORILLA_METRICS: { + pg->used++; + storage_number t = pack_storage_number(n, flags); + + if ((pg->options & PAGE_OPTION_ALL_VALUES_EMPTY) && does_storage_number_exist(t)) + pg->options &= ~PAGE_OPTION_ALL_VALUES_EMPTY; + + bool ok = gorilla_writer_write(pg->gorilla.writer, t); + if (!ok) { + gorilla_buffer_t *new_buffer = aral_mallocz(pgd_alloc_globals.aral_gorilla_buffer[pg->gorilla.aral_index]); + memset(new_buffer, 0, GORILLA_BUFFER_SIZE); + + gorilla_writer_add_buffer(pg->gorilla.writer, new_buffer, GORILLA_BUFFER_SLOTS); + pg->gorilla.num_buffers += 1; + global_statistics_gorilla_buffer_add_hot(); + + ok = gorilla_writer_write(pg->gorilla.writer, t); + internal_fatal(ok == false, "Failed to writer value in newly allocated gorilla buffer."); + } + break; + } + default: + fatal("DBENGINE: unknown page type id %d", pg->type); + break; + } +} + +// ---------------------------------------------------------------------------- +// querying with cursor + +static void pgdc_seek(PGDC *pgdc, uint32_t position) +{ + PGD *pg = pgdc->pgd; + + switch (pg->type) { + case PAGE_METRICS: + case PAGE_TIER: + pgdc->slots = pgdc->pgd->used; + break; + case PAGE_GORILLA_METRICS: { + if (pg->states & PGD_STATE_CREATED_FROM_DISK) { + pgdc->slots = pgdc->pgd->slots; + pgdc->gr = gorilla_reader_init((void *) pg->raw.data); + } else { + if (!(pg->states & PGD_STATE_CREATED_FROM_COLLECTOR) && + 
!(pg->states & PGD_STATE_SCHEDULED_FOR_FLUSHING) && + !(pg->states & PGD_STATE_FLUSHED_TO_DISK)) + fatal("pgdc_seek() currently is not supported for pages created from disk."); + + if (!pg->gorilla.writer) + fatal("Seeking from a page without an active gorilla writer is not supported (yet)."); + + pgdc->slots = gorilla_writer_entries(pg->gorilla.writer); + pgdc->gr = gorilla_writer_get_reader(pg->gorilla.writer); + } + + if (position > pgdc->slots) + position = pgdc->slots; + + for (uint32_t i = 0; i != position; i++) { + uint32_t value; + + bool ok = gorilla_reader_read(&pgdc->gr, &value); + + if (!ok) { + // this is fine, the reader will return empty points + break; + } + } + + break; + } + default: + fatal("DBENGINE: unknown page type id %d", pg->type); + break; + } +} + +void pgdc_reset(PGDC *pgdc, PGD *pgd, uint32_t position) +{ + // pgd might be null and position equal to UINT32_MAX + + pgdc->pgd = pgd; + pgdc->position = position; + + if (!pgd) + return; + + if (pgd == PGD_EMPTY) + return; + + if (position == UINT32_MAX) + return; + + pgdc_seek(pgdc, position); +} + +bool pgdc_get_next_point(PGDC *pgdc, uint32_t expected_position, STORAGE_POINT *sp) +{ + if (!pgdc->pgd || pgdc->pgd == PGD_EMPTY || pgdc->position >= pgdc->slots) + { + storage_point_empty(*sp, sp->start_time_s, sp->end_time_s); + return false; + } + + internal_fatal(pgdc->position != expected_position, "Wrong expected cursor position"); + + switch (pgdc->pgd->type) + { + case PAGE_METRICS: { + storage_number *array = (storage_number *) pgdc->pgd->raw.data; + storage_number n = array[pgdc->position++]; + + sp->min = sp->max = sp->sum = unpack_storage_number(n); + sp->flags = (SN_FLAGS)(n & SN_USER_FLAGS); + sp->count = 1; + sp->anomaly_count = is_storage_number_anomalous(n) ? 1 : 0; + + return true; + } + case PAGE_TIER: { + storage_number_tier1_t *array = (storage_number_tier1_t *) pgdc->pgd->raw.data; + storage_number_tier1_t n = array[pgdc->position++]; + + sp->flags = n.anomaly_count ? SN_FLAG_NONE : SN_FLAG_NOT_ANOMALOUS; + sp->count = n.count; + sp->anomaly_count = n.anomaly_count; + sp->min = n.min_value; + sp->max = n.max_value; + sp->sum = n.sum_value; + + return true; + } + case PAGE_GORILLA_METRICS: { + pgdc->position++; + + uint32_t n = 666666666; + bool ok = gorilla_reader_read(&pgdc->gr, &n); + if (ok) { + sp->min = sp->max = sp->sum = unpack_storage_number(n); + sp->flags = (SN_FLAGS)(n & SN_USER_FLAGS); + sp->count = 1; + sp->anomaly_count = is_storage_number_anomalous(n) ? 1 : 0; + } else { + storage_point_empty(*sp, sp->start_time_s, sp->end_time_s); + } + + return ok; + } + default: { + static bool logged = false; + if (!logged) + { + netdata_log_error("DBENGINE: unknown page type %d found. Cannot decode it. 
Ignoring its metrics.", pgd_type(pgdc->pgd)); + logged = true; + } + + storage_point_empty(*sp, sp->start_time_s, sp->end_time_s); + return false; + } + } +} diff --git a/database/engine/page.h b/database/engine/page.h new file mode 100644 index 00000000000000..32c87c58072272 --- /dev/null +++ b/database/engine/page.h @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef DBENGINE_PAGE_H +#define DBENGINE_PAGE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "libnetdata/libnetdata.h" + +typedef struct pgd_cursor { + struct pgd *pgd; + uint32_t position; + uint32_t slots; + + gorilla_reader_t gr; +} PGDC; + +#include "rrdengine.h" + +typedef struct pgd PGD; + +#define PGD_EMPTY (PGD *)(-1) + +void pgd_init_arals(void); + +PGD *pgd_create(uint8_t type, uint32_t slots); +PGD *pgd_create_from_disk_data(uint8_t type, void *base, uint32_t size); +void pgd_free(PGD *pg); + +uint32_t pgd_type(PGD *pg); +bool pgd_is_empty(PGD *pg); +uint32_t pgd_slots_used(PGD *pg); + +uint32_t pgd_memory_footprint(PGD *pg); +uint32_t pgd_disk_footprint(PGD *pg); + +void pgd_copy_to_extent(PGD *pg, uint8_t *dst, uint32_t dst_size); + +void pgd_append_point(PGD *pg, + usec_t point_in_time_ut, + NETDATA_DOUBLE n, + NETDATA_DOUBLE min_value, + NETDATA_DOUBLE max_value, + uint16_t count, + uint16_t anomaly_count, + SN_FLAGS flags, + uint32_t expected_slot); + +void pgdc_reset(PGDC *pgdc, PGD *pgd, uint32_t position); +bool pgdc_get_next_point(PGDC *pgdc, uint32_t expected_position, STORAGE_POINT *sp); + +#ifdef __cplusplus +} +#endif + +#endif // DBENGINE_PAGE_H diff --git a/database/engine/page_test.cc b/database/engine/page_test.cc new file mode 100644 index 00000000000000..d61299bc4870d8 --- /dev/null +++ b/database/engine/page_test.cc @@ -0,0 +1,405 @@ +#include "page.h" +#include "page_test.h" + +#ifdef HAVE_GTEST + +#include +#include +#include + +bool operator==(const STORAGE_POINT lhs, const STORAGE_POINT rhs) { + if (lhs.min != rhs.min) + return false; + + if (lhs.max != rhs.max) + return false; + + if (lhs.sum != rhs.sum) + return false; + + if (lhs.start_time_s != rhs.start_time_s) + return false; + + if (lhs.end_time_s != rhs.end_time_s) + return false; + + if (lhs.count != rhs.count) + return false; + + if (lhs.flags != rhs.flags) + return false; + + return true; +} + +// TODO: use value-parameterized tests +// http://google.github.io/googletest/advanced.html#value-parameterized-tests +static uint8_t page_type = PAGE_GORILLA_METRICS; + +static size_t slots_for_page(size_t n) { + switch (page_type) { + case PAGE_METRICS: + return 1024; + case PAGE_GORILLA_METRICS: + return n; + default: + fatal("Slots requested for unsupported page: %uc", page_type); + } +} + +TEST(PGD, EmptyOrNull) { + PGD *pg = NULL; + + PGDC cursor; + STORAGE_POINT sp; + + EXPECT_TRUE(pgd_is_empty(pg)); + EXPECT_EQ(pgd_slots_used(pg), 0); + EXPECT_EQ(pgd_memory_footprint(pg), 0); + EXPECT_EQ(pgd_disk_footprint(pg), 0); + + pgdc_reset(&cursor, pg, 0); + EXPECT_FALSE(pgdc_get_next_point(&cursor, 0, &sp)); + + pgd_free(pg); + + pg = PGD_EMPTY; + + EXPECT_TRUE(pgd_is_empty(pg)); + EXPECT_EQ(pgd_slots_used(pg), 0); + EXPECT_EQ(pgd_memory_footprint(pg), 0); + EXPECT_EQ(pgd_disk_footprint(pg), 0); + EXPECT_FALSE(pgdc_get_next_point(&cursor, 0, &sp)); + + pgdc_reset(&cursor, pg, 0); + EXPECT_FALSE(pgdc_get_next_point(&cursor, 0, &sp)); + + pgd_free(pg); +} + +TEST(PGD, Create) { + size_t slots = slots_for_page(1024 * 1024); + PGD *pg = pgd_create(page_type, slots); + + EXPECT_EQ(pgd_type(pg), page_type); + 
EXPECT_TRUE(pgd_is_empty(pg)); + EXPECT_EQ(pgd_slots_used(pg), 0); + + for (size_t i = 0; i != slots; i++) { + pgd_append_point(pg, i, i, 0, 0, 1, 1, SN_DEFAULT_FLAGS, i); + EXPECT_FALSE(pgd_is_empty(pg)); + } + EXPECT_EQ(pgd_slots_used(pg), slots); + + EXPECT_DEATH( + pgd_append_point(pg, slots, slots, 0, 0, 1, 1, SN_DEFAULT_FLAGS, slots), + ".*" + ); + + pgd_free(pg); +} + +TEST(PGD, CursorFullPage) { + size_t slots = slots_for_page(1024 * 1024); + PGD *pg = pgd_create(page_type, slots); + + for (size_t slot = 0; slot != slots; slot++) + pgd_append_point(pg, slot, slot, 0, 0, 1, 1, SN_DEFAULT_FLAGS, slot); + + for (size_t i = 0; i != 2; i++) { + PGDC cursor; + pgdc_reset(&cursor, pg, 0); + + STORAGE_POINT sp; + for (size_t slot = 0; slot != slots; slot++) { + EXPECT_TRUE(pgdc_get_next_point(&cursor, slot, &sp)); + + EXPECT_EQ(slot, static_cast(sp.min)); + EXPECT_EQ(sp.min, sp.max); + EXPECT_EQ(sp.min, sp.sum); + EXPECT_EQ(sp.count, 1); + EXPECT_EQ(sp.anomaly_count, 0); + } + + EXPECT_FALSE(pgdc_get_next_point(&cursor, slots, &sp)); + } + + for (size_t i = 0; i != 2; i++) { + PGDC cursor; + pgdc_reset(&cursor, pg, slots / 2); + + STORAGE_POINT sp; + for (size_t slot = slots / 2; slot != slots; slot++) { + EXPECT_TRUE(pgdc_get_next_point(&cursor, slot, &sp)); + + EXPECT_EQ(slot, static_cast(sp.min)); + EXPECT_EQ(sp.min, sp.max); + EXPECT_EQ(sp.min, sp.sum); + EXPECT_EQ(sp.count, 1); + EXPECT_EQ(sp.anomaly_count, 0); + } + + EXPECT_FALSE(pgdc_get_next_point(&cursor, slots, &sp)); + } + + // out of bounds seek + { + PGDC cursor; + pgdc_reset(&cursor, pg, 2 * slots); + + STORAGE_POINT sp; + EXPECT_FALSE(pgdc_get_next_point(&cursor, 2 * slots, &sp)); + } + + pgd_free(pg); +} + +TEST(PGD, CursorHalfPage) { + size_t slots = slots_for_page(1024 * 1024); + PGD *pg = pgd_create(page_type, slots); + + PGDC cursor; + STORAGE_POINT sp; + + // fill the 1st half of the page + for (size_t slot = 0; slot != slots / 2; slot++) + pgd_append_point(pg, slot, slot, 0, 0, 1, 1, SN_DEFAULT_FLAGS, slot); + + pgdc_reset(&cursor, pg, 0); + + for (size_t slot = 0; slot != slots / 2; slot++) { + EXPECT_TRUE(pgdc_get_next_point(&cursor, slot, &sp)); + + EXPECT_EQ(slot, static_cast(sp.min)); + EXPECT_EQ(sp.min, sp.max); + EXPECT_EQ(sp.min, sp.sum); + EXPECT_EQ(sp.count, 1); + EXPECT_EQ(sp.anomaly_count, 0); + } + EXPECT_FALSE(pgdc_get_next_point(&cursor, slots / 2, &sp)); + + // reset pgdc to the end of the page, we should not be getting more + // points even if the page has grown in between. 
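+    // (editorial note: this works because pgdc_seek() snapshots the writer's entry count
+    // into pgdc->slots at reset time, so points appended after the reset are not visible
+    // through this cursor)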
+ + pgdc_reset(&cursor, pg, slots / 2); + + for (size_t slot = slots / 2; slot != slots; slot++) + pgd_append_point(pg, slot, slot, 0, 0, 1, 1, SN_DEFAULT_FLAGS, slot); + + for (size_t slot = slots / 2; slot != slots; slot++) + EXPECT_FALSE(pgdc_get_next_point(&cursor, slot, &sp)); + + EXPECT_FALSE(pgdc_get_next_point(&cursor, slots, &sp)); + + pgd_free(pg); +} + +TEST(PGD, MemoryFootprint) { + size_t slots = slots_for_page(1024 * 1024); + PGD *pg = pgd_create(page_type, slots); + + uint32_t footprint = 0; + switch (pgd_type(pg)) { + case PAGE_METRICS: + footprint = slots * sizeof(uint32_t); + break; + case PAGE_GORILLA_METRICS: + footprint = 128 * sizeof(uint32_t); + break; + default: + fatal("Uknown page type: %uc", pgd_type(pg)); + } + EXPECT_NEAR(pgd_memory_footprint(pg), footprint, 128); + + std::random_device rand_dev; + std::mt19937 gen(rand_dev()); + std::uniform_int_distribution distr(std::numeric_limits::min(), + std::numeric_limits::max()); // define the range + + for (size_t slot = 0; slot != slots; slot++) { + uint32_t n = distr(gen); + pgd_append_point(pg, slot, n, 0, 0, 1, 1, SN_DEFAULT_FLAGS, slot); + } + + footprint = slots * sizeof(uint32_t); + + uint32_t abs_error = 0; + switch (pgd_type(pg)) { + case PAGE_METRICS: + abs_error = 128; + break; + case PAGE_GORILLA_METRICS: + abs_error = footprint / 10; + break; + default: + fatal("Uknown page type: %uc", pgd_type(pg)); + } + + EXPECT_NEAR(pgd_memory_footprint(pg), footprint, abs_error); +} + +TEST(PGD, DiskFootprint) { + size_t slots = slots_for_page(1024 * 1024); + PGD *pg = pgd_create(page_type, slots); + + std::random_device rand_dev; + std::mt19937 gen(rand_dev()); + std::uniform_int_distribution distr(std::numeric_limits::min(), + std::numeric_limits::max()); // define the range + + size_t used_slots = 16; + + for (size_t slot = 0; slot != used_slots; slot++) { + uint32_t n = distr(gen); + pgd_append_point(pg, slot, n, 0, 0, 1, 1, SN_DEFAULT_FLAGS, slot); + } + + uint32_t footprint = 0; + switch (pgd_type(pg)) { + case PAGE_METRICS: + footprint = used_slots * sizeof(uint32_t); + break; + case PAGE_GORILLA_METRICS: + footprint = 128 * sizeof(uint32_t); + break; + default: + fatal("Uknown page type: %uc", pgd_type(pg)); + } + EXPECT_EQ(pgd_disk_footprint(pg), footprint); + + pgd_free(pg); + + pg = pgd_create(page_type, slots); + + used_slots = 128 + 64; + + for (size_t slot = 0; slot != used_slots; slot++) { + uint32_t n = distr(gen); + pgd_append_point(pg, slot, n, 0, 0, 1, 1, SN_DEFAULT_FLAGS, slot); + } + + switch (pgd_type(pg)) { + case PAGE_METRICS: + footprint = used_slots * sizeof(uint32_t); + break; + case PAGE_GORILLA_METRICS: + footprint = 2 * (128 * sizeof(uint32_t)); + break; + default: + fatal("Uknown page type: %uc", pgd_type(pg)); + } + EXPECT_EQ(pgd_disk_footprint(pg), footprint); + + pgd_free(pg); +} + +TEST(PGD, CopyToExtent) { + size_t slots = slots_for_page(1024 * 1024); + PGD *pg_collector = pgd_create(page_type, slots); + + uint32_t value = 666; + pgd_append_point(pg_collector, 0, value, 0, 0, 1, 0, SN_DEFAULT_FLAGS, 0); + + uint32_t size_in_bytes = pgd_disk_footprint(pg_collector); + EXPECT_EQ(size_in_bytes, 512); + + uint32_t size_in_words = size_in_bytes / sizeof(uint32_t); + alignas(sizeof(uintptr_t)) uint32_t disk_buffer[size_in_words]; + + for (size_t i = 0; i != size_in_words; i++) { + disk_buffer[i] = std::numeric_limits::max(); + } + + pgd_copy_to_extent(pg_collector, (uint8_t *) &disk_buffer[0], size_in_bytes); + + EXPECT_EQ(disk_buffer[0], NULL); + EXPECT_EQ(disk_buffer[1], NULL); + 
EXPECT_EQ(disk_buffer[2], 1); + EXPECT_EQ(disk_buffer[3], 32); + storage_number sn = pack_storage_number(value, SN_DEFAULT_FLAGS); + EXPECT_EQ(disk_buffer[4], sn); + + // make sure the rest of the page is 0'ed so that it's amenable to compression + for (size_t i = 5; i != size_in_words; i++) + EXPECT_EQ(disk_buffer[i], 0); + + pgd_free(pg_collector); +} + +TEST(PGD, Roundtrip) { + size_t slots = slots_for_page(1024 * 1024); + PGD *pg_collector = pgd_create(page_type, slots); + + for (size_t i = 0; i != slots; i++) + pgd_append_point(pg_collector, i, i, 0, 0, 1, 1, SN_DEFAULT_FLAGS, i); + + uint32_t size_in_bytes = pgd_disk_footprint(pg_collector); + uint32_t size_in_words = size_in_bytes / sizeof(uint32_t); + + alignas(sizeof(uintptr_t)) uint32_t disk_buffer[size_in_words]; + for (size_t i = 0; i != size_in_words; i++) + disk_buffer[i] = std::numeric_limits::max(); + + pgd_copy_to_extent(pg_collector, (uint8_t *) &disk_buffer[0], size_in_bytes); + + PGD *pg_disk = pgd_create_from_disk_data(page_type, &disk_buffer[0], size_in_bytes); + EXPECT_EQ(pgd_slots_used(pg_disk), slots); + + // Expected memory footprint is equal to the disk footprint + a couple + // bytes for the PGD metadata. + EXPECT_NEAR(pgd_memory_footprint(pg_disk), size_in_bytes, 128); + + // Do not allow calling disk footprint for pages created from disk. + EXPECT_DEATH(pgd_disk_footprint(pg_disk), ".*"); + + for (size_t i = 0; i != 10; i++) { + PGDC cursor_collector; + PGDC cursor_disk; + + pgdc_reset(&cursor_collector, pg_collector, i * 1024); + pgdc_reset(&cursor_disk, pg_disk, i * 1024); + + STORAGE_POINT sp_collector = {}; + STORAGE_POINT sp_disk = {}; + + for (size_t slot = i * 1024; slot != slots; slot++) { + EXPECT_TRUE(pgdc_get_next_point(&cursor_collector, slot, &sp_collector)); + EXPECT_TRUE(pgdc_get_next_point(&cursor_disk, slot, &sp_disk)); + + EXPECT_EQ(sp_collector, sp_disk); + } + + EXPECT_FALSE(pgdc_get_next_point(&cursor_collector, slots, &sp_collector)); + EXPECT_FALSE(pgdc_get_next_point(&cursor_disk, slots, &sp_disk)); + } + + pgd_free(pg_disk); + pgd_free(pg_collector); +} + +int pgd_test(int argc, char *argv[]) +{ + // Dummy/necessary initialization stuff + PGC *dummy_cache = pgc_create("pgd-tests-cache", 32 * 1024 * 1024, NULL, 64, NULL, NULL, + 10, 10, 1000, 10, PGC_OPTIONS_NONE, 1, 11); + pgd_init_arals(); + + ::testing::InitGoogleTest(&argc, argv); + int rc = RUN_ALL_TESTS(); + + pgc_destroy(dummy_cache); + + return rc; +} + +#else // HAVE_GTEST + +int pgd_test(int argc, char *argv[]) +{ + (void) argc; + (void) argv; + fprintf(stderr, "Can not run PGD tests because the agent was not build with support for google tests.\n"); + return 0; +} + +#endif // HAVE_GTEST diff --git a/database/engine/page_test.h b/database/engine/page_test.h new file mode 100644 index 00000000000000..30837f0ab73540 --- /dev/null +++ b/database/engine/page_test.h @@ -0,0 +1,14 @@ +#ifndef PAGE_TEST_H +#define PAGE_TEST_H + +#ifdef __cplusplus +extern "C" { +#endif + +int pgd_test(int argc, char *argv[]); + +#ifdef __cplusplus +} +#endif + +#endif /* PAGE_TEST_H */ diff --git a/database/engine/pagecache.c b/database/engine/pagecache.c index c608c327004467..dab9cdd0dd24ca 100644 --- a/database/engine/pagecache.c +++ b/database/engine/pagecache.c @@ -12,8 +12,9 @@ struct rrdeng_cache_efficiency_stats rrdeng_cache_efficiency_stats = {}; static void main_cache_free_clean_page_callback(PGC *cache __maybe_unused, PGC_ENTRY entry __maybe_unused) { // Release storage associated with the page - dbengine_page_free(entry.data, entry.size); 
+ pgd_free(entry.data); } + static void main_cache_flush_dirty_page_init_callback(PGC *cache __maybe_unused, Word_t section) { struct rrdengine_instance *ctx = (struct rrdengine_instance *) section; @@ -28,8 +29,6 @@ static void main_cache_flush_dirty_page_callback(PGC *cache __maybe_unused, PGC_ struct rrdengine_instance *ctx = (struct rrdengine_instance *) entries_array[0].section; - size_t bytes_per_point = CTX_POINT_SIZE_BYTES(ctx); - struct page_descr_with_data *base = NULL; for (size_t Index = 0 ; Index < entries; Index++) { @@ -42,21 +41,15 @@ static void main_cache_flush_dirty_page_callback(PGC *cache __maybe_unused, PGC_ descr->start_time_ut = start_time_s * USEC_PER_SEC; descr->end_time_ut = end_time_s * USEC_PER_SEC; descr->update_every_s = entries_array[Index].update_every_s; - descr->type = ctx->config.page_type; - descr->page_length = (end_time_s - (start_time_s - descr->update_every_s)) / descr->update_every_s * bytes_per_point; + descr->pgd = pgc_page_data(pages_array[Index]); + descr->type = pgd_type(descr->pgd); + descr->page_length = pgd_disk_footprint(descr->pgd); - if(descr->page_length > entries_array[Index].size) { - descr->page_length = entries_array[Index].size; - - error_limit_static_global_var(erl, 1, 0); - error_limit(&erl, "DBENGINE: page exceeds the maximum size, adjusting it to max."); - } - - descr->page = pgc_page_data(pages_array[Index]); DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(base, descr, link.prev, link.next); - internal_fatal(descr->page_length > RRDENG_BLOCK_SIZE, "DBENGINE: faulty page length calculation"); + // TODO: ask @stelfrag/@ktsaou about this. + // internal_fatal(descr->page_length > RRDENG_BLOCK_SIZE, "DBENGINE: faulty page length calculation"); } struct completion completion; @@ -254,7 +247,6 @@ static size_t get_page_list_from_pgc(PGC *cache, METRIC *metric, struct rrdengin time_t page_start_time_s = pgc_page_start_time_s(page); time_t page_end_time_s = pgc_page_end_time_s(page); time_t page_update_every_s = pgc_page_update_every_s(page); - size_t page_length = pgc_page_data_size(cache, page); if(!page_update_every_s) page_update_every_s = dt_s; @@ -277,24 +269,10 @@ static size_t get_page_list_from_pgc(PGC *cache, METRIC *metric, struct rrdengin if (!PValue || PValue == PJERR) fatal("DBENGINE: corrupted judy array in %s()", __FUNCTION__ ); - if (unlikely(*PValue)) { - struct page_details *pd = *PValue; - UNUSED(pd); - -// internal_error( -// pd->first_time_s != page_first_time_s || -// pd->last_time_s != page_last_time_s || -// pd->update_every_s != page_update_every_s, -// "DBENGINE: duplicate page with different retention in %s cache " -// "1st: %ld to %ld, ue %u, size %u " -// "2nd: %ld to %ld, ue %ld size %zu " -// "- ignoring the second", -// cache == open_cache ? "open" : "main", -// pd->first_time_s, pd->last_time_s, pd->update_every_s, pd->page_length, -// page_first_time_s, page_last_time_s, page_update_every_s, page_length); - + if (unlikely(*PValue)) + // already exists in our list pgc_page_release(cache, page); - } + else { internal_fatal(pgc_page_metric(page) != metric_id, "Wrong metric id in page found in cache"); @@ -304,7 +282,6 @@ static size_t get_page_list_from_pgc(PGC *cache, METRIC *metric, struct rrdengin pd->metric_id = metric_id; pd->first_time_s = page_start_time_s; pd->last_time_s = page_end_time_s; - pd->page_length = page_length; pd->update_every_s = (uint32_t) page_update_every_s; pd->page = (open_cache_mode) ? 
NULL : page; pd->status |= tags; @@ -312,7 +289,7 @@ static size_t get_page_list_from_pgc(PGC *cache, METRIC *metric, struct rrdengin if((pd->page)) { pd->status |= PDC_PAGE_READY | PDC_PAGE_PRELOADED; - if(pgc_page_data(page) == DBENGINE_EMPTY_PAGE) + if(pgd_is_empty(pgc_page_data(page))) pd->status |= PDC_PAGE_EMPTY; } @@ -369,7 +346,7 @@ static void pgc_inject_gap(struct rrdengine_instance *ctx, METRIC *metric, time_ .end_time_s = MIN(end_time_s, db_last_time_s), .update_every_s = 0, .size = 0, - .data = DBENGINE_EMPTY_PAGE, + .data = PGD_EMPTY, }; if(page_entry.start_time_s >= page_entry.end_time_s) @@ -478,7 +455,7 @@ static size_t list_has_time_gaps( pd->status &= ~PDC_PAGE_DISK_PENDING; pd->status |= PDC_PAGE_READY | PDC_PAGE_PRELOADED | PDC_PAGE_PRELOADED_PASS4; - if(pgc_page_data(pd->page) == DBENGINE_EMPTY_PAGE) + if(pgd_is_empty(pgc_page_data(pd->page))) pd->status |= PDC_PAGE_EMPTY; } @@ -642,7 +619,6 @@ void add_page_details_from_journal_v2(PGC_PAGE *page, void *JudyL_pptr) { pd->first_time_s = pgc_page_start_time_s(page); pd->last_time_s = pgc_page_end_time_s(page); pd->datafile.ptr = datafile; - pd->page_length = ei->page_length; pd->update_every_s = (uint32_t) pgc_page_update_every_s(page); pd->metric_id = metric_id; pd->status |= PDC_PAGE_DISK_PENDING | PDC_PAGE_SOURCE_JOURNAL_V2 | PDC_PAGE_DATAFILE_ACQUIRED; @@ -917,7 +893,7 @@ struct pgc_page *pg_cache_lookup_next( } } - if(page && pgc_page_data(page) == DBENGINE_EMPTY_PAGE) + if(page && pgd_is_empty(pgc_page_data(page))) pdc_page_status_set(pd, PDC_PAGE_EMPTY); if(!page || pdc_page_status_check(pd, PDC_PAGE_QUERY_GLOBAL_SKIP_LIST | PDC_PAGE_EMPTY)) { @@ -930,7 +906,6 @@ struct pgc_page *pg_cache_lookup_next( time_t page_start_time_s = pgc_page_start_time_s(page); time_t page_end_time_s = pgc_page_end_time_s(page); time_t page_update_every_s = pgc_page_update_every_s(page); - size_t page_length = pgc_page_data_size(main_cache, page); if(unlikely(page_start_time_s == INVALID_TIME || page_end_time_s == INVALID_TIME)) { __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_zero_time_skipped, 1, __ATOMIC_RELAXED); @@ -939,13 +914,6 @@ struct pgc_page *pg_cache_lookup_next( pd->page = page = NULL; continue; } - else if(page_length > RRDENG_BLOCK_SIZE) { - __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_invalid_size_skipped, 1, __ATOMIC_RELAXED); - pgc_page_to_clean_evict_or_release(main_cache, page); - pdc_page_status_set(pd, PDC_PAGE_INVALID | PDC_PAGE_RELEASED); - pd->page = page = NULL; - continue; - } else { if (unlikely(page_update_every_s <= 0 || page_update_every_s > 86400)) { __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_invalid_update_every_fixed, 1, __ATOMIC_RELAXED); @@ -953,7 +921,7 @@ struct pgc_page *pg_cache_lookup_next( pd->update_every_s = (uint32_t) page_update_every_s; } - size_t entries_by_size = page_entries_by_size(page_length, CTX_POINT_SIZE_BYTES(ctx)); + size_t entries_by_size = pgd_slots_used(pgc_page_data(page)); size_t entries_by_time = page_entries_by_time(page_start_time_s, page_end_time_s, page_update_every_s); if(unlikely(entries_by_size < entries_by_time)) { time_t fixed_page_end_time_s = (time_t)(page_start_time_s + (entries_by_size - 1) * page_update_every_s); diff --git a/database/engine/pagecache.h b/database/engine/pagecache.h index 5242db89e54520..dbcbea53ae2a86 100644 --- a/database/engine/pagecache.h +++ b/database/engine/pagecache.h @@ -27,7 +27,7 @@ struct page_descr_with_data { uint8_t type; uint32_t update_every_s; uint32_t page_length; - uint8_t *page; + struct 
pgd *pgd; struct { struct page_descr_with_data *prev; diff --git a/database/engine/pdc.c b/database/engine/pdc.c index 7da56878740d72..5fe205e6450551 100644 --- a/database/engine/pdc.c +++ b/database/engine/pdc.c @@ -629,14 +629,33 @@ void collect_page_flags_to_buffer(BUFFER *wb, RRDENG_COLLECT_PAGE_FLAGS flags) { } inline VALIDATED_PAGE_DESCRIPTOR validate_extent_page_descr(const struct rrdeng_extent_page_descr *descr, time_t now_s, time_t overwrite_zero_update_every_s, bool have_read_error) { + time_t start_time_s = (time_t) (descr->start_time_ut / USEC_PER_SEC); + + time_t end_time_s; + size_t entries; + + switch (descr->type) { + case PAGE_METRICS: + case PAGE_TIER: + end_time_s = descr->end_time_ut / USEC_PER_SEC; + entries = 0; + break; + case PAGE_GORILLA_METRICS: + end_time_s = start_time_s + descr->gorilla.delta_time_s; + entries = descr->gorilla.entries; + break; + default: + fatal("Unknown page type: %uc\n", descr->type); + } + return validate_page( (uuid_t *)descr->uuid, - (time_t) (descr->start_time_ut / USEC_PER_SEC), - (time_t) (descr->end_time_ut / USEC_PER_SEC), + start_time_s, + end_time_s, 0, descr->page_length, descr->type, - 0, + entries, now_s, overwrite_zero_update_every_s, have_read_error, @@ -666,13 +685,25 @@ VALIDATED_PAGE_DESCRIPTOR validate_page( .is_valid = true, }; - // always calculate entries by size vd.point_size = page_type_size[vd.type]; - vd.entries = page_entries_by_size(vd.page_length, vd.point_size); - - // allow to be called without entries (when loading pages from disk) - if(!entries) - entries = vd.entries; + switch (page_type) { + case PAGE_METRICS: + case PAGE_TIER: + // always calculate entries by size + vd.entries = page_entries_by_size(vd.page_length, vd.point_size); + + // allow to be called without entries (when loading pages from disk) + if(!entries) + entries = vd.entries; + break; + case PAGE_GORILLA_METRICS: + internal_fatal(entries == 0, "0 number of entries found on gorilla page"); + vd.entries = entries; + break; + default: + // TODO: should set vd.is_valid false instead? + fatal("Unknown page type: %uc", page_type); + } // allow to be called without update every (when loading pages from disk) if(!update_every_s) { @@ -687,19 +718,26 @@ VALIDATED_PAGE_DESCRIPTOR validate_page( bool updated = false; + size_t max_page_length = RRDENG_BLOCK_SIZE; + + // If gorilla can not compress the data we might end up needing slightly more + // than 4KiB. However, gorilla pages extend the page length by increments of + // 512 bytes. 
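+    // i.e. for PAGE_GORILLA_METRICS the check below accepts at most one extra GORILLA_BUFFER_SIZE on top of RRDENG_BLOCK_SIZE (4096 + 512 = 4608 bytes).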
+ max_page_length += ((page_type == PAGE_GORILLA_METRICS) * GORILLA_BUFFER_SIZE); + if( have_read_error || vd.page_length == 0 || - vd.page_length > RRDENG_BLOCK_SIZE || + vd.page_length > max_page_length || vd.start_time_s > vd.end_time_s || (now_s && vd.end_time_s > now_s) || vd.start_time_s <= 0 || vd.end_time_s <= 0 || vd.update_every_s < 0 || (vd.start_time_s == vd.end_time_s && vd.entries > 1) || - (vd.update_every_s == 0 && vd.entries > 1) - ) + (vd.update_every_s == 0 && vd.entries > 1)) + { vd.is_valid = false; - + } else { if(unlikely(vd.entries != entries || vd.update_every_s != update_every_s)) updated = true; @@ -734,7 +772,7 @@ VALIDATED_PAGE_DESCRIPTOR validate_page( if(unlikely(!vd.is_valid || updated)) { #ifndef NETDATA_INTERNAL_CHECKS - error_limit_static_global_var(erl, 1, 0); + nd_log_limit_static_global_var(erl, 1, 0); #endif char uuid_str[UUID_STR_LEN + 1]; uuid_unparse(*uuid, uuid_str); @@ -750,7 +788,7 @@ VALIDATED_PAGE_DESCRIPTOR validate_page( #ifdef NETDATA_INTERNAL_CHECKS internal_error(true, #else - error_limit(&erl, + nd_log_limit(&erl, NDLS_DAEMON, NDLP_ERR, #endif "DBENGINE: metric '%s' %s invalid page of type %u " "from %ld to %ld (now %ld), update every %ld, page length %zu, entries %zu (flags: %s)", @@ -770,7 +808,7 @@ VALIDATED_PAGE_DESCRIPTOR validate_page( #ifdef NETDATA_INTERNAL_CHECKS internal_error(true, #else - error_limit(&erl, + nd_log_limit(&erl, NDLS_DAEMON, NDLP_ERR, #endif "DBENGINE: metric '%s' %s page of type %u " "from %ld to %ld (now %ld), update every %ld, page length %zu, entries %zu (flags: %s), " @@ -832,7 +870,15 @@ static void epdl_extent_loading_error_log(struct rrdengine_instance *ctx, EPDL * if (descr) { start_time_s = (time_t)(descr->start_time_ut / USEC_PER_SEC); - end_time_s = (time_t)(descr->end_time_ut / USEC_PER_SEC); + switch (descr->type) { + case PAGE_METRICS: + case PAGE_TIER: + end_time_s = (time_t)(descr->end_time_ut / USEC_PER_SEC); + break; + case PAGE_GORILLA_METRICS: + end_time_s = (time_t) start_time_s + (descr->gorilla.delta_time_s); + break; + } uuid_unparse_lower(descr->uuid, uuid); used_descr = true; } @@ -869,8 +915,8 @@ static void epdl_extent_loading_error_log(struct rrdengine_instance *ctx, EPDL * if(end_time_s) log_date(end_time_str, LOG_DATE_LENGTH, end_time_s); - error_limit_static_global_var(erl, 1, 0); - error_limit(&erl, + nd_log_limit_static_global_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_DAEMON, NDLP_ERR, "DBENGINE: error while reading extent from datafile %u of tier %d, at offset %" PRIu64 " (%u bytes) " "%s from %ld (%s) to %ld (%s) %s%s: " "%s", @@ -952,7 +998,9 @@ static bool epdl_populate_pages_from_extent_data( uncompressed_payload_length = 0; for (i = 0; i < count; ++i) { size_t page_length = header->descr[i].page_length; - if(page_length > RRDENG_BLOCK_SIZE) { + if (page_length > RRDENG_BLOCK_SIZE && (header->descr[i].type != PAGE_GORILLA_METRICS || + (header->descr[i].type == PAGE_GORILLA_METRICS && + (page_length - RRDENG_BLOCK_SIZE) % GORILLA_BUFFER_SIZE))) { have_read_error = true; break; } @@ -993,7 +1041,7 @@ static bool epdl_populate_pages_from_extent_data( if(!page_length || !start_time_s) { char log[200 + 1]; - snprintfz(log, 200, "page %u (out of %u) is EMPTY", i, count); + snprintfz(log, sizeof(log) - 1, "page %u (out of %u) is EMPTY", i, count); epdl_extent_loading_error_log(ctx, epdl, &header->descr[i], log); continue; } @@ -1002,7 +1050,7 @@ static bool epdl_populate_pages_from_extent_data( Word_t metric_id = (Word_t)metric; if(!metric) { char log[200 + 1]; - snprintfz(log, 
200, "page %u (out of %u) has unknown UUID", i, count); + snprintfz(log, sizeof(log) - 1, "page %u (out of %u) has unknown UUID", i, count); epdl_extent_loading_error_log(ctx, epdl, &header->descr[i], log); continue; } @@ -1020,32 +1068,34 @@ static bool epdl_populate_pages_from_extent_data( if(worker) worker_is_busy(UV_EVENT_DBENGINE_EXTENT_PAGE_ALLOCATION); - void *page_data; + PGD *pgd; if (unlikely(!vd.is_valid)) { - page_data = DBENGINE_EMPTY_PAGE; + pgd = PGD_EMPTY; stats_load_invalid_page++; } else { if (RRD_NO_COMPRESSION == header->compression_algorithm) { - page_data = dbengine_page_alloc(vd.page_length); - memcpy(page_data, data + payload_offset + page_offset, (size_t) vd.page_length); + pgd = pgd_create_from_disk_data(header->descr[i].type, + data + payload_offset + page_offset, + vd.page_length); stats_load_uncompressed++; } else { if (unlikely(page_offset + vd.page_length > uncompressed_payload_length)) { char log[200 + 1]; - snprintfz(log, 200, "page %u (out of %u) offset %u + page length %zu, " + snprintfz(log, sizeof(log) - 1, "page %u (out of %u) offset %u + page length %zu, " "exceeds the uncompressed buffer size %u", i, count, page_offset, vd.page_length, uncompressed_payload_length); epdl_extent_loading_error_log(ctx, epdl, &header->descr[i], log); - page_data = DBENGINE_EMPTY_PAGE; + pgd = PGD_EMPTY; stats_load_invalid_page++; } else { - page_data = dbengine_page_alloc(vd.page_length); - memcpy(page_data, uncompressed_buf + page_offset, vd.page_length); + pgd = pgd_create_from_disk_data(header->descr[i].type, + uncompressed_buf + page_offset, + vd.page_length); stats_load_compressed++; } } @@ -1061,14 +1111,14 @@ static bool epdl_populate_pages_from_extent_data( .start_time_s = vd.start_time_s, .end_time_s = vd.end_time_s, .update_every_s = (uint32_t) vd.update_every_s, - .size = (size_t) ((page_data == DBENGINE_EMPTY_PAGE) ? 0 : vd.page_length), - .data = page_data + .size = pgd_memory_footprint(pgd), // the footprint of the entire PGD, for accurate memory management + .data = pgd, }; bool added = true; PGC_PAGE *page = pgc_page_add_and_acquire(main_cache, page_entry, &added); if (false == added) { - dbengine_page_free(page_data, vd.page_length); + pgd_free(pgd); stats_cache_hit_while_inserting++; stats_data_from_main_cache++; } @@ -1081,8 +1131,7 @@ static bool epdl_populate_pages_from_extent_data( pgc_page_dup(main_cache, page); pd->page = page; - pd->page_length = pgc_page_data_size(main_cache, page); - pdc_page_status_set(pd, PDC_PAGE_READY | tags | ((page_data == DBENGINE_EMPTY_PAGE) ? PDC_PAGE_EMPTY : 0)); + pdc_page_status_set(pd, PDC_PAGE_READY | tags | (pgd_is_empty(pgd) ? 
PDC_PAGE_EMPTY : 0)); pd = pd->load.next; } while(pd); diff --git a/database/engine/rrddiskprotocol.h b/database/engine/rrddiskprotocol.h index 5b4be9498b79fa..86b41f0b3e4050 100644 --- a/database/engine/rrddiskprotocol.h +++ b/database/engine/rrddiskprotocol.h @@ -3,6 +3,8 @@ #ifndef NETDATA_RRDDISKPROTOCOL_H #define NETDATA_RRDDISKPROTOCOL_H +#include <stdint.h> + #define RRDENG_BLOCK_SIZE (4096) #define RRDFILE_ALIGNMENT RRDENG_BLOCK_SIZE @@ -36,7 +38,8 @@ struct rrdeng_df_sb { */ #define PAGE_METRICS (0) #define PAGE_TIER (1) -#define PAGE_TYPE_MAX 1 // Maximum page type (inclusive) +#define PAGE_GORILLA_METRICS (2) +#define PAGE_TYPE_MAX 2 // Maximum page type (inclusive) /* * Data file page descriptor @@ -47,7 +50,14 @@ struct rrdeng_extent_page_descr { uint8_t uuid[UUID_SZ]; uint32_t page_length; uint64_t start_time_ut; - uint64_t end_time_ut; + union { + struct { + uint32_t entries; + uint32_t delta_time_s; + } gorilla __attribute__((packed)); + + uint64_t end_time_ut; + }; } __attribute__ ((packed)); /* diff --git a/database/engine/rrdengine.c b/database/engine/rrdengine.c index ce363183d3bef3..3ae270252b0f94 100644 --- a/database/engine/rrdengine.c +++ b/database/engine/rrdengine.c @@ -575,55 +575,6 @@ static inline struct rrdeng_cmd rrdeng_deq_cmd(bool from_worker) { } -// ---------------------------------------------------------------------------- - -struct { - ARAL *aral[RRD_STORAGE_TIERS]; -} dbengine_page_alloc_globals = {}; - -static inline ARAL *page_size_lookup(size_t size) { - for(size_t tier = 0; tier < storage_tiers ;tier++) - if(size == tier_page_size[tier]) - return dbengine_page_alloc_globals.aral[tier]; - - return NULL; -} - -static void dbengine_page_alloc_init(void) { - for(size_t i = storage_tiers; i > 0 ;i--) { - size_t tier = storage_tiers - i; - - char buf[20 + 1]; - snprintfz(buf, 20, "tier%zu-pages", tier); - - dbengine_page_alloc_globals.aral[tier] = aral_create( - buf, - tier_page_size[tier], - 64, - 512 * tier_page_size[tier], - pgc_aral_statistics(), - NULL, NULL, false, false); - } -} - -void *dbengine_page_alloc(size_t size) { - ARAL *ar = page_size_lookup(size); - if(ar) return aral_mallocz(ar); - - return mallocz(size); -} - -void dbengine_page_free(void *page, size_t size __maybe_unused) { - if(unlikely(!page || page == DBENGINE_EMPTY_PAGE)) - return; - - ARAL *ar = page_size_lookup(size); - if(ar) - aral_freez(ar, page); - else - freez(page); -} - // ---------------------------------------------------------------------------- void *dbengine_extent_alloc(size_t size) { @@ -890,12 +841,25 @@ static struct extent_io_descriptor *datafile_extent_build(struct rrdengine_insta uuid_copy(*(uuid_t *)header->descr[i].uuid, *descr->id); header->descr[i].page_length = descr->page_length; header->descr[i].start_time_ut = descr->start_time_ut; - header->descr[i].end_time_ut = descr->end_time_ut; + + switch (descr->type) { + case PAGE_METRICS: + case PAGE_TIER: + header->descr[i].end_time_ut = descr->end_time_ut; + break; + case PAGE_GORILLA_METRICS: + header->descr[i].gorilla.delta_time_s = (uint32_t) ((descr->end_time_ut - descr->start_time_ut) / USEC_PER_SEC); + header->descr[i].gorilla.entries = pgd_slots_used(descr->pgd); + break; + default: + fatal("Unknown page type: %uc", descr->type); + } + pos += sizeof(header->descr[i]); } for (i = 0 ; i < count ; ++i) { descr = xt_io_descr->descr_array[i]; - (void) memcpy(xt_io_descr->buf + pos, descr->page, descr->page_length); + pgd_copy_to_extent(descr->pgd, xt_io_descr->buf + pos, descr->page_length); pos +=
descr->page_length; } @@ -1381,9 +1345,6 @@ static void after_ctx_shutdown(struct rrdengine_instance *ctx __maybe_unused, vo static void *ctx_shutdown_tp_worker(struct rrdengine_instance *ctx __maybe_unused, void *data __maybe_unused, struct completion *completion __maybe_unused, uv_work_t *uv_work_req __maybe_unused) { worker_is_busy(UV_EVENT_DBENGINE_SHUTDOWN); - completion_wait_for(&ctx->quiesce.completion); - completion_destroy(&ctx->quiesce.completion); - bool logged = false; while(__atomic_load_n(&ctx->atomic.extents_currently_being_flushed, __ATOMIC_RELAXED) || __atomic_load_n(&ctx->atomic.inflight_queries, __ATOMIC_RELAXED)) { @@ -1427,8 +1388,8 @@ static void *query_prep_tp_worker(struct rrdengine_instance *ctx __maybe_unused, return data; } -unsigned rrdeng_target_data_file_size(struct rrdengine_instance *ctx) { - unsigned target_size = ctx->config.max_disk_space / TARGET_DATAFILES; +uint64_t rrdeng_target_data_file_size(struct rrdengine_instance *ctx) { + uint64_t target_size = ctx->config.max_disk_space / TARGET_DATAFILES; target_size = MIN(target_size, MAX_DATAFILE_SIZE); target_size = MAX(target_size, MIN_DATAFILE_SIZE); return target_size; @@ -1514,12 +1475,19 @@ static void *journal_v2_indexing_tp_worker(struct rrdengine_instance *ctx __mayb spinlock_unlock(&datafile->writers.spinlock); if(!available) { - netdata_log_info("DBENGINE: journal file %u needs to be indexed, but it has writers working on it - skipping it for now", datafile->fileno); + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "DBENGINE: journal file %u needs to be indexed, but it has writers working on it - " + "skipping it for now", + datafile->fileno); + datafile = datafile->next; continue; } - netdata_log_info("DBENGINE: journal file %u is ready to be indexed", datafile->fileno); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "DBENGINE: journal file %u is ready to be indexed", + datafile->fileno); + pgc_open_cache_to_journal_v2(open_cache, (Word_t) ctx, (int) datafile->fileno, ctx->config.page_type, journalfile_migrate_to_v2_callback, (void *) datafile->journalfile); @@ -1532,7 +1500,10 @@ static void *journal_v2_indexing_tp_worker(struct rrdengine_instance *ctx __mayb } errno = 0; - internal_error(count, "DBENGINE: journal indexing done; %u files processed", count); + if(count) + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "DBENGINE: journal indexing done; %u files processed", + count); worker_is_idle(); @@ -1628,7 +1599,7 @@ static void dbengine_initialize_structures(void) { rrdeng_query_handle_init(); page_descriptors_init(); extent_buffer_init(); - dbengine_page_alloc_init(); + pgd_init_arals(); extent_io_descriptor_init(); } diff --git a/database/engine/rrdengine.h b/database/engine/rrdengine.h index b5476930aeafa3..73f6fab76fc7da 100644 --- a/database/engine/rrdengine.h +++ b/database/engine/rrdengine.h @@ -22,6 +22,7 @@ #include "metric.h" #include "cache.h" #include "pdc.h" +#include "page.h" extern unsigned rrdeng_pages_per_extent; @@ -119,7 +120,6 @@ struct page_details { time_t first_time_s; time_t last_time_s; uint32_t update_every_s; - uint16_t page_length; PDC_PAGE_STATUS status; struct { @@ -190,10 +190,11 @@ struct rrdeng_collect_handle { RRDENG_COLLECT_HANDLE_OPTIONS options; uint8_t type; + struct rrdengine_instance *ctx; struct metric *metric; - struct pgc_page *page; - void *data; - size_t data_size; + struct pgc_page *pgc_page; + struct pgd *page_data; + size_t page_data_size; struct pg_alignment *alignment; uint32_t page_entries_max; uint32_t page_position; // keep track of the current page size, to make sure we 
don't exceed it @@ -206,7 +207,7 @@ struct rrdeng_query_handle { struct metric *metric; struct pgc_page *page; struct rrdengine_instance *ctx; - storage_number *metric_data; + struct pgd_cursor pgdc; struct page_details_control *pdc; // the request @@ -445,9 +446,6 @@ static inline void ctx_last_flush_fileno_set(struct rrdengine_instance *ctx, uns #define ctx_is_available_for_queries(ctx) (__atomic_load_n(&(ctx)->quiesce.enabled, __ATOMIC_RELAXED) == false && __atomic_load_n(&(ctx)->quiesce.exit_mode, __ATOMIC_RELAXED) == false) -void *dbengine_page_alloc(size_t size); -void dbengine_page_free(void *page, size_t size); - void *dbengine_extent_alloc(size_t size); void dbengine_extent_free(void *extent, size_t size); @@ -476,7 +474,7 @@ void pdc_route_synchronously(struct rrdengine_instance *ctx, struct page_details void pdc_acquire(PDC *pdc); bool pdc_release_and_destroy_if_unreferenced(PDC *pdc, bool worker, bool router); -unsigned rrdeng_target_data_file_size(struct rrdengine_instance *ctx); +uint64_t rrdeng_target_data_file_size(struct rrdengine_instance *ctx); struct page_descr_with_data *page_descriptor_get(void); @@ -491,8 +489,6 @@ typedef struct validated_page_descriptor { bool is_valid; } VALIDATED_PAGE_DESCRIPTOR; -#define DBENGINE_EMPTY_PAGE (void *)(-1) - #define page_entries_by_time(start_time_s, end_time_s, update_every_s) \ ((update_every_s) ? (((end_time_s) - ((start_time_s) - (update_every_s))) / (update_every_s)) : 1) diff --git a/database/engine/rrdengineapi.c b/database/engine/rrdengineapi.c index c6b1fa2dde4944..1ddce5243869b5 100755 --- a/database/engine/rrdengineapi.c +++ b/database/engine/rrdengineapi.c @@ -1,4 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later + +#include "database/engine/rrddiskprotocol.h" #include "rrdengine.h" /* Default global database instance */ @@ -22,10 +24,15 @@ size_t tier_page_size[RRD_STORAGE_TIERS] = {2048, 1024, 192, 192, 192}; size_t tier_page_size[RRD_STORAGE_TIERS] = {4096, 2048, 384, 384, 384}; #endif -#if PAGE_TYPE_MAX != 1 -#error PAGE_TYPE_MAX is not 1 - you need to add allocations here +#if PAGE_TYPE_MAX != 2 +#error PAGE_TYPE_MAX is not 2 - you need to add allocations here #endif -size_t page_type_size[256] = {sizeof(storage_number), sizeof(storage_number_tier1_t)}; + +size_t page_type_size[256] = { + [PAGE_METRICS] = sizeof(storage_number), + [PAGE_TIER] = sizeof(storage_number_tier1_t), + [PAGE_GORILLA_METRICS] = sizeof(storage_number) +}; __attribute__((constructor)) void initialize_multidb_ctx(void) { multidb_ctx[0] = &multidb_ctx_storage_tier0; @@ -198,15 +205,15 @@ static inline void check_and_fix_mrg_update_every(struct rrdeng_collect_handle * static inline bool check_completed_page_consistency(struct rrdeng_collect_handle *handle __maybe_unused) { #ifdef NETDATA_INTERNAL_CHECKS - if (unlikely(!handle->page || !handle->page_entries_max || !handle->page_position || !handle->page_end_time_ut)) + if (unlikely(!handle->pgc_page || !handle->page_entries_max || !handle->page_position || !handle->page_end_time_ut)) return false; struct rrdengine_instance *ctx = mrg_metric_ctx(handle->metric); uuid_t *uuid = mrg_metric_uuid(main_mrg, handle->metric); - time_t start_time_s = pgc_page_start_time_s(handle->page); - time_t end_time_s = pgc_page_end_time_s(handle->page); - time_t update_every_s = pgc_page_update_every_s(handle->page); + time_t start_time_s = pgc_page_start_time_s(handle->pgc_page); + time_t end_time_s = pgc_page_end_time_s(handle->pgc_page); + time_t update_every_s = pgc_page_update_every_s(handle->pgc_page); 
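    // recompute the completed page's length and entries from the number of points appended so far (page_position)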
size_t page_length = handle->page_position * CTX_POINT_SIZE_BYTES(ctx); size_t entries = handle->page_position; time_t overwrite_zero_update_every_s = (time_t)(handle->update_every_ut / USEC_PER_SEC); @@ -257,9 +264,11 @@ STORAGE_COLLECT_HANDLE *rrdeng_store_metric_init(STORAGE_METRIC_HANDLE *db_metri handle = callocz(1, sizeof(struct rrdeng_collect_handle)); handle->common.backend = STORAGE_ENGINE_BACKEND_DBENGINE; handle->metric = metric; - handle->page = NULL; - handle->data = NULL; - handle->data_size = 0; + + handle->pgc_page = NULL; + handle->page_data = NULL; + handle->page_data_size = 0; + handle->page_position = 0; handle->page_entries_max = 0; handle->update_every_ut = (usec_t)update_every * USEC_PER_SEC; @@ -286,65 +295,29 @@ STORAGE_COLLECT_HANDLE *rrdeng_store_metric_init(STORAGE_METRIC_HANDLE *db_metri return (STORAGE_COLLECT_HANDLE *)handle; } -/* The page must be populated and referenced */ -static bool page_has_only_empty_metrics(struct rrdeng_collect_handle *handle) { - switch(handle->type) { - case PAGE_METRICS: { - size_t slots = handle->page_position; - storage_number *array = (storage_number *)pgc_page_data(handle->page); - for (size_t i = 0 ; i < slots; ++i) { - if(does_storage_number_exist(array[i])) - return false; - } - } - break; - - case PAGE_TIER: { - size_t slots = handle->page_position; - storage_number_tier1_t *array = (storage_number_tier1_t *)pgc_page_data(handle->page); - for (size_t i = 0 ; i < slots; ++i) { - if(fpclassify(array[i].sum_value) != FP_NAN) - return false; - } - } - break; - - default: { - static bool logged = false; - if(!logged) { - netdata_log_error("DBENGINE: cannot check page for nulls on unknown page type id %d", (mrg_metric_ctx(handle->metric))->config.page_type); - logged = true; - } - return false; - } - } - - return true; -} - void rrdeng_store_metric_flush_current_page(STORAGE_COLLECT_HANDLE *collection_handle) { struct rrdeng_collect_handle *handle = (struct rrdeng_collect_handle *)collection_handle; - if (unlikely(!handle->page)) + if (unlikely(!handle->pgc_page)) return; - if(!handle->page_position || page_has_only_empty_metrics(handle)) - pgc_page_to_clean_evict_or_release(main_cache, handle->page); + if(pgd_is_empty(handle->page_data)) + pgc_page_to_clean_evict_or_release(main_cache, handle->pgc_page); else { check_completed_page_consistency(handle); - mrg_metric_set_clean_latest_time_s(main_mrg, handle->metric, pgc_page_end_time_s(handle->page)); - pgc_page_hot_to_dirty_and_release(main_cache, handle->page); + mrg_metric_set_clean_latest_time_s(main_mrg, handle->metric, pgc_page_end_time_s(handle->pgc_page)); + pgc_page_hot_to_dirty_and_release(main_cache, handle->pgc_page); } mrg_metric_set_hot_latest_time_s(main_mrg, handle->metric, 0); - handle->page = NULL; + handle->pgc_page = NULL; handle->page_flags = 0; handle->page_position = 0; handle->page_entries_max = 0; - handle->data = NULL; - handle->data_size = 0; + handle->page_data = NULL; + handle->page_data_size = 0; // important! 
// we should never zero page end time ut, because this will allow @@ -358,10 +331,10 @@ void rrdeng_store_metric_flush_current_page(STORAGE_COLLECT_HANDLE *collection_h } static void rrdeng_store_metric_create_new_page(struct rrdeng_collect_handle *handle, - struct rrdengine_instance *ctx, - usec_t point_in_time_ut, - void *data, - size_t data_size) { + struct rrdengine_instance *ctx, + usec_t point_in_time_ut, + PGD *data, + size_t data_size) { time_t point_in_time_s = (time_t)(point_in_time_ut / USEC_PER_SEC); const time_t update_every_s = (time_t)(handle->update_every_ut / USEC_PER_SEC); @@ -378,7 +351,7 @@ static void rrdeng_store_metric_create_new_page(struct rrdeng_collect_handle *ha size_t conflicts = 0; bool added = true; - PGC_PAGE *page = pgc_page_add_and_acquire(main_cache, page_entry, &added); + PGC_PAGE *pgc_page = pgc_page_add_and_acquire(main_cache, page_entry, &added); while (unlikely(!added)) { conflicts++; @@ -388,33 +361,33 @@ static void rrdeng_store_metric_create_new_page(struct rrdeng_collect_handle *ha #ifdef NETDATA_INTERNAL_CHECKS internal_error(true, #else - error_limit_static_global_var(erl, 1, 0); - error_limit(&erl, + nd_log_limit_static_global_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_DAEMON, NDLP_WARNING, #endif - "DBENGINE: metric '%s' new page from %ld to %ld, update every %ld, has a conflict in main cache " - "with existing %s%s page from %ld to %ld, update every %ld - " - "is it collected more than once?", - uuid, - page_entry.start_time_s, page_entry.end_time_s, (time_t)page_entry.update_every_s, - pgc_is_page_hot(page) ? "hot" : "not-hot", - pgc_page_data(page) == DBENGINE_EMPTY_PAGE ? " gap" : "", - pgc_page_start_time_s(page), pgc_page_end_time_s(page), pgc_page_update_every_s(page) + "DBENGINE: metric '%s' new page from %ld to %ld, update every %ld, has a conflict in main cache " + "with existing %s%s page from %ld to %ld, update every %ld - " + "is it collected more than once?", + uuid, + page_entry.start_time_s, page_entry.end_time_s, (time_t)page_entry.update_every_s, + pgc_is_page_hot(pgc_page) ? "hot" : "not-hot", + pgc_page_data(pgc_page) == PGD_EMPTY ? " gap" : "", + pgc_page_start_time_s(pgc_page), pgc_page_end_time_s(pgc_page), pgc_page_update_every_s(pgc_page) ); - pgc_page_release(main_cache, page); + pgc_page_release(main_cache, pgc_page); point_in_time_ut -= handle->update_every_ut; point_in_time_s = (time_t)(point_in_time_ut / USEC_PER_SEC); page_entry.start_time_s = point_in_time_s; page_entry.end_time_s = point_in_time_s; - page = pgc_page_add_and_acquire(main_cache, page_entry, &added); + pgc_page = pgc_page_add_and_acquire(main_cache, page_entry, &added); } handle->page_entries_max = data_size / CTX_POINT_SIZE_BYTES(ctx); handle->page_start_time_ut = point_in_time_ut; handle->page_end_time_ut = point_in_time_ut; handle->page_position = 1; // zero is already in our data - handle->page = page; + handle->pgc_page = pgc_page; handle->page_flags = conflicts? 
RRDENG_PAGE_CONFLICT : 0; if(point_in_time_s > max_acceptable_collected_time()) @@ -441,9 +414,11 @@ static size_t aligned_allocation_entries(size_t max_slots, size_t target_slot, t return slots; } -static void *rrdeng_alloc_new_metric_data(struct rrdeng_collect_handle *handle, size_t *data_size, usec_t point_in_time_ut) { +static PGD *rrdeng_alloc_new_page_data(struct rrdeng_collect_handle *handle, size_t *data_size, usec_t point_in_time_ut) { struct rrdengine_instance *ctx = mrg_metric_ctx(handle->metric); + PGD *d = NULL; + size_t max_size = tier_page_size[ctx->config.tier]; size_t max_slots = max_size / CTX_POINT_SIZE_BYTES(ctx); @@ -467,10 +442,22 @@ static void *rrdeng_alloc_new_metric_data(struct rrdeng_collect_handle *handle, internal_fatal(size > tier_page_size[ctx->config.tier] || size < CTX_POINT_SIZE_BYTES(ctx) * 2, "ooops! wrong page size"); *data_size = size; - void *d = dbengine_page_alloc(size); - timing_step(TIMING_STEP_DBENGINE_PAGE_ALLOC); + switch (ctx->config.page_type) { + case PAGE_METRICS: + case PAGE_TIER: + d = pgd_create(ctx->config.page_type, slots); + break; + case PAGE_GORILLA_METRICS: + // ignore slots, and use the fixed number of slots per gorilla buffer. + // gorilla will automatically add more buffers if needed. + d = pgd_create(ctx->config.page_type, GORILLA_BUFFER_SLOTS); + break; + default: + fatal("Unknown page type: %uc\n", ctx->config.page_type); + } + timing_step(TIMING_STEP_DBENGINE_PAGE_ALLOC); return d; } @@ -486,37 +473,25 @@ static void rrdeng_store_metric_append_point(STORAGE_COLLECT_HANDLE *collection_ struct rrdeng_collect_handle *handle = (struct rrdeng_collect_handle *)collection_handle; struct rrdengine_instance *ctx = mrg_metric_ctx(handle->metric); - if(unlikely(!handle->data)) - handle->data = rrdeng_alloc_new_metric_data(handle, &handle->data_size, point_in_time_ut); + if(unlikely(!handle->page_data)) + handle->page_data = rrdeng_alloc_new_page_data(handle, &handle->page_data_size, point_in_time_ut); timing_step(TIMING_STEP_DBENGINE_CHECK_DATA); - if(likely(ctx->config.page_type == PAGE_METRICS)) { - storage_number *tier0_metric_data = handle->data; - tier0_metric_data[handle->page_position] = pack_storage_number(n, flags); - } - else if(likely(ctx->config.page_type == PAGE_TIER)) { - storage_number_tier1_t *tier12_metric_data = handle->data; - storage_number_tier1_t number_tier1; - number_tier1.sum_value = (float) n; - number_tier1.min_value = (float) min_value; - number_tier1.max_value = (float) max_value; - number_tier1.anomaly_count = anomaly_count; - number_tier1.count = count; - tier12_metric_data[handle->page_position] = number_tier1; - } - else - fatal("DBENGINE: cannot store metric on unknown page type id %d", ctx->config.page_type); + pgd_append_point(handle->page_data, + point_in_time_ut, + n, min_value, max_value, count, anomaly_count, flags, + handle->page_position); timing_step(TIMING_STEP_DBENGINE_PACK); - if(unlikely(!handle->page)){ - rrdeng_store_metric_create_new_page(handle, ctx, point_in_time_ut, handle->data, handle->data_size); + if(unlikely(!handle->pgc_page)) { + rrdeng_store_metric_create_new_page(handle, ctx, point_in_time_ut, handle->page_data, handle->page_data_size); // handle->position is set to 1 already } else { // update an existing page - pgc_page_hot_set_end_time_s(main_cache, handle->page, (time_t) (point_in_time_ut / USEC_PER_SEC)); + pgc_page_hot_set_end_time_s(main_cache, handle->pgc_page, (time_t) (point_in_time_ut / USEC_PER_SEC)); handle->page_end_time_ut = point_in_time_ut; 
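        // page_position is advanced in the check below; reaching page_entries_max means the current page has no free slots left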
if(unlikely(++handle->page_position >= handle->page_entries_max)) { @@ -534,19 +509,20 @@ static void rrdeng_store_metric_append_point(STORAGE_COLLECT_HANDLE *collection_ timing_step(TIMING_STEP_DBENGINE_MRG_UPDATE); } -static void store_metric_next_error_log(struct rrdeng_collect_handle *handle, usec_t point_in_time_ut, const char *msg) { +static void store_metric_next_error_log(struct rrdeng_collect_handle *handle __maybe_unused, usec_t point_in_time_ut __maybe_unused, const char *msg __maybe_unused) { +#ifdef NETDATA_INTERNAL_CHECKS time_t point_in_time_s = (time_t)(point_in_time_ut / USEC_PER_SEC); char uuid[UUID_STR_LEN + 1]; uuid_unparse(*mrg_metric_uuid(main_mrg, handle->metric), uuid); BUFFER *wb = NULL; - if(handle->page && handle->page_flags) { + if(handle->pgc_page && handle->page_flags) { wb = buffer_create(0, NULL); collect_page_flags_to_buffer(wb, handle->page_flags); } - error_limit_static_global_var(erl, 1, 0); - error_limit(&erl, + nd_log_limit_static_global_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_DAEMON, NDLP_NOTICE, "DBENGINE: metric '%s' collected point at %ld, %s last collection at %ld, " "update every %ld, %s page from %ld to %ld, position %u (of %u), flags: %s", uuid, @@ -554,14 +530,17 @@ static void store_metric_next_error_log(struct rrdeng_collect_handle *handle, us msg, (time_t)(handle->page_end_time_ut / USEC_PER_SEC), (time_t)(handle->update_every_ut / USEC_PER_SEC), - handle->page ? "current" : "*LAST*", + handle->pgc_page ? "current" : "*LAST*", (time_t)(handle->page_start_time_ut / USEC_PER_SEC), (time_t)(handle->page_end_time_ut / USEC_PER_SEC), handle->page_position, handle->page_entries_max, wb ? buffer_tostring(wb) : "" - ); + ); buffer_free(wb); +#else + ; +#endif } void rrdeng_store_metric_next(STORAGE_COLLECT_HANDLE *collection_handle, @@ -589,7 +568,7 @@ void rrdeng_store_metric_next(STORAGE_COLLECT_HANDLE *collection_handle, ; } else if(unlikely(point_in_time_ut > handle->page_end_time_ut)) { - if(handle->page) { + if(handle->pgc_page) { if (unlikely(delta_ut < handle->update_every_ut)) { handle->page_flags |= RRDENG_PAGE_STEP_TOO_SMALL; rrdeng_store_metric_flush_current_page(collection_handle); @@ -797,23 +776,26 @@ void rrdeng_load_metric_init(STORAGE_METRIC_HANDLE *db_metric_handle, static bool rrdeng_load_page_next(struct storage_engine_query_handle *rrddim_handle, bool debug_this __maybe_unused) { struct rrdeng_query_handle *handle = (struct rrdeng_query_handle *)rrddim_handle->handle; - struct rrdengine_instance *ctx = handle->ctx; + struct rrdengine_instance *ctx = mrg_metric_ctx(handle->metric); if (likely(handle->page)) { // we have a page to release pgc_page_release(main_cache, handle->page); handle->page = NULL; + pgdc_reset(&handle->pgdc, NULL, UINT32_MAX); } if (unlikely(handle->now_s > rrddim_handle->end_time_s)) return false; - size_t entries; + size_t entries = 0; handle->page = pg_cache_lookup_next(ctx, handle->pdc, handle->now_s, handle->dt_s, &entries); - if (unlikely(!handle->page)) - return false; - internal_fatal(pgc_page_data(handle->page) == DBENGINE_EMPTY_PAGE, "Empty page returned"); + internal_fatal(handle->page && (pgc_page_data(handle->page) == PGD_EMPTY || !entries), + "A page was returned, but it is empty - pg_cache_lookup_next() should be handling this case"); + + if (unlikely(!handle->page || pgc_page_data(handle->page) == PGD_EMPTY || !entries)) + return false; time_t page_start_time_s = pgc_page_start_time_s(handle->page); time_t page_end_time_s = pgc_page_end_time_s(handle->page); @@ -853,8 +835,10 @@ static 
bool rrdeng_load_page_next(struct storage_engine_query_handle *rrddim_han handle->entries = entries; handle->position = position; - handle->metric_data = pgc_page_data((PGC_PAGE *)handle->page); handle->dt_s = page_update_every_s; + + pgdc_reset(&handle->pgdc, pgc_page_data(handle->page), handle->position); + return true; } @@ -883,38 +867,7 @@ STORAGE_POINT rrdeng_load_metric_next(struct storage_engine_query_handle *rrddim sp.start_time_s = handle->now_s - handle->dt_s; sp.end_time_s = handle->now_s; - switch(handle->ctx->config.page_type) { - case PAGE_METRICS: { - storage_number n = handle->metric_data[handle->position]; - sp.min = sp.max = sp.sum = unpack_storage_number(n); - sp.flags = n & SN_USER_FLAGS; - sp.count = 1; - sp.anomaly_count = is_storage_number_anomalous(n) ? 1 : 0; - } - break; - - case PAGE_TIER: { - storage_number_tier1_t tier1_value = ((storage_number_tier1_t *)handle->metric_data)[handle->position]; - sp.flags = tier1_value.anomaly_count ? SN_FLAG_NONE : SN_FLAG_NOT_ANOMALOUS; - sp.count = tier1_value.count; - sp.anomaly_count = tier1_value.anomaly_count; - sp.min = tier1_value.min_value; - sp.max = tier1_value.max_value; - sp.sum = tier1_value.sum_value; - } - break; - - // we don't know this page type - default: { - static bool logged = false; - if(!logged) { - netdata_log_error("DBENGINE: unknown page type %d found. Cannot decode it. Ignoring its metrics.", handle->ctx->config.page_type); - logged = true; - } - storage_point_empty(sp, sp.start_time_s, sp.end_time_s); - } - break; - } + pgdc_get_next_point(&handle->pgdc, handle->position, &sp); prepare_for_next_iteration: internal_fatal(sp.end_time_s < rrddim_handle->start_time_s, "DBENGINE: this point is too old for this query"); @@ -938,8 +891,10 @@ void rrdeng_load_metric_finalize(struct storage_engine_query_handle *rrddim_hand { struct rrdeng_query_handle *handle = (struct rrdeng_query_handle *)rrddim_handle->handle; - if (handle->page) + if (handle->page) { pgc_page_release(main_cache, handle->page); + pgdc_reset(&handle->pgdc, NULL, UINT32_MAX); + } if(!pdc_release_and_destroy_if_unreferenced(handle->pdc, false, false)) __atomic_store_n(&handle->pdc->workers_should_stop, true, __ATOMIC_RELAXED); @@ -1002,12 +957,12 @@ bool rrdeng_metric_retention_by_uuid(STORAGE_INSTANCE *db_instance, uuid_t *dim_ return true; } -size_t rrdeng_disk_space_max(STORAGE_INSTANCE *db_instance) { +uint64_t rrdeng_disk_space_max(STORAGE_INSTANCE *db_instance) { struct rrdengine_instance *ctx = (struct rrdengine_instance *)db_instance; return ctx->config.max_disk_space; } -size_t rrdeng_disk_space_used(STORAGE_INSTANCE *db_instance) { +uint64_t rrdeng_disk_space_used(STORAGE_INSTANCE *db_instance) { struct rrdengine_instance *ctx = (struct rrdengine_instance *)db_instance; return __atomic_load_n(&ctx->atomic.current_disk_space, __ATOMIC_RELAXED); } @@ -1234,12 +1189,14 @@ int rrdeng_exit(struct rrdengine_instance *ctx) { // 4. then wait for completion bool logged = false; - while(__atomic_load_n(&ctx->atomic.collectors_running, __ATOMIC_RELAXED) && !unittest_running) { + size_t count = 10; + while(__atomic_load_n(&ctx->atomic.collectors_running, __ATOMIC_RELAXED) && count && !unittest_running) { if(!logged) { netdata_log_info("DBENGINE: waiting for collectors to finish on tier %d...", (ctx->config.legacy) ? -1 : ctx->config.tier); logged = true; } sleep_usec(100 * USEC_PER_MS); + count--; } netdata_log_info("DBENGINE: flushing main cache for tier %d", (ctx->config.legacy) ? 
-1 : ctx->config.tier); diff --git a/database/engine/rrdengineapi.h b/database/engine/rrdengineapi.h index 12f1becd1755a5..7ae0e7079a7799 100644 --- a/database/engine/rrdengineapi.h +++ b/database/engine/rrdengineapi.h @@ -20,6 +20,7 @@ extern int default_multidb_disk_quota_mb; extern struct rrdengine_instance *multidb_ctx[RRD_STORAGE_TIERS]; extern size_t page_type_size[]; extern size_t tier_page_size[]; +extern uint8_t tier_page_type[]; #define CTX_POINT_SIZE_BYTES(ctx) page_type_size[(ctx)->config.page_type] @@ -222,7 +223,7 @@ RRDENG_SIZE_STATS rrdeng_size_statistics(struct rrdengine_instance *ctx); size_t rrdeng_collectors_running(struct rrdengine_instance *ctx); bool rrdeng_is_legacy(STORAGE_INSTANCE *db_instance); -size_t rrdeng_disk_space_max(STORAGE_INSTANCE *db_instance); -size_t rrdeng_disk_space_used(STORAGE_INSTANCE *db_instance); +uint64_t rrdeng_disk_space_max(STORAGE_INSTANCE *db_instance); +uint64_t rrdeng_disk_space_used(STORAGE_INSTANCE *db_instance); #endif /* NETDATA_RRDENGINEAPI_H */ diff --git a/database/engine/rrdenginelib.h b/database/engine/rrdenginelib.h index 831e4853164229..fc587799968637 100644 --- a/database/engine/rrdenginelib.h +++ b/database/engine/rrdenginelib.h @@ -58,7 +58,7 @@ static inline void modify_bit(unsigned *x, unsigned pos, uint8_t val) } } -#define RRDENG_PATH_MAX (4096) +#define RRDENG_PATH_MAX (FILENAME_MAX + 1) /* returns old *ptr value */ static inline unsigned long ulong_compare_and_swap(volatile unsigned long *ptr, diff --git a/database/rrd.h b/database/rrd.h index 5884abce44028b..1d94a5d8c99f92 100644 --- a/database/rrd.h +++ b/database/rrd.h @@ -23,6 +23,8 @@ typedef struct rrdcalc RRDCALC; typedef struct rrdcalctemplate RRDCALCTEMPLATE; typedef struct alarm_entry ALARM_ENTRY; +typedef struct rrdlabels RRDLABELS; + typedef struct rrdfamily_acquired RRDFAMILY_ACQUIRED; typedef struct rrdvar_acquired RRDVAR_ACQUIRED; typedef struct rrdsetvar_acquired RRDSETVAR_ACQUIRED; @@ -113,6 +115,7 @@ struct ml_metrics_statistics { #include "rrddimvar.h" #include "rrdcalc.h" #include "rrdcalctemplate.h" +#include "rrdlabels.h" #include "streaming/rrdpush.h" #include "aclk/aclk_rrdhost_state.h" #include "sqlite/sqlite_health.h" @@ -236,7 +239,6 @@ typedef enum __attribute__ ((__packed__)) rrddim_options { RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS = (1 << 1), // do not offer RESET or OVERFLOW info to callers RRDDIM_OPTION_BACKFILLED_HIGH_TIERS = (1 << 2), // when set, we have backfilled higher tiers RRDDIM_OPTION_UPDATED = (1 << 3), // single-threaded collector updated flag - RRDDIM_OPTION_EXPOSED = (1 << 4), // single-threaded collector exposed flag // this is 8-bit } RRDDIM_OPTIONS; @@ -250,74 +252,22 @@ typedef enum __attribute__ ((__packed__)) rrddim_flags { RRDDIM_FLAG_NONE = 0, RRDDIM_FLAG_PENDING_HEALTH_INITIALIZATION = (1 << 0), - RRDDIM_FLAG_OBSOLETE = (1 << 2), // this is marked by the collector/module as obsolete + RRDDIM_FLAG_OBSOLETE = (1 << 1), // this is marked by the collector/module as obsolete // No new values have been collected for this dimension since agent start, or it was marked RRDDIM_FLAG_OBSOLETE at // least rrdset_free_obsolete_time seconds ago. 
- RRDDIM_FLAG_ARCHIVED = (1 << 3), - RRDDIM_FLAG_METADATA_UPDATE = (1 << 4), // Metadata needs to go to the database + RRDDIM_FLAG_ARCHIVED = (1 << 2), + RRDDIM_FLAG_METADATA_UPDATE = (1 << 3), // Metadata needs to go to the database - RRDDIM_FLAG_META_HIDDEN = (1 << 6), // Status of hidden option in the metadata database + RRDDIM_FLAG_META_HIDDEN = (1 << 4), // Status of hidden option in the metadata database + RRDDIM_FLAG_ML_MODEL_LOAD = (1 << 5), // Do ML LOAD for this dimension // this is 8 bit } RRDDIM_FLAGS; -#define rrddim_flag_check(rd, flag) (__atomic_load_n(&((rd)->flags), __ATOMIC_SEQ_CST) & (flag)) -#define rrddim_flag_set(rd, flag) __atomic_or_fetch(&((rd)->flags), (flag), __ATOMIC_SEQ_CST) -#define rrddim_flag_clear(rd, flag) __atomic_and_fetch(&((rd)->flags), ~(flag), __ATOMIC_SEQ_CST) - -typedef enum __attribute__ ((__packed__)) rrdlabel_source { - RRDLABEL_SRC_AUTO = (1 << 0), // set when Netdata found the label by some automation - RRDLABEL_SRC_CONFIG = (1 << 1), // set when the user configured the label - RRDLABEL_SRC_K8S = (1 << 2), // set when this label is found from k8s (RRDLABEL_SRC_AUTO should also be set) - RRDLABEL_SRC_ACLK = (1 << 3), // set when this label is found from ACLK (RRDLABEL_SRC_AUTO should also be set) - - // more sources can be added here - - RRDLABEL_FLAG_PERMANENT = (1 << 29), // set when this label should never be removed (can be overwritten though) - RRDLABEL_FLAG_OLD = (1 << 30), // marks for rrdlabels internal use - they are not exposed outside rrdlabels - RRDLABEL_FLAG_NEW = (1 << 31) // marks for rrdlabels internal use - they are not exposed outside rrdlabels -} RRDLABEL_SRC; - -#define RRDLABEL_FLAG_INTERNAL (RRDLABEL_FLAG_OLD | RRDLABEL_FLAG_NEW | RRDLABEL_FLAG_PERMANENT) - -size_t text_sanitize(unsigned char *dst, const unsigned char *src, size_t dst_size, unsigned char *char_map, bool utf, const char *empty, size_t *multibyte_length); - -DICTIONARY *rrdlabels_create(void); -void rrdlabels_destroy(DICTIONARY *labels_dict); -void rrdlabels_add(DICTIONARY *dict, const char *name, const char *value, RRDLABEL_SRC ls); -void rrdlabels_add_pair(DICTIONARY *dict, const char *string, RRDLABEL_SRC ls); -void rrdlabels_get_value_to_buffer_or_null(DICTIONARY *labels, BUFFER *wb, const char *key, const char *quote, const char *null); -void rrdlabels_value_to_buffer_array_item_or_null(DICTIONARY *labels, BUFFER *wb, const char *key); -void rrdlabels_get_value_strdup_or_null(DICTIONARY *labels, char **value, const char *key); -void rrdlabels_get_value_strcpyz(DICTIONARY *labels, char *dst, size_t dst_len, const char *key); -STRING *rrdlabels_get_value_string_dup(DICTIONARY *labels, const char *key); -STRING *rrdlabels_get_value_to_buffer_or_unset(DICTIONARY *labels, BUFFER *wb, const char *key, const char *unset); -void rrdlabels_flush(DICTIONARY *labels_dict); - -void rrdlabels_unmark_all(DICTIONARY *labels); -void rrdlabels_remove_all_unmarked(DICTIONARY *labels); - -int rrdlabels_walkthrough_read(DICTIONARY *labels, int (*callback)(const char *name, const char *value, RRDLABEL_SRC ls, void *data), void *data); -int rrdlabels_sorted_walkthrough_read(DICTIONARY *labels, int (*callback)(const char *name, const char *value, RRDLABEL_SRC ls, void *data), void *data); - -void rrdlabels_log_to_buffer(DICTIONARY *labels, BUFFER *wb); -bool rrdlabels_match_simple_pattern(DICTIONARY *labels, const char *simple_pattern_txt); - -bool rrdlabels_match_simple_pattern_parsed(DICTIONARY *labels, SIMPLE_PATTERN *pattern, char equal, size_t *searches); -int 
rrdlabels_to_buffer(DICTIONARY *labels, BUFFER *wb, const char *before_each, const char *equal, const char *quote, const char *between_them, bool (*filter_callback)(const char *name, const char *value, RRDLABEL_SRC ls, void *data), void *filter_data, void (*name_sanitizer)(char *dst, const char *src, size_t dst_size), void (*value_sanitizer)(char *dst, const char *src, size_t dst_size)); -void rrdlabels_to_buffer_json_members(DICTIONARY *labels, BUFFER *wb); - -void rrdlabels_migrate_to_these(DICTIONARY *dst, DICTIONARY *src); -void rrdlabels_copy(DICTIONARY *dst, DICTIONARY *src); - -void reload_host_labels(void); -void rrdset_update_rrdlabels(RRDSET *st, DICTIONARY *new_rrdlabels); -void rrdset_save_rrdlabels_to_sql(RRDSET *st); -void rrdhost_set_is_parent_label(void); -int rrdlabels_unittest(void); - -// unfortunately this break when defined in exporting_engine.h -bool exporting_labels_filter_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data); +#define rrddim_flag_get(rd) __atomic_load_n(&((rd)->flags), __ATOMIC_ACQUIRE) +#define rrddim_flag_check(rd, flag) (__atomic_load_n(&((rd)->flags), __ATOMIC_ACQUIRE) & (flag)) +#define rrddim_flag_set(rd, flag) __atomic_or_fetch(&((rd)->flags), (flag), __ATOMIC_RELEASE) +#define rrddim_flag_clear(rd, flag) __atomic_and_fetch(&((rd)->flags), ~(flag), __ATOMIC_RELEASE) // ---------------------------------------------------------------------------- // engine-specific iterator state for dimension data collection @@ -363,7 +313,11 @@ struct rrddim { struct rrdset *rrdset; rrd_ml_dimension_t *ml_dimension; // machine learning data about this dimension - RRDMETRIC_ACQUIRED *rrdmetric; // the rrdmetric of this dimension + + struct { + RRDMETRIC_ACQUIRED *rrdmetric; // the rrdmetric of this dimension + bool collected; + } rrdcontexts; #ifdef NETDATA_LOG_COLLECTION_ERRORS usec_t rrddim_store_metric_last_ut; // the timestamp we last called rrddim_store_metric() @@ -382,6 +336,16 @@ struct rrddim { storage_number *data; // the array of values } db; + // ------------------------------------------------------------------------ + // streaming + + struct { + struct { + uint32_t sent_version; + uint32_t dim_slot; + } sender; + } rrdpush; + // ------------------------------------------------------------------------ // data collection members @@ -412,16 +376,12 @@ struct rrddim { size_t rrddim_size(void); #define rrddim_id(rd) string2str((rd)->id) -#define rrddim_name(rd) string2str((rd)->name) +#define rrddim_name(rd) string2str((rd) ->name) #define rrddim_check_updated(rd) ((rd)->collector.options & RRDDIM_OPTION_UPDATED) #define rrddim_set_updated(rd) (rd)->collector.options |= RRDDIM_OPTION_UPDATED #define rrddim_clear_updated(rd) (rd)->collector.options &= ~RRDDIM_OPTION_UPDATED -#define rrddim_check_exposed(rd) ((rd)->collector.options & RRDDIM_OPTION_EXPOSED) -#define rrddim_set_exposed(rd) (rd)->collector.options |= RRDDIM_OPTION_EXPOSED -#define rrddim_clear_exposed(rd) (rd)->collector.options &= ~RRDDIM_OPTION_EXPOSED - // returns the RRDDIM cache filename, or NULL if it does not exist const char *rrddim_cache_filename(RRDDIM *rd); @@ -505,8 +465,8 @@ static inline void storage_engine_store_metric( count, anomaly_count, flags); } -size_t rrdeng_disk_space_max(STORAGE_INSTANCE *db_instance); -static inline size_t storage_engine_disk_space_max(STORAGE_ENGINE_BACKEND backend __maybe_unused, STORAGE_INSTANCE *db_instance) { +uint64_t rrdeng_disk_space_max(STORAGE_INSTANCE *db_instance); +static inline uint64_t 
storage_engine_disk_space_max(STORAGE_ENGINE_BACKEND backend __maybe_unused, STORAGE_INSTANCE *db_instance __maybe_unused) { #ifdef ENABLE_DBENGINE if(likely(backend == STORAGE_ENGINE_BACKEND_DBENGINE)) return rrdeng_disk_space_max(db_instance); @@ -515,8 +475,8 @@ static inline size_t storage_engine_disk_space_max(STORAGE_ENGINE_BACKEND backen return 0; } -size_t rrdeng_disk_space_used(STORAGE_INSTANCE *db_instance); -static inline size_t storage_engine_disk_space_used(STORAGE_ENGINE_BACKEND backend __maybe_unused, STORAGE_INSTANCE *db_instance) { +uint64_t rrdeng_disk_space_used(STORAGE_INSTANCE *db_instance); +static inline uint64_t storage_engine_disk_space_used(STORAGE_ENGINE_BACKEND backend __maybe_unused, STORAGE_INSTANCE *db_instance __maybe_unused) { #ifdef ENABLE_DBENGINE if(likely(backend == STORAGE_ENGINE_BACKEND_DBENGINE)) return rrdeng_disk_space_used(db_instance); @@ -527,7 +487,7 @@ static inline size_t storage_engine_disk_space_used(STORAGE_ENGINE_BACKEND backe } time_t rrdeng_global_first_time_s(STORAGE_INSTANCE *db_instance); -static inline time_t storage_engine_global_first_time_s(STORAGE_ENGINE_BACKEND backend __maybe_unused, STORAGE_INSTANCE *db_instance) { +static inline time_t storage_engine_global_first_time_s(STORAGE_ENGINE_BACKEND backend __maybe_unused, STORAGE_INSTANCE *db_instance __maybe_unused) { #ifdef ENABLE_DBENGINE if(likely(backend == STORAGE_ENGINE_BACKEND_DBENGINE)) return rrdeng_global_first_time_s(db_instance); @@ -537,7 +497,7 @@ static inline time_t storage_engine_global_first_time_s(STORAGE_ENGINE_BACKEND b } size_t rrdeng_currently_collected_metrics(STORAGE_INSTANCE *db_instance); -static inline size_t storage_engine_collected_metrics(STORAGE_ENGINE_BACKEND backend __maybe_unused, STORAGE_INSTANCE *db_instance) { +static inline size_t storage_engine_collected_metrics(STORAGE_ENGINE_BACKEND backend __maybe_unused, STORAGE_INSTANCE *db_instance __maybe_unused) { #ifdef ENABLE_DBENGINE if(likely(backend == STORAGE_ENGINE_BACKEND_DBENGINE)) return rrdeng_currently_collected_metrics(db_instance); @@ -716,13 +676,6 @@ STORAGE_ENGINE* storage_engine_find(const char* name); #define rrddim_foreach_read(rd, st) \ dfe_start_read((st)->rrddim_root_index, rd) - -#define rrddim_foreach_write(rd, st) \ - dfe_start_write((st)->rrddim_root_index, rd) - -#define rrddim_foreach_reentrant(rd, st) \ - dfe_start_reentrant((st)->rrddim_root_index, rd) - #define rrddim_foreach_done(rd) \ dfe_done(rd) @@ -743,43 +696,47 @@ typedef enum __attribute__ ((__packed__)) rrdset_flags { RRDSET_FLAG_UPSTREAM_SEND = (1 << 6), // if set, this chart should be sent upstream (streaming) RRDSET_FLAG_UPSTREAM_IGNORE = (1 << 7), // if set, this chart should not be sent upstream (streaming) - RRDSET_FLAG_UPSTREAM_EXPOSED = (1 << 8), // if set, we have sent this chart definition to netdata parent (streaming) - - RRDSET_FLAG_STORE_FIRST = (1 << 9), // if set, do not eliminate the first collection during interpolation - RRDSET_FLAG_HETEROGENEOUS = (1 << 10), // if set, the chart is not homogeneous (dimensions in it have multiple algorithms, multipliers or dividers) - RRDSET_FLAG_HOMOGENEOUS_CHECK = (1 << 11), // if set, the chart should be checked to determine if the dimensions are homogeneous - RRDSET_FLAG_HIDDEN = (1 << 12), // if set, do not show this chart on the dashboard, but use it for exporting - RRDSET_FLAG_SYNC_CLOCK = (1 << 13), // if set, microseconds on next data collection will be ignored (the chart will be synced to now) - RRDSET_FLAG_OBSOLETE_DIMENSIONS = (1 << 14), // this 
is marked by the collector/module when a chart has obsolete dimensions - // No new values have been collected for this chart since agent start, or it was marked RRDSET_FLAG_OBSOLETE at - // least rrdset_free_obsolete_time seconds ago. - RRDSET_FLAG_ARCHIVED = (1 << 15), - RRDSET_FLAG_METADATA_UPDATE = (1 << 16), // Mark that metadata needs to be stored - RRDSET_FLAG_ANOMALY_DETECTION = (1 << 18), // flag to identify anomaly detection charts. - RRDSET_FLAG_INDEXED_ID = (1 << 19), // the rrdset is indexed by its id - RRDSET_FLAG_INDEXED_NAME = (1 << 20), // the rrdset is indexed by its name - - RRDSET_FLAG_PENDING_HEALTH_INITIALIZATION = (1 << 21), - - RRDSET_FLAG_SENDER_REPLICATION_IN_PROGRESS = (1 << 22), // the sending side has replication in progress - RRDSET_FLAG_SENDER_REPLICATION_FINISHED = (1 << 23), // the sending side has completed replication - RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS = (1 << 24), // the receiving side has replication in progress - RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED = (1 << 25), // the receiving side has completed replication - - RRDSET_FLAG_UPSTREAM_SEND_VARIABLES = (1 << 26), // a custom variable has been updated and needs to be exposed to parent - - RRDSET_FLAG_COLLECTION_FINISHED = (1 << 27), // when set, data collection is not available for this chart - - RRDSET_FLAG_HAS_RRDCALC_LINKED = (1 << 28), // this chart has at least one rrdcal linked + + RRDSET_FLAG_STORE_FIRST = (1 << 8), // if set, do not eliminate the first collection during interpolation + RRDSET_FLAG_HETEROGENEOUS = (1 << 9), // if set, the chart is not homogeneous (dimensions in it have multiple algorithms, multipliers or dividers) + RRDSET_FLAG_HOMOGENEOUS_CHECK = (1 << 10), // if set, the chart should be checked to determine if the dimensions are homogeneous + RRDSET_FLAG_HIDDEN = (1 << 11), // if set, do not show this chart on the dashboard, but use it for exporting + RRDSET_FLAG_SYNC_CLOCK = (1 << 12), // if set, microseconds on next data collection will be ignored (the chart will be synced to now) + RRDSET_FLAG_OBSOLETE_DIMENSIONS = (1 << 13), // this is marked by the collector/module when a chart has obsolete dimensions + + RRDSET_FLAG_METADATA_UPDATE = (1 << 14), // Mark that metadata needs to be stored + RRDSET_FLAG_ANOMALY_DETECTION = (1 << 15), // flag to identify anomaly detection charts. 
+ RRDSET_FLAG_INDEXED_ID = (1 << 16), // the rrdset is indexed by its id + RRDSET_FLAG_INDEXED_NAME = (1 << 17), // the rrdset is indexed by its name + + RRDSET_FLAG_PENDING_HEALTH_INITIALIZATION = (1 << 18), + + RRDSET_FLAG_SENDER_REPLICATION_IN_PROGRESS = (1 << 19), // the sending side has replication in progress + RRDSET_FLAG_SENDER_REPLICATION_FINISHED = (1 << 20), // the sending side has completed replication + RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS = (1 << 21), // the receiving side has replication in progress + RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED = (1 << 22), // the receiving side has completed replication + + RRDSET_FLAG_UPSTREAM_SEND_VARIABLES = (1 << 23), // a custom variable has been updated and needs to be exposed to parent + + RRDSET_FLAG_COLLECTION_FINISHED = (1 << 24), // when set, data collection is not available for this chart + + RRDSET_FLAG_HAS_RRDCALC_LINKED = (1 << 25), // this chart has at least one rrdcal linked } RRDSET_FLAGS; -#define rrdset_flag_check(st, flag) (__atomic_load_n(&((st)->flags), __ATOMIC_SEQ_CST) & (flag)) -#define rrdset_flag_set(st, flag) __atomic_or_fetch(&((st)->flags), flag, __ATOMIC_SEQ_CST) -#define rrdset_flag_clear(st, flag) __atomic_and_fetch(&((st)->flags), ~(flag), __ATOMIC_SEQ_CST) +#define rrdset_flag_get(st) __atomic_load_n(&((st)->flags), __ATOMIC_ACQUIRE) +#define rrdset_flag_check(st, flag) (__atomic_load_n(&((st)->flags), __ATOMIC_ACQUIRE) & (flag)) +#define rrdset_flag_set(st, flag) __atomic_or_fetch(&((st)->flags), flag, __ATOMIC_RELEASE) +#define rrdset_flag_clear(st, flag) __atomic_and_fetch(&((st)->flags), ~(flag), __ATOMIC_RELEASE) #define rrdset_is_replicating(st) (rrdset_flag_check(st, RRDSET_FLAG_SENDER_REPLICATION_IN_PROGRESS|RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS) \ && !rrdset_flag_check(st, RRDSET_FLAG_SENDER_REPLICATION_FINISHED|RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED)) +struct pluginsd_rrddim { + RRDDIM_ACQUIRED *rda; + RRDDIM *rd; + const char *id; +}; + struct rrdset { uuid_t chart_uuid; // the global UUID for this chart @@ -804,11 +761,13 @@ struct rrdset { int32_t priority; // the sorting priority of this chart int32_t update_every; // data collection frequency - DICTIONARY *rrdlabels; // chart labels + RRDLABELS *rrdlabels; // chart labels DICTIONARY *rrdsetvar_root_index; // chart variables DICTIONARY *rrddimvar_root_index; // dimension variables // we use this dictionary to manage their allocation + uint32_t version; // the metadata version (auto-increment) + RRDSET_TYPE chart_type; // line, area, stacked // ------------------------------------------------------------------------ @@ -828,8 +787,11 @@ struct rrdset { RRDHOST *rrdhost; // pointer to RRDHOST this chart belongs to - RRDINSTANCE_ACQUIRED *rrdinstance; // the rrdinstance of this chart - RRDCONTEXT_ACQUIRED *rrdcontext; // the rrdcontext this chart belongs to + struct { + RRDINSTANCE_ACQUIRED *rrdinstance; // the rrdinstance of this chart + RRDCONTEXT_ACQUIRED *rrdcontext; // the rrdcontext this chart belongs to + bool collected; + } rrdcontexts; // ------------------------------------------------------------------------ // data collection members @@ -852,7 +814,15 @@ struct rrdset { // ------------------------------------------------------------------------ // data collection - streaming to parents, temp variables - time_t upstream_resync_time_s; // the timestamp up to which we should resync clock upstream + struct { + struct { + uint32_t sent_version; + uint32_t chart_slot; + uint32_t dim_last_slot_used; + + time_t 
resync_time_s; // the timestamp up to which we should resync clock upstream + } sender; + } rrdpush; // ------------------------------------------------------------------------ // db mode SAVE, MAP specifics @@ -895,10 +865,12 @@ struct rrdset { struct { SPINLOCK spinlock; // used only for cleanup pid_t collector_tid; + bool dims_with_slots; bool set; uint32_t pos; + int32_t last_slot; uint32_t size; - RRDDIM_ACQUIRED **rda; + struct pluginsd_rrddim *prd_array; } pluginsd; #ifdef NETDATA_LOG_REPLICATION_REQUESTS @@ -921,6 +893,54 @@ struct rrdset { #define rrdset_name(st) string2str((st)->name) #define rrdset_id(st) string2str((st)->id) +static inline uint32_t rrdset_metadata_version(RRDSET *st) { + return __atomic_load_n(&st->version, __ATOMIC_RELAXED); +} + +static inline uint32_t rrdset_metadata_upstream_version(RRDSET *st) { + return __atomic_load_n(&st->rrdpush.sender.sent_version, __ATOMIC_RELAXED); +} + +void rrdset_metadata_updated(RRDSET *st); + +static inline void rrdset_metadata_exposed_upstream(RRDSET *st, uint32_t version) { + __atomic_store_n(&st->rrdpush.sender.sent_version, version, __ATOMIC_RELAXED); +} + +static inline bool rrdset_check_upstream_exposed(RRDSET *st) { + return rrdset_metadata_version(st) == rrdset_metadata_upstream_version(st); +} + +static inline uint32_t rrddim_metadata_version(RRDDIM *rd) { + // the metadata version of the dimension, is the version of the chart + return rrdset_metadata_version(rd->rrdset); +} + +static inline uint32_t rrddim_metadata_upstream_version(RRDDIM *rd) { + return __atomic_load_n(&rd->rrdpush.sender.sent_version, __ATOMIC_RELAXED); +} + +void rrddim_metadata_updated(RRDDIM *rd); + +static inline void rrddim_metadata_exposed_upstream(RRDDIM *rd, uint32_t version) { + __atomic_store_n(&rd->rrdpush.sender.sent_version, version, __ATOMIC_RELAXED); +} + +static inline void rrddim_metadata_exposed_upstream_clear(RRDDIM *rd) { + __atomic_store_n(&rd->rrdpush.sender.sent_version, 0, __ATOMIC_RELAXED); +} + +static inline bool rrddim_check_upstream_exposed(RRDDIM *rd) { + return rrddim_metadata_upstream_version(rd) != 0; +} + +// the collector sets the exposed flag, but anyone can remove it +// still, it can be removed, after the collector has finished +// so, it is safe to check it without atomics +static inline bool rrddim_check_upstream_exposed_collector(RRDDIM *rd) { + return rd->rrdset->version == rd->rrdpush.sender.sent_version; +} + STRING *rrd_string_strdupz(const char *s); // ---------------------------------------------------------------------------- @@ -993,6 +1013,8 @@ typedef enum __attribute__ ((__packed__)) rrdhost_flags { RRDHOST_FLAG_METADATA_CLAIMID = (1 << 28), // metadata needs to be stored in the database RRDHOST_FLAG_RRDPUSH_RECEIVER_DISCONNECTED = (1 << 29), // set when the receiver part is disconnected + + RRDHOST_FLAG_GLOBAL_FUNCTIONS_UPDATED = (1 << 30), // set when the host has updated global functions } RRDHOST_FLAGS; #define rrdhost_flag_check(host, flag) (__atomic_load_n(&((host)->flags), __ATOMIC_SEQ_CST) & (flag)) @@ -1001,7 +1023,7 @@ typedef enum __attribute__ ((__packed__)) rrdhost_flags { #ifdef NETDATA_INTERNAL_CHECKS #define rrdset_debug(st, fmt, args...) 
do { if(unlikely(debug_flags & D_RRD_STATS && rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) \ - debug_int(__FILE__, __FUNCTION__, __LINE__, "%s: " fmt, rrdset_name(st), ##args); } while(0) + netdata_logger(NDLS_DEBUG, NDLP_DEBUG, __FILE__, __FUNCTION__, __LINE__, "%s: " fmt, rrdset_name(st), ##args); } while(0) #else #define rrdset_debug(st, fmt, args...) debug_dummy() #endif @@ -1021,6 +1043,7 @@ typedef enum __attribute__ ((__packed__)) { RRDHOST_OPTION_REPLICATION = (1 << 5), // when set, we support replication for this host RRDHOST_OPTION_VIRTUAL_HOST = (1 << 6), // when set, this host is a virtual one + RRDHOST_OPTION_EPHEMERAL_HOST = (1 << 7), // when set, this host is an ephemeral one } RRDHOST_OPTIONS; #define rrdhost_option_check(host, flag) ((host)->options & (flag)) @@ -1050,7 +1073,6 @@ struct alarm_entry { STRING *chart; STRING *chart_context; STRING *chart_name; - STRING *family; STRING *classification; STRING *component; @@ -1064,6 +1086,7 @@ struct alarm_entry { STRING *source; STRING *units; + STRING *summary; STRING *info; NETDATA_DOUBLE old_value; @@ -1094,14 +1117,12 @@ struct alarm_entry { #define ae_chart_id(ae) string2str((ae)->chart) #define ae_chart_name(ae) string2str((ae)->chart_name) #define ae_chart_context(ae) string2str((ae)->chart_context) -#define ae_family(ae) string2str((ae)->family) #define ae_classification(ae) string2str((ae)->classification) -#define ae_component(ae) string2str((ae)->component) -#define ae_type(ae) string2str((ae)->type) #define ae_exec(ae) string2str((ae)->exec) #define ae_recipient(ae) string2str((ae)->recipient) #define ae_source(ae) string2str((ae)->source) #define ae_units(ae) string2str((ae)->units) +#define ae_summary(ae) string2str((ae)->summary) #define ae_info(ae) string2str((ae)->info) #define ae_old_value_string(ae) string2str((ae)->old_value_string) #define ae_new_value_string(ae) string2str((ae)->new_value_string) @@ -1117,13 +1138,14 @@ typedef struct alarm_log { } ALARM_LOG; typedef struct health { - unsigned int health_enabled; // 1 when this host has health enabled time_t health_delay_up_to; // a timestamp to delay alarms processing up to STRING *health_default_exec; // the full path of the alarms notifications program STRING *health_default_recipient; // the default recipient for all alarms - size_t health_log_entries_written; // the number of alarm events written to the alarms event log + int health_log_entries_written; // the number of alarm events written to the alarms event log uint32_t health_default_warn_repeat_every; // the default value for the interval between repeating warning notifications uint32_t health_default_crit_repeat_every; // the default value for the interval between repeating critical notifications + unsigned int health_enabled; // 1 when this host has health enabled + bool use_summary_for_notifications; // whether or not to use the summary field as a subject for notifications } HEALTH; // ---------------------------------------------------------------------------- @@ -1167,7 +1189,7 @@ struct rrdhost_system_info { int mc_version; }; -struct rrdhost_system_info *rrdhost_labels_to_system_info(DICTIONARY *labels); +struct rrdhost_system_info *rrdhost_labels_to_system_info(RRDLABELS *labels); struct rrdhost { char machine_guid[GUID_LEN + 1]; // the unique ID of this host @@ -1210,6 +1232,31 @@ struct rrdhost { // ------------------------------------------------------------------------ // streaming of data to remote hosts - rrdpush sender + struct { + struct { + struct { + struct { + SPINLOCK 
spinlock; + + bool ignore; // when set, freeing slots will not put them in the available + uint32_t used; + uint32_t size; + uint32_t *array; + } available; // keep track of the available chart slots per host + + uint32_t last_used; // the last slot we used for a chart (increments only) + } pluginsd_chart_slots; + } send; + + struct { + struct { + SPINLOCK spinlock; // lock for the management of the allocation + uint32_t size; + RRDSET **array; + } pluginsd_chart_slots; + } receive; + } rrdpush; + char *rrdpush_send_destination; // where to send metrics to char *rrdpush_send_api_key; // the api key at the receiving netdata struct rrdpush_destinations *destinations; // a linked list of possible destinations @@ -1227,7 +1274,7 @@ struct rrdhost { struct sender_state *sender; netdata_thread_t rrdpush_sender_thread; // the sender thread size_t rrdpush_sender_replicating_charts; // the number of charts currently being replicated to a parent - void *aclk_sync_host_config; + struct aclk_sync_cfg_t *aclk_config; uint32_t rrdpush_receiver_connection_counter; // the number of times this receiver has connected uint32_t rrdpush_sender_connection_counter; // the number of times this sender has connected @@ -1235,6 +1282,7 @@ struct rrdhost { // ------------------------------------------------------------------------ // streaming of data from remote hosts - rrdpush receiver + time_t last_connected; // last time child connected (stored in db) time_t child_connect_time; // the time the last sender was connected time_t child_last_chart_command; // the time of the last CHART streaming command time_t child_disconnected_time; // the time the last sender was disconnected @@ -1274,7 +1322,7 @@ struct rrdhost { // ------------------------------------------------------------------------ // Support for host-level labels - DICTIONARY *rrdlabels; + RRDLABELS *rrdlabels; // ------------------------------------------------------------------------ // Support for functions @@ -1310,6 +1358,8 @@ struct rrdhost { netdata_mutex_t aclk_state_lock; aclk_rrdhost_state aclk_state; + DICTIONARY *configurable_plugins; // configurable plugins for this host + struct rrdhost *next; struct rrdhost *prev; }; @@ -1377,6 +1427,7 @@ void rrddim_index_destroy(RRDSET *st); // ---------------------------------------------------------------------------- extern time_t rrdhost_free_orphan_time_s; +extern time_t rrdhost_free_ephemeral_time_s; int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unittest); @@ -1385,30 +1436,29 @@ RRDHOST *rrdhost_find_by_guid(const char *guid); RRDHOST *find_host_by_node_id(char *node_id); RRDHOST *rrdhost_find_or_create( - const char *hostname - , const char *registry_hostname - , const char *guid - , const char *os - , const char *timezone - , const char *abbrev_timezone - , int32_t utc_offset - , const char *tags - , const char *program_name - , const char *program_version - , int update_every - , long history - , RRD_MEMORY_MODE mode - , unsigned int health_enabled - , unsigned int rrdpush_enabled - , char *rrdpush_destination - , char *rrdpush_api_key - , char *rrdpush_send_charts_matching - , bool rrdpush_enable_replication - , time_t rrdpush_seconds_to_replicate - , time_t rrdpush_replication_step - , struct rrdhost_system_info *system_info - , bool is_archived -); + const char *hostname, + const char *registry_hostname, + const char *guid, + const char *os, + const char *timezone, + const char *abbrev_timezone, + int32_t utc_offset, + const char *tags, + const char *program_name, + 
const char *program_version, + int update_every, + long history, + RRD_MEMORY_MODE mode, + unsigned int health_enabled, + unsigned int rrdpush_enabled, + char *rrdpush_destination, + char *rrdpush_api_key, + char *rrdpush_send_charts_matching, + bool rrdpush_enable_replication, + time_t rrdpush_seconds_to_replicate, + time_t rrdpush_replication_step, + struct rrdhost_system_info *system_info, + bool is_archived); int rrdhost_set_system_info_variable(struct rrdhost_system_info *system_info, char *name, char *value); @@ -1465,8 +1515,6 @@ void rrdset_acquired_release(RRDSET_ACQUIRED *rsa); static inline RRDSET *rrdset_find_active_localhost(const char *id) { RRDSET *st = rrdset_find_localhost(id); - if (unlikely(st && rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED))) - return NULL; return st; } @@ -1476,8 +1524,6 @@ RRDSET *rrdset_find_bytype(RRDHOST *host, const char *type, const char *id); static inline RRDSET *rrdset_find_active_bytype_localhost(const char *type, const char *id) { RRDSET *st = rrdset_find_bytype_localhost(type, id); - if (unlikely(st && rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED))) - return NULL; return st; } @@ -1487,8 +1533,6 @@ RRDSET *rrdset_find_byname(RRDHOST *host, const char *name); static inline RRDSET *rrdset_find_active_byname_localhost(const char *name) { RRDSET *st = rrdset_find_byname_localhost(name); - if (unlikely(st && rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED))) - return NULL; return st; } @@ -1500,13 +1544,12 @@ void rrdset_timed_next(RRDSET *st, struct timeval now, usec_t microseconds); void rrdset_timed_done(RRDSET *st, struct timeval now, bool pending_rrdset_next); void rrdset_done(RRDSET *st); -void rrdset_is_obsolete(RRDSET *st); -void rrdset_isnot_obsolete(RRDSET *st); +void rrdset_is_obsolete___safe_from_collector_thread(RRDSET *st); +void rrdset_isnot_obsolete___safe_from_collector_thread(RRDSET *st); // checks if the RRDSET should be offered to viewers -#define rrdset_is_available_for_viewers(st) (!rrdset_flag_check(st, RRDSET_FLAG_HIDDEN) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && !rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED) && rrdset_number_of_dimensions(st) && (st)->rrd_memory_mode != RRD_MEMORY_MODE_NONE) -#define rrdset_is_available_for_exporting_and_alarms(st) (!rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && !rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED) && rrdset_number_of_dimensions(st)) -#define rrdset_is_archived(st) (rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED) && rrdset_number_of_dimensions(st)) +#define rrdset_is_available_for_viewers(st) (!rrdset_flag_check(st, RRDSET_FLAG_HIDDEN) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && rrdset_number_of_dimensions(st) && (st)->rrd_memory_mode != RRD_MEMORY_MODE_NONE) +#define rrdset_is_available_for_exporting_and_alarms(st) (!rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && rrdset_number_of_dimensions(st)) time_t rrddim_first_entry_s(RRDDIM *rd); time_t rrddim_first_entry_s_of_tier(RRDDIM *rd, size_t tier); @@ -1520,6 +1563,8 @@ time_t rrdset_last_entry_s_of_tier(RRDSET *st, size_t tier); void rrdset_get_retention_of_tier_for_collected_chart(RRDSET *st, time_t *first_time_s, time_t *last_time_s, time_t now_s, size_t tier); +void rrdset_update_rrdlabels(RRDSET *st, RRDLABELS *new_rrdlabels); + // ---------------------------------------------------------------------------- // RRD DIMENSION functions @@ -1549,8 +1594,8 @@ RRDDIM *rrddim_find_active(RRDSET *st, const char *id); int rrddim_hide(RRDSET *st, const char *id); int rrddim_unhide(RRDSET *st, const char *id); -void 
rrddim_is_obsolete(RRDSET *st, RRDDIM *rd); -void rrddim_isnot_obsolete(RRDSET *st, RRDDIM *rd); +void rrddim_is_obsolete___safe_from_collector_thread(RRDSET *st, RRDDIM *rd); +void rrddim_isnot_obsolete___safe_from_collector_thread(RRDSET *st, RRDDIM *rd); collected_number rrddim_timed_set_by_pointer(RRDSET *st, RRDDIM *rd, struct timeval collected_time, collected_number value); collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value); @@ -1574,6 +1619,8 @@ void rrddim_store_metric(RRDDIM *rd, usec_t point_end_time_ut, NETDATA_DOUBLE n, // Miscellaneous functions char *rrdset_strncpyz_name(char *to, const char *from, size_t length); +void reload_host_labels(void); +void rrdhost_set_is_parent_label(void); // ---------------------------------------------------------------------------- // RRD internal functions @@ -1616,7 +1663,28 @@ static inline void rrdhost_retention(RRDHOST *host, time_t now, bool online, tim *to = online ? now : last_time_s; } +void rrdhost_pluginsd_send_chart_slots_free(RRDHOST *host); +void rrdhost_pluginsd_receive_chart_slots_free(RRDHOST *host); +void rrdset_pluginsd_receive_unslot_and_cleanup(RRDSET *st); +void rrdset_pluginsd_receive_unslot(RRDSET *st); + // ---------------------------------------------------------------------------- +static inline double rrddim_get_last_stored_value(RRDDIM *rd_dim, double *max_value, double div) { + if (!rd_dim) + return NAN; + + if (isnan(div) || div == 0.0) + div = 1.0; + + double value = rd_dim->collector.last_stored_value / div; + value = ABS(value); + + *max_value = MAX(*max_value, value); + + return value; +} + +// // RRD DB engine declarations #ifdef ENABLE_DBENGINE diff --git a/database/rrdcalc.c b/database/rrdcalc.c index 398ddb32b3be05..199d9080301085 100644 --- a/database/rrdcalc.c +++ b/database/rrdcalc.c @@ -98,7 +98,7 @@ uint32_t rrdcalc_get_unique_id(RRDHOST *host, STRING *chart, STRING *name, uint3 } // ---------------------------------------------------------------------------- -// RRDCALC replacing info text variables with RRDSET labels +// RRDCALC replacing info/summary text variables with RRDSET labels static STRING *rrdcalc_replace_variables_with_rrdset_labels(const char *line, RRDCALC *rc) { if (!line || !*line) @@ -135,6 +135,7 @@ static STRING *rrdcalc_replace_variables_with_rrdset_labels(const char *line, RR label_val[i - RRDCALC_VAR_LABEL_LEN - 1] = '\0'; if(likely(rc->rrdset && rc->rrdset->rrdlabels)) { + lbl_value = NULL; rrdlabels_get_value_strdup_or_null(rc->rrdset->rrdlabels, &lbl_value, label_val); if (lbl_value) { char *buf = find_and_replace(temp, var, lbl_value, m); @@ -155,12 +156,20 @@ static STRING *rrdcalc_replace_variables_with_rrdset_labels(const char *line, RR void rrdcalc_update_info_using_rrdset_labels(RRDCALC *rc) { if(!rc->rrdset || !rc->original_info || !rc->rrdset->rrdlabels) return; - size_t labels_version = dictionary_version(rc->rrdset->rrdlabels); + size_t labels_version = rrdlabels_version(rc->rrdset->rrdlabels); if(rc->labels_version != labels_version) { - STRING *old = rc->info; - rc->info = rrdcalc_replace_variables_with_rrdset_labels(rrdcalc_original_info(rc), rc); - string_freez(old); + if (rc->original_info) { + STRING *old = rc->info; + rc->info = rrdcalc_replace_variables_with_rrdset_labels(rrdcalc_original_info(rc), rc); + string_freez(old); + } + + if (rc->original_summary) { + STRING *old = rc->summary; + rc->summary = rrdcalc_replace_variables_with_rrdset_labels(rrdcalc_original_summary(rc), rc); + string_freez(old); + } 
rc->labels_version = labels_version; } @@ -221,7 +230,7 @@ static void rrdcalc_link_to_rrdset(RRDSET *st, RRDCALC *rc) { rw_spinlock_write_unlock(&st->alerts.spinlock); if(rc->update_every < rc->rrdset->update_every) { - netdata_log_error("Health alarm '%s.%s' has update every %d, less than chart update every %d. Setting alarm update frequency to %d.", rrdset_id(rc->rrdset), rrdcalc_name(rc), rc->update_every, rc->rrdset->update_every, rc->rrdset->update_every); + netdata_log_info("Health alarm '%s.%s' has update every %d, less than chart update every %d. Setting alarm update frequency to %d.", rrdset_id(rc->rrdset), rrdcalc_name(rc), rc->update_every, rc->rrdset->update_every, rc->rrdset->update_every); rc->update_every = rc->rrdset->update_every; } @@ -285,6 +294,11 @@ static void rrdcalc_link_to_rrdset(RRDSET *st, RRDCALC *rc) { rrdcalc_update_info_using_rrdset_labels(rc); + if(!rc->summary) { + rc->summary = string_dup(rc->name); + rc->original_summary = string_dup(rc->name); + } + time_t now = now_realtime_sec(); ALARM_ENTRY *ae = health_create_alarm_entry( @@ -297,7 +311,6 @@ static void rrdcalc_link_to_rrdset(RRDSET *st, RRDCALC *rc) { rc->rrdset->id, rc->rrdset->context, rc->rrdset->name, - rc->rrdset->family, rc->classification, rc->component, rc->type, @@ -310,6 +323,7 @@ static void rrdcalc_link_to_rrdset(RRDSET *st, RRDCALC *rc) { rc->status, rc->source, rc->units, + rc->summary, rc->info, 0, rrdcalc_isrepeating(rc)?HEALTH_ENTRY_FLAG_IS_REPEATING:0); @@ -343,7 +357,6 @@ static void rrdcalc_unlink_from_rrdset(RRDCALC *rc, bool having_ll_wrlock) { rc->rrdset->id, rc->rrdset->context, rc->rrdset->name, - rc->rrdset->family, rc->classification, rc->component, rc->type, @@ -356,6 +369,7 @@ static void rrdcalc_unlink_from_rrdset(RRDCALC *rc, bool having_ll_wrlock) { RRDCALC_STATUS_REMOVED, rc->source, rc->units, + rc->summary, rc->info, 0, 0); @@ -512,6 +526,11 @@ static void rrdcalc_rrdhost_insert_callback(const DICTIONARY_ITEM *item __maybe_ rc->info = string_dup(rt->info); rc->original_info = string_dup(rt->info); + if (!rt->summary) + rt->summary = string_dup(rc->name); + rc->summary = string_dup(rt->summary); + rc->original_summary = string_dup(rt->summary); + rc->classification = string_dup(rt->classification); rc->component = string_dup(rt->component); rc->type = string_dup(rt->type); @@ -790,10 +809,10 @@ void rrdcalc_delete_alerts_not_matching_host_labels_from_this_host(RRDHOST *host continue; if(!rrdlabels_match_simple_pattern_parsed(host->rrdlabels, rc->host_labels_pattern, '=', NULL)) { - netdata_log_health("Health configuration for alarm '%s' cannot be applied, because the host %s does not have the label(s) '%s'", - rrdcalc_name(rc), - rrdhost_hostname(host), - rrdcalc_host_labels(rc)); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "Health configuration for alarm '%s' cannot be applied, " + "because the host %s does not have the label(s) '%s'", + rrdcalc_name(rc), rrdhost_hostname(host), rrdcalc_host_labels(rc)); rrdcalc_unlink_and_delete(host, rc, false); } diff --git a/database/rrdcalc.h b/database/rrdcalc.h index 2081452c7a9d02..71f43633c4ffab 100644 --- a/database/rrdcalc.h +++ b/database/rrdcalc.h @@ -64,8 +64,10 @@ struct rrdcalc { STRING *source; // the source of this alarm STRING *units; // the units of the alarm + STRING *summary; // a short alert summary + STRING *original_summary; // the original summary field before any variable replacement STRING *original_info; // the original info field before any variable replacement - STRING *info; // a short description of the 
alarm + STRING *info; // a description of the alarm int update_every; // update frequency for the alarm @@ -170,6 +172,8 @@ struct rrdcalc { #define rrdcalc_module_match(rc) string2str((rc)->module_match) #define rrdcalc_source(rc) string2str((rc)->source) #define rrdcalc_units(rc) string2str((rc)->units) +#define rrdcalc_original_summary(rc) string2str((rc)->original_summary) +#define rrdcalc_summary(rc) string2str((rc)->summary) #define rrdcalc_original_info(rc) string2str((rc)->original_info) #define rrdcalc_info(rc) string2str((rc)->info) #define rrdcalc_dimensions(rc) string2str((rc)->dimensions) @@ -192,7 +196,6 @@ struct alert_config { STRING *os; STRING *host; STRING *on; - STRING *families; STRING *plugin; STRING *module; STRING *charts; @@ -206,6 +209,7 @@ struct alert_config { STRING *exec; STRING *to; STRING *units; + STRING *summary; STRING *info; STRING *classification; STRING *component; diff --git a/database/rrdcalctemplate.c b/database/rrdcalctemplate.c index a874039637c6db..f0e5da80bfd7d9 100644 --- a/database/rrdcalctemplate.c +++ b/database/rrdcalctemplate.c @@ -37,9 +37,6 @@ bool rrdcalctemplate_check_rrdset_conditions(RRDCALCTEMPLATE *rt, RRDSET *st, RR if (rt->charts_pattern && !simple_pattern_matches_string(rt->charts_pattern, st->name) && !simple_pattern_matches_string(rt->charts_pattern, st->id)) return false; - if (rt->family_pattern && !simple_pattern_matches_string(rt->family_pattern, st->family)) - return false; - if (rt->module_pattern && !simple_pattern_matches_string(rt->module_pattern, st->module_name)) return false; @@ -100,9 +97,6 @@ static void rrdcalctemplate_free_internals(RRDCALCTEMPLATE *rt) { expression_free(rt->warning); expression_free(rt->critical); - string_freez(rt->family_match); - simple_pattern_free(rt->family_pattern); - string_freez(rt->plugin_match); simple_pattern_free(rt->plugin_pattern); @@ -217,10 +211,6 @@ inline void rrdcalctemplate_delete_all(RRDHOST *host) { } #define RRDCALCTEMPLATE_MAX_KEY_SIZE 1024 -static size_t rrdcalctemplate_key(char *dst, size_t dst_len, const char *name, const char *family_match) { - return snprintfz(dst, dst_len, "%s/%s", name, (family_match && *family_match)?family_match:"*"); -} - void rrdcalctemplate_add_from_config(RRDHOST *host, RRDCALCTEMPLATE *rt) { if(unlikely(!rt->context)) { netdata_log_error("Health configuration for template '%s' does not have a context", rrdcalctemplate_name(rt)); @@ -238,7 +228,7 @@ void rrdcalctemplate_add_from_config(RRDHOST *host, RRDCALCTEMPLATE *rt) { } char key[RRDCALCTEMPLATE_MAX_KEY_SIZE + 1]; - size_t key_len = rrdcalctemplate_key(key, RRDCALCTEMPLATE_MAX_KEY_SIZE, rrdcalctemplate_name(rt), rrdcalctemplate_family_match(rt)); + size_t key_len = snprintfz(key, RRDCALCTEMPLATE_MAX_KEY_SIZE, "%s", rrdcalctemplate_name(rt)); bool added = false; dictionary_set_advanced(host->rrdcalctemplate_root_index, key, (ssize_t)(key_len + 1), rt, sizeof(*rt), &added); diff --git a/database/rrdcalctemplate.h b/database/rrdcalctemplate.h index 965a818a1b2d18..ca2c436568925f 100644 --- a/database/rrdcalctemplate.h +++ b/database/rrdcalctemplate.h @@ -22,9 +22,6 @@ struct rrdcalctemplate { STRING *context; - STRING *family_match; - SIMPLE_PATTERN *family_pattern; - STRING *plugin_match; SIMPLE_PATTERN *plugin_pattern; @@ -36,7 +33,8 @@ struct rrdcalctemplate { STRING *source; // the source of this alarm STRING *units; // the units of the alarm - STRING *info; // a short description of the alarm + STRING *summary; // a short summary of the alarm + STRING *info; // a description of the 
alarm int update_every; // update frequency for the alarm @@ -100,11 +98,11 @@ struct rrdcalctemplate { #define rrdcalctemplate_classification(rt) string2str((rt)->classification) #define rrdcalctemplate_component(rt) string2str((rt)->component) #define rrdcalctemplate_type(rt) string2str((rt)->type) -#define rrdcalctemplate_family_match(rt) string2str((rt)->family_match) #define rrdcalctemplate_plugin_match(rt) string2str((rt)->plugin_match) #define rrdcalctemplate_module_match(rt) string2str((rt)->module_match) #define rrdcalctemplate_charts_match(rt) string2str((rt)->charts_match) #define rrdcalctemplate_units(rt) string2str((rt)->units) +#define rrdcalctemplate_summary(rt) string2str((rt)->summary) #define rrdcalctemplate_info(rt) string2str((rt)->info) #define rrdcalctemplate_source(rt) string2str((rt)->source) #define rrdcalctemplate_dimensions(rt) string2str((rt)->dimensions) diff --git a/database/rrddim.c b/database/rrddim.c index 0f99f98df7be94..46226a548abf78 100644 --- a/database/rrddim.c +++ b/database/rrddim.c @@ -4,6 +4,11 @@ #include "rrd.h" #include "storage_engine.h" +void rrddim_metadata_updated(RRDDIM *rd) { + rrdcontext_updated_rrddim(rd); + rrdset_metadata_updated(rd->rrdset); +} + // ---------------------------------------------------------------------------- // RRDDIM index @@ -48,6 +53,8 @@ static void rrddim_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, v rd->rrdset = st; + rd->rrdpush.sender.dim_slot = __atomic_add_fetch(&st->rrdpush.sender.dim_last_slot_used, 1, __ATOMIC_RELAXED); + if(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST)) rd->collector.counter = 1; @@ -126,7 +133,7 @@ static void rrddim_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, v if(rrdset_number_of_dimensions(st) != 0) { RRDDIM *td; dfe_start_write(st->rrddim_root_index, td) { - if(!td) break; + if(td) break; } dfe_done(td); @@ -155,7 +162,6 @@ static void rrddim_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, v // let the chart resync rrdset_flag_set(st, RRDSET_FLAG_SYNC_CLOCK); - rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); ml_dimension_new(rd); @@ -283,10 +289,9 @@ static void rrddim_react_callback(const DICTIONARY_ITEM *item __maybe_unused, vo if(ctr->react_action == RRDDIM_REACT_UPDATED) { // the chart needs to be updated to the parent rrdset_flag_set(st, RRDSET_FLAG_SYNC_CLOCK); - rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); } - rrdcontext_updated_rrddim(rd); + rrddim_metadata_updated(rd); } size_t rrddim_size(void) { @@ -369,8 +374,7 @@ inline int rrddim_reset_name(RRDSET *st, RRDDIM *rd, const char *name) { rrddimvar_rename_all(rd); - rrddim_clear_exposed(rd); - rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + rrddim_metadata_updated(rd); return 1; } @@ -381,8 +385,7 @@ inline int rrddim_set_algorithm(RRDSET *st, RRDDIM *rd, RRD_ALGORITHM algorithm) netdata_log_debug(D_RRD_CALLS, "Updating algorithm of dimension '%s/%s' from %s to %s", rrdset_id(st), rrddim_name(rd), rrd_algorithm_name(rd->algorithm), rrd_algorithm_name(algorithm)); rd->algorithm = algorithm; - rrddim_clear_exposed(rd); - rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + rrddim_metadata_updated(rd); rrdset_flag_set(st, RRDSET_FLAG_HOMOGENEOUS_CHECK); rrdcontext_updated_rrddim_algorithm(rd); return 1; @@ -395,8 +398,7 @@ inline int rrddim_set_multiplier(RRDSET *st, RRDDIM *rd, int32_t multiplier) { netdata_log_debug(D_RRD_CALLS, "Updating multiplier of dimension '%s/%s' from %d to %d", rrdset_id(st), rrddim_name(rd), rd->multiplier, multiplier); 
rd->multiplier = multiplier; - rrddim_clear_exposed(rd); - rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + rrddim_metadata_updated(rd); rrdset_flag_set(st, RRDSET_FLAG_HOMOGENEOUS_CHECK); rrdcontext_updated_rrddim_multiplier(rd); return 1; @@ -409,8 +411,7 @@ inline int rrddim_set_divisor(RRDSET *st, RRDDIM *rd, int32_t divisor) { netdata_log_debug(D_RRD_CALLS, "Updating divisor of dimension '%s/%s' from %d to %d", rrdset_id(st), rrddim_name(rd), rd->divisor, divisor); rd->divisor = divisor; - rrddim_clear_exposed(rd); - rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + rrddim_metadata_updated(rd); rrdset_flag_set(st, RRDSET_FLAG_HOMOGENEOUS_CHECK); rrdcontext_updated_rrddim_divisor(rd); return 1; @@ -532,8 +533,8 @@ int rrddim_unhide(RRDSET *st, const char *id) { return 0; } -inline void rrddim_is_obsolete(RRDSET *st, RRDDIM *rd) { - netdata_log_debug(D_RRD_CALLS, "rrddim_is_obsolete() for chart %s, dimension %s", rrdset_name(st), rrddim_name(rd)); +inline void rrddim_is_obsolete___safe_from_collector_thread(RRDSET *st, RRDDIM *rd) { + netdata_log_debug(D_RRD_CALLS, "rrddim_is_obsolete___safe_from_collector_thread() for chart %s, dimension %s", rrdset_name(st), rrddim_name(rd)); if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_ARCHIVED))) { netdata_log_info("Cannot obsolete already archived dimension %s from chart %s", rrddim_name(rd), rrdset_name(st)); @@ -545,8 +546,8 @@ inline void rrddim_is_obsolete(RRDSET *st, RRDDIM *rd) { rrdcontext_updated_rrddim_flags(rd); } -inline void rrddim_isnot_obsolete(RRDSET *st __maybe_unused, RRDDIM *rd) { - netdata_log_debug(D_RRD_CALLS, "rrddim_isnot_obsolete() for chart %s, dimension %s", rrdset_name(st), rrddim_name(rd)); +inline void rrddim_isnot_obsolete___safe_from_collector_thread(RRDSET *st __maybe_unused, RRDDIM *rd) { + netdata_log_debug(D_RRD_CALLS, "rrddim_isnot_obsolete___safe_from_collector_thread() for chart %s, dimension %s", rrdset_name(st), rrddim_name(rd)); rrddim_flag_clear(rd, RRDDIM_FLAG_OBSOLETE); rrdcontext_updated_rrddim_flags(rd); diff --git a/database/rrdfunctions.c b/database/rrdfunctions.c index d32a4b8c91eeca..2659130f036639 100644 --- a/database/rrdfunctions.c +++ b/database/rrdfunctions.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-3.0-or-later #define NETDATA_RRD_INTERNALS #include "rrd.h" @@ -37,17 +38,17 @@ static unsigned char functions_allowed_chars[256] = { [30] = '_', // [31] = '_', // [32] = ' ', // SPACE keep - [33] = '_', // ! - [34] = '_', // " - [35] = '_', // # - [36] = '_', // $ - [37] = '_', // % - [38] = '_', // & - [39] = '_', // ' - [40] = '_', // ( - [41] = '_', // ) - [42] = '_', // * - [43] = '_', // + + [33] = '!', // ! keep + [34] = '"', // " keep + [35] = '#', // # keep + [36] = '$', // $ keep + [37] = '%', // % keep + [38] = '&', // & keep + [39] = '\'', // ' keep + [40] = '(', // ( keep + [41] = ')', // ) keep + [42] = '*', // * keep + [43] = '+', // + keep [44] = ',', // , keep [45] = '-', // - keep [46] = '.', // . keep @@ -63,12 +64,12 @@ static unsigned char functions_allowed_chars[256] = { [56] = '8', // 8 keep [57] = '9', // 9 keep [58] = ':', // : keep - [59] = ':', // ; convert ; to : - [60] = '_', // < - [61] = ':', // = convert = to : - [62] = '_', // > - [63] = '_', // ? - [64] = '_', // @ + [59] = ';', // ; keep + [60] = '<', // < keep + [61] = '=', // = keep + [62] = '>', // > keep + [63] = '?', // ? 
keep + [64] = '@', // @ keep [65] = 'A', // A keep [66] = 'B', // B keep [67] = 'C', // C keep @@ -95,12 +96,12 @@ static unsigned char functions_allowed_chars[256] = { [88] = 'X', // X keep [89] = 'Y', // Y keep [90] = 'Z', // Z keep - [91] = '_', // [ - [92] = '/', // backslash convert \ to / - [93] = '_', // ] - [94] = '_', // ^ + [91] = '[', // [ keep + [92] = '\\', // backslash keep + [93] = ']', // ] keep + [94] = '^', // ^ keep [95] = '_', // _ keep - [96] = '_', // ` + [96] = '`', // ` keep [97] = 'a', // a keep [98] = 'b', // b keep [99] = 'c', // c keep @@ -127,10 +128,10 @@ static unsigned char functions_allowed_chars[256] = { [120] = 'x', // x keep [121] = 'y', // y keep [122] = 'z', // z keep - [123] = '_', // { - [124] = '_', // | - [125] = '_', // } - [126] = '_', // ~ + [123] = '{', // { keep + [124] = '|', // | keep + [125] = '}', // } keep + [126] = '~', // ~ keep [127] = '_', // [128] = '_', // [129] = '_', // @@ -277,16 +278,15 @@ typedef enum __attribute__((packed)) { // this is 8-bit } RRD_FUNCTION_OPTIONS; -struct rrd_collector_function { +struct rrd_host_function { bool sync; // when true, the function is called synchronously RRD_FUNCTION_OPTIONS options; // RRD_FUNCTION_OPTIONS STRING *help; int timeout; // the default timeout of the function - int (*function)(BUFFER *wb, int timeout, const char *function, void *collector_data, - function_data_ready_callback callback, void *callback_data); + rrd_function_execute_cb_t execute_cb; - void *collector_data; + void *execute_cb_data; struct rrd_collector *collector; }; @@ -299,6 +299,7 @@ struct rrd_collector_function { struct rrd_collector { int32_t refcount; + int32_t refcount_canceller; pid_t tid; bool running; }; @@ -310,8 +311,11 @@ struct rrd_collector { static __thread struct rrd_collector *thread_rrd_collector = NULL; static void rrd_collector_free(struct rrd_collector *rdc) { + if(rdc->running) + return; + int32_t expected = 0; - if(likely(!__atomic_compare_exchange_n(&rdc->refcount, &expected, -1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))) { + if(!__atomic_compare_exchange_n(&rdc->refcount, &expected, -1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) { // the collector is still referenced by charts. // leave it hanging there, the last chart will actually free it. 
return; @@ -323,11 +327,11 @@ static void rrd_collector_free(struct rrd_collector *rdc) { // called once per collector void rrd_collector_started(void) { - if(likely(thread_rrd_collector)) return; + if(!thread_rrd_collector) + thread_rrd_collector = callocz(1, sizeof(struct rrd_collector)); - thread_rrd_collector = callocz(1, sizeof(struct rrd_collector)); thread_rrd_collector->tid = gettid(); - thread_rrd_collector->running = true; + __atomic_store_n(&thread_rrd_collector->running, true, __ATOMIC_RELAXED); } // called once per collector @@ -335,65 +339,110 @@ void rrd_collector_finished(void) { if(!thread_rrd_collector) return; - thread_rrd_collector->running = false; + __atomic_store_n(&thread_rrd_collector->running, false, __ATOMIC_RELAXED); + + int32_t expected = 0; + while(!__atomic_compare_exchange_n(&thread_rrd_collector->refcount_canceller, &expected, -1, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) { + expected = 0; + sleep_usec(1 * USEC_PER_MS); + } + rrd_collector_free(thread_rrd_collector); thread_rrd_collector = NULL; } +#define rrd_collector_running(c) __atomic_load_n(&(c)->running, __ATOMIC_RELAXED) + static struct rrd_collector *rrd_collector_acquire(void) { - __atomic_add_fetch(&thread_rrd_collector->refcount, 1, __ATOMIC_SEQ_CST); + rrd_collector_started(); + + int32_t expected = __atomic_load_n(&thread_rrd_collector->refcount, __ATOMIC_RELAXED), wanted = 0; + do { + if(expected < 0 || !rrd_collector_running(thread_rrd_collector)) { + internal_fatal(true, "FUNCTIONS: Trying to acquire a collector that is exiting."); + return thread_rrd_collector; + } + + wanted = expected + 1; + + } while(!__atomic_compare_exchange_n(&thread_rrd_collector->refcount, &expected, wanted, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)); + return thread_rrd_collector; } static void rrd_collector_release(struct rrd_collector *rdc) { if(unlikely(!rdc)) return; - int32_t refcount = __atomic_sub_fetch(&rdc->refcount, 1, __ATOMIC_SEQ_CST); - if(refcount == 0 && !rdc->running) + int32_t expected = __atomic_load_n(&rdc->refcount, __ATOMIC_RELAXED), wanted = 0; + do { + if(expected < 0) { + internal_fatal(true, "FUNCTIONS: Trying to release a collector that is exiting."); + return; + } + + if(expected == 0) { + internal_fatal(true, "FUNCTIONS: Trying to release a collector that is not acquired."); + return; + } + + wanted = expected - 1; + + } while(!__atomic_compare_exchange_n(&rdc->refcount, &expected, wanted, false, __ATOMIC_RELEASE, __ATOMIC_RELAXED)); + + if(wanted == 0) rrd_collector_free(rdc); } -static void rrd_functions_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func __maybe_unused, - void *rrdhost __maybe_unused) { - struct rrd_collector_function *rdcf = func; - - if(!thread_rrd_collector) - fatal("RRDSET_COLLECTOR: called %s() for function '%s' without calling rrd_collector_started() first.", - __FUNCTION__, dictionary_acquired_item_name(item)); +static void rrd_functions_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func, void *rrdhost) { + RRDHOST *host = rrdhost; (void)host; + struct rrd_host_function *rdcf = func; + rrd_collector_started(); rdcf->collector = rrd_collector_acquire(); + +// internal_error(true, "FUNCTIONS: adding function '%s' on host '%s', collection tid %d, %s", +// dictionary_acquired_item_name(item), rrdhost_hostname(host), +// rdcf->collector->tid, rdcf->collector->running ? 
"running" : "NOT running"); } -static void rrd_functions_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func __maybe_unused, +static void rrd_functions_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func, void *rrdhost __maybe_unused) { - struct rrd_collector_function *rdcf = func; + struct rrd_host_function *rdcf = func; rrd_collector_release(rdcf->collector); } -static bool rrd_functions_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func __maybe_unused, - void *new_func __maybe_unused, void *rrdhost __maybe_unused) { - struct rrd_collector_function *rdcf = func; - struct rrd_collector_function *new_rdcf = new_func; +static bool rrd_functions_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func, + void *new_func, void *rrdhost) { + RRDHOST *host = rrdhost; (void)host; + struct rrd_host_function *rdcf = func; + struct rrd_host_function *new_rdcf = new_func; - if(!thread_rrd_collector) - fatal("RRDSET_COLLECTOR: called %s() for function '%s' without calling rrd_collector_started() first.", - __FUNCTION__, dictionary_acquired_item_name(item)); + rrd_collector_started(); bool changed = false; if(rdcf->collector != thread_rrd_collector) { + netdata_log_info("FUNCTIONS: function '%s' of host '%s' changed collector from %d to %d", + dictionary_acquired_item_name(item), rrdhost_hostname(host), rdcf->collector->tid, thread_rrd_collector->tid); + struct rrd_collector *old_rdc = rdcf->collector; rdcf->collector = rrd_collector_acquire(); rrd_collector_release(old_rdc); changed = true; } - if(rdcf->function != new_rdcf->function) { - rdcf->function = new_rdcf->function; + if(rdcf->execute_cb != new_rdcf->execute_cb) { + netdata_log_info("FUNCTIONS: function '%s' of host '%s' changed execute callback", + dictionary_acquired_item_name(item), rrdhost_hostname(host)); + + rdcf->execute_cb = new_rdcf->execute_cb; changed = true; } if(rdcf->help != new_rdcf->help) { + netdata_log_info("FUNCTIONS: function '%s' of host '%s' changed help text", + dictionary_acquired_item_name(item), rrdhost_hostname(host)); + STRING *old = rdcf->help; rdcf->help = new_rdcf->help; string_freez(old); @@ -403,41 +452,53 @@ static bool rrd_functions_conflict_callback(const DICTIONARY_ITEM *item __maybe_ string_freez(new_rdcf->help); if(rdcf->timeout != new_rdcf->timeout) { + netdata_log_info("FUNCTIONS: function '%s' of host '%s' changed timeout", + dictionary_acquired_item_name(item), rrdhost_hostname(host)); + rdcf->timeout = new_rdcf->timeout; changed = true; } if(rdcf->sync != new_rdcf->sync) { + netdata_log_info("FUNCTIONS: function '%s' of host '%s' changed sync/async mode", + dictionary_acquired_item_name(item), rrdhost_hostname(host)); + rdcf->sync = new_rdcf->sync; changed = true; } - if(rdcf->collector_data != new_rdcf->collector_data) { - rdcf->collector_data = new_rdcf->collector_data; + if(rdcf->execute_cb_data != new_rdcf->execute_cb_data) { + netdata_log_info("FUNCTIONS: function '%s' of host '%s' changed execute callback data", + dictionary_acquired_item_name(item), rrdhost_hostname(host)); + + rdcf->execute_cb_data = new_rdcf->execute_cb_data; changed = true; } +// internal_error(true, "FUNCTIONS: adding function '%s' on host '%s', collection tid %d, %s", +// dictionary_acquired_item_name(item), rrdhost_hostname(host), +// rdcf->collector->tid, rdcf->collector->running ? 
"running" : "NOT running"); + return changed; } - -void rrdfunctions_init(RRDHOST *host) { +void rrdfunctions_host_init(RRDHOST *host) { if(host->functions) return; host->functions = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - &dictionary_stats_category_functions, sizeof(struct rrd_collector_function)); + &dictionary_stats_category_functions, sizeof(struct rrd_host_function)); dictionary_register_insert_callback(host->functions, rrd_functions_insert_callback, host); dictionary_register_delete_callback(host->functions, rrd_functions_delete_callback, host); dictionary_register_conflict_callback(host->functions, rrd_functions_conflict_callback, host); } -void rrdfunctions_destroy(RRDHOST *host) { +void rrdfunctions_host_destroy(RRDHOST *host) { dictionary_destroy(host->functions); } -void rrd_collector_add_function(RRDHOST *host, RRDSET *st, const char *name, int timeout, const char *help, - bool sync, function_execute_at_collector function, void *collector_data) { +void rrd_function_add(RRDHOST *host, RRDSET *st, const char *name, int timeout, const char *help, + bool sync, rrd_function_execute_cb_t execute_cb, void *execute_cb_data) { // RRDSET *st may be NULL in this function // to create a GLOBAL function @@ -448,18 +509,20 @@ void rrd_collector_add_function(RRDHOST *host, RRDSET *st, const char *name, int char key[PLUGINSD_LINE_MAX + 1]; sanitize_function_text(key, name, PLUGINSD_LINE_MAX); - struct rrd_collector_function tmp = { + struct rrd_host_function tmp = { .sync = sync, .timeout = timeout, .options = (st)?RRD_FUNCTION_LOCAL:RRD_FUNCTION_GLOBAL, - .function = function, - .collector_data = collector_data, + .execute_cb = execute_cb, + .execute_cb_data = execute_cb_data, .help = string_strdupz(help), }; const DICTIONARY_ITEM *item = dictionary_set_and_acquire_item(host->functions, key, &tmp, sizeof(tmp)); if(st) dictionary_view_set(st->functions_view, key, item); + else + rrdhost_flag_set(host, RRDHOST_FLAG_GLOBAL_FUNCTIONS_UPDATED); dictionary_acquired_item_release(host->functions, item); } @@ -468,7 +531,7 @@ void rrd_functions_expose_rrdpush(RRDSET *st, BUFFER *wb) { if(!st->functions_view) return; - struct rrd_collector_function *tmp; + struct rrd_host_function *tmp; dfe_start_read(st->functions_view, tmp) { buffer_sprintf(wb , PLUGINSD_KEYWORD_FUNCTION " \"%s\" %d \"%s\"\n" @@ -481,7 +544,9 @@ void rrd_functions_expose_rrdpush(RRDSET *st, BUFFER *wb) { } void rrd_functions_expose_global_rrdpush(RRDHOST *host, BUFFER *wb) { - struct rrd_collector_function *tmp; + rrdhost_flag_clear(host, RRDHOST_FLAG_GLOBAL_FUNCTIONS_UPDATED); + + struct rrd_host_function *tmp; dfe_start_read(host->functions, tmp) { if(!(tmp->options & RRD_FUNCTION_GLOBAL)) continue; @@ -496,20 +561,6 @@ void rrd_functions_expose_global_rrdpush(RRDHOST *host, BUFFER *wb) { dfe_done(tmp); } -struct rrd_function_call_wait { - bool free_with_signal; - bool data_are_ready; - netdata_mutex_t mutex; - pthread_cond_t cond; - int code; -}; - -static void rrd_function_call_wait_free(struct rrd_function_call_wait *tmp) { - pthread_cond_destroy(&tmp->cond); - netdata_mutex_destroy(&tmp->mutex); - freez(tmp); -} - struct { const char *format; HTTP_CONTENT_TYPE content_type; @@ -558,41 +609,171 @@ int rrd_call_function_error(BUFFER *wb, const char *msg, int code) { return code; } -static int rrd_call_function_find(RRDHOST *host, BUFFER *wb, const char *name, size_t key_length, struct rrd_collector_function **rdcf) { +static int rrd_call_function_find(RRDHOST *host, BUFFER *wb, 
const char *name, size_t key_length, const DICTIONARY_ITEM **item) { char buffer[MAX_FUNCTION_LENGTH + 1]; strncpyz(buffer, name, MAX_FUNCTION_LENGTH); char *s = NULL; - *rdcf = NULL; - while(!(*rdcf) && buffer[0]) { - *rdcf = dictionary_get(host->functions, buffer); - if(*rdcf) break; + bool found = false; + *item = NULL; + if(host->functions) { + while (buffer[0]) { + if((*item = dictionary_get_and_acquire_item(host->functions, buffer))) { + found = true; + + struct rrd_host_function *rdcf = dictionary_acquired_item_value(*item); + if(rrd_collector_running(rdcf->collector)) { + break; + } + else { + dictionary_acquired_item_release(host->functions, *item); + *item = NULL; + } + } - // if s == NULL, set it to the end of the buffer - // this should happen only the first time - if(unlikely(!s)) - s = &buffer[key_length - 1]; + // if s == NULL, set it to the end of the buffer + // this should happen only the first time + if (unlikely(!s)) + s = &buffer[key_length - 1]; - // skip a word from the end - while(s >= buffer && !isspace(*s)) *s-- = '\0'; + // skip a word from the end + while (s >= buffer && !isspace(*s)) *s-- = '\0'; - // skip all spaces - while(s >= buffer && isspace(*s)) *s-- = '\0'; + // skip all spaces + while (s >= buffer && isspace(*s)) *s-- = '\0'; + } } buffer_flush(wb); - if(!(*rdcf)) - return rrd_call_function_error(wb, "No collector is supplying this function on this host at this time.", HTTP_RESP_NOT_FOUND); - - if(!(*rdcf)->collector->running) - return rrd_call_function_error(wb, "The collector that registered this function, is not currently running.", HTTP_RESP_BACKEND_FETCH_FAILED); + if(!(*item)) { + if(found) + return rrd_call_function_error(wb, + "The collector that registered this function, is not currently running.", + HTTP_RESP_SERVICE_UNAVAILABLE); + else + return rrd_call_function_error(wb, + "No collector is supplying this function on this host at this time.", + HTTP_RESP_NOT_FOUND); + } return HTTP_RESP_OK; } -static void rrd_call_function_signal_when_ready(BUFFER *temp_wb __maybe_unused, int code, void *callback_data) { +// ---------------------------------------------------------------------------- + +struct rrd_function_inflight { + bool used; + + RRDHOST *host; + const char *transaction; + const char *cmd; + const char *sanitized_cmd; + size_t sanitized_cmd_length; + int timeout; + bool cancelled; + + const DICTIONARY_ITEM *host_function_acquired; + + // the collector + // we acquire this structure at the beginning, + // and we release it at the end + struct rrd_host_function *rdcf; + + struct { + BUFFER *wb; + + // in async mode, + // the function to call to send the result back + rrd_function_result_callback_t cb; + void *data; + } result; + + struct { + // to be called in sync mode + // while the function is running + // to check if the function has been cancelled + rrd_function_is_cancelled_cb_t cb; + void *data; + } is_cancelled; + + struct { + // to be registered by the function itself + // used to signal the function to cancel + rrd_function_canceller_cb_t cb; + void *data; + } canceller; +}; + +static DICTIONARY *rrd_functions_inflight_requests = NULL; + +static void rrd_functions_inflight_delete_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct rrd_function_inflight *r = value; + + // internal_error(true, "FUNCTIONS: transaction '%s' finished", r->transaction); + + freez((void *)r->transaction); + freez((void *)r->cmd); + freez((void *)r->sanitized_cmd); + 
dictionary_acquired_item_release(r->host->functions, r->host_function_acquired); +} + +void rrd_functions_inflight_init(void) { + if(rrd_functions_inflight_requests) + return; + + rrd_functions_inflight_requests = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct rrd_function_inflight)); + + dictionary_register_delete_callback(rrd_functions_inflight_requests, rrd_functions_inflight_delete_cb, NULL); +} + +void rrd_functions_inflight_destroy(void) { + if(!rrd_functions_inflight_requests) + return; + + dictionary_destroy(rrd_functions_inflight_requests); + rrd_functions_inflight_requests = NULL; +} + +static void rrd_inflight_async_function_register_canceller_cb(void *register_canceller_cb_data, rrd_function_canceller_cb_t canceller_cb, void *canceller_cb_data) { + struct rrd_function_inflight *r = register_canceller_cb_data; + r->canceller.cb = canceller_cb; + r->canceller.data = canceller_cb_data; +} + +// ---------------------------------------------------------------------------- +// waiting for async function completion + +struct rrd_function_call_wait { + RRDHOST *host; + const DICTIONARY_ITEM *host_function_acquired; + char *transaction; + + bool free_with_signal; + bool data_are_ready; + netdata_mutex_t mutex; + pthread_cond_t cond; + int code; +}; + +static void rrd_inflight_function_cleanup(RRDHOST *host __maybe_unused, + const DICTIONARY_ITEM *host_function_acquired __maybe_unused, + const char *transaction) { + dictionary_del(rrd_functions_inflight_requests, transaction); + dictionary_garbage_collect(rrd_functions_inflight_requests); +} + +static void rrd_function_call_wait_free(struct rrd_function_call_wait *tmp) { + rrd_inflight_function_cleanup(tmp->host, tmp->host_function_acquired, tmp->transaction); + freez(tmp->transaction); + + pthread_cond_destroy(&tmp->cond); + netdata_mutex_destroy(&tmp->mutex); + freez(tmp); +} + +static void rrd_async_function_signal_when_ready(BUFFER *temp_wb __maybe_unused, int code, void *callback_data) { struct rrd_function_call_wait *tmp = callback_data; bool we_should_free = false; @@ -618,140 +799,334 @@ static void rrd_call_function_signal_when_ready(BUFFER *temp_wb __maybe_unused, } } -int rrd_call_function_and_wait(RRDHOST *host, BUFFER *wb, int timeout, const char *name) { - int code; +static void rrd_inflight_async_function_nowait_finished(BUFFER *wb, int code, void *data) { + struct rrd_function_inflight *r = data; - struct rrd_collector_function *rdcf = NULL; + if(r->result.cb) + r->result.cb(wb, code, r->result.data); - char key[PLUGINSD_LINE_MAX + 1]; - size_t key_length = sanitize_function_text(key, name, PLUGINSD_LINE_MAX); - code = rrd_call_function_find(host, wb, key, key_length, &rdcf); - if(code != HTTP_RESP_OK) - return code; + rrd_inflight_function_cleanup(r->host, r->host_function_acquired, r->transaction); +} - if(timeout <= 0) - timeout = rdcf->timeout; +static bool rrd_inflight_async_function_is_cancelled(void *data) { + struct rrd_function_inflight *r = data; + return __atomic_load_n(&r->cancelled, __ATOMIC_RELAXED); +} - struct timespec tp; - clock_gettime(CLOCK_REALTIME, &tp); - tp.tv_sec += (time_t)timeout; +static inline int rrd_call_function_async_and_dont_wait(struct rrd_function_inflight *r) { + int code = r->rdcf->execute_cb(r->result.wb, r->timeout, r->sanitized_cmd, r->rdcf->execute_cb_data, + rrd_inflight_async_function_nowait_finished, r, + rrd_inflight_async_function_is_cancelled, r, + rrd_inflight_async_function_register_canceller_cb, r); - 
if(rdcf->sync) { - code = rdcf->function(wb, timeout, key, rdcf->collector_data, NULL, NULL); + if(code != HTTP_RESP_OK) { + if (!buffer_strlen(r->result.wb)) + rrd_call_function_error(r->result.wb, "Failed to send request to the collector.", code); + + rrd_inflight_function_cleanup(r->host, r->host_function_acquired, r->transaction); } - else { - struct rrd_function_call_wait *tmp = mallocz(sizeof(struct rrd_function_call_wait)); - tmp->free_with_signal = false; - tmp->data_are_ready = false; - netdata_mutex_init(&tmp->mutex); - pthread_cond_init(&tmp->cond, NULL); - - bool we_should_free = true; - BUFFER *temp_wb = buffer_create(PLUGINSD_LINE_MAX + 1, &netdata_buffers_statistics.buffers_functions); // we need it because we may give up on it - temp_wb->content_type = wb->content_type; - code = rdcf->function(temp_wb, timeout, key, rdcf->collector_data, rrd_call_function_signal_when_ready, tmp); - if (code == HTTP_RESP_OK) { - netdata_mutex_lock(&tmp->mutex); - - int rc = 0; - while (rc == 0 && !tmp->data_are_ready) { - // the mutex is unlocked within pthread_cond_timedwait() - rc = pthread_cond_timedwait(&tmp->cond, &tmp->mutex, &tp); - // the mutex is again ours - } - if (tmp->data_are_ready) { - // we have a response - buffer_fast_strcat(wb, buffer_tostring(temp_wb), buffer_strlen(temp_wb)); - wb->content_type = temp_wb->content_type; - wb->expires = temp_wb->expires; + return code; +} - if(wb->expires) - buffer_cacheable(wb); - else - buffer_no_cacheable(wb); +static int rrd_call_function_async_and_wait(struct rrd_function_inflight *r) { + struct timespec tp; + clock_gettime(CLOCK_REALTIME, &tp); + usec_t now_ut = tp.tv_sec * USEC_PER_SEC + tp.tv_nsec / NSEC_PER_USEC; + usec_t end_ut = now_ut + r->timeout * USEC_PER_SEC + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT; + + struct rrd_function_call_wait *tmp = mallocz(sizeof(struct rrd_function_call_wait)); + tmp->free_with_signal = false; + tmp->data_are_ready = false; + tmp->host = r->host; + tmp->host_function_acquired = r->host_function_acquired; + tmp->transaction = strdupz(r->transaction); + netdata_mutex_init(&tmp->mutex); + pthread_cond_init(&tmp->cond, NULL); + + // we need a temporary BUFFER, because we may time out and the caller supplied one may vanish + // so, we create a new one we guarantee will survive until the collector finishes... 
+ + bool we_should_free = true; + BUFFER *temp_wb = buffer_create(PLUGINSD_LINE_MAX + 1, &netdata_buffers_statistics.buffers_functions); // we need it because we may give up on it + temp_wb->content_type = r->result.wb->content_type; + + int code = r->rdcf->execute_cb(temp_wb, r->timeout, r->sanitized_cmd, r->rdcf->execute_cb_data, + // we overwrite the result callbacks, + // so that we can clean up the allocations made + rrd_async_function_signal_when_ready, tmp, + rrd_inflight_async_function_is_cancelled, r, + rrd_inflight_async_function_register_canceller_cb, r); + + if (code == HTTP_RESP_OK) { + netdata_mutex_lock(&tmp->mutex); + + bool cancelled = false; + int rc = 0; + while (rc == 0 && !cancelled && !tmp->data_are_ready) { + clock_gettime(CLOCK_REALTIME, &tp); + now_ut = tp.tv_sec * USEC_PER_SEC + tp.tv_nsec / NSEC_PER_USEC; + + if(now_ut >= end_ut) { + rc = ETIMEDOUT; + break; + } - code = tmp->code; + tp.tv_nsec += 10 * NSEC_PER_MSEC; + if(tp.tv_nsec > (long)(1 * NSEC_PER_SEC)) { + tp.tv_sec++; + tp.tv_nsec -= 1 * NSEC_PER_SEC; } - else if (rc == ETIMEDOUT) { - // timeout - // we will go away and let the callback free the structure - tmp->free_with_signal = true; - we_should_free = false; - code = rrd_call_function_error(wb, "Timeout while waiting for a response from the collector.", HTTP_RESP_GATEWAY_TIMEOUT); + + // the mutex is unlocked within pthread_cond_timedwait() + rc = pthread_cond_timedwait(&tmp->cond, &tmp->mutex, &tp); + // the mutex is again ours + + if(rc == ETIMEDOUT) { + rc = 0; + if (!tmp->data_are_ready && r->is_cancelled.cb && + r->is_cancelled.cb(r->is_cancelled.data)) { +// internal_error(true, "FUNCTIONS: transaction '%s' is cancelled while waiting for response", +// r->transaction); + rc = 0; + cancelled = true; + rrd_function_cancel(r->transaction); + break; + } } + } + + if (tmp->data_are_ready) { + // we have a response + buffer_fast_strcat(r->result.wb, buffer_tostring(temp_wb), buffer_strlen(temp_wb)); + r->result.wb->content_type = temp_wb->content_type; + r->result.wb->expires = temp_wb->expires; + + if(r->result.wb->expires) + buffer_cacheable(r->result.wb); else - code = rrd_call_function_error(wb, "Failed to get the response from the collector.", HTTP_RESP_INTERNAL_SERVER_ERROR); + buffer_no_cacheable(r->result.wb); - netdata_mutex_unlock(&tmp->mutex); + code = tmp->code; } - else { - if(!buffer_strlen(wb)) - rrd_call_function_error(wb, "Failed to send request to the collector.", code); + else if (rc == ETIMEDOUT || cancelled) { + // timeout + // we will go away and let the callback free the structure + tmp->free_with_signal = true; + we_should_free = false; + + if(cancelled) + code = rrd_call_function_error(r->result.wb, + "Request cancelled", + HTTP_RESP_CLIENT_CLOSED_REQUEST); + else + code = rrd_call_function_error(r->result.wb, + "Timeout while waiting for a response from the collector.", + HTTP_RESP_GATEWAY_TIMEOUT); } + else + code = rrd_call_function_error(r->result.wb, + "Internal error while communicating with the collector", + HTTP_RESP_INTERNAL_SERVER_ERROR); - if (we_should_free) { - rrd_function_call_wait_free(tmp); - buffer_free(temp_wb); - } + netdata_mutex_unlock(&tmp->mutex); + } + else { + if(!buffer_strlen(r->result.wb)) + rrd_call_function_error(r->result.wb, "The collector returned an error.", code); + } + + if (we_should_free) { + rrd_function_call_wait_free(tmp); + buffer_free(temp_wb); } return code; } -int rrd_call_function_async(RRDHOST *host, BUFFER *wb, int timeout, const char *name, - rrd_call_function_async_callback 
callback, void *callback_data) { +static inline int rrd_call_function_async(struct rrd_function_inflight *r, bool wait) { + if(wait) + return rrd_call_function_async_and_wait(r); + else + return rrd_call_function_async_and_dont_wait(r); +} + + +void call_virtual_function_async(BUFFER *wb, RRDHOST *host, const char *name, const char *payload, rrd_function_result_callback_t callback, void *callback_data); +// ---------------------------------------------------------------------------- + +int rrd_function_run(RRDHOST *host, BUFFER *result_wb, int timeout, const char *cmd, + bool wait, const char *transaction, + rrd_function_result_callback_t result_cb, void *result_cb_data, + rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, const char *payload) { + int code; + char sanitized_cmd[PLUGINSD_LINE_MAX + 1]; + const DICTIONARY_ITEM *host_function_acquired = NULL; - struct rrd_collector_function *rdcf = NULL; - char key[PLUGINSD_LINE_MAX + 1]; - size_t key_length = sanitize_function_text(key, name, PLUGINSD_LINE_MAX); - code = rrd_call_function_find(host, wb, key, key_length, &rdcf); + // ------------------------------------------------------------------------ + // find the function + + size_t sanitized_cmd_length = sanitize_function_text(sanitized_cmd, cmd, PLUGINSD_LINE_MAX); + + if (is_dyncfg_function(sanitized_cmd, DYNCFG_FUNCTION_TYPE_ALL)) { + call_virtual_function_async(result_wb, host, sanitized_cmd, payload, result_cb, result_cb_data); + return HTTP_RESP_OK; + } + + code = rrd_call_function_find(host, result_wb, sanitized_cmd, sanitized_cmd_length, &host_function_acquired); if(code != HTTP_RESP_OK) return code; + struct rrd_host_function *rdcf = dictionary_acquired_item_value(host_function_acquired); + if(timeout <= 0) timeout = rdcf->timeout; - code = rdcf->function(wb, timeout, key, rdcf->collector_data, callback, callback_data); - if(code != HTTP_RESP_OK) { - if (!buffer_strlen(wb)) - rrd_call_function_error(wb, "Failed to send request to the collector.", code); + // ------------------------------------------------------------------------ + // the function can only be executed in sync mode + + if(rdcf->sync) { + // the caller has to wait + + code = rdcf->execute_cb(result_wb, timeout, sanitized_cmd, rdcf->execute_cb_data, + result_cb, result_cb_data, + is_cancelled_cb, is_cancelled_cb_data, // it is ok to pass these, we block the caller + NULL, NULL); // no need to pass, we will wait + + if (code != HTTP_RESP_OK && !buffer_strlen(result_wb)) + rrd_call_function_error(result_wb, "Collector reported error.", code); + + dictionary_acquired_item_release(host->functions, host_function_acquired); + return code; } - return code; + + // ------------------------------------------------------------------------ + // the function can only be executed in async mode + // put the function into the inflight requests + + char uuid_str[UUID_COMPACT_STR_LEN]; + if(!transaction) { + uuid_t uuid; + uuid_generate_random(uuid); + uuid_unparse_lower_compact(uuid, uuid_str); + transaction = uuid_str; + } + + // put the request into the inflight requests + struct rrd_function_inflight t = { + .used = false, + .host = host, + .cmd = strdupz(cmd), + .sanitized_cmd = strdupz(sanitized_cmd), + .sanitized_cmd_length = sanitized_cmd_length, + .transaction = strdupz(transaction), + .timeout = timeout, + .cancelled = false, + .host_function_acquired = host_function_acquired, + .rdcf = rdcf, + .result = { + .wb = result_wb, + .cb = result_cb, + .data = result_cb_data, + }, + .is_cancelled = { + 
.cb = is_cancelled_cb, + .data = is_cancelled_cb_data, + } + }; + struct rrd_function_inflight *r = dictionary_set(rrd_functions_inflight_requests, transaction, &t, sizeof(t)); + if(r->used) { + netdata_log_info("FUNCTIONS: duplicate transaction '%s', function: '%s'", t.transaction, t.cmd); + code = rrd_call_function_error(result_wb, "duplicate transaction", HTTP_RESP_BAD_REQUEST); + freez((void *)t.transaction); + freez((void *)t.cmd); + freez((void *)t.sanitized_cmd); + dictionary_acquired_item_release(r->host->functions, t.host_function_acquired); + return code; + } + r->used = true; + // internal_error(true, "FUNCTIONS: transaction '%s' started", r->transaction); + + return rrd_call_function_async(r, wait); } -static void functions2json(DICTIONARY *functions, BUFFER *wb, const char *ident, const char *kq, const char *sq) { - struct rrd_collector_function *t; - dfe_start_read(functions, t) { - if(!t->collector->running) continue; +void rrd_function_cancel(const char *transaction) { + // internal_error(true, "FUNCTIONS: request to cancel transaction '%s'", transaction); + + const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(rrd_functions_inflight_requests, transaction); + if(!item) { + netdata_log_info("FUNCTIONS: received a cancel request for transaction '%s', but the transaction is not running.", + transaction); + return; + } - if(t_dfe.counter) - buffer_strcat(wb, ",\n"); + struct rrd_function_inflight *r = dictionary_acquired_item_value(item); - buffer_sprintf(wb, "%s%s%s%s: {", ident, kq, t_dfe.name, kq); - buffer_sprintf(wb, "\n\t%s%shelp%s: %s%s%s", ident, kq, kq, sq, string2str(t->help), sq); - buffer_sprintf(wb, ",\n\t%s%stimeout%s: %d", ident, kq, kq, t->timeout); - buffer_sprintf(wb, ",\n\t%s%soptions%s: \"%s%s\"", ident, kq, kq - , (t->options & RRD_FUNCTION_LOCAL)?"LOCAL ":"" - , (t->options & RRD_FUNCTION_GLOBAL)?"GLOBAL ":"" - ); - buffer_sprintf(wb, "\n%s}", ident); + bool cancelled = __atomic_load_n(&r->cancelled, __ATOMIC_RELAXED); + if(cancelled) { + netdata_log_info("FUNCTIONS: received a cancel request for transaction '%s', but it is already cancelled.", + transaction); + goto cleanup; + } + + __atomic_store_n(&r->cancelled, true, __ATOMIC_RELAXED); + + int32_t expected = __atomic_load_n(&r->rdcf->collector->refcount_canceller, __ATOMIC_RELAXED); + int32_t wanted; + do { + if(expected < 0) { + netdata_log_info("FUNCTIONS: received a cancel request for transaction '%s', but the collector is not running.", + transaction); + goto cleanup; + } + + wanted = expected + 1; + } while(!__atomic_compare_exchange_n(&r->rdcf->collector->refcount_canceller, &expected, wanted, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)); + + if(r->canceller.cb) + r->canceller.cb(r->canceller.data); + + __atomic_sub_fetch(&r->rdcf->collector->refcount_canceller, 1, __ATOMIC_RELAXED); + +cleanup: + dictionary_acquired_item_release(rrd_functions_inflight_requests, item); +} + +// ---------------------------------------------------------------------------- + +static void functions2json(DICTIONARY *functions, BUFFER *wb) +{ + struct rrd_host_function *t; + dfe_start_read(functions, t) + { + if (!rrd_collector_running(t->collector)) + continue; + + buffer_json_member_add_object(wb, t_dfe.name); + buffer_json_member_add_string_or_empty(wb, "help", string2str(t->help)); + buffer_json_member_add_int64(wb, "timeout", (int64_t)t->timeout); + + char options[65]; + snprintfz( + options, + 64, + "%s%s", + (t->options & RRD_FUNCTION_LOCAL) ? "LOCAL " : "", + (t->options & RRD_FUNCTION_GLOBAL) ? 
"GLOBAL" : ""); + + buffer_json_member_add_string_or_empty(wb, "options", options); + buffer_json_object_close(wb); } dfe_done(t); - buffer_strcat(wb, "\n"); } -void chart_functions2json(RRDSET *st, BUFFER *wb, int tabs, const char *kq, const char *sq) { +void chart_functions2json(RRDSET *st, BUFFER *wb) { if(!st || !st->functions_view) return; - char ident[tabs + 1]; - ident[tabs] = '\0'; - while(tabs) ident[--tabs] = '\t'; - - functions2json(st->functions_view, wb, ident, kq, sq); + functions2json(st->functions_view, wb); } void host_functions2json(RRDHOST *host, BUFFER *wb) { @@ -759,9 +1134,9 @@ void host_functions2json(RRDHOST *host, BUFFER *wb) { buffer_json_member_add_object(wb, "functions"); - struct rrd_collector_function *t; + struct rrd_host_function *t; dfe_start_read(host->functions, t) { - if(!t->collector->running) continue; + if(!rrd_collector_running(t->collector)) continue; buffer_json_member_add_object(wb, t_dfe.name); buffer_json_member_add_string(wb, "help", string2str(t->help)); @@ -782,9 +1157,9 @@ void host_functions2json(RRDHOST *host, BUFFER *wb) { void chart_functions_to_dict(DICTIONARY *rrdset_functions_view, DICTIONARY *dst, void *value, size_t value_size) { if(!rrdset_functions_view || !dst) return; - struct rrd_collector_function *t; + struct rrd_host_function *t; dfe_start_read(rrdset_functions_view, t) { - if(!t->collector->running) continue; + if(!rrd_collector_running(t->collector)) continue; dictionary_set(dst, t_dfe.name, value, value_size); } @@ -794,9 +1169,9 @@ void chart_functions_to_dict(DICTIONARY *rrdset_functions_view, DICTIONARY *dst, void host_functions_to_dict(RRDHOST *host, DICTIONARY *dst, void *value, size_t value_size, STRING **help) { if(!host || !host->functions || !dictionary_entries(host->functions) || !dst) return; - struct rrd_collector_function *t; + struct rrd_host_function *t; dfe_start_read(host->functions, t) { - if(!t->collector->running) continue; + if(!rrd_collector_running(t->collector)) continue; if(help) *help = t->help; @@ -806,10 +1181,15 @@ void host_functions_to_dict(RRDHOST *host, DICTIONARY *dst, void *value, size_t dfe_done(t); } +// ---------------------------------------------------------------------------- int rrdhost_function_streaming(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused, void *collector_data __maybe_unused, - function_data_ready_callback callback __maybe_unused, void *callback_data __maybe_unused) { + rrd_function_result_callback_t result_cb, void *result_cb_data, + rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, + rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused, + void *register_canceller_cb_data __maybe_unused) { + time_t now = now_realtime_sec(); buffer_flush(wb); @@ -993,19 +1373,19 @@ int rrdhost_function_streaming(BUFFER *wb, int timeout __maybe_unused, const cha RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS, 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); + RRDF_FIELD_OPTS_NONE, NULL); buffer_rrdf_table_add_field(wb, field_id++, "dbTo", "DB Data Retention To", RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS, 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); + RRDF_FIELD_OPTS_NONE, NULL); buffer_rrdf_table_add_field(wb, field_id++, "dbDuration", "DB Data Retention Duration", 
RRDF_FIELD_TYPE_DURATION, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DURATION_S, 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); + RRDF_FIELD_OPTS_VISIBLE, NULL); buffer_rrdf_table_add_field(wb, field_id++, "dbMetrics", "Time-series Metrics in the DB", RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, @@ -1428,8 +1808,14 @@ int rrdhost_function_streaming(BUFFER *wb, int timeout __maybe_unused, const cha buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1); buffer_json_finalize(wb); - if(callback) - callback(wb, HTTP_RESP_OK, callback_data); + int response = HTTP_RESP_OK; + if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) { + buffer_flush(wb); + response = HTTP_RESP_CLIENT_CLOSED_REQUEST; + } - return HTTP_RESP_OK; + if(result_cb) + result_cb(wb, response, result_cb_data); + + return response; } diff --git a/database/rrdfunctions.h b/database/rrdfunctions.h index 71ad96507fbabf..21ca5c73463380 100644 --- a/database/rrdfunctions.h +++ b/database/rrdfunctions.h @@ -1,31 +1,46 @@ +// SPDX-License-Identifier: GPL-3.0-or-later #ifndef NETDATA_RRDFUNCTIONS_H #define NETDATA_RRDFUNCTIONS_H 1 +// ---------------------------------------------------------------------------- + #include "rrd.h" -void rrdfunctions_init(RRDHOST *host); -void rrdfunctions_destroy(RRDHOST *host); +#define RRDFUNCTIONS_TIMEOUT_EXTENSION_UT (1 * USEC_PER_SEC) -void rrd_collector_started(void); -void rrd_collector_finished(void); +typedef void (*rrd_function_result_callback_t)(BUFFER *wb, int code, void *result_cb_data); +typedef bool (*rrd_function_is_cancelled_cb_t)(void *is_cancelled_cb_data); +typedef void (*rrd_function_canceller_cb_t)(void *data); +typedef void (*rrd_function_register_canceller_cb_t)(void *register_cancel_cb_data, rrd_function_canceller_cb_t cancel_cb, void *cancel_cb_data); +typedef int (*rrd_function_execute_cb_t)(BUFFER *wb, int timeout, const char *function, void *collector_data, + rrd_function_result_callback_t result_cb, void *result_cb_data, + rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, + rrd_function_register_canceller_cb_t register_cancel_cb, void *register_cancel_db_data); -typedef void (*function_data_ready_callback)(BUFFER *wb, int code, void *callback_data); +void rrd_functions_inflight_init(void); +void rrdfunctions_host_init(RRDHOST *host); +void rrdfunctions_host_destroy(RRDHOST *host); -typedef int (*function_execute_at_collector)(BUFFER *wb, int timeout, const char *function, void *collector_data, - function_data_ready_callback callback, void *callback_data); +void rrd_collector_started(void); +void rrd_collector_finished(void); -void rrd_collector_add_function(RRDHOST *host, RRDSET *st, const char *name, int timeout, const char *help, - bool sync, function_execute_at_collector function, void *collector_data); +// add a function, to be run from the collector +void rrd_function_add(RRDHOST *host, RRDSET *st, const char *name, int timeout, const char *help, + bool sync, rrd_function_execute_cb_t execute_cb, void *execute_cb_data); -int rrd_call_function_and_wait(RRDHOST *host, BUFFER *wb, int timeout, const char *name); +// call a function, to be run from anywhere +int rrd_function_run(RRDHOST *host, BUFFER *result_wb, int timeout, const char *cmd, + bool wait, const char *transaction, + rrd_function_result_callback_t result_cb, void *result_cb_data, + rrd_function_is_cancelled_cb_t is_cancelled_cb, void 
*is_cancelled_cb_data, const char *payload); -typedef void (*rrd_call_function_async_callback)(BUFFER *wb, int code, void *callback_data); -int rrd_call_function_async(RRDHOST *host, BUFFER *wb, int timeout, const char *name, rrd_call_function_async_callback, void *callback_data); +// cancel a running function, to be run from anywhere +void rrd_function_cancel(const char *transaction); void rrd_functions_expose_rrdpush(RRDSET *st, BUFFER *wb); void rrd_functions_expose_global_rrdpush(RRDHOST *host, BUFFER *wb); -void chart_functions2json(RRDSET *st, BUFFER *wb, int tabs, const char *kq, const char *sq); +void chart_functions2json(RRDSET *st, BUFFER *wb); void chart_functions_to_dict(DICTIONARY *rrdset_functions_view, DICTIONARY *dst, void *value, size_t value_size); void host_functions_to_dict(RRDHOST *host, DICTIONARY *dst, void *value, size_t value_size, STRING **help); void host_functions2json(RRDHOST *host, BUFFER *wb); @@ -35,7 +50,9 @@ const char *functions_content_type_to_format(HTTP_CONTENT_TYPE content_type); int rrd_call_function_error(BUFFER *wb, const char *msg, int code); int rrdhost_function_streaming(BUFFER *wb, int timeout, const char *function, void *collector_data, - function_data_ready_callback callback, void *callback_data); + rrd_function_result_callback_t result_cb, void *result_cb_data, + rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, + rrd_function_register_canceller_cb_t register_canceller_cb, void *register_canceller_cb_data); #define RRDFUNCTIONS_STREAMING_HELP "Streaming status for parents and children." diff --git a/database/rrdhost.c b/database/rrdhost.c index bec821ccc04581..a3c2721536b941 100644 --- a/database/rrdhost.c +++ b/database/rrdhost.c @@ -31,6 +31,7 @@ netdata_rwlock_t rrd_rwlock = NETDATA_RWLOCK_INITIALIZER; time_t rrdset_free_obsolete_time_s = 3600; time_t rrdhost_free_orphan_time_s = 3600; +time_t rrdhost_free_ephemeral_time_s = 86400; bool is_storage_engine_shared(STORAGE_INSTANCE *engine __maybe_unused) { #ifdef ENABLE_DBENGINE @@ -80,8 +81,6 @@ static inline void rrdhost_init() { } RRDHOST_ACQUIRED *rrdhost_find_and_acquire(const char *machine_guid) { - netdata_log_debug(D_RRD_CALLS, "rrdhost_find_and_acquire() host %s", machine_guid); - return (RRDHOST_ACQUIRED *)dictionary_get_and_acquire_item(rrdhost_root_index, machine_guid); } @@ -116,8 +115,9 @@ static inline RRDHOST *rrdhost_index_add_by_guid(RRDHOST *host) { rrdhost_option_set(host, RRDHOST_OPTION_INDEXED_MACHINE_GUID); else { rrdhost_option_clear(host, RRDHOST_OPTION_INDEXED_MACHINE_GUID); - netdata_log_error("RRDHOST: %s() host with machine guid '%s' is already indexed", - __FUNCTION__, host->machine_guid); + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "RRDHOST: host with machine guid '%s' is already indexed. 
Not adding it again.", + host->machine_guid); } return host; @@ -126,8 +126,9 @@ static inline RRDHOST *rrdhost_index_add_by_guid(RRDHOST *host) { static void rrdhost_index_del_by_guid(RRDHOST *host) { if(rrdhost_option_check(host, RRDHOST_OPTION_INDEXED_MACHINE_GUID)) { if(!dictionary_del(rrdhost_root_index, host->machine_guid)) - netdata_log_error("RRDHOST: %s() failed to delete machine guid '%s' from index", - __FUNCTION__, host->machine_guid); + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "RRDHOST: failed to delete machine guid '%s' from index", + host->machine_guid); rrdhost_option_clear(host, RRDHOST_OPTION_INDEXED_MACHINE_GUID); } @@ -148,8 +149,9 @@ static inline void rrdhost_index_del_hostname(RRDHOST *host) { if(rrdhost_option_check(host, RRDHOST_OPTION_INDEXED_HOSTNAME)) { if(!dictionary_del(rrdhost_root_index_hostname, rrdhost_hostname(host))) - netdata_log_error("RRDHOST: %s() failed to delete hostname '%s' from index", - __FUNCTION__, rrdhost_hostname(host)); + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "RRDHOST: failed to delete hostname '%s' from index", + rrdhost_hostname(host)); rrdhost_option_clear(host, RRDHOST_OPTION_INDEXED_HOSTNAME); } @@ -303,11 +305,11 @@ static RRDHOST *rrdhost_create( int is_localhost, bool archived ) { - netdata_log_debug(D_RRDHOST, "Host '%s': adding with guid '%s'", hostname, guid); - if(memory_mode == RRD_MEMORY_MODE_DBENGINE && !dbengine_enabled) { - netdata_log_error("memory mode 'dbengine' is not enabled, but host '%s' is configured for it. Falling back to 'alloc'", - hostname); + nd_log(NDLS_DAEMON, NDLP_ERR, + "memory mode 'dbengine' is not enabled, but host '%s' is configured for it. Falling back to 'alloc'", + hostname); + memory_mode = RRD_MEMORY_MODE_ALLOC; } @@ -331,8 +333,12 @@ int is_legacy = 1; host->rrd_history_entries = align_entries_to_pagesize(memory_mode, entries); host->health.health_enabled = ((memory_mode == RRD_MEMORY_MODE_NONE)) ? 0 : health_enabled; + netdata_mutex_init(&host->aclk_state_lock); + netdata_mutex_init(&host->receiver_lock); + if (likely(!archived)) { - rrdfunctions_init(host); + rrdfunctions_host_init(host); + host->last_connected = now_realtime_sec(); host->rrdlabels = rrdlabels_create(); rrdhost_initialize_rrdpush_sender( host, rrdpush_enabled, rrdpush_destination, rrdpush_api_key, rrdpush_send_charts_matching); @@ -361,9 +367,6 @@ int is_legacy = 1; break; } - netdata_mutex_init(&host->aclk_state_lock); - netdata_mutex_init(&host->receiver_lock); - host->system_info = system_info; rrdset_index_init(host); @@ -391,7 +394,9 @@ int is_legacy = 1; (host->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE && is_legacy))) { int r = mkdir(host->cache_dir, 0775); if(r != 0 && errno != EEXIST) - netdata_log_error("Host '%s': cannot create directory '%s'", rrdhost_hostname(host), host->cache_dir); + nd_log(NDLS_DAEMON, NDLP_CRIT, + "Host '%s': cannot create directory '%s'", + rrdhost_hostname(host), host->cache_dir); } } @@ -417,7 +422,9 @@ int is_legacy = 1; ret = mkdir(dbenginepath, 0775); if (ret != 0 && errno != EEXIST) - netdata_log_error("Host '%s': cannot create directory '%s'", rrdhost_hostname(host), dbenginepath); + nd_log(NDLS_DAEMON, NDLP_CRIT, + "Host '%s': cannot create directory '%s'", + rrdhost_hostname(host), dbenginepath); else ret = 0; // succeed @@ -458,8 +465,9 @@ int is_legacy = 1; } if (ret) { // check legacy or multihost initialization success - netdata_log_error("Host '%s': cannot initialize host with machine guid '%s'. 
Failed to initialize DB engine at '%s'.", - rrdhost_hostname(host), host->machine_guid, host->cache_dir); + nd_log(NDLS_DAEMON, NDLP_CRIT, + "Host '%s': cannot initialize host with machine guid '%s'. Failed to initialize DB engine at '%s'.", + rrdhost_hostname(host), host->machine_guid, host->cache_dir); rrd_wrlock(); rrdhost_free___while_having_rrd_wrlock(host, true); @@ -507,10 +515,13 @@ int is_legacy = 1; RRDHOST *t = rrdhost_index_add_by_guid(host); if(t != host) { - netdata_log_error("Host '%s': cannot add host with machine guid '%s' to index. It already exists as host '%s' with machine guid '%s'.", - rrdhost_hostname(host), host->machine_guid, rrdhost_hostname(t), t->machine_guid); + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "Host '%s': cannot add host with machine guid '%s' to index. It already exists as host '%s' with machine guid '%s'.", + rrdhost_hostname(host), host->machine_guid, rrdhost_hostname(t), t->machine_guid); + if (!is_localhost) rrdhost_free___while_having_rrd_wrlock(host, true); + rrd_unlock(); return NULL; } @@ -526,21 +537,22 @@ int is_legacy = 1; // ------------------------------------------------------------------------ - netdata_log_info("Host '%s' (at registry as '%s') with guid '%s' initialized" - ", os '%s'" - ", timezone '%s'" - ", tags '%s'" - ", program_name '%s'" - ", program_version '%s'" - ", update every %d" - ", memory mode %s" - ", history entries %d" - ", streaming %s" - " (to '%s' with api key '%s')" - ", health %s" - ", cache_dir '%s'" - ", alarms default handler '%s'" - ", alarms default recipient '%s'" + nd_log(NDLS_DAEMON, NDLP_INFO, + "Host '%s' (at registry as '%s') with guid '%s' initialized" + ", os '%s'" + ", timezone '%s'" + ", tags '%s'" + ", program_name '%s'" + ", program_version '%s'" + ", update every %d" + ", memory mode %s" + ", history entries %d" + ", streaming %s" + " (to '%s' with api key '%s')" + ", health %s" + ", cache_dir '%s'" + ", alarms default handler '%s'" + ", alarms default recipient '%s'" , rrdhost_hostname(host) , rrdhost_registry_hostname(host) , host->machine_guid @@ -561,6 +573,9 @@ int is_legacy = 1; , string2str(host->health.health_default_recipient) ); + host->configurable_plugins = dyncfg_dictionary_create(); + dictionary_register_delete_callback(host->configurable_plugins, plugin_del_cb, NULL); + if(!archived) { metaqueue_host_update_info(host); rrdhost_load_rrdcontext_data(host); @@ -617,44 +632,56 @@ static void rrdhost_update(RRDHOST *host host->registry_hostname = string_strdupz((registry_hostname && *registry_hostname)?registry_hostname:hostname); if(strcmp(rrdhost_hostname(host), hostname) != 0) { - netdata_log_info("Host '%s' has been renamed to '%s'. If this is not intentional it may mean multiple hosts are using the same machine_guid.", rrdhost_hostname(host), hostname); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "Host '%s' has been renamed to '%s'. 
If this is not intentional it may mean multiple hosts are using the same machine_guid.", + rrdhost_hostname(host), hostname); + rrdhost_init_hostname(host, hostname, true); } else { rrdhost_index_add_hostname(host); } if(strcmp(rrdhost_program_name(host), program_name) != 0) { - netdata_log_info("Host '%s' switched program name from '%s' to '%s'", rrdhost_hostname(host), rrdhost_program_name(host), program_name); + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "Host '%s' switched program name from '%s' to '%s'", + rrdhost_hostname(host), rrdhost_program_name(host), program_name); + STRING *t = host->program_name; host->program_name = string_strdupz(program_name); string_freez(t); } if(strcmp(rrdhost_program_version(host), program_version) != 0) { - netdata_log_info("Host '%s' switched program version from '%s' to '%s'", rrdhost_hostname(host), rrdhost_program_version(host), program_version); + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "Host '%s' switched program version from '%s' to '%s'", + rrdhost_hostname(host), rrdhost_program_version(host), program_version); + STRING *t = host->program_version; host->program_version = string_strdupz(program_version); string_freez(t); } if(host->rrd_update_every != update_every) - netdata_log_error("Host '%s' has an update frequency of %d seconds, but the wanted one is %d seconds. " - "Restart netdata here to apply the new settings.", - rrdhost_hostname(host), host->rrd_update_every, update_every); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "Host '%s' has an update frequency of %d seconds, but the wanted one is %d seconds. " + "Restart netdata here to apply the new settings.", + rrdhost_hostname(host), host->rrd_update_every, update_every); if(host->rrd_memory_mode != mode) - netdata_log_error("Host '%s' has memory mode '%s', but the wanted one is '%s'. " - "Restart netdata here to apply the new settings.", - rrdhost_hostname(host), - rrd_memory_mode_name(host->rrd_memory_mode), - rrd_memory_mode_name(mode)); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "Host '%s' has memory mode '%s', but the wanted one is '%s'. " + "Restart netdata here to apply the new settings.", + rrdhost_hostname(host), + rrd_memory_mode_name(host->rrd_memory_mode), + rrd_memory_mode_name(mode)); else if(host->rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE && host->rrd_history_entries < history) - netdata_log_error("Host '%s' has history of %d entries, but the wanted one is %ld entries. " - "Restart netdata here to apply the new settings.", - rrdhost_hostname(host), - host->rrd_history_entries, - history); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "Host '%s' has history of %d entries, but the wanted one is %ld entries. 
" + "Restart netdata here to apply the new settings.", + rrdhost_hostname(host), + host->rrd_history_entries, + history); // update host tags rrdhost_init_tags(host, tags); @@ -662,10 +689,12 @@ static void rrdhost_update(RRDHOST *host if(!host->rrdvars) host->rrdvars = rrdvariables_create(); + host->last_connected = now_realtime_sec(); + if (rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED)) { rrdhost_flag_clear(host, RRDHOST_FLAG_ARCHIVED); - rrdfunctions_init(host); + rrdfunctions_host_init(host); if(!host->rrdlabels) host->rrdlabels = rrdlabels_create(); @@ -694,7 +723,9 @@ static void rrdhost_update(RRDHOST *host ml_host_new(host); rrdhost_load_rrdcontext_data(host); - netdata_log_info("Host %s is not in archived mode anymore", rrdhost_hostname(host)); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "Host %s is not in archived mode anymore", + rrdhost_hostname(host)); } spinlock_unlock(&host->rrdhost_update_lock); @@ -725,8 +756,6 @@ RRDHOST *rrdhost_find_or_create( , struct rrdhost_system_info *system_info , bool archived ) { - netdata_log_debug(D_RRDHOST, "Searching for host '%s' with guid '%s'", hostname, guid); - RRDHOST *host = rrdhost_find_by_guid(guid); if (unlikely(host && host->rrd_memory_mode != mode && rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED))) { @@ -734,10 +763,11 @@ RRDHOST *rrdhost_find_or_create( return host; /* If a legacy memory mode instantiates all dbengine state must be discarded to avoid inconsistencies */ - netdata_log_error("Archived host '%s' has memory mode '%s', but the wanted one is '%s'. Discarding archived state.", - rrdhost_hostname(host), - rrd_memory_mode_name(host->rrd_memory_mode), - rrd_memory_mode_name(mode)); + nd_log(NDLS_DAEMON, NDLP_INFO, + "Archived host '%s' has memory mode '%s', but the wanted one is '%s'. Discarding archived state.", + rrdhost_hostname(host), + rrd_memory_mode_name(host->rrd_memory_mode), + rrd_memory_mode_name(mode)); rrd_wrlock(); rrdhost_free___while_having_rrd_wrlock(host, true); @@ -809,7 +839,7 @@ inline int rrdhost_should_be_removed(RRDHOST *host, RRDHOST *protected_host, tim && rrdhost_receiver_replicating_charts(host) == 0 && rrdhost_sender_replicating_charts(host) == 0 && rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN) - && !rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED) + && !rrdhost_flag_check(host, RRDHOST_FLAG_PENDING_CONTEXT_LOAD) && !host->receiver && host->child_disconnected_time && host->child_disconnected_time + rrdhost_free_orphan_time_s < now_s) @@ -845,18 +875,26 @@ void dbengine_init(char *hostname) { if (read_num > 0 && read_num <= MAX_PAGES_PER_EXTENT) rrdeng_pages_per_extent = read_num; else { - netdata_log_error("Invalid dbengine pages per extent %u given. Using %u.", read_num, rrdeng_pages_per_extent); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "Invalid dbengine pages per extent %u given. Using %u.", + read_num, rrdeng_pages_per_extent); + config_set_number(CONFIG_SECTION_DB, "dbengine pages per extent", rrdeng_pages_per_extent); } storage_tiers = config_get_number(CONFIG_SECTION_DB, "storage tiers", storage_tiers); if(storage_tiers < 1) { - netdata_log_error("At least 1 storage tier is required. Assuming 1."); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "At least 1 storage tier is required. Assuming 1."); + storage_tiers = 1; config_set_number(CONFIG_SECTION_DB, "storage tiers", storage_tiers); } if(storage_tiers > RRD_STORAGE_TIERS) { - netdata_log_error("Up to %d storage tier are supported. 
Assuming %d.", RRD_STORAGE_TIERS, RRD_STORAGE_TIERS); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "Up to %d storage tier are supported. Assuming %d.", + RRD_STORAGE_TIERS, RRD_STORAGE_TIERS); + storage_tiers = RRD_STORAGE_TIERS; config_set_number(CONFIG_SECTION_DB, "storage tiers", storage_tiers); } @@ -878,7 +916,9 @@ void dbengine_init(char *hostname) { int ret = mkdir(dbenginepath, 0775); if (ret != 0 && errno != EEXIST) { - netdata_log_error("DBENGINE on '%s': cannot create directory '%s'", hostname, dbenginepath); + nd_log(NDLS_DAEMON, NDLP_CRIT, + "DBENGINE on '%s': cannot create directory '%s'", + hostname, dbenginepath); break; } @@ -890,26 +930,29 @@ void dbengine_init(char *hostname) { RRD_BACKFILL backfill = storage_tiers_backfill[tier]; if(tier > 0) { - snprintfz(dbengineconfig, 200, "dbengine tier %zu multihost disk space MB", tier); + snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu multihost disk space MB", tier); disk_space_mb = config_get_number(CONFIG_SECTION_DB, dbengineconfig, disk_space_mb); - snprintfz(dbengineconfig, 200, "dbengine tier %zu update every iterations", tier); + snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu update every iterations", tier); grouping_iterations = config_get_number(CONFIG_SECTION_DB, dbengineconfig, grouping_iterations); if(grouping_iterations < 2) { grouping_iterations = 2; config_set_number(CONFIG_SECTION_DB, dbengineconfig, grouping_iterations); - netdata_log_error("DBENGINE on '%s': 'dbegnine tier %zu update every iterations' cannot be less than 2. Assuming 2.", - hostname, - tier); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "DBENGINE on '%s': 'dbegnine tier %zu update every iterations' cannot be less than 2. Assuming 2.", + hostname, tier); } - snprintfz(dbengineconfig, 200, "dbengine tier %zu backfill", tier); + snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu backfill", tier); const char *bf = config_get(CONFIG_SECTION_DB, dbengineconfig, backfill == RRD_BACKFILL_NEW ? "new" : backfill == RRD_BACKFILL_FULL ? "full" : "none"); if(strcmp(bf, "new") == 0) backfill = RRD_BACKFILL_NEW; else if(strcmp(bf, "full") == 0) backfill = RRD_BACKFILL_FULL; else if(strcmp(bf, "none") == 0) backfill = RRD_BACKFILL_NONE; else { - netdata_log_error("DBENGINE: unknown backfill value '%s', assuming 'new'", bf); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "DBENGINE: unknown backfill value '%s', assuming 'new'", + bf); + config_set(CONFIG_SECTION_DB, dbengineconfig, "new"); backfill = RRD_BACKFILL_NEW; } @@ -920,10 +963,10 @@ void dbengine_init(char *hostname) { if(tier > 0 && get_tier_grouping(tier) > 65535) { storage_tiers_grouping_iterations[tier] = 1; - netdata_log_error("DBENGINE on '%s': dbengine tier %zu gives aggregation of more than 65535 points of tier 0. Disabling tiers above %zu", - hostname, - tier, - tier); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "DBENGINE on '%s': dbengine tier %zu gives aggregation of more than 65535 points of tier 0. 
" + "Disabling tiers above %zu", + hostname, tier, tier); break; } @@ -951,21 +994,19 @@ void dbengine_init(char *hostname) { netdata_thread_join(tiers_init[tier].thread, &ptr); if(tiers_init[tier].ret != 0) { - netdata_log_error("DBENGINE on '%s': Failed to initialize multi-host database tier %zu on path '%s'", - hostname, - tiers_init[tier].tier, - tiers_init[tier].path); + nd_log(NDLS_DAEMON, NDLP_ERR, + "DBENGINE on '%s': Failed to initialize multi-host database tier %zu on path '%s'", + hostname, tiers_init[tier].tier, tiers_init[tier].path); } else if(created_tiers == tier) created_tiers++; } if(created_tiers && created_tiers < storage_tiers) { - netdata_log_error("DBENGINE on '%s': Managed to create %zu tiers instead of %zu. Continuing with %zu available.", - hostname, - created_tiers, - storage_tiers, - created_tiers); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "DBENGINE on '%s': Managed to create %zu tiers instead of %zu. Continuing with %zu available.", + hostname, created_tiers, storage_tiers, created_tiers); + storage_tiers = created_tiers; } else if(!created_tiers) @@ -978,7 +1019,10 @@ void dbengine_init(char *hostname) { #else storage_tiers = config_get_number(CONFIG_SECTION_DB, "storage tiers", 1); if(storage_tiers != 1) { - netdata_log_error("DBENGINE is not available on '%s', so only 1 database tier can be supported.", hostname); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "DBENGINE is not available on '%s', so only 1 database tier can be supported.", + hostname); + storage_tiers = 1; config_set_number(CONFIG_SECTION_DB, "storage tiers", storage_tiers); } @@ -994,7 +1038,9 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unitt set_late_global_environment(system_info); fatal("Failed to initialize SQLite"); } - netdata_log_info("Skipping SQLITE metadata initialization since memory mode is not dbengine"); + + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "Skipping SQLITE metadata initialization since memory mode is not dbengine"); } if (unlikely(sql_init_context_database(system_info ? 0 : 1))) { @@ -1009,23 +1055,28 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unitt rrdpush_init(); if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE || rrdpush_receiver_needs_dbengine()) { - netdata_log_info("DBENGINE: Initializing ..."); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "DBENGINE: Initializing ..."); + dbengine_init(hostname); } - else { - netdata_log_info("DBENGINE: Not initializing ..."); + else storage_tiers = 1; - } if (!dbengine_enabled) { if (storage_tiers > 1) { - netdata_log_error("dbengine is not enabled, but %zu tiers have been requested. Resetting tiers to 1", - storage_tiers); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "dbengine is not enabled, but %zu tiers have been requested. Resetting tiers to 1", + storage_tiers); + storage_tiers = 1; } if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) { - netdata_log_error("dbengine is not enabled, but it has been given as the default db mode. Resetting db mode to alloc"); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "dbengine is not enabled, but it has been given as the default db mode. 
" + "Resetting db mode to alloc"); + default_rrd_memory_mode = RRD_MEMORY_MODE_ALLOC; } } @@ -1034,7 +1085,6 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unitt if(!unittest) metadata_sync_init(); - netdata_log_debug(D_RRDHOST, "Initializing localhost with hostname '%s'", hostname); localhost = rrdhost_create( hostname , registry_get_this_machine_hostname() @@ -1066,14 +1116,12 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unitt return 1; } -#ifdef NETDATA_DEV_MODE // we register this only on localhost // for the other nodes, the origin server should register it rrd_collector_started(); // this creates a collector that runs for as long as netdata runs - rrd_collector_add_function(localhost, NULL, "streaming", 10, - RRDFUNCTIONS_STREAMING_HELP, true, - rrdhost_function_streaming, NULL); -#endif + rrd_function_add(localhost, NULL, "streaming", 10, + RRDFUNCTIONS_STREAMING_HELP, true, + rrdhost_function_streaming, NULL); if (likely(system_info)) { migrate_localhost(&localhost->host_uuid); @@ -1139,13 +1187,10 @@ static void rrdhost_streaming_sender_structures_init(RRDHOST *host) host->sender->rrdpush_sender_pipe[PIPE_READ] = -1; host->sender->rrdpush_sender_pipe[PIPE_WRITE] = -1; host->sender->rrdpush_sender_socket = -1; + host->sender->disabled_capabilities = STREAM_CAP_NONE; -#ifdef ENABLE_RRDPUSH_COMPRESSION - if(default_rrdpush_compression_enabled) - host->sender->flags |= SENDER_FLAG_COMPRESSION; - else - host->sender->flags &= ~SENDER_FLAG_COMPRESSION; -#endif + if(!default_rrdpush_compression_enabled) + host->sender->disabled_capabilities |= STREAM_CAP_COMPRESSIONS_AVAILABLE; spinlock_init(&host->sender->spinlock); replication_init_sender(host->sender); @@ -1160,9 +1205,9 @@ static void rrdhost_streaming_sender_structures_free(RRDHOST *host) rrdpush_sender_thread_stop(host, STREAM_HANDSHAKE_DISCONNECT_HOST_CLEANUP, true); // stop a possibly running thread cbuffer_free(host->sender->buffer); -#ifdef ENABLE_RRDPUSH_COMPRESSION + rrdpush_compressor_destroy(&host->sender->compressor); -#endif + replication_cleanup_sender(host->sender); __atomic_sub_fetch(&netdata_buffers_statistics.rrdhost_senders, sizeof(*host->sender), __ATOMIC_RELAXED); @@ -1176,7 +1221,9 @@ void rrdhost_free___while_having_rrd_wrlock(RRDHOST *host, bool force) { if(!host) return; if (netdata_exit || force) { - netdata_log_info("RRD: 'host:%s' freeing memory...", rrdhost_hostname(host)); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "RRD: 'host:%s' freeing memory...", + rrdhost_hostname(host)); // ------------------------------------------------------------------------ // first remove it from the indexes, so that it will not be discoverable @@ -1188,6 +1235,12 @@ void rrdhost_free___while_having_rrd_wrlock(RRDHOST *host, bool force) { DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(localhost, host, prev, next); } + // ------------------------------------------------------------------------ + // clean up streaming chart slots + + rrdhost_pluginsd_send_chart_slots_free(host); + rrdhost_pluginsd_receive_chart_slots_free(host); + // ------------------------------------------------------------------------ // clean up streaming @@ -1236,7 +1289,10 @@ void rrdhost_free___while_having_rrd_wrlock(RRDHOST *host, bool force) { #endif if (!netdata_exit && !force) { - netdata_log_info("RRD: 'host:%s' is now in archive mode...", rrdhost_hostname(host)); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "RRD: 'host:%s' is now in archive mode...", + rrdhost_hostname(host)); + rrdhost_flag_set(host, 
RRDHOST_FLAG_ARCHIVED | RRDHOST_FLAG_ORPHAN); return; } @@ -1266,7 +1322,7 @@ void rrdhost_free___while_having_rrd_wrlock(RRDHOST *host, bool force) { freez(host->node_id); rrdfamily_index_destroy(host); - rrdfunctions_destroy(host); + rrdfunctions_host_destroy(host); rrdvariables_destroy(host->rrdvars); if (host == localhost) rrdvariables_destroy(health_rrdvars); @@ -1275,6 +1331,7 @@ void rrdhost_free___while_having_rrd_wrlock(RRDHOST *host, bool force) { string_freez(host->hostname); __atomic_sub_fetch(&netdata_buffers_statistics.rrdhost_allocations_size, sizeof(RRDHOST), __ATOMIC_RELAXED); + freez(host); } @@ -1305,7 +1362,9 @@ void rrd_finalize_collection_for_all_hosts(void) { void rrdhost_save_charts(RRDHOST *host) { if(!host) return; - netdata_log_info("RRD: 'host:%s' saving / closing database...", rrdhost_hostname(host)); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "RRD: 'host:%s' saving / closing database...", + rrdhost_hostname(host)); RRDSET *st; @@ -1317,7 +1376,7 @@ void rrdhost_save_charts(RRDHOST *host) { rrdset_foreach_done(st); } -struct rrdhost_system_info *rrdhost_labels_to_system_info(DICTIONARY *labels) { +struct rrdhost_system_info *rrdhost_labels_to_system_info(RRDLABELS *labels) { struct rrdhost_system_info *info = callocz(1, sizeof(struct rrdhost_system_info)); info->hops = 1; @@ -1345,7 +1404,7 @@ struct rrdhost_system_info *rrdhost_labels_to_system_info(DICTIONARY *labels) { } static void rrdhost_load_auto_labels(void) { - DICTIONARY *labels = localhost->rrdlabels; + RRDLABELS *labels = localhost->rrdlabels; if (localhost->system_info->cloud_provider_type) rrdlabels_add(labels, "_cloud_provider_type", localhost->system_info->cloud_provider_type, RRDLABEL_SRC_AUTO); @@ -1406,7 +1465,14 @@ static void rrdhost_load_auto_labels(void) { add_aclk_host_labels(); - health_add_host_labels(); + // The source should be CONF, but when it is set, these labels are exported by default ('send configured labels' in exporting.conf). + // Their export seems to break exporting to Graphite, see https://github.com/netdata/netdata/issues/14084. + + int is_ephemeral = appconfig_get_boolean(&netdata_config, CONFIG_SECTION_GLOBAL, "is ephemeral node", CONFIG_BOOLEAN_NO); + rrdlabels_add(labels, "_is_ephemeral", is_ephemeral ? "true" : "false", RRDLABEL_SRC_AUTO); + + int has_unstable_connection = appconfig_get_boolean(&netdata_config, CONFIG_SECTION_GLOBAL, "has unstable connection", CONFIG_BOOLEAN_NO); + rrdlabels_add(labels, "_has_unstable_connection", has_unstable_connection ? "true" : "false", RRDLABEL_SRC_AUTO); rrdlabels_add(labels, "_is_parent", (localhost->connected_children_count > 0) ? "true" : "false", RRDLABEL_SRC_AUTO); @@ -1418,7 +1484,7 @@ void rrdhost_set_is_parent_label(void) { int count = __atomic_load_n(&localhost->connected_children_count, __ATOMIC_RELAXED); if (count == 0 || count == 1) { - DICTIONARY *labels = localhost->rrdlabels; + RRDLABELS *labels = localhost->rrdlabels; rrdlabels_add(labels, "_is_parent", (count) ? 
"true" : "false", RRDLABEL_SRC_AUTO); //queue a node info @@ -1434,7 +1500,9 @@ static void rrdhost_load_config_labels(void) { int status = config_load(NULL, 1, CONFIG_SECTION_HOST_LABEL); if(!status) { char *filename = CONFIG_DIR "/" CONFIG_FILENAME; - netdata_log_error("RRDLABEL: Cannot reload the configuration file '%s', using labels in memory", filename); + nd_log(NDLS_DAEMON, NDLP_WARNING, + "RRDLABEL: Cannot reload the configuration file '%s', using labels in memory", + filename); } struct section *co = appconfig_get_section(&netdata_config, CONFIG_SECTION_HOST_LABEL); @@ -1454,12 +1522,13 @@ static void rrdhost_load_kubernetes_labels(void) { sprintf(label_script, "%s/%s", netdata_configured_primary_plugins_dir, "get-kubernetes-labels.sh"); if (unlikely(access(label_script, R_OK) != 0)) { - netdata_log_error("Kubernetes pod label fetching script %s not found.",label_script); + nd_log(NDLS_DAEMON, NDLP_ERR, + "Kubernetes pod label fetching script %s not found.", + label_script); + return; } - netdata_log_debug(D_RRDHOST, "Attempting to fetch external labels via %s", label_script); - pid_t pid; FILE *fp_child_input; FILE *fp_child_output = netdata_popen(label_script, &pid, &fp_child_input); @@ -1473,7 +1542,9 @@ static void rrdhost_load_kubernetes_labels(void) { // Here we'll inform with an ERROR that the script failed, show whatever (if anything) was added to the list of labels, free the memory and set the return to null int rc = netdata_pclose(fp_child_input, fp_child_output, pid); if(rc) - netdata_log_error("%s exited abnormally. Failed to get kubernetes labels.", label_script); + nd_log(NDLS_DAEMON, NDLP_ERR, + "%s exited abnormally. Failed to get kubernetes labels.", + label_script); } void reload_host_labels(void) { @@ -1493,7 +1564,9 @@ void reload_host_labels(void) { } void rrdhost_finalize_collection(RRDHOST *host) { - netdata_log_info("RRD: 'host:%s' stopping data collection...", rrdhost_hostname(host)); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "RRD: 'host:%s' stopping data collection...", + rrdhost_hostname(host)); RRDSET *st; rrdset_foreach_read(st, host) @@ -1507,7 +1580,9 @@ void rrdhost_finalize_collection(RRDHOST *host) { void rrdhost_delete_charts(RRDHOST *host) { if(!host) return; - netdata_log_info("RRD: 'host:%s' deleting disk files...", rrdhost_hostname(host)); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "RRD: 'host:%s' deleting disk files...", + rrdhost_hostname(host)); RRDSET *st; @@ -1515,8 +1590,8 @@ void rrdhost_delete_charts(RRDHOST *host) { // we get a write lock // to ensure only one thread is saving the database rrdset_foreach_write(st, host){ - rrdset_delete_files(st); - } + rrdset_delete_files(st); + } rrdset_foreach_done(st); } @@ -1529,7 +1604,9 @@ void rrdhost_delete_charts(RRDHOST *host) { void rrdhost_cleanup_charts(RRDHOST *host) { if(!host) return; - netdata_log_info("RRD: 'host:%s' cleaning up disk files...", rrdhost_hostname(host)); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "RRD: 'host:%s' cleaning up disk files...", + rrdhost_hostname(host)); RRDSET *st; uint32_t rrdhost_delete_obsolete_charts = rrdhost_option_check(host, RRDHOST_OPTION_DELETE_OBSOLETE_CHARTS); @@ -1556,7 +1633,9 @@ void rrdhost_cleanup_charts(RRDHOST *host) { // RRDHOST - save all hosts to disk void rrdhost_save_all(void) { - netdata_log_info("RRD: saving databases [%zu hosts(s)]...", rrdhost_hosts_available()); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "RRD: saving databases [%zu hosts(s)]...", + rrdhost_hosts_available()); rrd_rdlock(); @@ -1571,7 +1650,9 @@ void rrdhost_save_all(void) { // RRDHOST 
- save or delete all hosts from disk void rrdhost_cleanup_all(void) { - netdata_log_info("RRD: cleaning up database [%zu hosts(s)]...", rrdhost_hosts_available()); + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "RRD: cleaning up database [%zu hosts(s)]...", + rrdhost_hosts_available()); rrd_rdlock(); @@ -1855,7 +1936,9 @@ void rrdhost_status(RRDHOST *host, time_t now, RRDHOST_STATUS *s) { s->stream.since = host->sender->last_state_since_t; s->stream.peers = socket_peers(host->sender->rrdpush_sender_socket); +#ifdef ENABLE_HTTPS s->stream.ssl = SSL_connection(&host->sender->ssl); +#endif memcpy(s->stream.sent_bytes_on_this_connection_per_type, host->sender->sent_bytes_on_this_connection_per_type, @@ -1875,9 +1958,7 @@ void rrdhost_status(RRDHOST *host, time_t now, RRDHOST_STATUS *s) { else s->stream.status = RRDHOST_STREAM_STATUS_ONLINE; -#ifdef ENABLE_RRDPUSH_COMPRESSION - s->stream.compression = (stream_has_capability(host->sender, STREAM_CAP_COMPRESSION) && host->sender->compressor.initialized); -#endif + s->stream.compression = host->sender->compressor.initialized; } else { s->stream.status = RRDHOST_STREAM_STATUS_OFFLINE; diff --git a/database/rrdlabels.c b/database/rrdlabels.c index 77d9a91f007dd0..27f2878cdce693 100644 --- a/database/rrdlabels.c +++ b/database/rrdlabels.c @@ -3,6 +3,91 @@ #define NETDATA_RRD_INTERNALS #include "rrd.h" +// Key OF HS ARRRAY + +struct { + Pvoid_t JudyHS; + SPINLOCK spinlock; +} global_labels = { + .JudyHS = (Pvoid_t) NULL, + .spinlock = NETDATA_SPINLOCK_INITIALIZER +}; + +typedef struct label_registry_idx { + STRING *key; + STRING *value; +} LABEL_REGISTRY_IDX; + +typedef struct labels_registry_entry { + LABEL_REGISTRY_IDX index; +} RRDLABEL; + +// Value of HS array +typedef struct labels_registry_idx_entry { + RRDLABEL label; + size_t refcount; +} RRDLABEL_IDX; + +typedef struct rrdlabels { + SPINLOCK spinlock; + size_t version; + Pvoid_t JudyL; +} RRDLABELS; + +#define lfe_start_nolock(label_list, label, ls) \ + do { \ + bool _first_then_next = true; \ + Pvoid_t *_PValue; \ + Word_t _Index = 0; \ + while ((_PValue = JudyLFirstThenNext((label_list)->JudyL, &_Index, &_first_then_next))) { \ + (ls) = *(RRDLABEL_SRC *)_PValue; \ + (void)(ls); \ + (label) = (void *)_Index; + +#define lfe_done_nolock() \ + } \ + } \ + while (0) + +#define lfe_start_read(label_list, label, ls) \ + do { \ + spinlock_lock(&(label_list)->spinlock); \ + bool _first_then_next = true; \ + Pvoid_t *_PValue; \ + Word_t _Index = 0; \ + while ((_PValue = JudyLFirstThenNext((label_list)->JudyL, &_Index, &_first_then_next))) { \ + (ls) = *(RRDLABEL_SRC *)_PValue; \ + (void)(ls); \ + (label) = (void *)_Index; + +#define lfe_done(label_list) \ + } \ + spinlock_unlock(&(label_list)->spinlock); \ + } \ + while (0) + +static inline void STATS_PLUS_MEMORY(struct dictionary_stats *stats, size_t key_size, size_t item_size, size_t value_size) { + if(key_size) + __atomic_fetch_add(&stats->memory.index, (long)JUDYHS_INDEX_SIZE_ESTIMATE(key_size), __ATOMIC_RELAXED); + + if(item_size) + __atomic_fetch_add(&stats->memory.dict, (long)item_size, __ATOMIC_RELAXED); + + if(value_size) + __atomic_fetch_add(&stats->memory.values, (long)value_size, __ATOMIC_RELAXED); +} + +static inline void STATS_MINUS_MEMORY(struct dictionary_stats *stats, size_t key_size, size_t item_size, size_t value_size) { + if(key_size) + __atomic_fetch_sub(&stats->memory.index, (long)JUDYHS_INDEX_SIZE_ESTIMATE(key_size), __ATOMIC_RELAXED); + + if(item_size) + __atomic_fetch_sub(&stats->memory.dict, (long)item_size, __ATOMIC_RELAXED); + + 
if(value_size) + __atomic_fetch_sub(&stats->memory.values, (long)value_size, __ATOMIC_RELAXED); +} + // ---------------------------------------------------------------------------- // labels sanitization @@ -369,6 +454,12 @@ __attribute__((constructor)) void initialize_labels_keys_char_map(void) { } +__attribute__((constructor)) void initialize_label_stats(void) { + dictionary_stats_category_rrdlabels.memory.dict = 0; + dictionary_stats_category_rrdlabels.memory.index = 0; + dictionary_stats_category_rrdlabels.memory.values = 0; +} + size_t text_sanitize(unsigned char *dst, const unsigned char *src, size_t dst_size, unsigned char *char_map, bool utf, const char *empty, size_t *multibyte_length) { if(unlikely(!dst_size)) return 0; @@ -484,93 +575,165 @@ static inline size_t rrdlabels_sanitize_value(char *dst, const char *src, size_t // ---------------------------------------------------------------------------- // rrdlabels_create() -typedef struct rrdlabel { - STRING *label_value; - RRDLABEL_SRC label_source; -} RRDLABEL; +RRDLABELS *rrdlabels_create(void) +{ + RRDLABELS *labels = callocz(1, sizeof(*labels)); + STATS_PLUS_MEMORY(&dictionary_stats_category_rrdlabels, 0, sizeof(RRDLABELS), 0); + return labels; +} -static void rrdlabel_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *dict_ptr __maybe_unused) { - RRDLABEL *lb = (RRDLABEL *)value; +static void dup_label(RRDLABEL *label_index) +{ + if (!label_index) + return; + + spinlock_lock(&global_labels.spinlock); + + Pvoid_t *PValue = JudyHSGet(global_labels.JudyHS, (void *)label_index, sizeof(*label_index)); + if (PValue && *PValue) { + RRDLABEL_IDX *rrdlabel = *PValue; + __atomic_add_fetch(&rrdlabel->refcount, 1, __ATOMIC_RELAXED); + } - // label_value is already allocated by the STRING - lb->label_source |= RRDLABEL_FLAG_NEW; - lb->label_source &= ~RRDLABEL_FLAG_OLD; + spinlock_unlock(&global_labels.spinlock); } -static void rrdlabel_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *dict_ptr __maybe_unused) { - RRDLABEL *lb = (RRDLABEL *)value; +static RRDLABEL *add_label_name_value(const char *name, const char *value) +{ + RRDLABEL_IDX *rrdlabel = NULL; + LABEL_REGISTRY_IDX label_index; + label_index.key = string_strdupz(name); + label_index.value = string_strdupz(value); + + spinlock_lock(&global_labels.spinlock); + + Pvoid_t *PValue = JudyHSIns(&global_labels.JudyHS, (void *)&label_index, sizeof(label_index), PJE0); + if(unlikely(!PValue || PValue == PJERR)) + fatal("RRDLABELS: corrupted judyHS array"); + + if (*PValue) { + rrdlabel = *PValue; + string_freez(label_index.key); + string_freez(label_index.value); + } else { + rrdlabel = callocz(1, sizeof(*rrdlabel)); + rrdlabel->label.index = label_index; + *PValue = rrdlabel; + STATS_PLUS_MEMORY(&dictionary_stats_category_rrdlabels, sizeof(LABEL_REGISTRY_IDX), sizeof(RRDLABEL_IDX), 0); + } + __atomic_add_fetch(&rrdlabel->refcount, 1, __ATOMIC_RELAXED); - string_freez(lb->label_value); - lb->label_value = NULL; + spinlock_unlock(&global_labels.spinlock); + return &rrdlabel->label; } -static bool rrdlabel_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *oldvalue, void *newvalue, void *dict_ptr __maybe_unused) { - RRDLABEL *lbold = (RRDLABEL *)oldvalue; - RRDLABEL *lbnew = (RRDLABEL *)newvalue; +static void delete_label(RRDLABEL *label) +{ + spinlock_lock(&global_labels.spinlock); + + Pvoid_t *PValue = JudyHSGet(global_labels.JudyHS, &label->index, sizeof(label->index)); + if (PValue && *PValue) { + RRDLABEL_IDX 
*rrdlabel = *PValue; + size_t refcount = __atomic_sub_fetch(&rrdlabel->refcount, 1, __ATOMIC_RELAXED); + if (refcount == 0) { + int ret = JudyHSDel(&global_labels.JudyHS, (void *)label, sizeof(*label), PJE0); + if (unlikely(ret == JERR)) + STATS_MINUS_MEMORY(&dictionary_stats_category_rrdlabels, 0, sizeof(*rrdlabel), 0); + else + STATS_MINUS_MEMORY(&dictionary_stats_category_rrdlabels, sizeof(LABEL_REGISTRY_IDX), sizeof(*rrdlabel), 0); + string_freez(label->index.key); + string_freez(label->index.value); + freez(rrdlabel); + } + } + spinlock_unlock(&global_labels.spinlock); +} - if(lbold->label_value == lbnew->label_value) { - // they are the same +// ---------------------------------------------------------------------------- +// rrdlabels_destroy() - lbold->label_source |= lbnew->label_source; - lbold->label_source |= RRDLABEL_FLAG_OLD; - lbold->label_source &= ~RRDLABEL_FLAG_NEW; +void rrdlabels_destroy(RRDLABELS *labels) +{ + if (unlikely(!labels)) + return; - // free the new one - string_freez(lbnew->label_value); + spinlock_lock(&labels->spinlock); - return false; + Pvoid_t *PValue; + Word_t Index = 0; + bool first_then_next = true; + while ((PValue = JudyLFirstThenNext(labels->JudyL, &Index, &first_then_next))) { + delete_label((RRDLABEL *)Index); } + size_t memory_freed = JudyLFreeArray(&labels->JudyL, PJE0); + STATS_MINUS_MEMORY(&dictionary_stats_category_rrdlabels, 0, memory_freed + sizeof(RRDLABELS), 0); + spinlock_unlock(&labels->spinlock); + freez(labels); +} - // they are different +// Check in labels to see if we have the key specified in label +static RRDLABEL *rrdlabels_find_label_with_key_unsafe(RRDLABELS *labels, RRDLABEL *label) +{ + if (unlikely(!labels)) + return NULL; + + Pvoid_t *PValue; + Word_t Index = 0; + bool first_then_next = true; + RRDLABEL *found = NULL; + while ((PValue = JudyLFirstThenNext(labels->JudyL, &Index, &first_then_next))) { + RRDLABEL *lb = (RRDLABEL *)Index; + if (lb->index.key == label->index.key && lb != label) { + found = (RRDLABEL *)Index; + break; + } + } + return found; +} - string_freez(lbold->label_value); - lbold->label_value = lbnew->label_value; - lbold->label_source = lbnew->label_source; - lbold->label_source |= RRDLABEL_FLAG_NEW; - lbold->label_source &= ~RRDLABEL_FLAG_OLD; +// ---------------------------------------------------------------------------- +// rrdlabels_add() - return true; -} +static void labels_add_already_sanitized(RRDLABELS *labels, const char *key, const char *value, RRDLABEL_SRC ls) +{ + RRDLABEL *new_label = add_label_name_value(key, value); -DICTIONARY *rrdlabels_create(void) { - DICTIONARY *dict = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - &dictionary_stats_category_rrdlabels, sizeof(RRDLABEL)); + spinlock_lock(&labels->spinlock); - dictionary_register_insert_callback(dict, rrdlabel_insert_callback, dict); - dictionary_register_delete_callback(dict, rrdlabel_delete_callback, dict); - dictionary_register_conflict_callback(dict, rrdlabel_conflict_callback, dict); - return dict; -} + RRDLABEL_SRC new_ls = (ls & ~(RRDLABEL_FLAG_NEW | RRDLABEL_FLAG_OLD)); + size_t mem_before_judyl = JudyLMemUsed(labels->JudyL); -// ---------------------------------------------------------------------------- -// rrdlabels_destroy() + Pvoid_t *PValue = JudyLIns(&labels->JudyL, (Word_t)new_label, PJE0); + if (!PValue || PValue == PJERR) + fatal("RRDLABELS: corrupted labels JudyL array"); -void rrdlabels_destroy(DICTIONARY *labels_dict) { - dictionary_destroy(labels_dict); -} + if(*PValue) 
{ + new_ls |= RRDLABEL_FLAG_OLD; + delete_label(new_label); + } + else { + new_ls |= RRDLABEL_FLAG_NEW; -void rrdlabels_flush(DICTIONARY *labels_dict) { - dictionary_flush(labels_dict); -} + RRDLABEL *old_label_with_same_key = rrdlabels_find_label_with_key_unsafe(labels, new_label); + if (old_label_with_same_key) { + (void) JudyLDel(&labels->JudyL, (Word_t) old_label_with_same_key, PJE0); + delete_label(old_label_with_same_key); + } + } -// ---------------------------------------------------------------------------- -// rrdlabels_add() + labels->version++; + *((RRDLABEL_SRC *)PValue) = new_ls; -static void labels_add_already_sanitized(DICTIONARY *dict, const char *key, const char *value, RRDLABEL_SRC ls) { - if(ls & RRDLABEL_FLAG_NEW) ls &= ~RRDLABEL_FLAG_NEW; - if(ls & RRDLABEL_FLAG_OLD) ls &= ~RRDLABEL_FLAG_OLD; + size_t mem_after_judyl = JudyLMemUsed(labels->JudyL); + STATS_PLUS_MEMORY(&dictionary_stats_category_rrdlabels, 0, mem_after_judyl - mem_before_judyl, 0); - RRDLABEL tmp = { - .label_source = ls, - .label_value = string_strdupz(value) - }; - dictionary_set(dict, key, &tmp, sizeof(RRDLABEL)); + spinlock_unlock(&labels->spinlock); } - -void rrdlabels_add(DICTIONARY *dict, const char *name, const char *value, RRDLABEL_SRC ls) { - if(!dict) { +void rrdlabels_add(RRDLABELS *labels, const char *name, const char *value, RRDLABEL_SRC ls) +{ + if(!labels) { netdata_log_error("%s(): called with NULL dictionary.", __FUNCTION__ ); return; } @@ -584,7 +747,30 @@ void rrdlabels_add(DICTIONARY *dict, const char *name, const char *value, RRDLAB return; } - labels_add_already_sanitized(dict, n, v, ls); + labels_add_already_sanitized(labels, n, v, ls); +} + +bool rrdlabels_exist(RRDLABELS *labels, const char *key) +{ + if (!labels) + return false; + + STRING *this_key = string_strdupz(key); + + RRDLABEL *lb; + RRDLABEL_SRC ls; + + bool found = false; + lfe_start_read(labels, lb, ls) + { + if (lb->index.key == this_key) { + found = true; + break; + } + } + lfe_done(labels); + string_freez(this_key); + return found; } static const char *get_quoted_string_up_to(char *dst, size_t dst_size, const char *string, char upto1, char upto2) { @@ -619,8 +805,9 @@ static const char *get_quoted_string_up_to(char *dst, size_t dst_size, const cha return string; } -void rrdlabels_add_pair(DICTIONARY *dict, const char *string, RRDLABEL_SRC ls) { - if(!dict) { +void rrdlabels_add_pair(RRDLABELS *labels, const char *string, RRDLABEL_SRC ls) +{ + if(!labels) { netdata_log_error("%s(): called with NULL dictionary.", __FUNCTION__ ); return; } @@ -631,199 +818,255 @@ void rrdlabels_add_pair(DICTIONARY *dict, const char *string, RRDLABEL_SRC ls) { char value[RRDLABELS_MAX_VALUE_LENGTH + 1]; get_quoted_string_up_to(value, RRDLABELS_MAX_VALUE_LENGTH, string, '\0', '\0'); - rrdlabels_add(dict, name, value, ls); + rrdlabels_add(labels, name, value, ls); } // ---------------------------------------------------------------------------- -// rrdlabels_get_value_to_buffer_or_null() -void rrdlabels_get_value_to_buffer_or_null(DICTIONARY *labels, BUFFER *wb, const char *key, const char *quote, const char *null) { +void rrdlabels_value_to_buffer_array_item_or_null(RRDLABELS *labels, BUFFER *wb, const char *key) { if(!labels) return; - const DICTIONARY_ITEM *acquired_item = dictionary_get_and_acquire_item(labels, key); - RRDLABEL *lb = dictionary_acquired_item_value(acquired_item); - - if(lb && lb->label_value) - buffer_sprintf(wb, "%s%s%s", quote, string2str(lb->label_value), quote); - else - buffer_strcat(wb, null); + STRING 
*this_key = string_strdupz(key); - dictionary_acquired_item_release(labels, acquired_item); + RRDLABEL *lb; + RRDLABEL_SRC ls; + lfe_start_read(labels, lb, ls) + { + if (lb->index.key == this_key) { + if (lb->index.value) + buffer_json_add_array_item_string(wb, string2str(lb->index.value)); + else + buffer_json_add_array_item_string(wb, NULL); + break; + } + } + lfe_done(labels); + string_freez(this_key); } -void rrdlabels_value_to_buffer_array_item_or_null(DICTIONARY *labels, BUFFER *wb, const char *key) { +// ---------------------------------------------------------------------------- + +void rrdlabels_get_value_strcpyz(RRDLABELS *labels, char *dst, size_t dst_len, const char *key) { if(!labels) return; - const DICTIONARY_ITEM *acquired_item = dictionary_get_and_acquire_item(labels, key); - RRDLABEL *lb = dictionary_acquired_item_value(acquired_item); + STRING *this_key = string_strdupz(key); - if(lb && lb->label_value) - buffer_json_add_array_item_string(wb, string2str(lb->label_value)); - else - buffer_json_add_array_item_string(wb, NULL); + RRDLABEL *lb; + RRDLABEL_SRC ls; - dictionary_acquired_item_release(labels, acquired_item); + lfe_start_read(labels, lb, ls) + { + if (lb->index.key == this_key) { + if (lb->index.value) + strncpyz(dst, string2str(lb->index.value), dst_len); + else + dst[0] = '\0'; + break; + } + } + lfe_done(labels); + string_freez(this_key); } -// ---------------------------------------------------------------------------- -// rrdlabels_get_value_to_char_or_null() - -void rrdlabels_get_value_strdup_or_null(DICTIONARY *labels, char **value, const char *key) { - const DICTIONARY_ITEM *acquired_item = dictionary_get_and_acquire_item(labels, key); - RRDLABEL *lb = dictionary_acquired_item_value(acquired_item); +void rrdlabels_get_value_strdup_or_null(RRDLABELS *labels, char **value, const char *key) +{ + if(!labels) return; - *value = (lb && lb->label_value) ? strdupz(string2str(lb->label_value)) : NULL; + STRING *this_key = string_strdupz(key); - dictionary_acquired_item_release(labels, acquired_item); + RRDLABEL *lb; + RRDLABEL_SRC ls; + lfe_start_read(labels, lb, ls) + { + if (lb->index.key == this_key) { + *value = (lb->index.value) ? 
strdupz(string2str(lb->index.value)) : NULL; + break; + } + } + lfe_done(labels); + string_freez(this_key); } -void rrdlabels_get_value_strcpyz(DICTIONARY *labels, char *dst, size_t dst_len, const char *key) { - const DICTIONARY_ITEM *acquired_item = dictionary_get_and_acquire_item(labels, key); - RRDLABEL *lb = dictionary_acquired_item_value(acquired_item); +void rrdlabels_get_value_to_buffer_or_unset(RRDLABELS *labels, BUFFER *wb, const char *key, const char *unset) +{ + if(!labels || !key || !wb) return; - if(lb && lb->label_value) - strncpyz(dst, string2str(lb->label_value), dst_len); - else - dst[0] = '\0'; + STRING *this_key = string_strdupz(key); + RRDLABEL *lb; + RRDLABEL_SRC ls; - dictionary_acquired_item_release(labels, acquired_item); + lfe_start_read(labels, lb, ls) + { + if (lb->index.key == this_key) { + if (lb->index.value) + buffer_strcat(wb, string2str(lb->index.value)); + else + buffer_strcat(wb, unset); + break; + } + } + lfe_done(labels); + string_freez(this_key); } -STRING *rrdlabels_get_value_string_dup(DICTIONARY *labels, const char *key) { - const DICTIONARY_ITEM *acquired_item = dictionary_get_and_acquire_item(labels, key); - RRDLABEL *lb = dictionary_acquired_item_value(acquired_item); - - STRING *ret = NULL; - if(lb && lb->label_value) - ret = string_dup(lb->label_value); - - dictionary_acquired_item_release(labels, acquired_item); - - return ret; +static void rrdlabels_unmark_all_unsafe(RRDLABELS *labels) +{ + Pvoid_t *PValue; + Word_t Index = 0; + bool first_then_next = true; + while ((PValue = JudyLFirstThenNext(labels->JudyL, &Index, &first_then_next))) + *((RRDLABEL_SRC *)PValue) &= ~(RRDLABEL_FLAG_OLD | RRDLABEL_FLAG_NEW); } -STRING *rrdlabels_get_value_to_buffer_or_unset(DICTIONARY *labels, BUFFER *wb, const char *key, const char *unset) { - const DICTIONARY_ITEM *acquired_item = dictionary_get_and_acquire_item(labels, key); - RRDLABEL *lb = dictionary_acquired_item_value(acquired_item); - - STRING *ret = NULL; - if(lb && lb->label_value) - buffer_strcat(wb, string2str(lb->label_value)); - else - buffer_strcat(wb, unset); +void rrdlabels_unmark_all(RRDLABELS *labels) +{ + spinlock_lock(&labels->spinlock); - dictionary_acquired_item_release(labels, acquired_item); + rrdlabels_unmark_all_unsafe(labels); - return ret; + spinlock_unlock(&labels->spinlock); } -// ---------------------------------------------------------------------------- -// rrdlabels_unmark_all() -// remove labels RRDLABEL_FLAG_OLD and RRDLABEL_FLAG_NEW from all dictionary items +static void rrdlabels_remove_all_unmarked_unsafe(RRDLABELS *labels) +{ + Pvoid_t *PValue; + Word_t Index = 0; + bool first_then_next = true; -static int remove_flags_old_new(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { - RRDLABEL *lb = (RRDLABEL *)value; + while ((PValue = JudyLFirstThenNext(labels->JudyL, &Index, &first_then_next))) { + if (!((*((RRDLABEL_SRC *)PValue)) & (RRDLABEL_FLAG_INTERNAL))) { - if(lb->label_source & RRDLABEL_FLAG_OLD) lb->label_source &= ~RRDLABEL_FLAG_OLD; - if(lb->label_source & RRDLABEL_FLAG_NEW) lb->label_source &= ~RRDLABEL_FLAG_NEW; + size_t mem_before_judyl = JudyLMemUsed(labels->JudyL); + (void)JudyLDel(&labels->JudyL, Index, PJE0); + size_t mem_after_judyl = JudyLMemUsed(labels->JudyL); - return 1; -} + STATS_MINUS_MEMORY(&dictionary_stats_category_rrdlabels, 0, mem_before_judyl - mem_after_judyl, 0); -void rrdlabels_unmark_all(DICTIONARY *labels) { - dictionary_walkthrough_read(labels, remove_flags_old_new, NULL); + delete_label((RRDLABEL 
*)Index); + if (labels->JudyL != (Pvoid_t) NULL) { + Index = 0; + first_then_next = true; + } + } + } } +void rrdlabels_remove_all_unmarked(RRDLABELS *labels) +{ + spinlock_lock(&labels->spinlock); + rrdlabels_remove_all_unmarked_unsafe(labels); + spinlock_unlock(&labels->spinlock); +} // ---------------------------------------------------------------------------- -// rrdlabels_remove_all_unmarked() -// remove dictionary items that are neither old, nor new +// rrdlabels_walkthrough_read() -static int remove_not_old_not_new_callback(const DICTIONARY_ITEM *item, void *value, void *data) { - const char *name = dictionary_acquired_item_name(item); - DICTIONARY *dict = (DICTIONARY *)data; - RRDLABEL *lb = (RRDLABEL *)value; +int rrdlabels_walkthrough_read(RRDLABELS *labels, int (*callback)(const char *name, const char *value, RRDLABEL_SRC ls, void *data), void *data) +{ + int ret = 0; - if(!(lb->label_source & (RRDLABEL_FLAG_OLD | RRDLABEL_FLAG_NEW | RRDLABEL_FLAG_PERMANENT))) { - dictionary_del(dict, name); - return 1; - } + if(unlikely(!labels || !callback)) return 0; - return 0; -} + RRDLABEL *lb; + RRDLABEL_SRC ls; + lfe_start_read(labels, lb, ls) + { + ret = callback(string2str(lb->index.key), string2str(lb->index.value), ls, data); + if (ret < 0) + break; + } + lfe_done(labels); -void rrdlabels_remove_all_unmarked(DICTIONARY *labels) { - dictionary_walkthrough_write(labels, remove_not_old_not_new_callback, labels); + return ret; } - // ---------------------------------------------------------------------------- -// rrdlabels_walkthrough_read() - -struct labels_walkthrough { - int (*callback)(const char *name, const char *value, RRDLABEL_SRC ls, void *data); - void *data; -}; +// rrdlabels_migrate_to_these() +// migrate an existing label list to a new list -static int labels_walkthrough_callback(const DICTIONARY_ITEM *item, void *value, void *data) { - const char *name = dictionary_acquired_item_name(item); - struct labels_walkthrough *d = (struct labels_walkthrough *)data; - RRDLABEL *lb = (RRDLABEL *)value; +void rrdlabels_migrate_to_these(RRDLABELS *dst, RRDLABELS *src) { + if (!dst || !src || (dst == src)) + return; - RRDLABEL_SRC ls = lb->label_source; - if(ls & RRDLABEL_FLAG_NEW) ls &= ~RRDLABEL_FLAG_NEW; - if(ls & RRDLABEL_FLAG_OLD) ls &= ~RRDLABEL_FLAG_OLD; + spinlock_lock(&dst->spinlock); + spinlock_lock(&src->spinlock); - return d->callback(name, string2str(lb->label_value), ls, d->data); -} + rrdlabels_unmark_all_unsafe(dst); -int rrdlabels_walkthrough_read(DICTIONARY *labels, int (*callback)(const char *name, const char *value, RRDLABEL_SRC ls, void *data), void *data) { - struct labels_walkthrough d = { - .callback = callback, - .data = data - }; - return dictionary_walkthrough_read(labels, labels_walkthrough_callback, &d); -} + RRDLABEL *label; + Pvoid_t *PValue; -int rrdlabels_sorted_walkthrough_read(DICTIONARY *labels, int (*callback)(const char *name, const char *value, RRDLABEL_SRC ls, void *data), void *data) { - struct labels_walkthrough d = { - .callback = callback, - .data = data - }; - return dictionary_sorted_walkthrough_read(labels, labels_walkthrough_callback, &d); -} + RRDLABEL_SRC ls; + lfe_start_nolock(src, label, ls) + { + size_t mem_before_judyl = JudyLMemUsed(dst->JudyL); + PValue = JudyLIns(&dst->JudyL, (Word_t)label, PJE0); + if(unlikely(!PValue || PValue == PJERR)) + fatal("RRDLABELS migrate: corrupted labels array"); + + RRDLABEL_SRC flag = RRDLABEL_FLAG_NEW; + if (!*PValue) { + dup_label(label); + size_t mem_after_judyl = JudyLMemUsed(dst->JudyL); + 
STATS_PLUS_MEMORY(&dictionary_stats_category_rrdlabels, 0, mem_after_judyl - mem_before_judyl, 0); + } + else + flag = RRDLABEL_FLAG_OLD; + *((RRDLABEL_SRC *)PValue) |= flag; + } + lfe_done_nolock(); -// ---------------------------------------------------------------------------- -// rrdlabels_migrate_to_these() -// migrate an existing label list to a new list, INPLACE + rrdlabels_remove_all_unmarked_unsafe(dst); + dst->version = src->version; -static int copy_label_to_dictionary_callback(const DICTIONARY_ITEM *item, void *value, void *data) { - const char *name = dictionary_acquired_item_name(item); - DICTIONARY *dst = (DICTIONARY *)data; - RRDLABEL *lb = (RRDLABEL *)value; - labels_add_already_sanitized(dst, name, string2str(lb->label_value), lb->label_source); - return 1; + spinlock_unlock(&src->spinlock); + spinlock_unlock(&dst->spinlock); } -void rrdlabels_migrate_to_these(DICTIONARY *dst, DICTIONARY *src) { - if(!dst || !src) return; - - // remove the RRDLABEL_FLAG_OLD and RRDLABEL_FLAG_NEW from all items - rrdlabels_unmark_all(dst); +void rrdlabels_copy(RRDLABELS *dst, RRDLABELS *src) +{ + if (!dst || !src || (dst == src)) + return; - // Mark the existing ones as RRDLABEL_FLAG_OLD, - // or the newly added ones as RRDLABEL_FLAG_NEW - dictionary_walkthrough_read(src, copy_label_to_dictionary_callback, dst); + RRDLABEL *label; + RRDLABEL_SRC ls; - // remove the unmarked dst - rrdlabels_remove_all_unmarked(dst); -} + spinlock_lock(&dst->spinlock); + spinlock_lock(&src->spinlock); + + size_t mem_before_judyl = JudyLMemUsed(dst->JudyL); + bool update_statistics = false; + lfe_start_nolock(src, label, ls) + { + RRDLABEL *old_label_with_key = rrdlabels_find_label_with_key_unsafe(dst, label); + Pvoid_t *PValue = JudyLIns(&dst->JudyL, (Word_t)label, PJE0); + if(unlikely(!PValue || PValue == PJERR)) + fatal("RRDLABELS: corrupted labels array"); + + if (!*PValue) { + dup_label(label); + ls = (ls & ~(RRDLABEL_FLAG_OLD)) | RRDLABEL_FLAG_NEW; + dst->version++; + update_statistics = true; + if (old_label_with_key) { + (void)JudyLDel(&dst->JudyL, (Word_t)old_label_with_key, PJE0); + delete_label((RRDLABEL *)old_label_with_key); + } + } + else + ls = (ls & ~(RRDLABEL_FLAG_NEW)) | RRDLABEL_FLAG_OLD; -void rrdlabels_copy(DICTIONARY *dst, DICTIONARY *src) { - if(!dst || !src) return; + *((RRDLABEL_SRC *)PValue) = ls; + } + lfe_done_nolock(); + if (update_statistics) { + size_t mem_after_judyl = JudyLMemUsed(dst->JudyL); + STATS_PLUS_MEMORY(&dictionary_stats_category_rrdlabels, 0, mem_after_judyl - mem_before_judyl, 0); + } - dictionary_walkthrough_read(src, copy_label_to_dictionary_callback, dst); + spinlock_unlock(&src->spinlock); + spinlock_unlock(&dst->spinlock); } @@ -837,8 +1080,7 @@ struct simple_pattern_match_name_value { char equal; }; -static int simple_pattern_match_name_only_callback(const DICTIONARY_ITEM *item, void *value, void *data) { - const char *name = dictionary_acquired_item_name(item); +static int simple_pattern_match_name_only_callback(const char *name, const char *value, RRDLABEL_SRC ls __maybe_unused, void *data) { struct simple_pattern_match_name_value *t = (struct simple_pattern_match_name_value *)data; (void)value; @@ -849,10 +1091,8 @@ static int simple_pattern_match_name_only_callback(const DICTIONARY_ITEM *item, return 0; } -static int simple_pattern_match_name_and_value_callback(const DICTIONARY_ITEM *item, void *value, void *data) { - const char *name = dictionary_acquired_item_name(item); +static int simple_pattern_match_name_and_value_callback(const char *name, const 
char *value, RRDLABEL_SRC ls __maybe_unused, void *data) { struct simple_pattern_match_name_value *t = (struct simple_pattern_match_name_value *)data; - RRDLABEL *lb = (RRDLABEL *)value; // we return -1 to stop the walkthrough on first match t->searches++; @@ -860,7 +1100,7 @@ static int simple_pattern_match_name_and_value_callback(const DICTIONARY_ITEM *i size_t len = RRDLABELS_MAX_NAME_LENGTH + RRDLABELS_MAX_VALUE_LENGTH + 2; // +1 for =, +1 for \0 char tmp[len], *dst = &tmp[0]; - const char *v = string2str(lb->label_value); + const char *v = value; // copy the name while(*name) *dst++ = *name++; @@ -881,7 +1121,7 @@ static int simple_pattern_match_name_and_value_callback(const DICTIONARY_ITEM *i return 0; } -bool rrdlabels_match_simple_pattern_parsed(DICTIONARY *labels, SIMPLE_PATTERN *pattern, char equal, size_t *searches) { +bool rrdlabels_match_simple_pattern_parsed(RRDLABELS *labels, SIMPLE_PATTERN *pattern, char equal, size_t *searches) { if (!labels) return false; struct simple_pattern_match_name_value t = { @@ -890,7 +1130,7 @@ bool rrdlabels_match_simple_pattern_parsed(DICTIONARY *labels, SIMPLE_PATTERN *p .equal = equal }; - int ret = dictionary_walkthrough_read(labels, equal?simple_pattern_match_name_and_value_callback:simple_pattern_match_name_only_callback, &t); + int ret = rrdlabels_walkthrough_read(labels, equal?simple_pattern_match_name_and_value_callback:simple_pattern_match_name_only_callback, &t); if(searches) *searches = t.searches; @@ -898,7 +1138,7 @@ bool rrdlabels_match_simple_pattern_parsed(DICTIONARY *labels, SIMPLE_PATTERN *p return (ret == -1)?true:false; } -bool rrdlabels_match_simple_pattern(DICTIONARY *labels, const char *simple_pattern_txt) { +bool rrdlabels_match_simple_pattern(RRDLABELS *labels, const char *simple_pattern_txt) { if (!labels) return false; SIMPLE_PATTERN *pattern = simple_pattern_create(simple_pattern_txt, " ,|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, true); @@ -923,39 +1163,23 @@ bool rrdlabels_match_simple_pattern(DICTIONARY *labels, const char *simple_patte // ---------------------------------------------------------------------------- // Log all labels -static int rrdlabels_log_label_to_buffer_callback(const DICTIONARY_ITEM *item, void *value, void *data) { - const char *name = dictionary_acquired_item_name(item); - +static int rrdlabels_log_label_to_buffer_callback(const char *name, const char *value, void *data) { BUFFER *wb = (BUFFER *)data; - RRDLABEL *lb = (RRDLABEL *)value; - - buffer_sprintf(wb, "Label: %s: \"%s\" (", name, string2str(lb->label_value)); - - size_t sources = 0; - if(lb->label_source & RRDLABEL_SRC_AUTO) { - buffer_sprintf(wb, "auto"); - sources++; - } - - if(lb->label_source & RRDLABEL_SRC_CONFIG) - buffer_sprintf(wb, "%snetdata.conf", sources++?",":""); - - if(lb->label_source & RRDLABEL_SRC_K8S) - buffer_sprintf(wb, "%sk8s", sources++?",":""); - - if(lb->label_source & RRDLABEL_SRC_ACLK) - buffer_sprintf(wb, "%saclk", sources++?",":""); - - if(!sources) - buffer_strcat(wb, "unknown"); + buffer_sprintf(wb, "Label: %s: \"%s\" (", name, value); + buffer_strcat(wb, "unknown"); buffer_strcat(wb, ")\n"); return 1; } -void rrdlabels_log_to_buffer(DICTIONARY *labels, BUFFER *wb) { - dictionary_sorted_walkthrough_read(labels, rrdlabels_log_label_to_buffer_callback, wb); +void rrdlabels_log_to_buffer(RRDLABELS *labels, BUFFER *wb) +{ + RRDLABEL *lb; + RRDLABEL_SRC ls; + lfe_start_read(labels, lb, ls) + rrdlabels_log_label_to_buffer_callback((void *) string2str(lb->index.key), (void *) string2str(lb->index.value), wb); + 
lfe_done(labels); } @@ -975,10 +1199,10 @@ struct labels_to_buffer { size_t count; }; -static int label_to_buffer_callback(const DICTIONARY_ITEM *item, void *value, void *data) { - const char *name = dictionary_acquired_item_name(item); +static int label_to_buffer_callback(const RRDLABEL *lb, void *value __maybe_unused, RRDLABEL_SRC ls, void *data) +{ + struct labels_to_buffer *t = (struct labels_to_buffer *)data; - RRDLABEL *lb = (RRDLABEL *)value; size_t n_size = (t->name_sanitizer ) ? ( RRDLABELS_MAX_NAME_LENGTH * 2 ) : 1; size_t v_size = (t->value_sanitizer) ? ( RRDLABELS_MAX_VALUE_LENGTH * 2 ) : 1; @@ -986,7 +1210,9 @@ static int label_to_buffer_callback(const DICTIONARY_ITEM *item, void *value, vo char n[n_size]; char v[v_size]; - const char *nn = name, *vv = string2str(lb->label_value); + const char *name = string2str(lb->index.key); + + const char *nn = name, *vv = string2str(lb->index.value); if(t->name_sanitizer) { t->name_sanitizer(n, name, n_size); @@ -994,11 +1220,11 @@ static int label_to_buffer_callback(const DICTIONARY_ITEM *item, void *value, vo } if(t->value_sanitizer) { - t->value_sanitizer(v, string2str(lb->label_value), v_size); + t->value_sanitizer(v, string2str(lb->index.value), v_size); vv = v; } - if(!t->filter_callback || t->filter_callback(name, string2str(lb->label_value), lb->label_source, t->filter_data)) { + if(!t->filter_callback || t->filter_callback(name, string2str(lb->index.value), ls, t->filter_data)) { buffer_sprintf(t->wb, "%s%s%s%s%s%s%s%s%s", t->count++?t->between_them:"", t->before_each, t->quote, nn, t->quote, t->equal, t->quote, vv, t->quote); return 1; } @@ -1006,7 +1232,26 @@ static int label_to_buffer_callback(const DICTIONARY_ITEM *item, void *value, vo return 0; } -int rrdlabels_to_buffer(DICTIONARY *labels, BUFFER *wb, const char *before_each, const char *equal, const char *quote, const char *between_them, bool (*filter_callback)(const char *name, const char *value, RRDLABEL_SRC ls, void *data), void *filter_data, void (*name_sanitizer)(char *dst, const char *src, size_t dst_size), void (*value_sanitizer)(char *dst, const char *src, size_t dst_size)) { + +int label_walkthrough_read(RRDLABELS *labels, int (*callback)(const RRDLABEL *item, void *entry, RRDLABEL_SRC ls, void *data), void *data) +{ + int ret = 0; + + if(unlikely(!labels || !callback)) return 0; + + RRDLABEL *lb; + RRDLABEL_SRC ls; + lfe_start_read(labels, lb, ls) + { + ret = callback((const RRDLABEL *)lb, (void *)string2str(lb->index.value), ls, data); + if (ret < 0) + break; + } + lfe_done(labels); + return ret; +} + +int rrdlabels_to_buffer(RRDLABELS *labels, BUFFER *wb, const char *before_each, const char *equal, const char *quote, const char *between_them, bool (*filter_callback)(const char *name, const char *value, RRDLABEL_SRC ls, void *data), void *filter_data, void (*name_sanitizer)(char *dst, const char *src, size_t dst_size), void (*value_sanitizer)(char *dst, const char *src, size_t dst_size)) { struct labels_to_buffer tmp = { .wb = wb, .filter_callback = filter_callback, @@ -1019,18 +1264,39 @@ int rrdlabels_to_buffer(DICTIONARY *labels, BUFFER *wb, const char *before_each, .between_them = between_them, .count = 0 }; - return dictionary_walkthrough_read(labels, label_to_buffer_callback, (void *)&tmp); + return label_walkthrough_read(labels, label_to_buffer_callback, (void *)&tmp); } -void rrdlabels_to_buffer_json_members(DICTIONARY *labels, BUFFER *wb) { +void rrdlabels_to_buffer_json_members(RRDLABELS *labels, BUFFER *wb) +{ RRDLABEL *lb; - dfe_start_read(labels, 
lb) { - buffer_json_member_add_string(wb, lb_dfe.name, string2str(lb->label_value)); - } - dfe_done(lb); + RRDLABEL_SRC ls; + lfe_start_read(labels, lb, ls) + buffer_json_member_add_string(wb, string2str(lb->index.key), string2str(lb->index.value)); + lfe_done(labels); +} + +size_t rrdlabels_entries(RRDLABELS *labels __maybe_unused) +{ + if (unlikely(!labels)) + return 0; + + size_t count; + spinlock_lock(&labels->spinlock); + count = JudyLCount(labels->JudyL, 0, -1, PJE0); + spinlock_unlock(&labels->spinlock); + return count; } -void rrdset_update_rrdlabels(RRDSET *st, DICTIONARY *new_rrdlabels) { +size_t rrdlabels_version(RRDLABELS *labels __maybe_unused) +{ + if (unlikely(!labels)) + return 0; + + return (size_t) labels->version; +} + +void rrdset_update_rrdlabels(RRDSET *st, RRDLABELS *new_rrdlabels) { if(!st->rrdlabels) st->rrdlabels = rrdlabels_create(); @@ -1039,6 +1305,7 @@ void rrdset_update_rrdlabels(RRDSET *st, DICTIONARY *new_rrdlabels) { rrdset_flag_set(st, RRDSET_FLAG_METADATA_UPDATE); rrdhost_flag_set(st->rrdhost, RRDHOST_FLAG_METADATA_UPDATE); + rrdset_metadata_updated(st); } @@ -1051,16 +1318,37 @@ struct rrdlabels_unittest_add_a_pair { const char *expected_value; const char *name; const char *value; - RRDLABEL_SRC ls; int errors; }; -int rrdlabels_unittest_add_a_pair_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) { +RRDLABEL *rrdlabels_find_label_with_key(RRDLABELS *labels, const char *key, RRDLABEL_SRC *source) +{ + if (!labels || !key) + return NULL; + + STRING *this_key = string_strdupz(key); + + RRDLABEL *lb = NULL; + RRDLABEL_SRC ls; + + lfe_start_read(labels, lb, ls) + { + if (lb->index.key == this_key) { + if (source) + *source = ls; + break; + } + } + lfe_done(labels); + string_freez(this_key); + return lb; +} + +static int rrdlabels_unittest_add_a_pair_callback(const char *name, const char *value, RRDLABEL_SRC ls __maybe_unused, void *data) { struct rrdlabels_unittest_add_a_pair *t = (struct rrdlabels_unittest_add_a_pair *)data; t->name = name; t->value = value; - t->ls = ls; if(strcmp(name, t->expected_name) != 0) { fprintf(stderr, "name is wrong, found \"%s\", expected \"%s\"", name, t->expected_name); @@ -1082,8 +1370,8 @@ int rrdlabels_unittest_add_a_pair_callback(const char *name, const char *value, return 1; } -int rrdlabels_unittest_add_a_pair(const char *pair, const char *name, const char *value) { - DICTIONARY *labels = rrdlabels_create(); +static int rrdlabels_unittest_add_a_pair(const char *pair, const char *name, const char *value) { + RRDLABELS *labels = rrdlabels_create(); int errors; fprintf(stderr, "rrdlabels_add_pair(labels, %s) ... 
", pair); @@ -1112,7 +1400,7 @@ int rrdlabels_unittest_add_a_pair(const char *pair, const char *name, const char return errors; } -int rrdlabels_unittest_add_pairs() { +static int rrdlabels_unittest_add_pairs() { fprintf(stderr, "\n%s() tests\n", __FUNCTION__); int errors = 0; @@ -1160,7 +1448,142 @@ int rrdlabels_unittest_add_pairs() { return errors; } -int rrdlabels_unittest_check_simple_pattern(DICTIONARY *labels, const char *pattern, bool expected) { +static int rrdlabels_unittest_expect_value(RRDLABELS *labels, const char *key, const char *value, RRDLABEL_SRC required_source) +{ + RRDLABEL_SRC source; + RRDLABEL *label = rrdlabels_find_label_with_key(labels, key, &source); + return (!label || strcmp(string2str(label->index.value), value) != 0 || !(source & required_source)); +} + +static int rrdlabels_unittest_double_check() +{ + fprintf(stderr, "\n%s() tests\n", __FUNCTION__); + + int ret = 0; + RRDLABELS *labels = rrdlabels_create(); + + rrdlabels_add(labels, "key1", "value1", RRDLABEL_SRC_CONFIG); + ret += rrdlabels_unittest_expect_value(labels, "key1", "value1", RRDLABEL_FLAG_NEW); + + rrdlabels_add(labels, "key1", "value2", RRDLABEL_SRC_CONFIG); + ret += !rrdlabels_unittest_expect_value(labels, "key1", "value2", RRDLABEL_FLAG_OLD); + + ret += (rrdlabels_entries(labels) != 1); + + rrdlabels_destroy(labels); + + if (ret) + fprintf(stderr, "\n%s() tests failed\n", __FUNCTION__); + return ret; +} + +static int rrdlabels_walkthrough_index_read(RRDLABELS *labels, int (*callback)(const char *name, const char *value, RRDLABEL_SRC ls, size_t index, void *data), void *data) +{ + int ret = 0; + + if(unlikely(!labels || !callback)) return 0; + + RRDLABEL *lb; + RRDLABEL_SRC ls; + size_t index = 0; + lfe_start_read(labels, lb, ls) + { + ret = callback(string2str(lb->index.key), string2str(lb->index.value), ls, index, data); + if (ret < 0) + break; + index++; + } + lfe_done(labels); + + return ret; +} + +static int unittest_dump_labels(const char *name, const char *value, RRDLABEL_SRC ls, size_t index, void *data __maybe_unused) +{ + if (!index && data) { + fprintf(stderr, "%s\n", (char *) data); + } + fprintf(stderr, "LABEL \"%s\" = %d \"%s\"\n", name, ls & (~RRDLABEL_FLAG_INTERNAL), value); + return 1; +} + +static int rrdlabels_unittest_migrate_check() +{ + fprintf(stderr, "\n%s() tests\n", __FUNCTION__); + + RRDLABELS *labels1 = NULL; + RRDLABELS *labels2 = NULL; + + labels1 = rrdlabels_create(); + labels2 = rrdlabels_create(); + + rrdlabels_add(labels1, "key1", "value1", RRDLABEL_SRC_CONFIG); + rrdlabels_add(labels1, "key1", "value2", RRDLABEL_SRC_CONFIG); + + rrdlabels_add(labels2, "new_key1", "value2", RRDLABEL_SRC_CONFIG); + rrdlabels_add(labels2, "new_key2", "value2", RRDLABEL_SRC_CONFIG); + rrdlabels_add(labels2, "key1", "value2", RRDLABEL_SRC_CONFIG); + + fprintf(stderr, "Labels1 entries found %zu (should be 1)\n", rrdlabels_entries(labels1)); + fprintf(stderr, "Labels2 entries found %zu (should be 3)\n", rrdlabels_entries(labels2)); + + rrdlabels_migrate_to_these(labels1, labels2); + fprintf(stderr, "labels1 (migrated) entries found %zu (should be 3)\n", rrdlabels_entries(labels1)); + size_t entries = rrdlabels_entries(labels1); + + rrdlabels_destroy(labels1); + rrdlabels_destroy(labels2); + + if (entries != 3) + return 1; + + // Copy test + labels1 = rrdlabels_create(); + labels2 = rrdlabels_create(); + + rrdlabels_add(labels1, "key1", "value1", RRDLABEL_SRC_CONFIG); + rrdlabels_add(labels1, "key2", "value2", RRDLABEL_SRC_CONFIG); + rrdlabels_add(labels1, "key3", "value3", 
RRDLABEL_SRC_CONFIG); + rrdlabels_add(labels1, "key4", "value4", RRDLABEL_SRC_CONFIG); // 4 keys + rrdlabels_walkthrough_index_read(labels1, unittest_dump_labels, "\nlabels1"); + + rrdlabels_add(labels2, "key0", "value0", RRDLABEL_SRC_CONFIG); + rrdlabels_add(labels2, "key1", "value1", RRDLABEL_SRC_CONFIG); + rrdlabels_add(labels2, "key2", "value2", RRDLABEL_SRC_CONFIG); + + int rc = 0; + rc = rrdlabels_unittest_expect_value(labels1, "key1", "value1", RRDLABEL_FLAG_NEW); + if (rc) + return rc; + + rrdlabels_walkthrough_index_read(labels2, unittest_dump_labels, "\nlabels2"); + + rrdlabels_copy(labels1, labels2); // labels1 should have 5 keys + rc = rrdlabels_unittest_expect_value(labels1, "key1", "value1", RRDLABEL_FLAG_OLD); + if (rc) + return rc; + + rc = rrdlabels_unittest_expect_value(labels1, "key0", "value0", RRDLABEL_FLAG_NEW); + if (rc) + return rc; + + rrdlabels_walkthrough_index_read(labels1, unittest_dump_labels, "\nlabels1 after copy from labels2"); + entries = rrdlabels_entries(labels1); + + fprintf(stderr, "labels1 (copied) entries found %zu (should be 5)\n", rrdlabels_entries(labels1)); + if (entries != 5) + return 1; + + rrdlabels_add(labels1, "key0", "value0", RRDLABEL_SRC_CONFIG); + rc = rrdlabels_unittest_expect_value(labels1, "key0", "value0", RRDLABEL_FLAG_OLD); + + rrdlabels_destroy(labels1); + rrdlabels_destroy(labels2); + + return rc; +} + +static int rrdlabels_unittest_check_simple_pattern(RRDLABELS *labels, const char *pattern, bool expected) { fprintf(stderr, "rrdlabels_match_simple_pattern(labels, \"%s\") ... ", pattern); bool ret = rrdlabels_match_simple_pattern(labels, pattern); @@ -1169,12 +1592,12 @@ int rrdlabels_unittest_check_simple_pattern(DICTIONARY *labels, const char *patt return (ret == expected)?0:1; } -int rrdlabels_unittest_simple_pattern() { +static int rrdlabels_unittest_simple_pattern() { fprintf(stderr, "\n%s() tests\n", __FUNCTION__); int errors = 0; - DICTIONARY *labels = rrdlabels_create(); + RRDLABELS *labels = rrdlabels_create(); rrdlabels_add(labels, "tag1", "value1", RRDLABEL_SRC_CONFIG); rrdlabels_add(labels, "tag2", "value2", RRDLABEL_SRC_CONFIG); rrdlabels_add(labels, "tag3", "value3", RRDLABEL_SRC_CONFIG); @@ -1252,6 +1675,8 @@ int rrdlabels_unittest(void) { errors += rrdlabels_unittest_sanitization(); errors += rrdlabels_unittest_add_pairs(); errors += rrdlabels_unittest_simple_pattern(); + errors += rrdlabels_unittest_double_check(); + errors += rrdlabels_unittest_migrate_check(); fprintf(stderr, "%d errors found\n", errors); return errors; diff --git a/database/rrdlabels.h b/database/rrdlabels.h new file mode 100644 index 00000000000000..64a0e23848328c --- /dev/null +++ b/database/rrdlabels.h @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_RRDLABELS_H +#define NETDATA_RRDLABELS_H + +#include "rrd.h" + +typedef enum __attribute__ ((__packed__)) rrdlabel_source { + RRDLABEL_SRC_AUTO = (1 << 0), // set when Netdata found the label by some automation + RRDLABEL_SRC_CONFIG = (1 << 1), // set when the user configured the label + RRDLABEL_SRC_K8S = (1 << 2), // set when this label is found from k8s (RRDLABEL_SRC_AUTO should also be set) + RRDLABEL_SRC_ACLK = (1 << 3), // set when this label is found from ACLK (RRDLABEL_SRC_AUTO should also be set) + + // more sources can be added here + + RRDLABEL_FLAG_DONT_DELETE = (1 << 29), // set when this label should never be removed (can be overwritten though) + RRDLABEL_FLAG_OLD = (1 << 30), // marks for rrdlabels internal use - they are not exposed outside 
rrdlabels + RRDLABEL_FLAG_NEW = (1 << 31) // marks for rrdlabels internal use - they are not exposed outside rrdlabels +} RRDLABEL_SRC; + +#define RRDLABEL_FLAG_INTERNAL (RRDLABEL_FLAG_OLD | RRDLABEL_FLAG_NEW | RRDLABEL_FLAG_DONT_DELETE) + +size_t text_sanitize(unsigned char *dst, const unsigned char *src, size_t dst_size, unsigned char *char_map, bool utf, const char *empty, size_t *multibyte_length); + +RRDLABELS *rrdlabels_create(void); +void rrdlabels_destroy(RRDLABELS *labels_dict); +void rrdlabels_add(RRDLABELS *labels, const char *name, const char *value, RRDLABEL_SRC ls); +void rrdlabels_add_pair(RRDLABELS *labels, const char *string, RRDLABEL_SRC ls); +void rrdlabels_value_to_buffer_array_item_or_null(RRDLABELS *labels, BUFFER *wb, const char *key); +void rrdlabels_get_value_strdup_or_null(RRDLABELS *labels, char **value, const char *key); +void rrdlabels_get_value_to_buffer_or_unset(RRDLABELS *labels, BUFFER *wb, const char *key, const char *unset); +bool rrdlabels_exist(RRDLABELS *labels, const char *key); +size_t rrdlabels_entries(RRDLABELS *labels __maybe_unused); +size_t rrdlabels_version(RRDLABELS *labels __maybe_unused); +void rrdlabels_get_value_strcpyz(RRDLABELS *labels, char *dst, size_t dst_len, const char *key); + +void rrdlabels_unmark_all(RRDLABELS *labels); +void rrdlabels_remove_all_unmarked(RRDLABELS *labels); + +int rrdlabels_walkthrough_read(RRDLABELS *labels, int (*callback)(const char *name, const char *value, RRDLABEL_SRC ls, void *data), void *data); +void rrdlabels_log_to_buffer(RRDLABELS *labels, BUFFER *wb); +bool rrdlabels_match_simple_pattern(RRDLABELS *labels, const char *simple_pattern_txt); + +bool rrdlabels_match_simple_pattern_parsed(RRDLABELS *labels, SIMPLE_PATTERN *pattern, char equal, size_t *searches); +int rrdlabels_to_buffer(RRDLABELS *labels, BUFFER *wb, const char *before_each, const char *equal, const char *quote, const char *between_them, + bool (*filter_callback)(const char *name, const char *value, RRDLABEL_SRC ls, void *data), void *filter_data, + void (*name_sanitizer)(char *dst, const char *src, size_t dst_size), + void (*value_sanitizer)(char *dst, const char *src, size_t dst_size)); +void rrdlabels_to_buffer_json_members(RRDLABELS *labels, BUFFER *wb); + +void rrdlabels_migrate_to_these(RRDLABELS *dst, RRDLABELS *src); +void rrdlabels_copy(RRDLABELS *dst, RRDLABELS *src); + +int rrdlabels_unittest(void); + +// unfortunately this break when defined in exporting_engine.h +bool exporting_labels_filter_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data); + +#endif /* NETDATA_RRDLABELS_H */ diff --git a/database/rrdset.c b/database/rrdset.c index 1e00d5c8a26dc4..f4bb48aa751549 100644 --- a/database/rrdset.c +++ b/database/rrdset.c @@ -5,6 +5,129 @@ #include #include "storage_engine.h" + +void rrdset_metadata_updated(RRDSET *st) { + __atomic_add_fetch(&st->version, 1, __ATOMIC_RELAXED); + rrdcontext_updated_rrdset(st); +} + +// ---------------------------------------------------------------------------- +// RRDSET rrdpush send chart_slots + +static void rrdset_rrdpush_send_chart_slot_assign(RRDSET *st) { + RRDHOST *host = st->rrdhost; + spinlock_lock(&host->rrdpush.send.pluginsd_chart_slots.available.spinlock); + + if(host->rrdpush.send.pluginsd_chart_slots.available.used > 0) + st->rrdpush.sender.chart_slot = + host->rrdpush.send.pluginsd_chart_slots.available.array[--host->rrdpush.send.pluginsd_chart_slots.available.used]; + else + st->rrdpush.sender.chart_slot = 
++host->rrdpush.send.pluginsd_chart_slots.last_used; + + spinlock_unlock(&host->rrdpush.send.pluginsd_chart_slots.available.spinlock); +} + +static void rrdset_rrdpush_send_chart_slot_release(RRDSET *st) { + if(!st->rrdpush.sender.chart_slot || st->rrdhost->rrdpush.send.pluginsd_chart_slots.available.ignore) + return; + + RRDHOST *host = st->rrdhost; + spinlock_lock(&host->rrdpush.send.pluginsd_chart_slots.available.spinlock); + + if(host->rrdpush.send.pluginsd_chart_slots.available.used >= host->rrdpush.send.pluginsd_chart_slots.available.size) { + uint32_t old_size = host->rrdpush.send.pluginsd_chart_slots.available.size; + uint32_t new_size = (old_size > 0) ? (old_size * 2) : 1024; + + host->rrdpush.send.pluginsd_chart_slots.available.array = + reallocz(host->rrdpush.send.pluginsd_chart_slots.available.array, new_size * sizeof(uint32_t)); + + host->rrdpush.send.pluginsd_chart_slots.available.size = new_size; + } + + host->rrdpush.send.pluginsd_chart_slots.available.array[host->rrdpush.send.pluginsd_chart_slots.available.used++] = + st->rrdpush.sender.chart_slot; + + st->rrdpush.sender.chart_slot = 0; + spinlock_unlock(&host->rrdpush.send.pluginsd_chart_slots.available.spinlock); +} + +void rrdhost_pluginsd_send_chart_slots_free(RRDHOST *host) { + spinlock_lock(&host->rrdpush.send.pluginsd_chart_slots.available.spinlock); + host->rrdpush.send.pluginsd_chart_slots.available.ignore = true; + freez(host->rrdpush.send.pluginsd_chart_slots.available.array); + host->rrdpush.send.pluginsd_chart_slots.available.array = NULL; + host->rrdpush.send.pluginsd_chart_slots.available.used = 0; + host->rrdpush.send.pluginsd_chart_slots.available.size = 0; + spinlock_unlock(&host->rrdpush.send.pluginsd_chart_slots.available.spinlock); + + // zero all the slots on all charts, so that they will not attempt to access the array + RRDSET *st; + rrdset_foreach_read(st, host) { + st->rrdpush.sender.chart_slot = 0; + } + rrdset_foreach_done(st); +} + +void rrdset_pluginsd_receive_unslot(RRDSET *st) { + for(size_t i = 0; i < st->pluginsd.size ;i++) { + rrddim_acquired_release(st->pluginsd.prd_array[i].rda); // can be NULL + st->pluginsd.prd_array[i].rda = NULL; + st->pluginsd.prd_array[i].rd = NULL; + st->pluginsd.prd_array[i].id = NULL; + } + + RRDHOST *host = st->rrdhost; + + if(st->pluginsd.last_slot >= 0 && + (uint32_t)st->pluginsd.last_slot < host->rrdpush.receive.pluginsd_chart_slots.size && + host->rrdpush.receive.pluginsd_chart_slots.array[st->pluginsd.last_slot] == st) { + host->rrdpush.receive.pluginsd_chart_slots.array[st->pluginsd.last_slot] = NULL; + } + + st->pluginsd.last_slot = -1; + st->pluginsd.dims_with_slots = false; +} + +void rrdset_pluginsd_receive_unslot_and_cleanup(RRDSET *st) { + if(!st) + return; + + spinlock_lock(&st->pluginsd.spinlock); + + rrdset_pluginsd_receive_unslot(st); + + freez(st->pluginsd.prd_array); + st->pluginsd.prd_array = NULL; + st->pluginsd.size = 0; + st->pluginsd.pos = 0; + st->pluginsd.set = false; + st->pluginsd.last_slot = -1; + st->pluginsd.dims_with_slots = false; + st->pluginsd.collector_tid = 0; + + spinlock_unlock(&st->pluginsd.spinlock); +} + +static void rrdset_pluginsd_receive_slots_initialize(RRDSET *st) { + spinlock_init(&st->pluginsd.spinlock); + st->pluginsd.last_slot = -1; +} + +void rrdhost_pluginsd_receive_chart_slots_free(RRDHOST *host) { + spinlock_lock(&host->rrdpush.receive.pluginsd_chart_slots.spinlock); + + if(host->rrdpush.receive.pluginsd_chart_slots.array) { + for (size_t s = 0; s < host->rrdpush.receive.pluginsd_chart_slots.size; s++) + 
rrdset_pluginsd_receive_unslot_and_cleanup(host->rrdpush.receive.pluginsd_chart_slots.array[s]); + + freez(host->rrdpush.receive.pluginsd_chart_slots.array); + host->rrdpush.receive.pluginsd_chart_slots.array = NULL; + host->rrdpush.receive.pluginsd_chart_slots.size = 0; + } + + spinlock_unlock(&host->rrdpush.receive.pluginsd_chart_slots.spinlock); +} + // ---------------------------------------------------------------------------- // RRDSET name index @@ -39,8 +162,8 @@ static inline RRDSET *rrdset_index_find_name(RRDHOST *host, const char *name) { static inline void rrdset_update_permanent_labels(RRDSET *st) { if(!st->rrdlabels) return; - rrdlabels_add(st->rrdlabels, "_collect_plugin", rrdset_plugin_name(st), RRDLABEL_SRC_AUTO| RRDLABEL_FLAG_PERMANENT); - rrdlabels_add(st->rrdlabels, "_collect_module", rrdset_module_name(st), RRDLABEL_SRC_AUTO| RRDLABEL_FLAG_PERMANENT); + rrdlabels_add(st->rrdlabels, "_collect_plugin", rrdset_plugin_name(st), RRDLABEL_SRC_AUTO | RRDLABEL_FLAG_DONT_DELETE); + rrdlabels_add(st->rrdlabels, "_collect_module", rrdset_module_name(st), RRDLABEL_SRC_AUTO | RRDLABEL_FLAG_DONT_DELETE); } static STRING *rrdset_fix_name(RRDHOST *host, const char *chart_full_id, const char *type, const char *current_name, const char *name) { @@ -64,7 +187,7 @@ static STRING *rrdset_fix_name(RRDHOST *host, const char *chart_full_id, const c i++; } while (rrdset_index_find_name(host, new_name)); - netdata_log_info("RRDSET: using name '%s' for chart '%s' on host '%s'.", new_name, full_name, rrdhost_hostname(host)); +// netdata_log_info("RRDSET: using name '%s' for chart '%s' on host '%s'.", new_name, full_name, rrdhost_hostname(host)); } else return NULL; @@ -135,6 +258,8 @@ static void rrdset_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, v st->chart_type = ctr->chart_type; st->rrdhost = host; + rrdset_rrdpush_send_chart_slot_assign(st); + spinlock_init(&st->data_collection_lock); st->flags = RRDSET_FLAG_SYNC_CLOCK @@ -179,13 +304,13 @@ static void rrdset_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, v st->green = NAN; st->red = NAN; + rrdset_pluginsd_receive_slots_initialize(st); + ctr->react_action = RRDSET_REACT_NEW; ml_chart_new(st); } -void pluginsd_rrdset_cleanup(RRDSET *st); - void rrdset_finalize_collection(RRDSET *st, bool dimensions_too) { RRDHOST *host = st->rrdhost; @@ -208,7 +333,7 @@ void rrdset_finalize_collection(RRDSET *st, bool dimensions_too) { } } - pluginsd_rrdset_cleanup(st); + rrdset_pluginsd_receive_unslot_and_cleanup(st); } // the destructor - the dictionary is write locked while this runs @@ -220,6 +345,8 @@ static void rrdset_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, v rrdset_finalize_collection(st, false); + rrdset_rrdpush_send_chart_slot_release(st); + // remove it from the name index rrdset_index_del_name(host, st); @@ -288,15 +415,10 @@ static bool rrdset_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, struct rrdset_constructor *ctr = constructor_data; RRDSET *st = rrdset; - rrdset_isnot_obsolete(st); + rrdset_isnot_obsolete___safe_from_collector_thread(st); ctr->react_action = RRDSET_REACT_NONE; - if (rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED)) { - rrdset_flag_clear(st, RRDSET_FLAG_ARCHIVED); - ctr->react_action |= RRDSET_REACT_CHART_ACTIVATED; - } - if (rrdset_reset_name(st, (ctr->name && *ctr->name) ? 
ctr->name : ctr->id) == 2) ctr->react_action |= RRDSET_REACT_UPDATED; @@ -368,7 +490,6 @@ static bool rrdset_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, rrdset_update_permanent_labels(st); rrdset_flag_set(st, RRDSET_FLAG_SYNC_CLOCK); - rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); return ctr->react_action != RRDSET_REACT_NONE; } @@ -396,7 +517,7 @@ static void rrdset_react_callback(const DICTIONARY_ITEM *item __maybe_unused, vo rrdhost_flag_set(st->rrdhost, RRDHOST_FLAG_METADATA_UPDATE); } - rrdcontext_updated_rrdset(st); + rrdset_metadata_updated(st); } void rrdset_index_init(RRDHOST *host) { @@ -547,7 +668,7 @@ int rrdset_reset_name(RRDSET *st, const char *name) { rrdset_flag_clear(st, RRDSET_FLAG_EXPORTING_IGNORE); rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_SEND); rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_IGNORE); - rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + rrdset_metadata_updated(st); rrdcontext_updated_rrdset_name(st); return 2; @@ -656,19 +777,19 @@ void rrdset_get_retention_of_tier_for_collected_chart(RRDSET *st, time_t *first_ *last_time_s = db_last_entry_s; } -inline void rrdset_is_obsolete(RRDSET *st) { - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED))) { - netdata_log_info("Cannot obsolete already archived chart %s", rrdset_name(st)); - return; - } +inline void rrdset_is_obsolete___safe_from_collector_thread(RRDSET *st) { + rrdset_pluginsd_receive_unslot(st); if(unlikely(!(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))) { +// netdata_log_info("Setting obsolete flag on chart 'host:%s/chart:%s'", +// rrdhost_hostname(st->rrdhost), rrdset_id(st)); + rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE); rrdhost_flag_set(st->rrdhost, RRDHOST_FLAG_PENDING_OBSOLETE_CHARTS); st->last_accessed_time_s = now_realtime_sec(); - rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + rrdset_metadata_updated(st); // the chart will not get more updates (data collection) // so, we have to push its definition now @@ -677,12 +798,16 @@ inline void rrdset_is_obsolete(RRDSET *st) { } } -inline void rrdset_isnot_obsolete(RRDSET *st) { +inline void rrdset_isnot_obsolete___safe_from_collector_thread(RRDSET *st) { if(unlikely((rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))) { + +// netdata_log_info("Clearing obsolete flag on chart 'host:%s/chart:%s'", +// rrdhost_hostname(st->rrdhost), rrdset_id(st)); + rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE); st->last_accessed_time_s = now_realtime_sec(); - rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + rrdset_metadata_updated(st); // the chart will be pushed upstream automatically // due to data collection @@ -1046,7 +1171,7 @@ void rrdset_timed_next(RRDSET *st, struct timeval now, usec_t duration_since_las last_time_s = now.tv_sec; if(min_delta > permanent_min_delta) { - netdata_log_info("MINIMUM MICROSECONDS DELTA of thread %d increased from %lld to %lld (+%lld)", gettid(), permanent_min_delta, min_delta, min_delta - permanent_min_delta); + netdata_log_info("MINIMUM MICROSECONDS DELTA of thread %d increased from %"PRIi64" to %"PRIi64" (+%"PRIi64")", gettid(), permanent_min_delta, min_delta, min_delta - permanent_min_delta); permanent_min_delta = min_delta; } @@ -1056,12 +1181,12 @@ void rrdset_timed_next(RRDSET *st, struct timeval now, usec_t duration_since_las #endif } - netdata_log_debug(D_RRD_CALLS, "rrdset_timed_next() for chart %s with duration since last update %llu usec", rrdset_name(st), duration_since_last_update); - rrdset_debug(st, "NEXT: %llu microseconds", duration_since_last_update); + 
netdata_log_debug(D_RRD_CALLS, "rrdset_timed_next() for chart %s with duration since last update %"PRIu64" usec", rrdset_name(st), duration_since_last_update); + rrdset_debug(st, "NEXT: %"PRIu64" microseconds", duration_since_last_update); internal_error(discarded && discarded != duration_since_last_update, - "host '%s', chart '%s': discarded data collection time of %llu usec, " - "replaced with %llu usec, reason: '%s'" + "host '%s', chart '%s': discarded data collection time of %"PRIu64" usec, " + "replaced with %"PRIu64" usec, reason: '%s'" , rrdhost_hostname(st->rrdhost) , rrdset_id(st) , discarded @@ -1198,6 +1323,14 @@ void rrddim_store_metric_with_trace(RRDDIM *rd, usec_t point_end_time_ut, NETDAT #else // !NETDATA_LOG_COLLECTION_ERRORS void rrddim_store_metric(RRDDIM *rd, usec_t point_end_time_ut, NETDATA_DOUBLE n, SN_FLAGS flags) { #endif // !NETDATA_LOG_COLLECTION_ERRORS + + static __thread struct log_stack_entry lgs[] = { + [0] = ND_LOG_FIELD_STR(NDF_NIDL_DIMENSION, NULL), + [1] = ND_LOG_FIELD_END(), + }; + lgs[0].str = rd->id; + log_stack_push(lgs); + #ifdef NETDATA_LOG_COLLECTION_ERRORS rd->rrddim_store_metric_count++; @@ -1259,6 +1392,7 @@ void rrddim_store_metric(RRDDIM *rd, usec_t point_end_time_ut, NETDATA_DOUBLE n, } rrdcontext_collected_rrddim(rd); + log_stack_pop(&lgs); } void store_metric_collection_completed() { @@ -1333,7 +1467,7 @@ static inline size_t rrdset_done_interpolate( internal_error(iterations < 0, "RRDSET: '%s': iterations calculation wrapped! " - "first_ut = %llu, last_stored_ut = %llu, next_store_ut = %llu, now_collect_ut = %llu" + "first_ut = %"PRIu64", last_stored_ut = %"PRIu64", next_store_ut = %"PRIu64", now_collect_ut = %"PRIu64"" , rrdset_id(st) , first_ut , last_stored_ut @@ -1366,8 +1500,8 @@ static inline size_t rrdset_done_interpolate( rrdset_debug(st, "%s: CALC2 INC " NETDATA_DOUBLE_FORMAT " = " NETDATA_DOUBLE_FORMAT - " * (%llu - %llu)" - " / (%llu - %llu)" + " * (%"PRIu64" - %"PRIu64")" + " / (%"PRIu64" - %"PRIu64"" , rrddim_name(rd) , new_value , rd->collector.calculated_value @@ -1416,8 +1550,8 @@ static inline size_t rrdset_done_interpolate( rrdset_debug(st, "%s: CALC2 DEF " NETDATA_DOUBLE_FORMAT " = (((" "(" NETDATA_DOUBLE_FORMAT " - " NETDATA_DOUBLE_FORMAT ")" - " * %llu" - " / %llu) + " NETDATA_DOUBLE_FORMAT, rrddim_name(rd) + " * %"PRIu64"" + " / %"PRIu64") + " NETDATA_DOUBLE_FORMAT, rrddim_name(rd) , new_value , rd->collector.calculated_value, rd->collector.last_calculated_value , (next_store_ut - first_ut) @@ -1537,12 +1671,12 @@ void rrdset_timed_done(RRDSET *st, struct timeval now, bool pending_rrdset_next) if (unlikely(rrdset_flags & RRDSET_FLAG_OBSOLETE)) { netdata_log_error("Chart '%s' has the OBSOLETE flag set, but it is collected.", rrdset_id(st)); - rrdset_isnot_obsolete(st); + rrdset_isnot_obsolete___safe_from_collector_thread(st); } // check if the chart has a long time to be updated if(unlikely(st->usec_since_last_update > MAX(st->db.entries, 60) * update_every_ut)) { - netdata_log_info("host '%s', chart '%s': took too long to be updated (counter #%u, update #%u, %0.3" NETDATA_DOUBLE_MODIFIER + nd_log_daemon(NDLP_DEBUG, "host '%s', chart '%s': took too long to be updated (counter #%u, update #%u, %0.3" NETDATA_DOUBLE_MODIFIER " secs). 
Resetting it.", rrdhost_hostname(st->rrdhost), rrdset_id(st), st->counter, st->counter_done, (NETDATA_DOUBLE)st->usec_since_last_update / USEC_PER_SEC); rrdset_reset(st); @@ -1551,7 +1685,7 @@ void rrdset_timed_done(RRDSET *st, struct timeval now, bool pending_rrdset_next) first_entry = 1; } - rrdset_debug(st, "microseconds since last update: %llu", st->usec_since_last_update); + rrdset_debug(st, "microseconds since last update: %"PRIu64"", st->usec_since_last_update); // set last_collected_time if(unlikely(!st->last_collected_time.tv_sec)) { @@ -1684,7 +1818,7 @@ void rrdset_timed_done(RRDSET *st, struct timeval now, bool pending_rrdset_next) if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE))) { netdata_log_error("Dimension %s in chart '%s' has the OBSOLETE flag set, but it is collected.", rrddim_name(rd), rrdset_id(st)); - rrddim_isnot_obsolete(st, rd); + rrddim_isnot_obsolete___safe_from_collector_thread(st, rd); } } } diff --git a/database/rrdvar.c b/database/rrdvar.c index 09c4d404dd317b..68d22abb9eb0d1 100644 --- a/database/rrdvar.c +++ b/database/rrdvar.c @@ -315,7 +315,6 @@ int health_variable_lookup(STRING *variable, RRDCALC *rc, NETDATA_DOUBLE *result struct variable2json_helper { BUFFER *buf; - size_t counter; RRDVAR_FLAGS options; }; @@ -326,47 +325,54 @@ static int single_variable2json_callback(const DICTIONARY_ITEM *item __maybe_unu if (helper->options == RRDVAR_FLAG_NONE || rrdvar_flags(rva) & helper->options) { if(unlikely(isnan(value) || isinf(value))) - buffer_sprintf(helper->buf, "%s\n\t\t\"%s\": null", helper->counter?",":"", rrdvar_name(rva)); + buffer_json_member_add_string(helper->buf, rrdvar_name(rva), NULL); else - buffer_sprintf(helper->buf, "%s\n\t\t\"%s\": %0.5" NETDATA_DOUBLE_MODIFIER, helper->counter?",":"", rrdvar_name(rva), (NETDATA_DOUBLE)value); - - helper->counter++; + buffer_json_member_add_double(helper->buf, rrdvar_name(rva), (NETDATA_DOUBLE)value); } return 0; } void health_api_v1_chart_custom_variables2json(RRDSET *st, BUFFER *buf) { - struct variable2json_helper helper = { - .buf = buf, - .counter = 0, - .options = RRDVAR_FLAG_CUSTOM_CHART_VAR}; + struct variable2json_helper helper = {.buf = buf, .options = RRDVAR_FLAG_CUSTOM_CHART_VAR}; - buffer_sprintf(buf, "{"); rrdvar_walkthrough_read(st->rrdvars, single_variable2json_callback, &helper); - buffer_strcat(buf, "\n\t\t\t}"); } void health_api_v1_chart_variables2json(RRDSET *st, BUFFER *buf) { RRDHOST *host = st->rrdhost; - struct variable2json_helper helper = { - .buf = buf, - .counter = 0, - .options = RRDVAR_FLAG_NONE}; + struct variable2json_helper helper = {.buf = buf, .options = RRDVAR_FLAG_NONE}; - buffer_sprintf(buf, "{\n\t\"chart\": \"%s\",\n\t\"chart_name\": \"%s\",\n\t\"chart_context\": \"%s\",\n\t\"chart_variables\": {", rrdset_id(st), rrdset_name(st), rrdset_context(st)); - rrdvar_walkthrough_read(st->rrdvars, single_variable2json_callback, &helper); + buffer_json_initialize(buf, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + + buffer_json_member_add_string(buf, "chart", rrdset_id(st)); + buffer_json_member_add_string(buf, "chart_name", rrdset_name(st)); + buffer_json_member_add_string(buf, "chart_context", rrdset_context(st)); + + { + buffer_json_member_add_object(buf, "chart_variables"); + rrdvar_walkthrough_read(st->rrdvars, single_variable2json_callback, &helper); + buffer_json_object_close(buf); + } - buffer_sprintf(buf, "\n\t},\n\t\"family\": \"%s\",\n\t\"family_variables\": {", rrdset_family(st)); - helper.counter = 0; - 
rrdvar_walkthrough_read(rrdfamily_rrdvars_dict(st->rrdfamily), single_variable2json_callback, &helper); + buffer_json_member_add_string(buf, "family", rrdset_family(st)); - buffer_sprintf(buf, "\n\t},\n\t\"host\": \"%s\",\n\t\"host_variables\": {", rrdhost_hostname(host)); - helper.counter = 0; - rrdvar_walkthrough_read(host->rrdvars, single_variable2json_callback, &helper); + { + buffer_json_member_add_object(buf, "family_variables"); + rrdvar_walkthrough_read(rrdfamily_rrdvars_dict(st->rrdfamily), single_variable2json_callback, &helper); + buffer_json_object_close(buf); + } + + buffer_json_member_add_string(buf, "host", rrdhost_hostname(host)); + + { + buffer_json_member_add_object(buf, "host_variables"); + rrdvar_walkthrough_read(host->rrdvars, single_variable2json_callback, &helper); + buffer_json_object_close(buf); + } - buffer_strcat(buf, "\n\t}\n}\n"); + buffer_json_finalize(buf); } // ---------------------------------------------------------------------------- diff --git a/database/sqlite/dbdata.c b/database/sqlite/dbdata.c new file mode 100644 index 00000000000000..1ad742e0482fb9 --- /dev/null +++ b/database/sqlite/dbdata.c @@ -0,0 +1,959 @@ +/* +** 2019-04-17 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains an implementation of two eponymous virtual tables, +** "sqlite_dbdata" and "sqlite_dbptr". Both modules require that the +** "sqlite_dbpage" eponymous virtual table be available. +** +** SQLITE_DBDATA: +** sqlite_dbdata is used to extract data directly from a database b-tree +** page and its associated overflow pages, bypassing the b-tree layer. +** The table schema is equivalent to: +** +** CREATE TABLE sqlite_dbdata( +** pgno INTEGER, +** cell INTEGER, +** field INTEGER, +** value ANY, +** schema TEXT HIDDEN +** ); +** +** IMPORTANT: THE VIRTUAL TABLE SCHEMA ABOVE IS SUBJECT TO CHANGE. IN THE +** FUTURE NEW NON-HIDDEN COLUMNS MAY BE ADDED BETWEEN "value" AND +** "schema". +** +** Each page of the database is inspected. If it cannot be interpreted as +** a b-tree page, or if it is a b-tree page containing 0 entries, the +** sqlite_dbdata table contains no rows for that page. Otherwise, the +** table contains one row for each field in the record associated with +** each cell on the page. For intkey b-trees, the key value is stored in +** field -1. +** +** For example, for the database: +** +** CREATE TABLE t1(a, b); -- root page is page 2 +** INSERT INTO t1(rowid, a, b) VALUES(5, 'v', 'five'); +** INSERT INTO t1(rowid, a, b) VALUES(10, 'x', 'ten'); +** +** the sqlite_dbdata table contains, as well as from entries related to +** page 1, content equivalent to: +** +** INSERT INTO sqlite_dbdata(pgno, cell, field, value) VALUES +** (2, 0, -1, 5 ), +** (2, 0, 0, 'v' ), +** (2, 0, 1, 'five'), +** (2, 1, -1, 10 ), +** (2, 1, 0, 'x' ), +** (2, 1, 1, 'ten' ); +** +** If database corruption is encountered, this module does not report an +** error. Instead, it attempts to extract as much data as possible and +** ignores the corruption. 
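For orientation, the extraction described above is driven through ordinary SQL once the module has been registered on a database connection (the registration entry point appears later in this file) and the sqlite_dbpage virtual table is available in the underlying SQLite build. The caller below is an illustrative sketch only and is not part of this patch; the helper name dump_page_cells is invented for the example, while the query and the sqlite3_* calls are the standard public API:

    /* Illustrative sketch: dump the raw cells of one b-tree page through the
     * sqlite_dbdata virtual table. Assumes the module has already been
     * registered on this connection and that sqlite_dbpage is available. */
    #include <stdio.h>
    #include "sqlite3.h"

    static int dump_page_cells(sqlite3 *db, int pgno) {  /* hypothetical helper */
        sqlite3_stmt *stmt = NULL;
        int rc = sqlite3_prepare_v2(db,
            "SELECT cell, field, quote(value) FROM sqlite_dbdata WHERE pgno = ?1",
            -1, &stmt, NULL);
        if (rc != SQLITE_OK)
            return rc;

        sqlite3_bind_int(stmt, 1, pgno);
        while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {
            /* field -1 carries the integer key (rowid) of intkey b-tree entries */
            printf("cell %d, field %d, value %s\n",
                   sqlite3_column_int(stmt, 0),
                   sqlite3_column_int(stmt, 1),
                   (const char *) sqlite3_column_text(stmt, 2));
        }
        sqlite3_finalize(stmt);
        return (rc == SQLITE_DONE) ? SQLITE_OK : rc;
    }

Constraining pgno as above is the case the module's xBestIndex method (dbdataBestIndex(), further down) is written to recognise, so only the requested page has to be decoded.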
+** +** SQLITE_DBPTR: +** The sqlite_dbptr table has the following schema: +** +** CREATE TABLE sqlite_dbptr( +** pgno INTEGER, +** child INTEGER, +** schema TEXT HIDDEN +** ); +** +** It contains one entry for each b-tree pointer between a parent and +** child page in the database. +*/ + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wimplicit-fallthrough" +#pragma GCC diagnostic ignored "-Wunused-parameter" +#if !defined(SQLITEINT_H) +#include "sqlite3.h" + +typedef unsigned char u8; +typedef unsigned int u32; + +#endif +#include +#include + +#ifndef SQLITE_OMIT_VIRTUALTABLE + +#define DBDATA_PADDING_BYTES 100 + +typedef struct DbdataTable DbdataTable; +typedef struct DbdataCursor DbdataCursor; + +/* Cursor object */ +struct DbdataCursor { + sqlite3_vtab_cursor base; /* Base class. Must be first */ + sqlite3_stmt *pStmt; /* For fetching database pages */ + + int iPgno; /* Current page number */ + u8 *aPage; /* Buffer containing page */ + int nPage; /* Size of aPage[] in bytes */ + int nCell; /* Number of cells on aPage[] */ + int iCell; /* Current cell number */ + int bOnePage; /* True to stop after one page */ + int szDb; + sqlite3_int64 iRowid; + + /* Only for the sqlite_dbdata table */ + u8 *pRec; /* Buffer containing current record */ + sqlite3_int64 nRec; /* Size of pRec[] in bytes */ + sqlite3_int64 nHdr; /* Size of header in bytes */ + int iField; /* Current field number */ + u8 *pHdrPtr; + u8 *pPtr; + u32 enc; /* Text encoding */ + + sqlite3_int64 iIntkey; /* Integer key value */ +}; + +/* Table object */ +struct DbdataTable { + sqlite3_vtab base; /* Base class. Must be first */ + sqlite3 *db; /* The database connection */ + sqlite3_stmt *pStmt; /* For fetching database pages */ + int bPtr; /* True for sqlite3_dbptr table */ +}; + +/* Column and schema definitions for sqlite_dbdata */ +#define DBDATA_COLUMN_PGNO 0 +#define DBDATA_COLUMN_CELL 1 +#define DBDATA_COLUMN_FIELD 2 +#define DBDATA_COLUMN_VALUE 3 +#define DBDATA_COLUMN_SCHEMA 4 +#define DBDATA_SCHEMA \ + "CREATE TABLE x(" \ + " pgno INTEGER," \ + " cell INTEGER," \ + " field INTEGER," \ + " value ANY," \ + " schema TEXT HIDDEN" \ + ")" + +/* Column and schema definitions for sqlite_dbptr */ +#define DBPTR_COLUMN_PGNO 0 +#define DBPTR_COLUMN_CHILD 1 +#define DBPTR_COLUMN_SCHEMA 2 +#define DBPTR_SCHEMA \ + "CREATE TABLE x(" \ + " pgno INTEGER," \ + " child INTEGER," \ + " schema TEXT HIDDEN" \ + ")" + +/* +** Connect to an sqlite_dbdata (pAux==0) or sqlite_dbptr (pAux!=0) virtual +** table. +*/ +static int dbdataConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVtab, + char **pzErr +){ + DbdataTable *pTab = 0; + int rc = sqlite3_declare_vtab(db, pAux ? DBPTR_SCHEMA : DBDATA_SCHEMA); + + (void)argc; + (void)argv; + (void)pzErr; + sqlite3_vtab_config(db, SQLITE_VTAB_USES_ALL_SCHEMAS); + if( rc==SQLITE_OK ){ + pTab = (DbdataTable*)sqlite3_malloc64(sizeof(DbdataTable)); + if( pTab==0 ){ + rc = SQLITE_NOMEM; + }else{ + memset(pTab, 0, sizeof(DbdataTable)); + pTab->db = db; + pTab->bPtr = (pAux!=0); + } + } + + *ppVtab = (sqlite3_vtab*)pTab; + return rc; +} + +/* +** Disconnect from or destroy a sqlite_dbdata or sqlite_dbptr virtual table. +*/ +static int dbdataDisconnect(sqlite3_vtab *pVtab){ + DbdataTable *pTab = (DbdataTable*)pVtab; + if( pTab ){ + sqlite3_finalize(pTab->pStmt); + sqlite3_free(pVtab); + } + return SQLITE_OK; +} + +/* +** This function interprets two types of constraints: +** +** schema=? +** pgno=? +** +** If neither are present, idxNum is set to 0. 
If schema=? is present, +** the 0x01 bit in idxNum is set. If pgno=? is present, the 0x02 bit +** in idxNum is set. +** +** If both parameters are present, schema is in position 0 and pgno in +** position 1. +*/ +static int dbdataBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdx){ + DbdataTable *pTab = (DbdataTable*)tab; + int i; + int iSchema = -1; + int iPgno = -1; + int colSchema = (pTab->bPtr ? DBPTR_COLUMN_SCHEMA : DBDATA_COLUMN_SCHEMA); + + for(i=0; inConstraint; i++){ + struct sqlite3_index_constraint *p = &pIdx->aConstraint[i]; + if( p->op==SQLITE_INDEX_CONSTRAINT_EQ ){ + if( p->iColumn==colSchema ){ + if( p->usable==0 ) return SQLITE_CONSTRAINT; + iSchema = i; + } + if( p->iColumn==DBDATA_COLUMN_PGNO && p->usable ){ + iPgno = i; + } + } + } + + if( iSchema>=0 ){ + pIdx->aConstraintUsage[iSchema].argvIndex = 1; + pIdx->aConstraintUsage[iSchema].omit = 1; + } + if( iPgno>=0 ){ + pIdx->aConstraintUsage[iPgno].argvIndex = 1 + (iSchema>=0); + pIdx->aConstraintUsage[iPgno].omit = 1; + pIdx->estimatedCost = 100; + pIdx->estimatedRows = 50; + + if( pTab->bPtr==0 && pIdx->nOrderBy && pIdx->aOrderBy[0].desc==0 ){ + int iCol = pIdx->aOrderBy[0].iColumn; + if( pIdx->nOrderBy==1 ){ + pIdx->orderByConsumed = (iCol==0 || iCol==1); + }else if( pIdx->nOrderBy==2 && pIdx->aOrderBy[1].desc==0 && iCol==0 ){ + pIdx->orderByConsumed = (pIdx->aOrderBy[1].iColumn==1); + } + } + + }else{ + pIdx->estimatedCost = 100000000; + pIdx->estimatedRows = 1000000000; + } + pIdx->idxNum = (iSchema>=0 ? 0x01 : 0x00) | (iPgno>=0 ? 0x02 : 0x00); + return SQLITE_OK; +} + +/* +** Open a new sqlite_dbdata or sqlite_dbptr cursor. +*/ +static int dbdataOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + DbdataCursor *pCsr; + + pCsr = (DbdataCursor*)sqlite3_malloc64(sizeof(DbdataCursor)); + if( pCsr==0 ){ + return SQLITE_NOMEM; + }else{ + memset(pCsr, 0, sizeof(DbdataCursor)); + pCsr->base.pVtab = pVTab; + } + + *ppCursor = (sqlite3_vtab_cursor *)pCsr; + return SQLITE_OK; +} + +/* +** Restore a cursor object to the state it was in when first allocated +** by dbdataOpen(). +*/ +static void dbdataResetCursor(DbdataCursor *pCsr){ + DbdataTable *pTab = (DbdataTable*)(pCsr->base.pVtab); + if( pTab->pStmt==0 ){ + pTab->pStmt = pCsr->pStmt; + }else{ + sqlite3_finalize(pCsr->pStmt); + } + pCsr->pStmt = 0; + pCsr->iPgno = 1; + pCsr->iCell = 0; + pCsr->iField = 0; + pCsr->bOnePage = 0; + sqlite3_free(pCsr->aPage); + sqlite3_free(pCsr->pRec); + pCsr->pRec = 0; + pCsr->aPage = 0; +} + +/* +** Close an sqlite_dbdata or sqlite_dbptr cursor. +*/ +static int dbdataClose(sqlite3_vtab_cursor *pCursor){ + DbdataCursor *pCsr = (DbdataCursor*)pCursor; + dbdataResetCursor(pCsr); + sqlite3_free(pCsr); + return SQLITE_OK; +} + +/* +** Utility methods to decode 16 and 32-bit big-endian unsigned integers. +*/ +static u32 get_uint16(unsigned char *a){ + return (a[0]<<8)|a[1]; +} +static u32 get_uint32(unsigned char *a){ + return ((u32)a[0]<<24) + | ((u32)a[1]<<16) + | ((u32)a[2]<<8) + | ((u32)a[3]); +} + +/* +** Load page pgno from the database via the sqlite_dbpage virtual table. +** If successful, set (*ppPage) to point to a buffer containing the page +** data, (*pnPage) to the size of that buffer in bytes and return +** SQLITE_OK. In this case it is the responsibility of the caller to +** eventually free the buffer using sqlite3_free(). +** +** Or, if an error occurs, set both (*ppPage) and (*pnPage) to 0 and +** return an SQLite error code. 
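The get_uint16()/get_uint32() helpers above exist because every multi-byte field in the database file format is big-endian; the cell count that dbdataNext() later reads, for instance, lives in bytes 3-4 of the page header. A standalone sketch of the same decoding, with invented header bytes:

```c
#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int u32;

/* Big-endian helpers mirroring get_uint16()/get_uint32() above. */
static u32 be16(const u8 *a){ return (a[0]<<8) | a[1]; }
static u32 be32(const u8 *a){
  return ((u32)a[0]<<24) | ((u32)a[1]<<16) | ((u32)a[2]<<8) | (u32)a[3];
}

int main(void){
  /* Invented 12-byte header of an interior table b-tree page (type 0x05):
  ** bytes 3-4 = cell count, 5-6 = start of cell content, 8-11 = right child. */
  const u8 hdr[12] = {0x05, 0,0, 0x00,0x03, 0x0f,0xe0, 0,
                      0x00,0x00,0x00,0x07};
  printf("type=0x%02x cells=%u content@%u right-child=%u\n",
         hdr[0], be16(&hdr[3]), be16(&hdr[5]), be32(&hdr[8]));
  return 0;
}
```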
+*/ +static int dbdataLoadPage( + DbdataCursor *pCsr, /* Cursor object */ + u32 pgno, /* Page number of page to load */ + u8 **ppPage, /* OUT: pointer to page buffer */ + int *pnPage /* OUT: Size of (*ppPage) in bytes */ +){ + int rc2; + int rc = SQLITE_OK; + sqlite3_stmt *pStmt = pCsr->pStmt; + + *ppPage = 0; + *pnPage = 0; + if( pgno>0 ){ + sqlite3_bind_int64(pStmt, 2, pgno); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + int nCopy = sqlite3_column_bytes(pStmt, 0); + if( nCopy>0 ){ + u8 *pPage; + pPage = (u8*)sqlite3_malloc64(nCopy + DBDATA_PADDING_BYTES); + if( pPage==0 ){ + rc = SQLITE_NOMEM; + }else{ + const u8 *pCopy = sqlite3_column_blob(pStmt, 0); + memcpy(pPage, pCopy, nCopy); + memset(&pPage[nCopy], 0, DBDATA_PADDING_BYTES); + } + *ppPage = pPage; + *pnPage = nCopy; + } + } + rc2 = sqlite3_reset(pStmt); + if( rc==SQLITE_OK ) rc = rc2; + } + + return rc; +} + +/* +** Read a varint. Put the value in *pVal and return the number of bytes. +*/ +static int dbdataGetVarint(const u8 *z, sqlite3_int64 *pVal){ + sqlite3_uint64 u = 0; + int i; + for(i=0; i<8; i++){ + u = (u<<7) + (z[i]&0x7f); + if( (z[i]&0x80)==0 ){ *pVal = (sqlite3_int64)u; return i+1; } + } + u = (u<<8) + (z[i]&0xff); + *pVal = (sqlite3_int64)u; + return 9; +} + +/* +** Like dbdataGetVarint(), but set the output to 0 if it is less than 0 +** or greater than 0xFFFFFFFF. This can be used for all varints in an +** SQLite database except for key values in intkey tables. +*/ +static int dbdataGetVarintU32(const u8 *z, sqlite3_int64 *pVal){ + sqlite3_int64 val; + int nRet = dbdataGetVarint(z, &val); + if( val<0 || val>0xFFFFFFFF ) val = 0; + *pVal = val; + return nRet; +} + +/* +** Return the number of bytes of space used by an SQLite value of type +** eType. +*/ +static int dbdataValueBytes(int eType){ + switch( eType ){ + case 0: case 8: case 9: + case 10: case 11: + return 0; + case 1: + return 1; + case 2: + return 2; + case 3: + return 3; + case 4: + return 4; + case 5: + return 6; + case 6: + case 7: + return 8; + default: + if( eType>0 ){ + return ((eType-12) / 2); + } + return 0; + } +} + +/* +** Load a value of type eType from buffer pData and use it to set the +** result of context object pCtx. 
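dbdataGetVarint() above decodes the record-format varint: seven payload bits per byte, most significant byte first, and, if the first eight bytes all have their continuation bit set, a ninth byte that supplies a full eight bits. A self-contained sketch of that decoding with one worked input:

```c
#include <stdio.h>

typedef unsigned char u8;

/* Decode an SQLite varint (same logic as dbdataGetVarint() above).
** Returns the number of bytes consumed and stores the value in *pVal. */
static int varint_decode(const u8 *z, long long *pVal){
  unsigned long long u = 0;
  int i;
  for(i=0; i<8; i++){
    u = (u<<7) | (z[i] & 0x7f);
    if( (z[i] & 0x80)==0 ){ *pVal = (long long)u; return i+1; }
  }
  u = (u<<8) | z[8];                 /* 9th byte: all 8 bits are data */
  *pVal = (long long)u;
  return 9;
}

int main(void){
  const u8 two_bytes[] = { 0x81, 0x23 };   /* (1<<7) + 0x23 = 163 */
  long long v;
  int n = varint_decode(two_bytes, &v);
  printf("consumed %d byte(s), value %lld\n", n, v);   /* prints: 2, 163 */
  return 0;
}
```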
+*/ +static void dbdataValue( + sqlite3_context *pCtx, + u32 enc, + int eType, + u8 *pData, + sqlite3_int64 nData +){ + if( eType>=0 && dbdataValueBytes(eType)<=nData ){ + switch( eType ){ + case 0: + case 10: + case 11: + sqlite3_result_null(pCtx); + break; + + case 8: + sqlite3_result_int(pCtx, 0); + break; + case 9: + sqlite3_result_int(pCtx, 1); + break; + + case 1: case 2: case 3: case 4: case 5: case 6: case 7: { + sqlite3_uint64 v = (signed char)pData[0]; + pData++; + switch( eType ){ + case 7: + case 6: v = (v<<16) + (pData[0]<<8) + pData[1]; pData += 2; + case 5: v = (v<<16) + (pData[0]<<8) + pData[1]; pData += 2; + case 4: v = (v<<8) + pData[0]; pData++; + case 3: v = (v<<8) + pData[0]; pData++; + case 2: v = (v<<8) + pData[0]; pData++; + } + + if( eType==7 ){ + double r; + memcpy(&r, &v, sizeof(r)); + sqlite3_result_double(pCtx, r); + }else{ + sqlite3_result_int64(pCtx, (sqlite3_int64)v); + } + break; + } + + default: { + int n = ((eType-12) / 2); + if( eType % 2 ){ + switch( enc ){ +#ifndef SQLITE_OMIT_UTF16 + case SQLITE_UTF16BE: + sqlite3_result_text16be(pCtx, (void*)pData, n, SQLITE_TRANSIENT); + break; + case SQLITE_UTF16LE: + sqlite3_result_text16le(pCtx, (void*)pData, n, SQLITE_TRANSIENT); + break; +#endif + default: + sqlite3_result_text(pCtx, (char*)pData, n, SQLITE_TRANSIENT); + break; + } + }else{ + sqlite3_result_blob(pCtx, pData, n, SQLITE_TRANSIENT); + } + } + } + } +} + +/* +** Move an sqlite_dbdata or sqlite_dbptr cursor to the next entry. +*/ +static int dbdataNext(sqlite3_vtab_cursor *pCursor){ + DbdataCursor *pCsr = (DbdataCursor*)pCursor; + DbdataTable *pTab = (DbdataTable*)pCursor->pVtab; + + pCsr->iRowid++; + while( 1 ){ + int rc; + int iOff = (pCsr->iPgno==1 ? 100 : 0); + int bNextPage = 0; + + if( pCsr->aPage==0 ){ + while( 1 ){ + if( pCsr->bOnePage==0 && pCsr->iPgno>pCsr->szDb ) return SQLITE_OK; + rc = dbdataLoadPage(pCsr, pCsr->iPgno, &pCsr->aPage, &pCsr->nPage); + if( rc!=SQLITE_OK ) return rc; + if( pCsr->aPage && pCsr->nPage>=256 ) break; + sqlite3_free(pCsr->aPage); + pCsr->aPage = 0; + if( pCsr->bOnePage ) return SQLITE_OK; + pCsr->iPgno++; + } + + assert( iOff+3+2<=pCsr->nPage ); + pCsr->iCell = pTab->bPtr ? -2 : 0; + pCsr->nCell = get_uint16(&pCsr->aPage[iOff+3]); + } + + if( pTab->bPtr ){ + if( pCsr->aPage[iOff]!=0x02 && pCsr->aPage[iOff]!=0x05 ){ + pCsr->iCell = pCsr->nCell; + } + pCsr->iCell++; + if( pCsr->iCell>=pCsr->nCell ){ + sqlite3_free(pCsr->aPage); + pCsr->aPage = 0; + if( pCsr->bOnePage ) return SQLITE_OK; + pCsr->iPgno++; + }else{ + return SQLITE_OK; + } + }else{ + /* If there is no record loaded, load it now. */ + if( pCsr->pRec==0 ){ + int bHasRowid = 0; + int nPointer = 0; + sqlite3_int64 nPayload = 0; + sqlite3_int64 nHdr = 0; + int iHdr; + int U, X; + int nLocal; + + switch( pCsr->aPage[iOff] ){ + case 0x02: + nPointer = 4; + break; + case 0x0a: + break; + case 0x0d: + bHasRowid = 1; + break; + default: + /* This is not a b-tree page with records on it. Continue. 
*/ + pCsr->iCell = pCsr->nCell; + break; + } + + if( pCsr->iCell>=pCsr->nCell ){ + bNextPage = 1; + }else{ + + iOff += 8 + nPointer + pCsr->iCell*2; + if( iOff>pCsr->nPage ){ + bNextPage = 1; + }else{ + iOff = get_uint16(&pCsr->aPage[iOff]); + } + + /* For an interior node cell, skip past the child-page number */ + iOff += nPointer; + + /* Load the "byte of payload including overflow" field */ + if( bNextPage || iOff>pCsr->nPage ){ + bNextPage = 1; + }else{ + iOff += dbdataGetVarintU32(&pCsr->aPage[iOff], &nPayload); + } + + /* If this is a leaf intkey cell, load the rowid */ + if( bHasRowid && !bNextPage && iOffnPage ){ + iOff += dbdataGetVarint(&pCsr->aPage[iOff], &pCsr->iIntkey); + } + + /* Figure out how much data to read from the local page */ + U = pCsr->nPage; + if( bHasRowid ){ + X = U-35; + }else{ + X = ((U-12)*64/255)-23; + } + if( nPayload<=X ){ + nLocal = nPayload; + }else{ + int M, K; + M = ((U-12)*32/255)-23; + K = M+((nPayload-M)%(U-4)); + if( K<=X ){ + nLocal = K; + }else{ + nLocal = M; + } + } + + if( bNextPage || nLocal+iOff>pCsr->nPage ){ + bNextPage = 1; + }else{ + + /* Allocate space for payload. And a bit more to catch small buffer + ** overruns caused by attempting to read a varint or similar from + ** near the end of a corrupt record. */ + pCsr->pRec = (u8*)sqlite3_malloc64(nPayload+DBDATA_PADDING_BYTES); + if( pCsr->pRec==0 ) return SQLITE_NOMEM; + memset(pCsr->pRec, 0, nPayload+DBDATA_PADDING_BYTES); + pCsr->nRec = nPayload; + + /* Load the nLocal bytes of payload */ + memcpy(pCsr->pRec, &pCsr->aPage[iOff], nLocal); + iOff += nLocal; + + /* Load content from overflow pages */ + if( nPayload>nLocal ){ + sqlite3_int64 nRem = nPayload - nLocal; + u32 pgnoOvfl = get_uint32(&pCsr->aPage[iOff]); + while( nRem>0 ){ + u8 *aOvfl = 0; + int nOvfl = 0; + int nCopy; + rc = dbdataLoadPage(pCsr, pgnoOvfl, &aOvfl, &nOvfl); + assert( rc!=SQLITE_OK || aOvfl==0 || nOvfl==pCsr->nPage ); + if( rc!=SQLITE_OK ) return rc; + if( aOvfl==0 ) break; + + nCopy = U-4; + if( nCopy>nRem ) nCopy = nRem; + memcpy(&pCsr->pRec[nPayload-nRem], &aOvfl[4], nCopy); + nRem -= nCopy; + + pgnoOvfl = get_uint32(aOvfl); + sqlite3_free(aOvfl); + } + } + + iHdr = dbdataGetVarintU32(pCsr->pRec, &nHdr); + if( nHdr>nPayload ) nHdr = 0; + pCsr->nHdr = nHdr; + pCsr->pHdrPtr = &pCsr->pRec[iHdr]; + pCsr->pPtr = &pCsr->pRec[pCsr->nHdr]; + pCsr->iField = (bHasRowid ? -1 : 0); + } + } + }else{ + pCsr->iField++; + if( pCsr->iField>0 ){ + sqlite3_int64 iType; + if( pCsr->pHdrPtr>&pCsr->pRec[pCsr->nRec] ){ + bNextPage = 1; + }else{ + int szField = 0; + pCsr->pHdrPtr += dbdataGetVarintU32(pCsr->pHdrPtr, &iType); + szField = dbdataValueBytes(iType); + if( (pCsr->nRec - (pCsr->pPtr - pCsr->pRec))pPtr = &pCsr->pRec[pCsr->nRec]; + }else{ + pCsr->pPtr += szField; + } + } + } + } + + if( bNextPage ){ + sqlite3_free(pCsr->aPage); + sqlite3_free(pCsr->pRec); + pCsr->aPage = 0; + pCsr->pRec = 0; + if( pCsr->bOnePage ) return SQLITE_OK; + pCsr->iPgno++; + }else{ + if( pCsr->iField<0 || pCsr->pHdrPtr<&pCsr->pRec[pCsr->nHdr] ){ + return SQLITE_OK; + } + + /* Advance to the next cell. The next iteration of the loop will load + ** the record and so on. */ + sqlite3_free(pCsr->pRec); + pCsr->pRec = 0; + pCsr->iCell++; + } + } + } + + assert( !"can't get here" ); + return SQLITE_OK; +} + +/* +** Return true if the cursor is at EOF. +*/ +static int dbdataEof(sqlite3_vtab_cursor *pCursor){ + DbdataCursor *pCsr = (DbdataCursor*)pCursor; + return pCsr->aPage==0; +} + +/* +** Return true if nul-terminated string zSchema ends in "()". 
Or false +** otherwise. +*/ +static int dbdataIsFunction(const char *zSchema){ + size_t n = strlen(zSchema); + if( n>2 && zSchema[n-2]=='(' && zSchema[n-1]==')' ){ + return (int)n-2; + } + return 0; +} + +/* +** Determine the size in pages of database zSchema (where zSchema is +** "main", "temp" or the name of an attached database) and set +** pCsr->szDb accordingly. If successful, return SQLITE_OK. Otherwise, +** an SQLite error code. +*/ +static int dbdataDbsize(DbdataCursor *pCsr, const char *zSchema){ + DbdataTable *pTab = (DbdataTable*)pCsr->base.pVtab; + char *zSql = 0; + int rc, rc2; + int nFunc = 0; + sqlite3_stmt *pStmt = 0; + + if( (nFunc = dbdataIsFunction(zSchema))>0 ){ + zSql = sqlite3_mprintf("SELECT %.*s(0)", nFunc, zSchema); + }else{ + zSql = sqlite3_mprintf("PRAGMA %Q.page_count", zSchema); + } + if( zSql==0 ) return SQLITE_NOMEM; + + rc = sqlite3_prepare_v2(pTab->db, zSql, -1, &pStmt, 0); + sqlite3_free(zSql); + if( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){ + pCsr->szDb = sqlite3_column_int(pStmt, 0); + } + rc2 = sqlite3_finalize(pStmt); + if( rc==SQLITE_OK ) rc = rc2; + return rc; +} + +/* +** Attempt to figure out the encoding of the database by retrieving page 1 +** and inspecting the header field. If successful, set the pCsr->enc variable +** and return SQLITE_OK. Otherwise, return an SQLite error code. +*/ +static int dbdataGetEncoding(DbdataCursor *pCsr){ + int rc = SQLITE_OK; + int nPg1 = 0; + u8 *aPg1 = 0; + rc = dbdataLoadPage(pCsr, 1, &aPg1, &nPg1); + if( rc==SQLITE_OK && nPg1>=(56+4) ){ + pCsr->enc = get_uint32(&aPg1[56]); + } + sqlite3_free(aPg1); + return rc; +} + + +/* +** xFilter method for sqlite_dbdata and sqlite_dbptr. +*/ +static int dbdataFilter( + sqlite3_vtab_cursor *pCursor, + int idxNum, const char *idxStr, + int argc, sqlite3_value **argv +){ + DbdataCursor *pCsr = (DbdataCursor*)pCursor; + DbdataTable *pTab = (DbdataTable*)pCursor->pVtab; + int rc = SQLITE_OK; + const char *zSchema = "main"; + (void)idxStr; + (void)argc; + + dbdataResetCursor(pCsr); + assert( pCsr->iPgno==1 ); + if( idxNum & 0x01 ){ + zSchema = (const char*)sqlite3_value_text(argv[0]); + if( zSchema==0 ) zSchema = ""; + } + if( idxNum & 0x02 ){ + pCsr->iPgno = sqlite3_value_int(argv[(idxNum & 0x01)]); + pCsr->bOnePage = 1; + }else{ + rc = dbdataDbsize(pCsr, zSchema); + } + + if( rc==SQLITE_OK ){ + int nFunc = 0; + if( pTab->pStmt ){ + pCsr->pStmt = pTab->pStmt; + pTab->pStmt = 0; + }else if( (nFunc = dbdataIsFunction(zSchema))>0 ){ + char *zSql = sqlite3_mprintf("SELECT %.*s(?2)", nFunc, zSchema); + if( zSql==0 ){ + rc = SQLITE_NOMEM; + }else{ + rc = sqlite3_prepare_v2(pTab->db, zSql, -1, &pCsr->pStmt, 0); + sqlite3_free(zSql); + } + }else{ + rc = sqlite3_prepare_v2(pTab->db, + "SELECT data FROM sqlite_dbpage(?) WHERE pgno=?", -1, + &pCsr->pStmt, 0 + ); + } + } + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_text(pCsr->pStmt, 1, zSchema, -1, SQLITE_TRANSIENT); + } + + /* Try to determine the encoding of the db by inspecting the header + ** field on page 1. */ + if( rc==SQLITE_OK ){ + rc = dbdataGetEncoding(pCsr); + } + + if( rc!=SQLITE_OK ){ + pTab->base.zErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(pTab->db)); + } + + if( rc==SQLITE_OK ){ + rc = dbdataNext(pCursor); + } + return rc; +} + +/* +** Return a column for the sqlite_dbdata or sqlite_dbptr table. 
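The U/X/M/K arithmetic in dbdataNext() above is the b-tree rule for how much of a cell's payload stays on the page before the remainder spills to overflow pages. A small sketch that evaluates those formulas for one invented case (usable page size 4096, rowid-table leaf, 10000-byte payload):

```c
#include <stdio.h>

/* Local-payload calculation as in dbdataNext() above. A rowid-table leaf
** uses X = U-35; index pages would use X = ((U-12)*64/255)-23. */
static int local_payload(int U, long long nPayload, int bHasRowid){
  int X = bHasRowid ? U-35 : ((U-12)*64/255)-23;
  if( nPayload<=X ) return (int)nPayload;
  {
    int M = ((U-12)*32/255)-23;
    int K = M + (int)((nPayload-M) % (U-4));
    return K<=X ? K : M;
  }
}

int main(void){
  /* U=4096, rowid table, 10000-byte payload:
  ** X=4061, M=489, K=489+(9511%4092)=1816 -> 1816 bytes stay local. */
  printf("nLocal = %d\n", local_payload(4096, 10000, 1));
  return 0;
}
```

For that case 1816 bytes stay local and the remaining 8184 bytes come back from exactly two overflow pages, since each overflow page carries U-4 = 4092 payload bytes, which is what the dbdataLoadPage() loop above walks.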
+*/ +static int dbdataColumn( + sqlite3_vtab_cursor *pCursor, + sqlite3_context *ctx, + int i +){ + DbdataCursor *pCsr = (DbdataCursor*)pCursor; + DbdataTable *pTab = (DbdataTable*)pCursor->pVtab; + if( pTab->bPtr ){ + switch( i ){ + case DBPTR_COLUMN_PGNO: + sqlite3_result_int64(ctx, pCsr->iPgno); + break; + case DBPTR_COLUMN_CHILD: { + int iOff = pCsr->iPgno==1 ? 100 : 0; + if( pCsr->iCell<0 ){ + iOff += 8; + }else{ + iOff += 12 + pCsr->iCell*2; + if( iOff>pCsr->nPage ) return SQLITE_OK; + iOff = get_uint16(&pCsr->aPage[iOff]); + } + if( iOff<=pCsr->nPage ){ + sqlite3_result_int64(ctx, get_uint32(&pCsr->aPage[iOff])); + } + break; + } + } + }else{ + switch( i ){ + case DBDATA_COLUMN_PGNO: + sqlite3_result_int64(ctx, pCsr->iPgno); + break; + case DBDATA_COLUMN_CELL: + sqlite3_result_int(ctx, pCsr->iCell); + break; + case DBDATA_COLUMN_FIELD: + sqlite3_result_int(ctx, pCsr->iField); + break; + case DBDATA_COLUMN_VALUE: { + if( pCsr->iField<0 ){ + sqlite3_result_int64(ctx, pCsr->iIntkey); + }else if( &pCsr->pRec[pCsr->nRec] >= pCsr->pPtr ){ + sqlite3_int64 iType; + dbdataGetVarintU32(pCsr->pHdrPtr, &iType); + dbdataValue( + ctx, pCsr->enc, iType, pCsr->pPtr, + &pCsr->pRec[pCsr->nRec] - pCsr->pPtr + ); + } + break; + } + } + } + return SQLITE_OK; +} + +/* +** Return the rowid for an sqlite_dbdata or sqlite_dptr table. +*/ +static int dbdataRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ + DbdataCursor *pCsr = (DbdataCursor*)pCursor; + *pRowid = pCsr->iRowid; + return SQLITE_OK; +} + + +/* +** Invoke this routine to register the "sqlite_dbdata" virtual table module +*/ +static int sqlite3DbdataRegister(sqlite3 *db){ + static sqlite3_module dbdata_module = { + 0, /* iVersion */ + 0, /* xCreate */ + dbdataConnect, /* xConnect */ + dbdataBestIndex, /* xBestIndex */ + dbdataDisconnect, /* xDisconnect */ + 0, /* xDestroy */ + dbdataOpen, /* xOpen - open a cursor */ + dbdataClose, /* xClose - close a cursor */ + dbdataFilter, /* xFilter - configure scan constraints */ + dbdataNext, /* xNext - advance a cursor */ + dbdataEof, /* xEof - check for end of scan */ + dbdataColumn, /* xColumn - read data */ + dbdataRowid, /* xRowid - read data */ + 0, /* xUpdate */ + 0, /* xBegin */ + 0, /* xSync */ + 0, /* xCommit */ + 0, /* xRollback */ + 0, /* xFindMethod */ + 0, /* xRename */ + 0, /* xSavepoint */ + 0, /* xRelease */ + 0, /* xRollbackTo */ + 0 /* xShadowName */ + }; + + int rc = sqlite3_create_module(db, "sqlite_dbdata", &dbdata_module, 0); + if( rc==SQLITE_OK ){ + rc = sqlite3_create_module(db, "sqlite_dbptr", &dbdata_module, (void*)1); + } + return rc; +} + +int sqlite3_dbdata_init( + sqlite3 *db, + char **pzErrMsg, + const sqlite3_api_routines *pApi +){ + (void)pzErrMsg; + return sqlite3DbdataRegister(db); +} + +#endif /* ifndef SQLITE_OMIT_VIRTUALTABLE */ +#pragma GCC diagnostic pop diff --git a/database/sqlite/sqlite3.c b/database/sqlite/sqlite3.c index 005aab85a968d3..da8c38d091b4d1 100644 --- a/database/sqlite/sqlite3.c +++ b/database/sqlite/sqlite3.c @@ -1,6 +1,6 @@ /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.41.2. By combining all the individual C code files into this +** version 3.42.0. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. 
Performance improvements @@ -17,6 +17,9 @@ ** language. The code for the "sqlite3" command-line shell is also in a ** separate file. This file contains only code for the core SQLite library. */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wimplicit-fallthrough" +#pragma GCC diagnostic ignored "-Wunused-parameter" #define SQLITE_CORE 1 #define SQLITE_AMALGAMATION 1 #ifndef SQLITE_PRIVATE @@ -26,6 +29,7 @@ #define SQLITE_ENABLE_UPDATE_DELETE_LIMIT 1 #define SQLITE_OMIT_LOAD_EXTENSION 1 #define SQLITE_ENABLE_DBSTAT_VTAB 1 +#define SQLITE_ENABLE_DBPAGE_VTAB 1 /************** Begin file sqliteInt.h ***************************************/ /* ** 2001 September 15 @@ -127,6 +131,10 @@ #define SQLITE_4_BYTE_ALIGNED_MALLOC #endif /* defined(_MSC_VER) && !defined(_WIN64) */ +#if !defined(HAVE_LOG2) && defined(_MSC_VER) && _MSC_VER<1800 +#define HAVE_LOG2 0 +#endif /* !defined(HAVE_LOG2) && defined(_MSC_VER) && _MSC_VER<1800 */ + #endif /* SQLITE_MSVC_H */ /************** End of msvc.h ************************************************/ @@ -456,9 +464,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.41.2" -#define SQLITE_VERSION_NUMBER 3041002 -#define SQLITE_SOURCE_ID "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" +#define SQLITE_VERSION "3.42.0" +#define SQLITE_VERSION_NUMBER 3042000 +#define SQLITE_SOURCE_ID "2023-05-16 12:36:15 831d0fb2836b71c9bc51067c49fee4b8f18047814f2ff22d817d25195cf350b0" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -1965,20 +1973,23 @@ SQLITE_API int sqlite3_os_end(void); ** must ensure that no other SQLite interfaces are invoked by other ** threads while sqlite3_config() is running. ** -** The sqlite3_config() interface -** may only be invoked prior to library initialization using -** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. -** ^If sqlite3_config() is called after [sqlite3_initialize()] and before -** [sqlite3_shutdown()] then it will return SQLITE_MISUSE. -** Note, however, that ^sqlite3_config() can be called as part of the -** implementation of an application-defined [sqlite3_os_init()]. -** ** The first argument to sqlite3_config() is an integer ** [configuration option] that determines ** what property of SQLite is to be configured. Subsequent arguments ** vary depending on the [configuration option] ** in the first argument. ** +** For most configuration options, the sqlite3_config() interface +** may only be invoked prior to library initialization using +** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. +** The exceptional configuration options that may be invoked at any time +** are called "anytime configuration options". +** ^If sqlite3_config() is called after [sqlite3_initialize()] and before +** [sqlite3_shutdown()] with a first argument that is not an anytime +** configuration option, then the sqlite3_config() call will return SQLITE_MISUSE. +** Note, however, that ^sqlite3_config() can be called as part of the +** implementation of an application-defined [sqlite3_os_init()]. +** ** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. ** ^If the option is unknown or SQLite is unable to set the option ** then this routine returns a non-zero [error code]. 
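The reworded sqlite3_config() paragraphs above introduce the notion of "anytime configuration options". A hedged sketch of the difference: SQLITE_CONFIG_LOG is accepted after sqlite3_initialize(), while a threading-mode option at that point should come back as SQLITE_MISUSE.

```c
#include <stdio.h>
#include "sqlite3.h"

static void log_cb(void *arg, int errcode, const char *msg){
  (void)arg;
  fprintf(stderr, "sqlite(%d): %s\n", errcode, msg);
}

int main(void){
  sqlite3_initialize();

  /* SQLITE_CONFIG_LOG is an "anytime" option: accepted even after init. */
  int rc1 = sqlite3_config(SQLITE_CONFIG_LOG, log_cb, NULL);

  /* Threading mode is not: with the library initialized this is refused. */
  int rc2 = sqlite3_config(SQLITE_CONFIG_MULTITHREAD);

  printf("CONFIG_LOG -> %d, CONFIG_MULTITHREAD -> %d (21 == SQLITE_MISUSE)\n",
         rc1, rc2);
  sqlite3_shutdown();
  return 0;
}
```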
@@ -2086,6 +2097,23 @@ struct sqlite3_mem_methods { ** These constants are the available integer configuration options that ** can be passed as the first argument to the [sqlite3_config()] interface. ** +** Most of the configuration options for sqlite3_config() +** will only work if invoked prior to [sqlite3_initialize()] or after +** [sqlite3_shutdown()]. The few exceptions to this rule are called +** "anytime configuration options". +** ^Calling [sqlite3_config()] with a first argument that is not an +** anytime configuration option in between calls to [sqlite3_initialize()] and +** [sqlite3_shutdown()] is a no-op that returns SQLITE_MISUSE. +** +** The set of anytime configuration options can change (by insertions +** and/or deletions) from one release of SQLite to the next. +** As of SQLite version 3.42.0, the complete set of anytime configuration +** options is: +**
<ul> +** <li> SQLITE_CONFIG_LOG +** <li> SQLITE_CONFIG_PCACHE_HDRSZ +** </ul>
+** ** New configuration options may be added in future releases of SQLite. ** Existing configuration options might be discontinued. Applications ** should check the return code from [sqlite3_config()] to make sure that @@ -2432,28 +2460,28 @@ struct sqlite3_mem_methods { ** compile-time option is not set, then the default maximum is 1073741824. ** */ -#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ -#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ -#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ -#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ -#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ -#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ -#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ -#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ -#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ -/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ -#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ -#define SQLITE_CONFIG_PCACHE 14 /* no-op */ -#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ -#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ -#define SQLITE_CONFIG_URI 17 /* int */ -#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ -#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ +#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ +#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ +#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ +#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ +#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ +#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ +#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ +#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ +/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ +#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ +#define SQLITE_CONFIG_PCACHE 14 /* no-op */ +#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ +#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ +#define SQLITE_CONFIG_URI 17 /* int */ +#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ #define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */ -#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ -#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ +#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ +#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ #define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */ #define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */ #define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */ @@ -2688,7 +2716,7 @@ struct sqlite3_mem_methods { ** ** ** [[SQLITE_DBCONFIG_DQS_DML]] -**
<dt>SQLITE_DBCONFIG_DQS_DML +** <dt>SQLITE_DBCONFIG_DQS_DML</dt> ** <dd>
The SQLITE_DBCONFIG_DQS_DML option activates or deactivates ** the legacy [double-quoted string literal] misfeature for DML statements ** only, that is DELETE, INSERT, SELECT, and UPDATE statements. The @@ -2697,7 +2725,7 @@ struct sqlite3_mem_methods { **
** ** [[SQLITE_DBCONFIG_DQS_DDL]] -**
<dt>SQLITE_DBCONFIG_DQS_DDL +** <dt>SQLITE_DBCONFIG_DQS_DDL</dt> ** <dd>
The SQLITE_DBCONFIG_DQS option activates or deactivates ** the legacy [double-quoted string literal] misfeature for DDL statements, ** such as CREATE TABLE and CREATE INDEX. The @@ -2706,7 +2734,7 @@ struct sqlite3_mem_methods { **
** ** [[SQLITE_DBCONFIG_TRUSTED_SCHEMA]] -**
<dt>SQLITE_DBCONFIG_TRUSTED_SCHEMA +** <dt>SQLITE_DBCONFIG_TRUSTED_SCHEMA</dt> ** <dd>
The SQLITE_DBCONFIG_TRUSTED_SCHEMA option tells SQLite to ** assume that database schemas are untainted by malicious content. ** When the SQLITE_DBCONFIG_TRUSTED_SCHEMA option is disabled, SQLite @@ -2726,7 +2754,7 @@ struct sqlite3_mem_methods { **
** ** [[SQLITE_DBCONFIG_LEGACY_FILE_FORMAT]] -**
<dt>SQLITE_DBCONFIG_LEGACY_FILE_FORMAT +** <dt>SQLITE_DBCONFIG_LEGACY_FILE_FORMAT</dt> ** <dd>
The SQLITE_DBCONFIG_LEGACY_FILE_FORMAT option activates or deactivates ** the legacy file format flag. When activated, this flag causes all newly ** created database file to have a schema format version number (the 4-byte @@ -2735,7 +2763,7 @@ struct sqlite3_mem_methods { ** any SQLite version back to 3.0.0 ([dateof:3.0.0]). Without this setting, ** newly created databases are generally not understandable by SQLite versions ** prior to 3.3.0 ([dateof:3.3.0]). As these words are written, there -** is now scarcely any need to generated database files that are compatible +** is now scarcely any need to generate database files that are compatible ** all the way back to version 3.0.0, and so this setting is of little ** practical use, but is provided so that SQLite can continue to claim the ** ability to generate new database files that are compatible with version @@ -2746,6 +2774,38 @@ struct sqlite3_mem_methods { ** not considered a bug since SQLite versions 3.3.0 and earlier do not support ** either generated columns or decending indexes. **
+** +** [[SQLITE_DBCONFIG_STMT_SCANSTATUS]] +**
<dt>SQLITE_DBCONFIG_STMT_SCANSTATUS</dt> +** <dd>
The SQLITE_DBCONFIG_STMT_SCANSTATUS option is only useful in +** SQLITE_ENABLE_STMT_SCANSTATUS builds. In this case, it sets or clears +** a flag that enables collection of the sqlite3_stmt_scanstatus_v2() +** statistics. For statistics to be collected, the flag must be set on +** the database handle both when the SQL statement is prepared and when it +** is stepped. The flag is set (collection of statistics is enabled) +** by default. This option takes two arguments: an integer and a pointer to +** an integer.. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the statement scanstatus option. If the second argument +** is not NULL, then the value of the statement scanstatus setting after +** processing the first argument is written into the integer that the second +** argument points to. +**
+** +** [[SQLITE_DBCONFIG_REVERSE_SCANORDER]] +**
<dt>SQLITE_DBCONFIG_REVERSE_SCANORDER</dt> +** <dd>
The SQLITE_DBCONFIG_REVERSE_SCANORDER option changes the default order +** in which tables and indexes are scanned so that the scans start at the end +** and work toward the beginning rather than starting at the beginning and +** working toward the end. Setting SQLITE_DBCONFIG_REVERSE_SCANORDER is the +** same as setting [PRAGMA reverse_unordered_selects]. This option takes +** two arguments which are an integer and a pointer to an integer. The first +** argument is 1, 0, or -1 to enable, disable, or leave unchanged the +** reverse scan order flag, respectively. If the second argument is not NULL, +** then 0 or 1 is written into the integer that the second argument points to +** depending on if the reverse scan order flag is set after processing the +** first argument. +**
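Both new database-connection options described above follow the usual two-argument sqlite3_db_config() pattern: an int that is 1, 0 or -1 to set, clear or query, plus an optional int* that receives the resulting state. A minimal sketch:

```c
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db = NULL;
  int scan = -1, rev = -1;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

  /* Query the current stmt_scanstatus collection flag without changing it
  ** (only meaningful in SQLITE_ENABLE_STMT_SCANSTATUS builds). */
  sqlite3_db_config(db, SQLITE_DBCONFIG_STMT_SCANSTATUS, -1, &scan);

  /* Turn on reverse scan order, equivalent to
  ** PRAGMA reverse_unordered_selects=1, and read back the new state. */
  sqlite3_db_config(db, SQLITE_DBCONFIG_REVERSE_SCANORDER, 1, &rev);

  printf("stmt_scanstatus=%d reverse_scanorder=%d\n", scan, rev);
  sqlite3_close(db);
  return 0;
}
```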
+** ** */ #define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */ @@ -2766,7 +2826,9 @@ struct sqlite3_mem_methods { #define SQLITE_DBCONFIG_ENABLE_VIEW 1015 /* int int* */ #define SQLITE_DBCONFIG_LEGACY_FILE_FORMAT 1016 /* int int* */ #define SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017 /* int int* */ -#define SQLITE_DBCONFIG_MAX 1017 /* Largest DBCONFIG */ +#define SQLITE_DBCONFIG_STMT_SCANSTATUS 1018 /* int int* */ +#define SQLITE_DBCONFIG_REVERSE_SCANORDER 1019 /* int int* */ +#define SQLITE_DBCONFIG_MAX 1019 /* Largest DBCONFIG */ /* ** CAPI3REF: Enable Or Disable Extended Result Codes @@ -6511,6 +6573,13 @@ SQLITE_API void sqlite3_activate_cerod( ** of the default VFS is not implemented correctly, or not implemented at ** all, then the behavior of sqlite3_sleep() may deviate from the description ** in the previous paragraphs. +** +** If a negative argument is passed to sqlite3_sleep() the results vary by +** VFS and operating system. Some system treat a negative argument as an +** instruction to sleep forever. Others understand it to mean do not sleep +** at all. ^In SQLite version 3.42.0 and later, a negative +** argument passed into sqlite3_sleep() is changed to zero before it is relayed +** down into the xSleep method of the VFS. */ SQLITE_API int sqlite3_sleep(int); @@ -8138,9 +8207,9 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); ** is undefined if the mutex is not currently entered by the ** calling thread or is not currently allocated. ** -** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or -** sqlite3_mutex_leave() is a NULL pointer, then all three routines -** behave as no-ops. +** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), +** sqlite3_mutex_leave(), or sqlite3_mutex_free() is a NULL pointer, +** then any of the four routines behaves as a no-op. ** ** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()]. */ @@ -9874,18 +9943,28 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); ** [[SQLITE_VTAB_INNOCUOUS]]
<dt>SQLITE_VTAB_INNOCUOUS</dt> ** <dd>
Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_INNOCUOUS) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implmentation +** the [xConnect] or [xCreate] methods of a [virtual table] implementation ** identify that virtual table as being safe to use from within triggers ** and views. Conceptually, the SQLITE_VTAB_INNOCUOUS tag means that the ** virtual table can do no serious harm even if it is controlled by a ** malicious hacker. Developers should avoid setting the SQLITE_VTAB_INNOCUOUS ** flag unless absolutely necessary. **
+** +** [[SQLITE_VTAB_USES_ALL_SCHEMAS]]
<dt>SQLITE_VTAB_USES_ALL_SCHEMAS</dt> +** <dd>
Calls of the form +** [sqlite3_vtab_config](db,SQLITE_VTAB_USES_ALL_SCHEMA) from within the +** the [xConnect] or [xCreate] methods of a [virtual table] implementation +** instruct the query planner to begin at least a read transaction on +** all schemas ("main", "temp", and any ATTACH-ed databases) whenever the +** virtual table is used. +**
** */ #define SQLITE_VTAB_CONSTRAINT_SUPPORT 1 #define SQLITE_VTAB_INNOCUOUS 2 #define SQLITE_VTAB_DIRECTONLY 3 +#define SQLITE_VTAB_USES_ALL_SCHEMAS 4 /* ** CAPI3REF: Determine The Virtual Table Conflict Policy @@ -11060,16 +11139,20 @@ SQLITE_API int sqlite3session_create( SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); /* -** CAPIREF: Conigure a Session Object +** CAPI3REF: Configure a Session Object ** METHOD: sqlite3_session ** ** This method is used to configure a session object after it has been -** created. At present the only valid value for the second parameter is -** [SQLITE_SESSION_OBJCONFIG_SIZE]. +** created. At present the only valid values for the second parameter are +** [SQLITE_SESSION_OBJCONFIG_SIZE] and [SQLITE_SESSION_OBJCONFIG_ROWID]. ** -** Arguments for sqlite3session_object_config() +*/ +SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); + +/* +** CAPI3REF: Options for sqlite3session_object_config ** -** The following values may passed as the the 4th parameter to +** The following values may passed as the the 2nd parameter to ** sqlite3session_object_config(). ** **
<dt>SQLITE_SESSION_OBJCONFIG_SIZE <dd>
@@ -11085,12 +11168,21 @@ SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); ** ** It is an error (SQLITE_MISUSE) to attempt to modify this setting after ** the first table has been attached to the session object. +** +**
<dt>SQLITE_SESSION_OBJCONFIG_ROWID <dd>
+** This option is used to set, clear or query the flag that enables +** collection of data for tables with no explicit PRIMARY KEY. +** +** Normally, tables with no explicit PRIMARY KEY are simply ignored +** by the sessions module. However, if this flag is set, it behaves +** as if such tables have a column "_rowid_ INTEGER PRIMARY KEY" inserted +** as their leftmost columns. +** +** It is an error (SQLITE_MISUSE) to attempt to modify this setting after +** the first table has been attached to the session object. */ -SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); - -/* -*/ -#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_ROWID 2 /* ** CAPI3REF: Enable Or Disable A Session Object @@ -12223,9 +12315,23 @@ SQLITE_API int sqlite3changeset_apply_v2( ** Invert the changeset before applying it. This is equivalent to inverting ** a changeset using sqlite3changeset_invert() before applying it. It is ** an error to specify this flag with a patchset. +** +**
<dt>SQLITE_CHANGESETAPPLY_IGNORENOOP <dd>
+** Do not invoke the conflict handler callback for any changes that +** would not actually modify the database even if they were applied. +** Specifically, this means that the conflict handler is not invoked +** for: +**
<ul> +** <li> a delete change if the row being deleted cannot be found, +** <li> an update change if the modified fields are already set to +** their new values in the conflicting row, or +** <li> an insert change if all fields of the conflicting row match +** the row being inserted. +** </ul>
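A sketch of the new flag in action, under the assumption of a build with SQLITE_ENABLE_SESSION and SQLITE_ENABLE_PREUPDATE_HOOK (error handling trimmed): the changeset captured below is applied twice, and on the second application every change is a no-op per the list above, so with SQLITE_CHANGESETAPPLY_IGNORENOOP the conflict handler is never invoked.

```c
#include <stdio.h>
#include "sqlite3.h"

static int on_conflict(void *p, int e, sqlite3_changeset_iter *it){
  (void)p; (void)it;
  printf("conflict handler called (eConflict=%d)\n", e);
  return SQLITE_CHANGESET_OMIT;
}

int main(void){
  sqlite3 *src, *dst;
  sqlite3_session *sess;
  void *cs = NULL;
  int n = 0;

  sqlite3_open(":memory:", &src);
  sqlite3_open(":memory:", &dst);
  sqlite3_exec(src, "CREATE TABLE t(id INTEGER PRIMARY KEY, v TEXT)", 0, 0, 0);
  sqlite3_exec(dst, "CREATE TABLE t(id INTEGER PRIMARY KEY, v TEXT)", 0, 0, 0);

  sqlite3session_create(src, "main", &sess);
  sqlite3session_attach(sess, "t");
  sqlite3_exec(src, "INSERT INTO t VALUES(1,'one')", 0, 0, 0);
  sqlite3session_changeset(sess, &n, &cs);

  /* First apply inserts the row; the second apply is a pure no-op, and with
  ** SQLITE_CHANGESETAPPLY_IGNORENOOP the handler above stays silent. */
  sqlite3changeset_apply_v2(dst, n, cs, 0, on_conflict, 0, 0, 0,
                            SQLITE_CHANGESETAPPLY_IGNORENOOP);
  sqlite3changeset_apply_v2(dst, n, cs, 0, on_conflict, 0, 0, 0,
                            SQLITE_CHANGESETAPPLY_IGNORENOOP);

  sqlite3_free(cs);
  sqlite3session_delete(sess);
  sqlite3_close(src);
  sqlite3_close(dst);
  return 0;
}
```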
*/ #define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001 #define SQLITE_CHANGESETAPPLY_INVERT 0x0002 +#define SQLITE_CHANGESETAPPLY_IGNORENOOP 0x0004 /* ** CAPI3REF: Constants Passed To The Conflict Handler @@ -13522,8 +13628,8 @@ struct fts5_api { #endif /* -** WAL mode depends on atomic aligned 32-bit loads and stores in a few -** places. The following macros try to make this explicit. +** A few places in the code require atomic load/store of aligned +** integer values. */ #ifndef __has_extension # define __has_extension(x) 0 /* compatibility with non-clang compilers */ @@ -13579,15 +13685,22 @@ struct fts5_api { #endif /* -** A macro to hint to the compiler that a function should not be +** Macros to hint to the compiler that a function should or should not be ** inlined. */ #if defined(__GNUC__) # define SQLITE_NOINLINE __attribute__((noinline)) +# define SQLITE_INLINE __attribute__((always_inline)) inline #elif defined(_MSC_VER) && _MSC_VER>=1310 # define SQLITE_NOINLINE __declspec(noinline) +# define SQLITE_INLINE __forceinline #else # define SQLITE_NOINLINE +# define SQLITE_INLINE +#endif +#if defined(SQLITE_COVERAGE_TEST) || defined(__STRICT_ANSI__) +# undef SQLITE_INLINE +# define SQLITE_INLINE #endif /* @@ -16548,6 +16661,10 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatusCounters(Vdbe*, int, int, int); SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE*, int, VdbeOp*); #endif +#if defined(SQLITE_ENABLE_CURSOR_HINTS) && defined(SQLITE_DEBUG) +SQLITE_PRIVATE int sqlite3CursorRangeHintExprCheck(Walker *pWalker, Expr *pExpr); +#endif + #endif /* SQLITE_VDBE_H */ /************** End of vdbe.h ************************************************/ @@ -17257,7 +17374,7 @@ struct sqlite3 { #define SQLITE_NullCallback 0x00000100 /* Invoke the callback once if the */ /* result set is empty */ #define SQLITE_IgnoreChecks 0x00000200 /* Do not enforce check constraints */ -#define SQLITE_ReadUncommit 0x00000400 /* READ UNCOMMITTED in shared-cache */ +#define SQLITE_StmtScanStatus 0x00000400 /* Enable stmt_scanstats() counters */ #define SQLITE_NoCkptOnClose 0x00000800 /* No checkpoint on close()/DETACH */ #define SQLITE_ReverseOrder 0x00001000 /* Reverse unordered SELECTs */ #define SQLITE_RecTriggers 0x00002000 /* Enable recursive triggers */ @@ -17283,6 +17400,7 @@ struct sqlite3 { /* DELETE, or UPDATE and return */ /* the count using a callback. 
*/ #define SQLITE_CorruptRdOnly HI(0x00002) /* Prohibit writes due to error */ +#define SQLITE_ReadUncommit HI(0x00004) /* READ UNCOMMITTED in shared-cache */ /* Flags used only if debugging */ #ifdef SQLITE_DEBUG @@ -17339,6 +17457,7 @@ struct sqlite3 { /* TH3 expects this value ^^^^^^^^^^ See flatten04.test */ #define SQLITE_IndexedExpr 0x01000000 /* Pull exprs from index when able */ #define SQLITE_Coroutines 0x02000000 /* Co-routines for subqueries */ +#define SQLITE_NullUnusedCols 0x04000000 /* NULL unused columns in subqueries */ #define SQLITE_AllOpts 0xffffffff /* All optimizations */ /* @@ -17810,6 +17929,7 @@ struct VTable { sqlite3_vtab *pVtab; /* Pointer to vtab instance */ int nRef; /* Number of pointers to this structure */ u8 bConstraint; /* True if constraints are supported */ + u8 bAllSchemas; /* True if might use any attached schema */ u8 eVtabRisk; /* Riskiness of allowing hacker access */ int iSavepoint; /* Depth of the SAVEPOINT stack */ VTable *pNext; /* Next in linked list (see above) */ @@ -18190,6 +18310,7 @@ struct Index { ** expression, or a reference to a VIRTUAL column */ #ifdef SQLITE_ENABLE_STAT4 int nSample; /* Number of elements in aSample[] */ + int mxSample; /* Number of slots allocated to aSample[] */ int nSampleCol; /* Size of IndexSample.anEq[] and so on */ tRowcnt *aAvgEq; /* Average nEq values for keys not in aSample */ IndexSample *aSample; /* Samples of the left-most key */ @@ -19676,6 +19797,7 @@ struct Walker { struct CoveringIndexCheck *pCovIdxCk; /* Check for covering index */ SrcItem *pSrcItem; /* A single FROM clause item */ DbFixer *pFix; /* See sqlite3FixSelect() */ + Mem *aMem; /* See sqlite3BtreeCursorHint() */ } u; }; @@ -19945,6 +20067,8 @@ SQLITE_PRIVATE int sqlite3CorruptPgnoError(int,Pgno); # define sqlite3Isxdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x08) # define sqlite3Tolower(x) (sqlite3UpperToLower[(unsigned char)(x)]) # define sqlite3Isquote(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x80) +# define sqlite3JsonId1(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x42) +# define sqlite3JsonId2(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x46) #else # define sqlite3Toupper(x) toupper((unsigned char)(x)) # define sqlite3Isspace(x) isspace((unsigned char)(x)) @@ -19954,6 +20078,8 @@ SQLITE_PRIVATE int sqlite3CorruptPgnoError(int,Pgno); # define sqlite3Isxdigit(x) isxdigit((unsigned char)(x)) # define sqlite3Tolower(x) tolower((unsigned char)(x)) # define sqlite3Isquote(x) ((x)=='"'||(x)=='\''||(x)=='['||(x)=='`') +# define sqlite3JsonId1(x) (sqlite3IsIdChar(x)&&(x)<'0') +# define sqlite3JsonId2(x) sqlite3IsIdChar(x) #endif SQLITE_PRIVATE int sqlite3IsIdChar(u8); @@ -20147,6 +20273,10 @@ SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse*,int); SQLITE_PRIVATE int sqlite3GetTempRange(Parse*,int); SQLITE_PRIVATE void sqlite3ReleaseTempRange(Parse*,int,int); SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse*); +SQLITE_PRIVATE void sqlite3TouchRegister(Parse*,int); +#if defined(SQLITE_ENABLE_STAT4) || defined(SQLITE_DEBUG) +SQLITE_PRIVATE int sqlite3FirstAvailableRegister(Parse*,int); +#endif #ifdef SQLITE_DEBUG SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse*,int,int); #endif @@ -20297,7 +20427,7 @@ SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList Expr*,ExprList*,u32,Expr*); SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3*, Select*); SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse*, SrcList*); -SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, int); +SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, Trigger*); 
SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int); #if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) SQLITE_PRIVATE Expr *sqlite3LimitWhere(Parse*,SrcList*,Expr*,ExprList*,Expr*,char*); @@ -20386,7 +20516,7 @@ SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*); SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*, u8); SQLITE_PRIVATE int sqlite3ExprIsConstantOrGroupBy(Parse*, Expr*, ExprList*); SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr*,int); -SQLITE_PRIVATE int sqlite3ExprIsTableConstraint(Expr*,const SrcItem*); +SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint(Expr*,const SrcList*,int); #ifdef SQLITE_ENABLE_CURSOR_HINTS SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr*); #endif @@ -20834,10 +20964,7 @@ SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3*, int, const char *); SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *, VTable *); SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction(sqlite3 *,FuncDef*, int nArg, Expr*); -#if (defined(SQLITE_ENABLE_DBPAGE_VTAB) || defined(SQLITE_TEST)) \ - && !defined(SQLITE_OMIT_VIRTUALTABLE) -SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(sqlite3_index_info*); -#endif +SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(Parse*); SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context*); SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe*, const char*, int); SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *, sqlite3_stmt *); @@ -21084,6 +21211,12 @@ SQLITE_PRIVATE int sqlite3KvvfsInit(void); SQLITE_PRIVATE sqlite3_uint64 sqlite3Hwtime(void); #endif +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS +# define IS_STMT_SCANSTATUS(db) (db->flags & SQLITE_StmtScanStatus) +#else +# define IS_STMT_SCANSTATUS(db) 0 +#endif + #endif /* SQLITEINT_H */ /************** End of sqliteInt.h *******************************************/ @@ -22079,7 +22212,7 @@ SQLITE_PRIVATE const unsigned char *sqlite3aGTb = &sqlite3UpperToLower[256+12-OP ** isalnum() 0x06 ** isxdigit() 0x08 ** toupper() 0x20 -** SQLite identifier character 0x40 +** SQLite identifier character 0x40 $, _, or non-ascii ** Quote character 0x80 ** ** Bit 0x20 is set if the mapped character requires translation to upper @@ -22273,7 +22406,7 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = { SQLITE_DEFAULT_SORTERREF_SIZE, /* szSorterRef */ 0, /* iPrngSeed */ #ifdef SQLITE_DEBUG - {0,0,0,0,0,0} /* aTune */ + {0,0,0,0,0,0}, /* aTune */ #endif }; @@ -23572,6 +23705,7 @@ struct DateTime { char validTZ; /* True (1) if tz is valid */ char tzSet; /* Timezone was set explicitly */ char isError; /* An overflow has occurred */ + char useSubsec; /* Display subsecond precision */ }; @@ -23886,6 +24020,11 @@ static int parseDateOrTime( }else if( sqlite3AtoF(zDate, &r, sqlite3Strlen30(zDate), SQLITE_UTF8)>0 ){ setRawDateNumber(p, r); return 0; + }else if( (sqlite3StrICmp(zDate,"subsec")==0 + || sqlite3StrICmp(zDate,"subsecond")==0) + && sqlite3NotPureFunc(context) ){ + p->useSubsec = 1; + return setDateTimeToCurrent(context, p); } return 1; } @@ -24300,8 +24439,22 @@ static int parseModifier( ** ** Move the date backwards to the beginning of the current day, ** or month or year. + ** + ** subsecond + ** subsec + ** + ** Show subsecond precision in the output of datetime() and + ** unixepoch() and strftime('%s'). 
*/ - if( sqlite3_strnicmp(z, "start of ", 9)!=0 ) break; + if( sqlite3_strnicmp(z, "start of ", 9)!=0 ){ + if( sqlite3_stricmp(z, "subsec")==0 + || sqlite3_stricmp(z, "subsecond")==0 + ){ + p->useSubsec = 1; + rc = 0; + } + break; + } if( !p->validJD && !p->validYMD && !p->validHMS ) break; z += 9; computeYMD(p); @@ -24499,7 +24652,11 @@ static void unixepochFunc( DateTime x; if( isDate(context, argc, argv, &x)==0 ){ computeJD(&x); - sqlite3_result_int64(context, x.iJD/1000 - 21086676*(i64)10000); + if( x.useSubsec ){ + sqlite3_result_double(context, (x.iJD - 21086676*(i64)10000000)/1000.0); + }else{ + sqlite3_result_int64(context, x.iJD/1000 - 21086676*(i64)10000); + } } } @@ -24515,8 +24672,8 @@ static void datetimeFunc( ){ DateTime x; if( isDate(context, argc, argv, &x)==0 ){ - int Y, s; - char zBuf[24]; + int Y, s, n; + char zBuf[32]; computeYMD_HMS(&x); Y = x.Y; if( Y<0 ) Y = -Y; @@ -24537,15 +24694,28 @@ static void datetimeFunc( zBuf[15] = '0' + (x.m/10)%10; zBuf[16] = '0' + (x.m)%10; zBuf[17] = ':'; - s = (int)x.s; - zBuf[18] = '0' + (s/10)%10; - zBuf[19] = '0' + (s)%10; - zBuf[20] = 0; + if( x.useSubsec ){ + s = (int)1000.0*x.s; + zBuf[18] = '0' + (s/10000)%10; + zBuf[19] = '0' + (s/1000)%10; + zBuf[20] = '.'; + zBuf[21] = '0' + (s/100)%10; + zBuf[22] = '0' + (s/10)%10; + zBuf[23] = '0' + (s)%10; + zBuf[24] = 0; + n = 24; + }else{ + s = (int)x.s; + zBuf[18] = '0' + (s/10)%10; + zBuf[19] = '0' + (s)%10; + zBuf[20] = 0; + n = 20; + } if( x.Y<0 ){ zBuf[0] = '-'; - sqlite3_result_text(context, zBuf, 20, SQLITE_TRANSIENT); + sqlite3_result_text(context, zBuf, n, SQLITE_TRANSIENT); }else{ - sqlite3_result_text(context, &zBuf[1], 19, SQLITE_TRANSIENT); + sqlite3_result_text(context, &zBuf[1], n-1, SQLITE_TRANSIENT); } } } @@ -24562,7 +24732,7 @@ static void timeFunc( ){ DateTime x; if( isDate(context, argc, argv, &x)==0 ){ - int s; + int s, n; char zBuf[16]; computeHMS(&x); zBuf[0] = '0' + (x.h/10)%10; @@ -24571,11 +24741,24 @@ static void timeFunc( zBuf[3] = '0' + (x.m/10)%10; zBuf[4] = '0' + (x.m)%10; zBuf[5] = ':'; - s = (int)x.s; - zBuf[6] = '0' + (s/10)%10; - zBuf[7] = '0' + (s)%10; - zBuf[8] = 0; - sqlite3_result_text(context, zBuf, 8, SQLITE_TRANSIENT); + if( x.useSubsec ){ + s = (int)1000.0*x.s; + zBuf[6] = '0' + (s/10000)%10; + zBuf[7] = '0' + (s/1000)%10; + zBuf[8] = '.'; + zBuf[9] = '0' + (s/100)%10; + zBuf[10] = '0' + (s/10)%10; + zBuf[11] = '0' + (s)%10; + zBuf[12] = 0; + n = 12; + }else{ + s = (int)x.s; + zBuf[6] = '0' + (s/10)%10; + zBuf[7] = '0' + (s)%10; + zBuf[8] = 0; + n = 8; + } + sqlite3_result_text(context, zBuf, n, SQLITE_TRANSIENT); } } @@ -24706,8 +24889,13 @@ static void strftimeFunc( break; } case 's': { - i64 iS = (i64)(x.iJD/1000 - 21086676*(i64)10000); - sqlite3_str_appendf(&sRes,"%lld",iS); + if( x.useSubsec ){ + sqlite3_str_appendf(&sRes,"%.3f", + (x.iJD - 21086676*(i64)10000000)/1000.0); + }else{ + i64 iS = (i64)(x.iJD/1000 - 21086676*(i64)10000); + sqlite3_str_appendf(&sRes,"%lld",iS); + } break; } case 'S': { @@ -30078,6 +30266,20 @@ static char et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){ } #endif /* SQLITE_OMIT_FLOATING_POINT */ +#ifndef SQLITE_OMIT_FLOATING_POINT +/* +** "*val" is a u64. *msd is a divisor used to extract the +** most significant digit of *val. Extract that most significant +** digit and return it. 
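The new 'subsec' / 'subsecond' modifier handled above keeps millisecond precision in unixepoch(), datetime(), time() and strftime('%s'). A quick sketch exercising it through the C API:

```c
#include <stdio.h>
#include "sqlite3.h"

static int print_row(void *p, int argc, char **argv, char **col){
  int i;
  (void)p;
  for(i=0; i<argc; i++) printf("%s = %s\n", col[i], argv[i] ? argv[i] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db = NULL;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  /* 'subsec' keeps milliseconds: unixepoch() returns a REAL instead of an
  ** INTEGER, and datetime() appends ".SSS" to the seconds field. */
  sqlite3_exec(db,
      "SELECT unixepoch('now') AS whole_seconds,"
      "       unixepoch('now','subsec') AS with_millis,"
      "       datetime('now','subsec') AS dt_with_millis",
      print_row, NULL, NULL);
  sqlite3_close(db);
  return 0;
}
```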
+*/ +static char et_getdigit_int(u64 *val, u64 *msd){ + u64 x = (*val)/(*msd); + *val -= x*(*msd); + if( *msd>=10 ) *msd /= 10; + return '0' + (char)(x & 15); +} +#endif /* SQLITE_OMIT_FLOATING_POINT */ + /* ** Set the StrAccum object to an error mode. */ @@ -30170,6 +30372,8 @@ SQLITE_API void sqlite3_str_vappendf( char prefix; /* Prefix character. "+" or "-" or " " or '\0'. */ sqlite_uint64 longvalue; /* Value for integer types */ LONGDOUBLE_TYPE realvalue; /* Value for real types */ + sqlite_uint64 msd; /* Divisor to get most-significant-digit + ** of longvalue */ const et_info *infop; /* Pointer to the appropriate info structure */ char *zOut; /* Rendering buffer */ int nOut; /* Size of the rendering buffer */ @@ -30476,52 +30680,78 @@ SQLITE_API void sqlite3_str_vappendf( }else{ prefix = flag_prefix; } + exp = 0; if( xtype==etGENERIC && precision>0 ) precision--; testcase( precision>0xfff ); - idx = precision & 0xfff; - rounder = arRound[idx%10]; - while( idx>=10 ){ rounder *= 1.0e-10; idx -= 10; } - if( xtype==etFLOAT ){ - double rx = (double)realvalue; - sqlite3_uint64 u; - int ex; - memcpy(&u, &rx, sizeof(u)); - ex = -1023 + (int)((u>>52)&0x7ff); - if( precision+(ex/3) < 15 ) rounder += realvalue*3e-16; - realvalue += rounder; - } - /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */ - exp = 0; - if( sqlite3IsNaN((double)realvalue) ){ - bufpt = "NaN"; - length = 3; - break; - } - if( realvalue>0.0 ){ - LONGDOUBLE_TYPE scale = 1.0; - while( realvalue>=1e100*scale && exp<=350 ){ scale *= 1e100;exp+=100;} - while( realvalue>=1e10*scale && exp<=350 ){ scale *= 1e10; exp+=10; } - while( realvalue>=10.0*scale && exp<=350 ){ scale *= 10.0; exp++; } - realvalue /= scale; - while( realvalue<1e-8 ){ realvalue *= 1e8; exp-=8; } - while( realvalue<1.0 ){ realvalue *= 10.0; exp--; } - if( exp>350 ){ - bufpt = buf; - buf[0] = prefix; - memcpy(buf+(prefix!=0),"Inf",4); - length = 3+(prefix!=0); + if( realvalue<1.0e+16 + && realvalue==(LONGDOUBLE_TYPE)(longvalue = (u64)realvalue) + ){ + /* Number is a pure integer that can be represented as u64 */ + for(msd=1; msd*10<=longvalue; msd *= 10, exp++){} + if( exp>precision && xtype!=etFLOAT ){ + u64 rnd = msd/2; + int kk = precision; + while( kk-- > 0 ){ rnd /= 10; } + longvalue += rnd; + } + }else{ + msd = 0; + longvalue = 0; /* To prevent a compiler warning */ + idx = precision & 0xfff; + rounder = arRound[idx%10]; + while( idx>=10 ){ rounder *= 1.0e-10; idx -= 10; } + if( xtype==etFLOAT ){ + double rx = (double)realvalue; + sqlite3_uint64 u; + int ex; + memcpy(&u, &rx, sizeof(u)); + ex = -1023 + (int)((u>>52)&0x7ff); + if( precision+(ex/3) < 15 ) rounder += realvalue*3e-16; + realvalue += rounder; + } + if( sqlite3IsNaN((double)realvalue) ){ + if( flag_zeropad ){ + bufpt = "null"; + length = 4; + }else{ + bufpt = "NaN"; + length = 3; + } break; } + + /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */ + if( ALWAYS(realvalue>0.0) ){ + LONGDOUBLE_TYPE scale = 1.0; + while( realvalue>=1e100*scale && exp<=350){ scale*=1e100;exp+=100;} + while( realvalue>=1e10*scale && exp<=350 ){ scale*=1e10; exp+=10; } + while( realvalue>=10.0*scale && exp<=350 ){ scale *= 10.0; exp++; } + realvalue /= scale; + while( realvalue<1e-8 ){ realvalue *= 1e8; exp-=8; } + while( realvalue<1.0 ){ realvalue *= 10.0; exp--; } + if( exp>350 ){ + if( flag_zeropad ){ + realvalue = 9.0; + exp = 999; + }else{ + bufpt = buf; + buf[0] = prefix; + memcpy(buf+(prefix!=0),"Inf",4); + length = 3+(prefix!=0); + break; + } + } + if( xtype!=etFLOAT ){ + realvalue += rounder; + 
if( realvalue>=10.0 ){ realvalue *= 0.1; exp++; } + } + } } - bufpt = buf; + /* ** If the field type is etGENERIC, then convert to either etEXP ** or etFLOAT, as appropriate. */ - if( xtype!=etFLOAT ){ - realvalue += rounder; - if( realvalue>=10.0 ){ realvalue *= 0.1; exp++; } - } if( xtype==etGENERIC ){ flag_rtz = !flag_alternateform; if( exp<-4 || exp>precision ){ @@ -30538,16 +30768,18 @@ SQLITE_API void sqlite3_str_vappendf( }else{ e2 = exp; } + nsd = 16 + flag_altform2*10; + bufpt = buf; { i64 szBufNeeded; /* Size of a temporary buffer needed */ szBufNeeded = MAX(e2,0)+(i64)precision+(i64)width+15; + if( cThousand && e2>0 ) szBufNeeded += (e2+2)/3; if( szBufNeeded > etBUFSIZE ){ bufpt = zExtra = printfTempBuf(pAccum, szBufNeeded); if( bufpt==0 ) return; } } zOut = bufpt; - nsd = 16 + flag_altform2*10; flag_dp = (precision>0 ?1:0) | flag_alternateform | flag_altform2; /* The sign in front of the number */ if( prefix ){ @@ -30556,9 +30788,15 @@ SQLITE_API void sqlite3_str_vappendf( /* Digits prior to the decimal point */ if( e2<0 ){ *(bufpt++) = '0'; + }else if( msd>0 ){ + for(; e2>=0; e2--){ + *(bufpt++) = et_getdigit_int(&longvalue,&msd); + if( cThousand && (e2%3)==0 && e2>1 ) *(bufpt++) = ','; + } }else{ for(; e2>=0; e2--){ *(bufpt++) = et_getdigit(&realvalue,&nsd); + if( cThousand && (e2%3)==0 && e2>1 ) *(bufpt++) = ','; } } /* The decimal point */ @@ -30572,8 +30810,14 @@ SQLITE_API void sqlite3_str_vappendf( *(bufpt++) = '0'; } /* Significant digits after the decimal point */ - while( (precision--)>0 ){ - *(bufpt++) = et_getdigit(&realvalue,&nsd); + if( msd>0 ){ + while( (precision--)>0 ){ + *(bufpt++) = et_getdigit_int(&longvalue,&msd); + } + }else{ + while( (precision--)>0 ){ + *(bufpt++) = et_getdigit(&realvalue,&nsd); + } } /* Remove trailing zeros and the "." if no digits follow the "." */ if( flag_rtz && flag_dp ){ @@ -31254,12 +31498,22 @@ SQLITE_API char *sqlite3_vsnprintf(int n, char *zBuf, const char *zFormat, va_li return zBuf; } SQLITE_API char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){ - char *z; + StrAccum acc; va_list ap; + if( n<=0 ) return zBuf; +#ifdef SQLITE_ENABLE_API_ARMOR + if( zBuf==0 || zFormat==0 ) { + (void)SQLITE_MISUSE_BKPT; + if( zBuf ) zBuf[0] = 0; + return zBuf; + } +#endif + sqlite3StrAccumInit(&acc, 0, zBuf, n, 0); va_start(ap,zFormat); - z = sqlite3_vsnprintf(n, zBuf, zFormat, ap); + sqlite3_str_vappendf(&acc, zFormat, ap); va_end(ap); - return z; + zBuf[acc.nChar] = 0; + return zBuf; } /* @@ -34289,13 +34543,15 @@ SQLITE_PRIVATE int sqlite3Int64ToText(i64 v, char *zOut){ } i = sizeof(zTemp)-2; zTemp[sizeof(zTemp)-1] = 0; - do{ - zTemp[i--] = (x%10) + '0'; + while( 1 /*exit-by-break*/ ){ + zTemp[i] = (x%10) + '0'; x = x/10; - }while( x ); - if( v<0 ) zTemp[i--] = '-'; - memcpy(zOut, &zTemp[i+1], sizeof(zTemp)-1-i); - return sizeof(zTemp)-2-i; + if( x==0 ) break; + i--; + }; + if( v<0 ) zTemp[--i] = '-'; + memcpy(zOut, &zTemp[i], sizeof(zTemp)-i); + return sizeof(zTemp)-1-i; } /* @@ -34460,7 +34716,9 @@ SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char *z, i64 *pOut){ u = u*16 + sqlite3HexToInt(z[k]); } memcpy(pOut, &u, 8); - return (z[k]==0 && k-i<=16) ? 
0 : 2; + if( k-i>16 ) return 2; + if( z[k]!=0 ) return 1; + return 0; }else #endif /* SQLITE_OMIT_HEX_INTEGER */ { @@ -34496,7 +34754,7 @@ SQLITE_PRIVATE int sqlite3GetInt32(const char *zNum, int *pValue){ u32 u = 0; zNum += 2; while( zNum[0]=='0' ) zNum++; - for(i=0; sqlite3Isxdigit(zNum[i]) && i<8; i++){ + for(i=0; i<8 && sqlite3Isxdigit(zNum[i]); i++){ u = u*16 + sqlite3HexToInt(zNum[i]); } if( (u&0x80000000)==0 && sqlite3Isxdigit(zNum[i])==0 ){ @@ -36992,7 +37250,7 @@ SQLITE_PRIVATE int sqlite3KvvfsInit(void){ #endif /* Use pread() and pwrite() if they are available */ -#if defined(__APPLE__) +#if defined(__APPLE__) || defined(__linux__) # define HAVE_PREAD 1 # define HAVE_PWRITE 1 #endif @@ -40242,12 +40500,6 @@ static int nfsUnlock(sqlite3_file *id, int eFileLock){ ** Seek to the offset passed as the second argument, then read cnt ** bytes into pBuf. Return the number of bytes actually read. ** -** NB: If you define USE_PREAD or USE_PREAD64, then it might also -** be necessary to define _XOPEN_SOURCE to be 500. This varies from -** one system to another. Since SQLite does not define USE_PREAD -** in any form by default, we will not attempt to define _XOPEN_SOURCE. -** See tickets #2741 and #2681. -** ** To avoid stomping the errno value on a failed read the lastErrno value ** is set before returning. */ @@ -50274,7 +50526,7 @@ static int winOpen( if( isReadWrite ){ int rc2, isRO = 0; sqlite3BeginBenignMalloc(); - rc2 = winAccess(pVfs, zName, SQLITE_ACCESS_READ, &isRO); + rc2 = winAccess(pVfs, zUtf8Name, SQLITE_ACCESS_READ, &isRO); sqlite3EndBenignMalloc(); if( rc2==SQLITE_OK && isRO ) break; } @@ -50291,7 +50543,7 @@ static int winOpen( if( isReadWrite ){ int rc2, isRO = 0; sqlite3BeginBenignMalloc(); - rc2 = winAccess(pVfs, zName, SQLITE_ACCESS_READ, &isRO); + rc2 = winAccess(pVfs, zUtf8Name, SQLITE_ACCESS_READ, &isRO); sqlite3EndBenignMalloc(); if( rc2==SQLITE_OK && isRO ) break; } @@ -50311,7 +50563,7 @@ static int winOpen( if( isReadWrite ){ int rc2, isRO = 0; sqlite3BeginBenignMalloc(); - rc2 = winAccess(pVfs, zName, SQLITE_ACCESS_READ, &isRO); + rc2 = winAccess(pVfs, zUtf8Name, SQLITE_ACCESS_READ, &isRO); sqlite3EndBenignMalloc(); if( rc2==SQLITE_OK && isRO ) break; } @@ -50534,6 +50786,13 @@ static int winAccess( OSTRACE(("ACCESS name=%s, flags=%x, pResOut=%p\n", zFilename, flags, pResOut)); + if( zFilename==0 ){ + *pResOut = 0; + OSTRACE(("ACCESS name=%s, pResOut=%p, *pResOut=%d, rc=SQLITE_OK\n", + zFilename, pResOut, *pResOut)); + return SQLITE_OK; + } + zConverted = winConvertFromUtf8Filename(zFilename); if( zConverted==0 ){ OSTRACE(("ACCESS name=%s, rc=SQLITE_IOERR_NOMEM\n", zFilename)); @@ -52690,11 +52949,15 @@ struct PCache { PgHdr *pPg; unsigned char *a; int j; - pPg = (PgHdr*)pLower->pExtra; - printf("%3lld: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags); - a = (unsigned char *)pLower->pBuf; - for(j=0; j<12; j++) printf("%02x", a[j]); - printf(" ptr %p\n", pPg); + if( pLower==0 ){ + printf("%3d: NULL\n", i); + }else{ + pPg = (PgHdr*)pLower->pExtra; + printf("%3d: nRef %2lld flgs %02x data ", i, pPg->nRef, pPg->flags); + a = (unsigned char *)pLower->pBuf; + for(j=0; j<12; j++) printf("%02x", a[j]); + printf(" ptr %p\n", pPg); + } } static void pcacheDump(PCache *pCache){ int N; @@ -52707,9 +52970,8 @@ struct PCache { if( N>sqlite3PcacheMxDump ) N = sqlite3PcacheMxDump; for(i=1; i<=N; i++){ pLower = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, i, 0); - if( pLower==0 ) continue; pcachePageTrace(i, pLower); - if( ((PgHdr*)pLower)->pPage==0 ){ + if( pLower && 
((PgHdr*)pLower)->pPage==0 ){ sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, pLower, 0); } } @@ -58097,6 +58359,8 @@ static int pager_truncate(Pager *pPager, Pgno nPage){ int rc = SQLITE_OK; assert( pPager->eState!=PAGER_ERROR ); assert( pPager->eState!=PAGER_READER ); + PAGERTRACE(("Truncate %d npage %u\n", PAGERID(pPager), nPage)); + if( isOpen(pPager->fd) && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN) @@ -61014,6 +61278,10 @@ static int getPageNormal( if( !isOpen(pPager->fd) || pPager->dbSizepPager->mxPgno ){ rc = SQLITE_FULL; + if( pgno<=pPager->dbSize ){ + sqlite3PcacheRelease(pPg); + pPg = 0; + } goto pager_acquire_err; } if( noContent ){ @@ -61178,10 +61446,12 @@ SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno){ /* ** Release a page reference. ** -** The sqlite3PagerUnref() and sqlite3PagerUnrefNotNull() may only be -** used if we know that the page being released is not the last page. +** The sqlite3PagerUnref() and sqlite3PagerUnrefNotNull() may only be used +** if we know that the page being released is not the last reference to page1. ** The btree layer always holds page1 open until the end, so these first -** to routines can be used to release any page other than BtShared.pPage1. +** two routines can be used to release any page other than BtShared.pPage1. +** The assert() at tag-20230419-2 proves that this constraint is always +** honored. ** ** Use sqlite3PagerUnrefPageOne() to release page1. This latter routine ** checks the total number of outstanding pages and if the number of @@ -61197,7 +61467,7 @@ SQLITE_PRIVATE void sqlite3PagerUnrefNotNull(DbPage *pPg){ sqlite3PcacheRelease(pPg); } /* Do not use this routine to release the last reference to page1 */ - assert( sqlite3PcacheRefCount(pPager->pPCache)>0 ); + assert( sqlite3PcacheRefCount(pPager->pPCache)>0 ); /* tag-20230419-2 */ } SQLITE_PRIVATE void sqlite3PagerUnref(DbPage *pPg){ if( pPg ) sqlite3PagerUnrefNotNull(pPg); @@ -62957,13 +63227,15 @@ SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager){ */ static int pagerExclusiveLock(Pager *pPager){ int rc; /* Return code */ + u8 eOrigLock; /* Original lock */ - assert( pPager->eLock==SHARED_LOCK || pPager->eLock==EXCLUSIVE_LOCK ); + assert( pPager->eLock>=SHARED_LOCK ); + eOrigLock = pPager->eLock; rc = pagerLockDb(pPager, EXCLUSIVE_LOCK); if( rc!=SQLITE_OK ){ /* If the attempt to grab the exclusive lock failed, release the ** pending lock that may have been obtained instead. */ - pagerUnlockDb(pPager, SHARED_LOCK); + pagerUnlockDb(pPager, eOrigLock); } return rc; @@ -63968,19 +64240,40 @@ static void walChecksumBytes( assert( nByte>=8 ); assert( (nByte&0x00000007)==0 ); assert( nByte<=65536 ); + assert( nByte%4==0 ); - if( nativeCksum ){ + if( !nativeCksum ){ + do { + s1 += BYTESWAP32(aData[0]) + s2; + s2 += BYTESWAP32(aData[1]) + s1; + aData += 2; + }while( aDataszPage==szPage ); + if( (int)pWal->szPage!=szPage ){ + return SQLITE_CORRUPT_BKPT; /* TH3 test case: cov1/corrupt155.test */ + } /* Setup information needed to write frames into the WAL */ w.pWal = pWal; @@ -67571,7 +67866,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal){ ** byte are used. The integer consists of all bytes that have bit 8 set and ** the first byte with bit 8 clear. The most significant byte of the integer ** appears first. A variable-length integer may not be more than 9 bytes long. -** As a special case, all 8 bytes of the 9th byte are used as data. This +** As a special case, all 8 bits of the 9th byte are used as data. 
This ** allows a 64-bit integer to be encoded in 9 bytes. ** ** 0x00 becomes 0x00000000 @@ -67955,7 +68250,7 @@ struct BtCursor { #define BTCF_WriteFlag 0x01 /* True if a write cursor */ #define BTCF_ValidNKey 0x02 /* True if info.nKey is valid */ #define BTCF_ValidOvfl 0x04 /* True if aOverflow is valid */ -#define BTCF_AtLast 0x08 /* Cursor is pointing ot the last entry */ +#define BTCF_AtLast 0x08 /* Cursor is pointing to the last entry */ #define BTCF_Incrblob 0x10 /* True if an incremental I/O handle */ #define BTCF_Multiple 0x20 /* Maybe another cursor on the same btree */ #define BTCF_Pinned 0x40 /* Cursor is busy and cannot be moved */ @@ -68100,8 +68395,9 @@ struct IntegrityCk { int rc; /* SQLITE_OK, SQLITE_NOMEM, or SQLITE_INTERRUPT */ u32 nStep; /* Number of steps into the integrity_check process */ const char *zPfx; /* Error message prefix */ - Pgno v1; /* Value for first %u substitution in zPfx */ - int v2; /* Value for second %d substitution in zPfx */ + Pgno v0; /* Value for first %u substitution in zPfx (root page) */ + Pgno v1; /* Value for second %u substitution in zPfx (current pg) */ + int v2; /* Value for third %d substitution in zPfx */ StrAccum errMsg; /* Accumulate the error message text here */ u32 *heap; /* Min-heap used for analyzing cell coverage */ sqlite3 *db; /* Database connection running the check */ @@ -68564,8 +68860,8 @@ SQLITE_PRIVATE sqlite3_uint64 sqlite3BtreeSeekCount(Btree *pBt){ int corruptPageError(int lineno, MemPage *p){ char *zMsg; sqlite3BeginBenignMalloc(); - zMsg = sqlite3_mprintf("database corruption page %d of %s", - (int)p->pgno, sqlite3PagerFilename(p->pBt->pPager, 0) + zMsg = sqlite3_mprintf("database corruption page %u of %s", + p->pgno, sqlite3PagerFilename(p->pBt->pPager, 0) ); sqlite3EndBenignMalloc(); if( zMsg ){ @@ -69374,8 +69670,25 @@ SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor *pCur, int *pDifferentRow) */ SQLITE_PRIVATE void sqlite3BtreeCursorHint(BtCursor *pCur, int eHintType, ...){ /* Used only by system that substitute their own storage engine */ +#ifdef SQLITE_DEBUG + if( ALWAYS(eHintType==BTREE_HINT_RANGE) ){ + va_list ap; + Expr *pExpr; + Walker w; + memset(&w, 0, sizeof(w)); + w.xExprCallback = sqlite3CursorRangeHintExprCheck; + va_start(ap, eHintType); + pExpr = va_arg(ap, Expr*); + w.u.aMem = va_arg(ap, Mem*); + va_end(ap); + assert( pExpr!=0 ); + assert( w.u.aMem!=0 ); + sqlite3WalkExpr(&w, pExpr); + } +#endif /* SQLITE_DEBUG */ } -#endif +#endif /* SQLITE_ENABLE_CURSOR_HINTS */ + /* ** Provide flag hints to the cursor. 
@@ -69460,7 +69773,7 @@ static void ptrmapPut(BtShared *pBt, Pgno key, u8 eType, Pgno parent, int *pRC){ pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage); if( eType!=pPtrmap[offset] || get4byte(&pPtrmap[offset+1])!=parent ){ - TRACE(("PTRMAP_UPDATE: %d->(%d,%d)\n", key, eType, parent)); + TRACE(("PTRMAP_UPDATE: %u->(%u,%u)\n", key, eType, parent)); *pRC= rc = sqlite3PagerWrite(pDbPage); if( rc==SQLITE_OK ){ pPtrmap[offset] = eType; @@ -69659,27 +69972,31 @@ static void btreeParseCellPtr( iKey = *pIter; if( iKey>=0x80 ){ u8 x; - iKey = ((iKey&0x7f)<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x =*++pIter) & 0x7f); + iKey = (iKey<<7) ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x10204000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x4000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x4000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x4000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x4000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<8) | (*++pIter); + iKey = (iKey<<8) ^ 0x8000 ^ (*++pIter); } } } } } + }else{ + iKey ^= 0x204000; } + }else{ + iKey ^= 0x4000; } } pIter++; @@ -69756,10 +70073,11 @@ static void btreeParseCell( ** ** cellSizePtrNoPayload() => table internal nodes ** cellSizePtrTableLeaf() => table leaf nodes -** cellSizePtr() => all index nodes & table leaf nodes +** cellSizePtr() => index internal nodes +** cellSizeIdxLeaf() => index leaf nodes */ static u16 cellSizePtr(MemPage *pPage, u8 *pCell){ - u8 *pIter = pCell + pPage->childPtrSize; /* For looping over bytes of pCell */ + u8 *pIter = pCell + 4; /* For looping over bytes of pCell */ u8 *pEnd; /* End mark for a varint */ u32 nSize; /* Size value to return */ @@ -69772,6 +70090,49 @@ static u16 cellSizePtr(MemPage *pPage, u8 *pCell){ pPage->xParseCell(pPage, pCell, &debuginfo); #endif + assert( pPage->childPtrSize==4 ); + nSize = *pIter; + if( nSize>=0x80 ){ + pEnd = &pIter[8]; + nSize &= 0x7f; + do{ + nSize = (nSize<<7) | (*++pIter & 0x7f); + }while( *(pIter)>=0x80 && pItermaxLocal ); + testcase( nSize==(u32)pPage->maxLocal+1 ); + if( nSize<=pPage->maxLocal ){ + nSize += (u32)(pIter - pCell); + assert( nSize>4 ); + }else{ + int minLocal = pPage->minLocal; + nSize = minLocal + (nSize - minLocal) % (pPage->pBt->usableSize - 4); + testcase( nSize==pPage->maxLocal ); + testcase( nSize==(u32)pPage->maxLocal+1 ); + if( nSize>pPage->maxLocal ){ + nSize = minLocal; + } + nSize += 4 + (u16)(pIter - pCell); + } + assert( nSize==debuginfo.nSize || CORRUPT_DB ); + return (u16)nSize; +} +static u16 cellSizePtrIdxLeaf(MemPage *pPage, u8 *pCell){ + u8 *pIter = pCell; /* For looping over bytes of pCell */ + u8 *pEnd; /* End mark for a varint */ + u32 nSize; /* Size value to return */ + +#ifdef SQLITE_DEBUG + /* The value returned by this function should always be the same as + ** the (CellInfo.nSize) value found by doing a full parse of the + ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of + ** this function verifies that this invariant is not violated. 
*/ + CellInfo debuginfo; + pPage->xParseCell(pPage, pCell, &debuginfo); +#endif + + assert( pPage->childPtrSize==0 ); nSize = *pIter; if( nSize>=0x80 ){ pEnd = &pIter[8]; @@ -70008,10 +70369,10 @@ static int defragmentPage(MemPage *pPage, int nMaxFrag){ /* These conditions have already been verified in btreeInitPage() ** if PRAGMA cell_size_check=ON. */ - if( pciCellLast ){ + if( pc>iCellLast ){ return SQLITE_CORRUPT_PAGE(pPage); } - assert( pc>=iCellStart && pc<=iCellLast ); + assert( pc>=0 && pc<=iCellLast ); size = pPage->xCellSize(pPage, &src[pc]); cbrk -= size; if( cbrkusableSize ){ @@ -70126,7 +70487,7 @@ static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){ ** allocation is being made in order to insert a new cell, so we will ** also end up needing a new cell pointer. */ -static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ +static SQLITE_INLINE int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ const int hdr = pPage->hdrOffset; /* Local cache of pPage->hdrOffset */ u8 * const data = pPage->aData; /* Local cache of pPage->aData */ int top; /* First byte of cell content area */ @@ -70152,13 +70513,14 @@ static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ ** integer, so a value of 0 is used in its place. */ pTmp = &data[hdr+5]; top = get2byte(pTmp); - assert( top<=(int)pPage->pBt->usableSize ); /* by btreeComputeFreeSpace() */ if( gap>top ){ if( top==0 && pPage->pBt->usableSize==65536 ){ top = 65536; }else{ return SQLITE_CORRUPT_PAGE(pPage); } + }else if( top>(int)pPage->pBt->usableSize ){ + return SQLITE_CORRUPT_PAGE(pPage); } /* If there is enough space between gap and top for one more cell pointer, @@ -70241,7 +70603,7 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ assert( CORRUPT_DB || iEnd <= pPage->pBt->usableSize ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( iSize>=4 ); /* Minimum cell size is 4 */ - assert( iStart<=pPage->pBt->usableSize-4 ); + assert( CORRUPT_DB || iStart<=pPage->pBt->usableSize-4 ); /* The list of freeblocks must be in ascending order. Find the ** spot on the list where iStart should be inserted. 
@@ -70298,6 +70660,11 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ } pTmp = &data[hdr+5]; x = get2byte(pTmp); + if( pPage->pBt->btsFlags & BTS_FAST_SECURE ){ + /* Overwrite deleted information with zeros when the secure_delete + ** option is enabled */ + memset(&data[iStart], 0, iSize); + } if( iStart<=x ){ /* The new freeblock is at the beginning of the cell content area, ** so just extend the cell content area rather than create another @@ -70309,14 +70676,9 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ }else{ /* Insert the new freeblock into the freelist */ put2byte(&data[iPtr], iStart); + put2byte(&data[iStart], iFreeBlk); + put2byte(&data[iStart+2], iSize); } - if( pPage->pBt->btsFlags & BTS_FAST_SECURE ){ - /* Overwrite deleted information with zeros when the secure_delete - ** option is enabled */ - memset(&data[iStart], 0, iSize); - } - put2byte(&data[iStart], iFreeBlk); - put2byte(&data[iStart+2], iSize); pPage->nFree += iOrigSize; return SQLITE_OK; } @@ -70353,14 +70715,14 @@ static int decodeFlags(MemPage *pPage, int flagByte){ }else if( flagByte==(PTF_ZERODATA | PTF_LEAF) ){ pPage->intKey = 0; pPage->intKeyLeaf = 0; - pPage->xCellSize = cellSizePtr; + pPage->xCellSize = cellSizePtrIdxLeaf; pPage->xParseCell = btreeParseCellPtrIndex; pPage->maxLocal = pBt->maxLocal; pPage->minLocal = pBt->minLocal; }else{ pPage->intKey = 0; pPage->intKeyLeaf = 0; - pPage->xCellSize = cellSizePtr; + pPage->xCellSize = cellSizePtrIdxLeaf; pPage->xParseCell = btreeParseCellPtrIndex; return SQLITE_CORRUPT_PAGE(pPage); } @@ -72226,7 +72588,7 @@ static int relocatePage( if( iDbPage<3 ) return SQLITE_CORRUPT_BKPT; /* Move page iDbPage from its current location to page number iFreePage */ - TRACE(("AUTOVACUUM: Moving %d to free page %d (ptr page %d type %d)\n", + TRACE(("AUTOVACUUM: Moving %u to free page %u (ptr page %u type %u)\n", iDbPage, iFreePage, iPtrPage, eType)); rc = sqlite3PagerMovepage(pPager, pDbPage->pDbPage, iFreePage, isCommit); if( rc!=SQLITE_OK ){ @@ -74512,7 +74874,8 @@ static SQLITE_NOINLINE int btreeNext(BtCursor *pCur){ pPage = pCur->pPage; idx = ++pCur->ix; - if( !pPage->isInit || sqlite3FaultSim(412) ){ + if( sqlite3FaultSim(412) ) pPage->isInit = 0; + if( !pPage->isInit ){ return SQLITE_CORRUPT_BKPT; } @@ -74775,7 +75138,7 @@ static int allocateBtreePage( memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4); *ppPage = pTrunk; pTrunk = 0; - TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); + TRACE(("ALLOCATE: %u trunk - %u free pages left\n", *pPgno, n-1)); }else if( k>(u32)(pBt->usableSize/4 - 2) ){ /* Value of k is out of range. Database corruption */ rc = SQLITE_CORRUPT_PGNO(iTrunk); @@ -74841,7 +75204,7 @@ static int allocateBtreePage( } } pTrunk = 0; - TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); + TRACE(("ALLOCATE: %u trunk - %u free pages left\n", *pPgno, n-1)); #endif }else if( k>0 ){ /* Extract a leaf from the trunk */ @@ -74886,8 +75249,8 @@ static int allocateBtreePage( ){ int noContent; *pPgno = iPage; - TRACE(("ALLOCATE: %d was leaf %d of %d on trunk %d" - ": %d more free pages\n", + TRACE(("ALLOCATE: %u was leaf %u of %u on trunk %u" + ": %u more free pages\n", *pPgno, closest+1, k, pTrunk->pgno, n-1)); rc = sqlite3PagerWrite(pTrunk->pDbPage); if( rc ) goto end_allocate_page; @@ -74943,7 +75306,7 @@ static int allocateBtreePage( ** becomes a new pointer-map page, the second is used by the caller. 
*/ MemPage *pPg = 0; - TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", pBt->nPage)); + TRACE(("ALLOCATE: %u from end of file (pointer-map page)\n", pBt->nPage)); assert( pBt->nPage!=PENDING_BYTE_PAGE(pBt) ); rc = btreeGetUnusedPage(pBt, pBt->nPage, &pPg, bNoContent); if( rc==SQLITE_OK ){ @@ -74966,7 +75329,7 @@ static int allocateBtreePage( releasePage(*ppPage); *ppPage = 0; } - TRACE(("ALLOCATE: %d from end of file\n", *pPgno)); + TRACE(("ALLOCATE: %u from end of file\n", *pPgno)); } assert( CORRUPT_DB || *pPgno!=PENDING_BYTE_PAGE(pBt) ); @@ -75094,7 +75457,7 @@ static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){ } rc = btreeSetHasContent(pBt, iPage); } - TRACE(("FREE-PAGE: %d leaf on trunk page %d\n",pPage->pgno,pTrunk->pgno)); + TRACE(("FREE-PAGE: %u leaf on trunk page %u\n",pPage->pgno,pTrunk->pgno)); goto freepage_out; } } @@ -75115,7 +75478,7 @@ static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){ put4byte(pPage->aData, iTrunk); put4byte(&pPage->aData[4], 0); put4byte(&pPage1->aData[32], iPage); - TRACE(("FREE-PAGE: %d new trunk page replacing %d\n", pPage->pgno, iTrunk)); + TRACE(("FREE-PAGE: %u new trunk page replacing %u\n", pPage->pgno, iTrunk)); freepage_out: if( pPage ){ @@ -75474,6 +75837,14 @@ static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){ ** in pTemp or the original pCell) and also record its index. ** Allocating a new entry in pPage->aCell[] implies that ** pPage->nOverflow is incremented. +** +** The insertCellFast() routine below works exactly the same as +** insertCell() except that it lacks the pTemp and iChild parameters +** which are assumed zero. Other than that, the two routines are the +** same. +** +** Fixes or enhancements to this routine should be reflected in +** insertCellFast()! */ static int insertCell( MemPage *pPage, /* Page into which we are copying */ @@ -75496,14 +75867,103 @@ static int insertCell( assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( sz==pPage->xCellSize(pPage, pCell) || CORRUPT_DB ); assert( pPage->nFree>=0 ); + assert( iChild>0 ); if( pPage->nOverflow || sz+2>pPage->nFree ){ if( pTemp ){ memcpy(pTemp, pCell, sz); pCell = pTemp; } - if( iChild ){ - put4byte(pCell, iChild); + put4byte(pCell, iChild); + j = pPage->nOverflow++; + /* Comparison against ArraySize-1 since we hold back one extra slot + ** as a contingency. In other words, never need more than 3 overflow + ** slots but 4 are allocated, just to be safe. */ + assert( j < ArraySize(pPage->apOvfl)-1 ); + pPage->apOvfl[j] = pCell; + pPage->aiOvfl[j] = (u16)i; + + /* When multiple overflows occur, they are always sequential and in + ** sorted order. This invariants arise because multiple overflows can + ** only occur when inserting divider cells into the parent page during + ** balancing, and the dividers are adjacent and sorted. 
+ */ + assert( j==0 || pPage->aiOvfl[j-1]<(u16)i ); /* Overflows in sorted order */ + assert( j==0 || i==pPage->aiOvfl[j-1]+1 ); /* Overflows are sequential */ + }else{ + int rc = sqlite3PagerWrite(pPage->pDbPage); + if( NEVER(rc!=SQLITE_OK) ){ + return rc; + } + assert( sqlite3PagerIswriteable(pPage->pDbPage) ); + data = pPage->aData; + assert( &data[pPage->cellOffset]==pPage->aCellIdx ); + rc = allocateSpace(pPage, sz, &idx); + if( rc ){ return rc; } + /* The allocateSpace() routine guarantees the following properties + ** if it returns successfully */ + assert( idx >= 0 ); + assert( idx >= pPage->cellOffset+2*pPage->nCell+2 || CORRUPT_DB ); + assert( idx+sz <= (int)pPage->pBt->usableSize ); + pPage->nFree -= (u16)(2 + sz); + /* In a corrupt database where an entry in the cell index section of + ** a btree page has a value of 3 or less, the pCell value might point + ** as many as 4 bytes in front of the start of the aData buffer for + ** the source page. Make sure this does not cause problems by not + ** reading the first 4 bytes */ + memcpy(&data[idx+4], pCell+4, sz-4); + put4byte(&data[idx], iChild); + pIns = pPage->aCellIdx + i*2; + memmove(pIns+2, pIns, 2*(pPage->nCell - i)); + put2byte(pIns, idx); + pPage->nCell++; + /* increment the cell count */ + if( (++data[pPage->hdrOffset+4])==0 ) data[pPage->hdrOffset+3]++; + assert( get2byte(&data[pPage->hdrOffset+3])==pPage->nCell || CORRUPT_DB ); +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pPage->pBt->autoVacuum ){ + int rc2 = SQLITE_OK; + /* The cell may contain a pointer to an overflow page. If so, write + ** the entry for the overflow page into the pointer map. + */ + ptrmapPutOvflPtr(pPage, pPage, pCell, &rc2); + if( rc2 ) return rc2; } +#endif + } + return SQLITE_OK; +} + +/* +** This variant of insertCell() assumes that the pTemp and iChild +** parameters are both zero. Use this variant in sqlite3BtreeInsert() +** for performance improvement, and also so that this variant is only +** called from that one place, and is thus inlined, and thus runs must +** faster. +** +** Fixes or enhancements to this routine should be reflected into +** the insertCell() routine. +*/ +static int insertCellFast( + MemPage *pPage, /* Page into which we are copying */ + int i, /* New cell becomes the i-th cell of the page */ + u8 *pCell, /* Content of the new cell */ + int sz /* Bytes of content in pCell */ +){ + int idx = 0; /* Where to write new cell content in data[] */ + int j; /* Loop counter */ + u8 *data; /* The content of the whole page */ + u8 *pIns; /* The point in pPage->aCellIdx[] where no cell inserted */ + + assert( i>=0 && i<=pPage->nCell+pPage->nOverflow ); + assert( MX_CELL(pPage->pBt)<=10921 ); + assert( pPage->nCell<=MX_CELL(pPage->pBt) || CORRUPT_DB ); + assert( pPage->nOverflow<=ArraySize(pPage->apOvfl) ); + assert( ArraySize(pPage->apOvfl)==ArraySize(pPage->aiOvfl) ); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + assert( sz==pPage->xCellSize(pPage, pCell) || CORRUPT_DB ); + assert( pPage->nFree>=0 ); + assert( pPage->nOverflow==0 ); + if( sz+2>pPage->nFree ){ j = pPage->nOverflow++; /* Comparison against ArraySize-1 since we hold back one extra slot ** as a contingency. 
In other words, never need more than 3 overflow @@ -75535,17 +75995,7 @@ static int insertCell( assert( idx >= pPage->cellOffset+2*pPage->nCell+2 || CORRUPT_DB ); assert( idx+sz <= (int)pPage->pBt->usableSize ); pPage->nFree -= (u16)(2 + sz); - if( iChild ){ - /* In a corrupt database where an entry in the cell index section of - ** a btree page has a value of 3 or less, the pCell value might point - ** as many as 4 bytes in front of the start of the aData buffer for - ** the source page. Make sure this does not cause problems by not - ** reading the first 4 bytes */ - memcpy(&data[idx+4], pCell+4, sz-4); - put4byte(&data[idx], iChild); - }else{ - memcpy(&data[idx], pCell, sz); - } + memcpy(&data[idx], pCell, sz); pIns = pPage->aCellIdx + i*2; memmove(pIns+2, pIns, 2*(pPage->nCell - i)); put2byte(pIns, idx); @@ -75730,7 +76180,7 @@ static int rebuildPage( assert( i(u32)usableSize ){ j = 0; } + if( NEVER(j>(u32)usableSize) ){ j = 0; } memcpy(&pTmp[j], &aData[j], usableSize - j); for(k=0; pCArray->ixNx[k]<=i && ALWAYS(kpBt->usableSize]; u8 * const pStart = &aData[pPg->hdrOffset + 8 + pPg->childPtrSize]; int nRet = 0; - int i; + int i, j; int iEnd = iFirst + nCell; - u8 *pFree = 0; /* \__ Parameters for pending call to */ - int szFree = 0; /* / freeSpace() */ + int nFree = 0; + int aOfst[10]; + int aAfter[10]; for(i=iFirst; iapCell[i]; if( SQLITE_WITHIN(pCell, pStart, pEnd) ){ int sz; + int iAfter; + int iOfst; /* No need to use cachedCellSize() here. The sizes of all cells that ** are to be freed have already been computing while deciding which ** cells need freeing */ sz = pCArray->szCell[i]; assert( sz>0 ); - if( pFree!=(pCell + sz) ){ - if( pFree ){ - assert( pFree>aData && (pFree - aData)<65536 ); - freeSpace(pPg, (u16)(pFree - aData), szFree); - } - pFree = pCell; - szFree = sz; - if( pFree+sz>pEnd ){ - return 0; + iOfst = (u16)(pCell - aData); + iAfter = iOfst+sz; + for(j=0; j=nFree ){ + if( nFree>=(int)(sizeof(aOfst)/sizeof(aOfst[0])) ){ + for(j=0; jpEnd ) return 0; + nFree++; } nRet++; } } - if( pFree ){ - assert( pFree>aData && (pFree - aData)<65536 ); - freeSpace(pPg, (u16)(pFree - aData), szFree); + for(j=0; jpPg->aDataEnd ) goto editpage_fail; + if( NEVER(pData>pPg->aDataEnd) ) goto editpage_fail; /* Add cells to the start of the page */ if( iNew0 || (pParent->pgno==1 && pParent->nCell==0) || CORRUPT_DB); - TRACE(("BALANCE: old: %d(nc=%d) %d(nc=%d) %d(nc=%d)\n", + TRACE(("BALANCE: old: %u(nc=%u) %u(nc=%u) %u(nc=%u)\n", apOld[0]->pgno, apOld[0]->nCell, nOld>=2 ? apOld[1]->pgno : 0, nOld>=2 ? apOld[1]->nCell : 0, nOld>=3 ? apOld[2]->pgno : 0, nOld>=3 ? apOld[2]->nCell : 0 @@ -76785,8 +77243,8 @@ static int balance_nonroot( } } - TRACE(("BALANCE: new: %d(%d nc=%d) %d(%d nc=%d) %d(%d nc=%d) " - "%d(%d nc=%d) %d(%d nc=%d)\n", + TRACE(("BALANCE: new: %u(%u nc=%u) %u(%u nc=%u) %u(%u nc=%u) " + "%u(%u nc=%u) %u(%u nc=%u)\n", apNew[0]->pgno, szNew[0], cntNew[0], nNew>=2 ? apNew[1]->pgno : 0, nNew>=2 ? szNew[1] : 0, nNew>=2 ? cntNew[1] - cntNew[0] - !leafData : 0, @@ -77031,7 +77489,7 @@ static int balance_nonroot( } assert( pParent->isInit ); - TRACE(("BALANCE: finished: old=%d new=%d cells=%d\n", + TRACE(("BALANCE: finished: old=%u new=%u cells=%u\n", nOld, nNew, b.nCell)); /* Free any old pages that were not reused as new pages. 
@@ -77116,7 +77574,7 @@ static int balance_deeper(MemPage *pRoot, MemPage **ppChild){ assert( sqlite3PagerIswriteable(pRoot->pDbPage) ); assert( pChild->nCell==pRoot->nCell || CORRUPT_DB ); - TRACE(("BALANCE: copy root %d into %d\n", pRoot->pgno, pChild->pgno)); + TRACE(("BALANCE: copy root %u into %u\n", pRoot->pgno, pChild->pgno)); /* Copy the overflow cells from pRoot to pChild */ memcpy(pChild->aiOvfl, pRoot->aiOvfl, @@ -77599,7 +78057,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( } } assert( pCur->eState==CURSOR_VALID - || (pCur->eState==CURSOR_INVALID && loc) ); + || (pCur->eState==CURSOR_INVALID && loc) || CORRUPT_DB ); pPage = pCur->pPage; assert( pPage->intKey || pX->nKey>=0 || (flags & BTREE_PREFORMAT) ); @@ -77614,7 +78072,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( if( rc ) return rc; } - TRACE(("INSERT: table=%d nkey=%lld ndata=%d page=%d %s\n", + TRACE(("INSERT: table=%u nkey=%lld ndata=%u page=%u %s\n", pCur->pgnoRoot, pX->nKey, pX->nData, pPage->pgno, loc==0 ? "overwrite" : "new entry")); assert( pPage->isInit || CORRUPT_DB ); @@ -77690,7 +78148,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( }else{ assert( pPage->leaf ); } - rc = insertCell(pPage, idx, newCell, szNew, 0, 0); + rc = insertCellFast(pPage, idx, newCell, szNew); assert( pPage->nOverflow==0 || rc==SQLITE_OK ); assert( rc!=SQLITE_OK || pPage->nCell>0 || pPage->nOverflow>0 ); @@ -77914,6 +78372,9 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ if( pPage->nFree<0 && btreeComputeFreeSpace(pPage) ){ return SQLITE_CORRUPT_BKPT; } + if( pCell<&pPage->aCellIdx[pPage->nCell] ){ + return SQLITE_CORRUPT_BKPT; + } /* If the BTREE_SAVEPOSITION bit is on, then the cursor position must ** be preserved following this delete operation. If the current delete @@ -78662,7 +79123,8 @@ static void checkAppendMsg( sqlite3_str_append(&pCheck->errMsg, "\n", 1); } if( pCheck->zPfx ){ - sqlite3_str_appendf(&pCheck->errMsg, pCheck->zPfx, pCheck->v1, pCheck->v2); + sqlite3_str_appendf(&pCheck->errMsg, pCheck->zPfx, + pCheck->v0, pCheck->v1, pCheck->v2); } sqlite3_str_vappendf(&pCheck->errMsg, zFormat, ap); va_end(ap); @@ -78702,11 +79164,11 @@ static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){ */ static int checkRef(IntegrityCk *pCheck, Pgno iPage){ if( iPage>pCheck->nPage || iPage==0 ){ - checkAppendMsg(pCheck, "invalid page number %d", iPage); + checkAppendMsg(pCheck, "invalid page number %u", iPage); return 1; } if( getPageReferenced(pCheck, iPage) ){ - checkAppendMsg(pCheck, "2nd reference to page %d", iPage); + checkAppendMsg(pCheck, "2nd reference to page %u", iPage); return 1; } setPageReferenced(pCheck, iPage); @@ -78732,13 +79194,13 @@ static void checkPtrmap( rc = ptrmapGet(pCheck->pBt, iChild, &ePtrmapType, &iPtrmapParent); if( rc!=SQLITE_OK ){ if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) checkOom(pCheck); - checkAppendMsg(pCheck, "Failed to read ptrmap key=%d", iChild); + checkAppendMsg(pCheck, "Failed to read ptrmap key=%u", iChild); return; } if( ePtrmapType!=eType || iPtrmapParent!=iParent ){ checkAppendMsg(pCheck, - "Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)", + "Bad ptr map entry key=%u expected=(%u,%u) got=(%u,%u)", iChild, eType, iParent, ePtrmapType, iPtrmapParent); } } @@ -78763,7 +79225,7 @@ static void checkList( if( checkRef(pCheck, iPage) ) break; N--; if( sqlite3PagerGet(pCheck->pPager, (Pgno)iPage, &pOvflPage, 0) ){ - checkAppendMsg(pCheck, "failed to get page %d", iPage); + checkAppendMsg(pCheck, "failed to get page %u", iPage); break; } pOvflData = (unsigned char 
*)sqlite3PagerGetData(pOvflPage); @@ -78776,7 +79238,7 @@ static void checkList( #endif if( n>pCheck->pBt->usableSize/4-2 ){ checkAppendMsg(pCheck, - "freelist leaf count too big on page %d", iPage); + "freelist leaf count too big on page %u", iPage); N--; }else{ for(i=0; i<(int)n; i++){ @@ -78808,7 +79270,7 @@ static void checkList( } if( N && nErrAtStart==pCheck->nErr ){ checkAppendMsg(pCheck, - "%s is %d but should be %d", + "%s is %u but should be %u", isFreeList ? "size" : "overflow list length", expected-N, expected); } @@ -78923,8 +79385,8 @@ static int checkTreePage( usableSize = pBt->usableSize; if( iPage==0 ) return 0; if( checkRef(pCheck, iPage) ) return 0; - pCheck->zPfx = "Page %u: "; - pCheck->v1 = iPage; + pCheck->zPfx = "Tree %u page %u: "; + pCheck->v0 = pCheck->v1 = iPage; if( (rc = btreeGetPage(pBt, iPage, &pPage, 0))!=0 ){ checkAppendMsg(pCheck, "unable to get the page. error code=%d", rc); @@ -78950,7 +79412,7 @@ static int checkTreePage( hdr = pPage->hdrOffset; /* Set up for cell analysis */ - pCheck->zPfx = "On tree page %u cell %d: "; + pCheck->zPfx = "Tree %u page %u cell %u: "; contentOffset = get2byteNotZero(&data[hdr+5]); assert( contentOffset<=usableSize ); /* Enforced by btreeInitPage() */ @@ -78970,7 +79432,7 @@ static int checkTreePage( pgno = get4byte(&data[hdr+8]); #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ - pCheck->zPfx = "On page %u at right child: "; + pCheck->zPfx = "Tree %u page %u right child: "; checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage); } #endif @@ -78994,7 +79456,7 @@ static int checkTreePage( pc = get2byteAligned(pCellIdx); pCellIdx -= 2; if( pcusableSize-4 ){ - checkAppendMsg(pCheck, "Offset %d out of range %d..%d", + checkAppendMsg(pCheck, "Offset %u out of range %u..%u", pc, contentOffset, usableSize-4); doCoverageCheck = 0; continue; @@ -79126,7 +79588,7 @@ static int checkTreePage( */ if( heap[0]==0 && nFrag!=data[hdr+7] ){ checkAppendMsg(pCheck, - "Fragmentation of %d bytes reported as %d on page %u", + "Fragmentation of %u bytes reported as %u on page %u", nFrag, data[hdr+7], iPage); } } @@ -79223,7 +79685,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( /* Check the integrity of the freelist */ if( bCkFreelist ){ - sCheck.zPfx = "Main freelist: "; + sCheck.zPfx = "Freelist: "; checkList(&sCheck, 1, get4byte(&pBt->pPage1->aData[32]), get4byte(&pBt->pPage1->aData[36])); sCheck.zPfx = 0; @@ -79240,7 +79702,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( mxInHdr = get4byte(&pBt->pPage1->aData[52]); if( mx!=mxInHdr ){ checkAppendMsg(&sCheck, - "max rootpage (%d) disagrees with header (%d)", + "max rootpage (%u) disagrees with header (%u)", mx, mxInHdr ); } @@ -79271,7 +79733,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( for(i=1; i<=sCheck.nPage && sCheck.mxErr; i++){ #ifdef SQLITE_OMIT_AUTOVACUUM if( getPageReferenced(&sCheck, i)==0 ){ - checkAppendMsg(&sCheck, "Page %d is never used", i); + checkAppendMsg(&sCheck, "Page %u: never used", i); } #else /* If the database supports auto-vacuum, make sure no tables contain @@ -79279,11 +79741,11 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( */ if( getPageReferenced(&sCheck, i)==0 && (PTRMAP_PAGENO(pBt, i)!=i || !pBt->autoVacuum) ){ - checkAppendMsg(&sCheck, "Page %d is never used", i); + checkAppendMsg(&sCheck, "Page %u: never used", i); } if( getPageReferenced(&sCheck, i)!=0 && (PTRMAP_PAGENO(pBt, i)==i && pBt->autoVacuum) ){ - checkAppendMsg(&sCheck, "Pointer map page %d is referenced", i); + checkAppendMsg(&sCheck, "Page %u: pointer map referenced", i); } #endif 
} @@ -79845,13 +80307,7 @@ static int backupOnePage( assert( !isFatalError(p->rc) ); assert( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ); assert( zSrcData ); - - /* Catch the case where the destination is an in-memory database and the - ** page sizes of the source and destination differ. - */ - if( nSrcPgsz!=nDestPgsz && sqlite3PagerIsMemdb(pDestPager) ){ - rc = SQLITE_READONLY; - } + assert( nSrcPgsz==nDestPgsz || sqlite3PagerIsMemdb(pDestPager)==0 ); /* This loop runs once for each destination page spanned by the source ** page. For each iteration, variable iOff is set to the byte offset @@ -79984,7 +80440,10 @@ SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage){ pgszSrc = sqlite3BtreeGetPageSize(p->pSrc); pgszDest = sqlite3BtreeGetPageSize(p->pDest); destMode = sqlite3PagerGetJournalMode(sqlite3BtreePager(p->pDest)); - if( SQLITE_OK==rc && destMode==PAGER_JOURNALMODE_WAL && pgszSrc!=pgszDest ){ + if( SQLITE_OK==rc + && (destMode==PAGER_JOURNALMODE_WAL || sqlite3PagerIsMemdb(pDestPager)) + && pgszSrc!=pgszDest + ){ rc = SQLITE_READONLY; } @@ -80533,6 +80992,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemValidStrRep(Mem *p){ char *z; int i, j, incr; if( (p->flags & MEM_Str)==0 ) return 1; + if( p->db && p->db->mallocFailed ) return 1; if( p->flags & MEM_Term ){ /* Insure that the string is properly zero-terminated. Pay particular ** attention to the case where p->n is odd */ @@ -80815,7 +81275,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, u8 enc, u8 bForce){ vdbeMemRenderNum(nByte, pMem->z, pMem); assert( pMem->z!=0 ); - assert( pMem->n==sqlite3Strlen30NN(pMem->z) ); + assert( pMem->n==(int)sqlite3Strlen30NN(pMem->z) ); pMem->enc = SQLITE_UTF8; pMem->flags |= MEM_Str|MEM_Term; if( bForce ) pMem->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal); @@ -81859,6 +82319,9 @@ static int valueFromFunction( if( pList ) nVal = pList->nExpr; assert( !ExprHasProperty(p, EP_IntValue) ); pFunc = sqlite3FindFunction(db, p->u.zToken, nVal, enc, 0); +#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION + if( pFunc==0 ) return SQLITE_OK; +#endif assert( pFunc ); if( (pFunc->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0 || (pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL) @@ -81895,16 +82358,11 @@ static int valueFromFunction( }else{ sqlite3ValueApplyAffinity(pVal, aff, SQLITE_UTF8); assert( rc==SQLITE_OK ); - assert( enc==pVal->enc - || (pVal->flags & MEM_Str)==0 - || db->mallocFailed ); -#if 0 /* Not reachable except after a prior failure */ rc = sqlite3VdbeChangeEncoding(pVal, enc); - if( rc==SQLITE_OK && sqlite3VdbeMemTooBig(pVal) ){ + if( NEVER(rc==SQLITE_OK && sqlite3VdbeMemTooBig(pVal)) ){ rc = SQLITE_TOOBIG; pCtx->pParse->nErr++; } -#endif } value_from_function_out: @@ -81968,6 +82426,13 @@ static int valueFromExpr( rc = valueFromExpr(db, pExpr->pLeft, enc, aff, ppVal, pCtx); testcase( rc!=SQLITE_OK ); if( *ppVal ){ +#ifdef SQLITE_ENABLE_STAT4 + rc = ExpandBlob(*ppVal); +#else + /* zero-blobs only come from functions, not literal values. 
And + ** functions are only processed under STAT4 */ + assert( (ppVal[0][0].flags & MEM_Zero)==0 ); +#endif sqlite3VdbeMemCast(*ppVal, aff, enc); sqlite3ValueApplyAffinity(*ppVal, affinity, enc); } @@ -82814,10 +83279,10 @@ SQLITE_PRIVATE void sqlite3ExplainBreakpoint(const char *z1, const char *z2){ */ SQLITE_PRIVATE int sqlite3VdbeExplain(Parse *pParse, u8 bPush, const char *zFmt, ...){ int addr = 0; -#if !defined(SQLITE_DEBUG) && !defined(SQLITE_ENABLE_STMT_SCANSTATUS) +#if !defined(SQLITE_DEBUG) /* Always include the OP_Explain opcodes if SQLITE_DEBUG is defined. ** But omit them (for performance) during production builds */ - if( pParse->explain==2 ) + if( pParse->explain==2 || IS_STMT_SCANSTATUS(pParse->db) ) #endif { char *zMsg; @@ -83191,6 +83656,8 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ Op *pOp; Parse *pParse = p->pParse; int *aLabel = pParse->aLabel; + + assert( pParse->db->mallocFailed==0 ); /* tag-20230419-1 */ p->readOnly = 1; p->bIsReader = 0; pOp = &p->aOp[p->nOp-1]; @@ -83250,6 +83717,7 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ ** have non-negative values for P2. */ assert( (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP)!=0 ); assert( ADDR(pOp->p2)<-pParse->nLabel ); + assert( aLabel!=0 ); /* True because of tag-20230419-1 */ pOp->p2 = aLabel[ADDR(pOp->p2)]; } break; @@ -83493,18 +83961,20 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatus( LogEst nEst, /* Estimated number of output rows */ const char *zName /* Name of table or index being scanned */ ){ - sqlite3_int64 nByte = (p->nScan+1) * sizeof(ScanStatus); - ScanStatus *aNew; - aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte); - if( aNew ){ - ScanStatus *pNew = &aNew[p->nScan++]; - memset(pNew, 0, sizeof(ScanStatus)); - pNew->addrExplain = addrExplain; - pNew->addrLoop = addrLoop; - pNew->addrVisit = addrVisit; - pNew->nEst = nEst; - pNew->zName = sqlite3DbStrDup(p->db, zName); - p->aScan = aNew; + if( IS_STMT_SCANSTATUS(p->db) ){ + sqlite3_int64 nByte = (p->nScan+1) * sizeof(ScanStatus); + ScanStatus *aNew; + aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte); + if( aNew ){ + ScanStatus *pNew = &aNew[p->nScan++]; + memset(pNew, 0, sizeof(ScanStatus)); + pNew->addrExplain = addrExplain; + pNew->addrLoop = addrLoop; + pNew->addrVisit = addrVisit; + pNew->nEst = nEst; + pNew->zName = sqlite3DbStrDup(p->db, zName); + p->aScan = aNew; + } } } @@ -83521,20 +83991,22 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatusRange( int addrStart, int addrEnd ){ - ScanStatus *pScan = 0; - int ii; - for(ii=p->nScan-1; ii>=0; ii--){ - pScan = &p->aScan[ii]; - if( pScan->addrExplain==addrExplain ) break; - pScan = 0; - } - if( pScan ){ - if( addrEnd<0 ) addrEnd = sqlite3VdbeCurrentAddr(p)-1; - for(ii=0; iiaAddrRange); ii+=2){ - if( pScan->aAddrRange[ii]==0 ){ - pScan->aAddrRange[ii] = addrStart; - pScan->aAddrRange[ii+1] = addrEnd; - break; + if( IS_STMT_SCANSTATUS(p->db) ){ + ScanStatus *pScan = 0; + int ii; + for(ii=p->nScan-1; ii>=0; ii--){ + pScan = &p->aScan[ii]; + if( pScan->addrExplain==addrExplain ) break; + pScan = 0; + } + if( pScan ){ + if( addrEnd<0 ) addrEnd = sqlite3VdbeCurrentAddr(p)-1; + for(ii=0; iiaAddrRange); ii+=2){ + if( pScan->aAddrRange[ii]==0 ){ + pScan->aAddrRange[ii] = addrStart; + pScan->aAddrRange[ii+1] = addrEnd; + break; + } } } } @@ -83551,19 +84023,21 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatusCounters( int addrLoop, int addrVisit ){ - ScanStatus *pScan = 0; - int ii; - for(ii=p->nScan-1; ii>=0; ii--){ - pScan = &p->aScan[ii]; - if( 
pScan->addrExplain==addrExplain ) break; - pScan = 0; - } - if( pScan ){ - pScan->addrLoop = addrLoop; - pScan->addrVisit = addrVisit; + if( IS_STMT_SCANSTATUS(p->db) ){ + ScanStatus *pScan = 0; + int ii; + for(ii=p->nScan-1; ii>=0; ii--){ + pScan = &p->aScan[ii]; + if( pScan->addrExplain==addrExplain ) break; + pScan = 0; + } + if( pScan ){ + pScan->addrLoop = addrLoop; + pScan->addrVisit = addrVisit; + } } } -#endif +#endif /* defined(SQLITE_ENABLE_STMT_SCANSTATUS) */ /* @@ -83987,7 +84461,7 @@ SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){ /* Return the most recently added opcode */ -VdbeOp * sqlite3VdbeGetLastOp(Vdbe *p){ +SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetLastOp(Vdbe *p){ return sqlite3VdbeGetOp(p, p->nOp - 1); } @@ -85691,6 +86165,8 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ db->flags &= ~(u64)SQLITE_DeferFKs; sqlite3CommitInternalChanges(db); } + }else if( p->rc==SQLITE_SCHEMA && db->nVdbeActive>1 ){ + p->nChange = 0; }else{ sqlite3RollbackAll(db, SQLITE_OK); p->nChange = 0; @@ -86009,9 +86485,9 @@ static void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){ #ifdef SQLITE_ENABLE_NORMALIZE sqlite3DbFree(db, p->zNormSql); { - DblquoteStr *pThis, *pNext; - for(pThis=p->pDblStr; pThis; pThis=pNext){ - pNext = pThis->pNextStr; + DblquoteStr *pThis, *pNxt; + for(pThis=p->pDblStr; pThis; pThis=pNxt){ + pNxt = pThis->pNextStr; sqlite3DbFree(db, pThis); } } @@ -87638,6 +88114,20 @@ SQLITE_PRIVATE int sqlite3NotPureFunc(sqlite3_context *pCtx){ return 1; } +#if defined(SQLITE_ENABLE_CURSOR_HINTS) && defined(SQLITE_DEBUG) +/* +** This Walker callback is used to help verify that calls to +** sqlite3BtreeCursorHint() with opcode BTREE_HINT_RANGE have +** byte-code register values correctly initialized. +*/ +SQLITE_PRIVATE int sqlite3CursorRangeHintExprCheck(Walker *pWalker, Expr *pExpr){ + if( pExpr->op==TK_REGISTER ){ + assert( (pWalker->u.aMem[pExpr->iTable].flags & MEM_Undefined)==0 ); + } + return WRC_Continue; +} +#endif /* SQLITE_ENABLE_CURSOR_HINTS && SQLITE_DEBUG */ + #ifndef SQLITE_OMIT_VIRTUALTABLE /* ** Transfer error message text from an sqlite3_vtab.zErrMsg (text stored @@ -87700,6 +88190,16 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( PreUpdate preupdate; const char *zTbl = pTab->zName; static const u8 fakeSortOrder = 0; +#ifdef SQLITE_DEBUG + int nRealCol; + if( pTab->tabFlags & TF_WithoutRowid ){ + nRealCol = sqlite3PrimaryKeyIndex(pTab)->nColumn; + }else if( pTab->tabFlags & TF_HasVirtual ){ + nRealCol = pTab->nNVCol; + }else{ + nRealCol = pTab->nCol; + } +#endif assert( db->pPreUpdate==0 ); memset(&preupdate, 0, sizeof(PreUpdate)); @@ -87716,8 +88216,8 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( assert( pCsr!=0 ); assert( pCsr->eCurType==CURTYPE_BTREE ); - assert( pCsr->nField==pTab->nCol - || (pCsr->nField==pTab->nCol+1 && op==SQLITE_DELETE && iReg==-1) + assert( pCsr->nField==nRealCol + || (pCsr->nField==nRealCol+1 && op==SQLITE_DELETE && iReg==-1) ); preupdate.v = v; @@ -88024,7 +88524,7 @@ SQLITE_API int sqlite3_value_type(sqlite3_value* pVal){ SQLITE_NULL, /* 0x1f (not possible) */ SQLITE_FLOAT, /* 0x20 INTREAL */ SQLITE_NULL, /* 0x21 (not possible) */ - SQLITE_TEXT, /* 0x22 INTREAL + TEXT */ + SQLITE_FLOAT, /* 0x22 INTREAL + TEXT */ SQLITE_NULL, /* 0x23 (not possible) */ SQLITE_FLOAT, /* 0x24 (not possible) */ SQLITE_NULL, /* 0x25 (not possible) */ @@ -89090,9 +89590,9 @@ static const void *columnName( assert( db!=0 ); n = sqlite3_column_count(pStmt); if( N=0 ){ + u8 prior_mallocFailed = db->mallocFailed; N += useType*n; 
sqlite3_mutex_enter(db->mutex); - assert( db->mallocFailed==0 ); #ifndef SQLITE_OMIT_UTF16 if( useUtf16 ){ ret = sqlite3_value_text16((sqlite3_value*)&p->aColName[N]); @@ -89104,7 +89604,8 @@ static const void *columnName( /* A malloc may have failed inside of the _text() call. If this ** is the case, clear the mallocFailed flag and return NULL. */ - if( db->mallocFailed ){ + assert( db->mallocFailed==0 || db->mallocFailed==1 ); + if( db->mallocFailed > prior_mallocFailed ){ sqlite3OomClear(db); ret = 0; } @@ -89891,15 +90392,24 @@ SQLITE_API int sqlite3_stmt_scanstatus_v2( void *pOut /* OUT: Write the answer here */ ){ Vdbe *p = (Vdbe*)pStmt; - ScanStatus *pScan; + VdbeOp *aOp = p->aOp; + int nOp = p->nOp; + ScanStatus *pScan = 0; int idx; + if( p->pFrame ){ + VdbeFrame *pFrame; + for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent); + aOp = pFrame->aOp; + nOp = pFrame->nOp; + } + if( iScan<0 ){ int ii; if( iScanStatusOp==SQLITE_SCANSTAT_NCYCLE ){ i64 res = 0; - for(ii=0; iinOp; ii++){ - res += p->aOp[ii].nCycle; + for(ii=0; iiaddrLoop>0 ){ - *(sqlite3_int64*)pOut = p->aOp[pScan->addrLoop].nExec; + *(sqlite3_int64*)pOut = aOp[pScan->addrLoop].nExec; }else{ *(sqlite3_int64*)pOut = -1; } @@ -89933,7 +90443,7 @@ SQLITE_API int sqlite3_stmt_scanstatus_v2( } case SQLITE_SCANSTAT_NVISIT: { if( pScan->addrVisit>0 ){ - *(sqlite3_int64*)pOut = p->aOp[pScan->addrVisit].nExec; + *(sqlite3_int64*)pOut = aOp[pScan->addrVisit].nExec; }else{ *(sqlite3_int64*)pOut = -1; } @@ -89955,7 +90465,7 @@ SQLITE_API int sqlite3_stmt_scanstatus_v2( } case SQLITE_SCANSTAT_EXPLAIN: { if( pScan->addrExplain ){ - *(const char**)pOut = p->aOp[ pScan->addrExplain ].p4.z; + *(const char**)pOut = aOp[ pScan->addrExplain ].p4.z; }else{ *(const char**)pOut = 0; } @@ -89963,7 +90473,7 @@ SQLITE_API int sqlite3_stmt_scanstatus_v2( } case SQLITE_SCANSTAT_SELECTID: { if( pScan->addrExplain ){ - *(int*)pOut = p->aOp[ pScan->addrExplain ].p1; + *(int*)pOut = aOp[ pScan->addrExplain ].p1; }else{ *(int*)pOut = -1; } @@ -89971,7 +90481,7 @@ SQLITE_API int sqlite3_stmt_scanstatus_v2( } case SQLITE_SCANSTAT_PARENTID: { if( pScan->addrExplain ){ - *(int*)pOut = p->aOp[ pScan->addrExplain ].p2; + *(int*)pOut = aOp[ pScan->addrExplain ].p2; }else{ *(int*)pOut = -1; } @@ -89989,18 +90499,18 @@ SQLITE_API int sqlite3_stmt_scanstatus_v2( if( iIns==0 ) break; if( iIns>0 ){ while( iIns<=iEnd ){ - res += p->aOp[iIns].nCycle; + res += aOp[iIns].nCycle; iIns++; } }else{ int iOp; - for(iOp=0; iOpnOp; iOp++){ - Op *pOp = &p->aOp[iOp]; + for(iOp=0; iOpp1!=iEnd ) continue; if( (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_NCYCLE)==0 ){ continue; } - res += p->aOp[iOp].nCycle; + res += aOp[iOp].nCycle; } } } @@ -90923,7 +91433,10 @@ static u64 filterHash(const Mem *aMem, const Op *pOp){ }else if( p->flags & MEM_Real ){ h += sqlite3VdbeIntValue(p); }else if( p->flags & (MEM_Str|MEM_Blob) ){ - /* no-op */ + /* All strings have the same hash and all blobs have the same hash, + ** though, at least, those hashes are different from each other and + ** from NULL. 
*/ + h += 4093 + (p->flags & (MEM_Str|MEM_Blob)); } } return h; @@ -90973,6 +91486,7 @@ SQLITE_PRIVATE int sqlite3VdbeExec( Mem *pOut = 0; /* Output operand */ #if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE) u64 *pnCycle = 0; + int bStmtScanStatus = IS_STMT_SCANSTATUS(db)!=0; #endif /*** INSERT STACK UNION HERE ***/ @@ -91037,13 +91551,17 @@ SQLITE_PRIVATE int sqlite3VdbeExec( assert( pOp>=aOp && pOp<&aOp[p->nOp]); nVmStep++; -#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE) + +#if defined(VDBE_PROFILE) pOp->nExec++; pnCycle = &pOp->nCycle; -# ifdef VDBE_PROFILE - if( sqlite3NProfileCnt==0 ) -# endif + if( sqlite3NProfileCnt==0 ) *pnCycle -= sqlite3Hwtime(); +#elif defined(SQLITE_ENABLE_STMT_SCANSTATUS) + if( bStmtScanStatus ){ + pOp->nExec++; + pnCycle = &pOp->nCycle; *pnCycle -= sqlite3Hwtime(); + } #endif /* Only allow tracing if SQLITE_DEBUG is defined. @@ -92631,7 +93149,7 @@ case OP_Compare: { /* Opcode: Jump P1 P2 P3 * * ** ** Jump to the instruction at address P1, P2, or P3 depending on whether -** in the most recent OP_Compare instruction the P1 vector was less than +** in the most recent OP_Compare instruction the P1 vector was less than, ** equal to, or greater than the P2 vector, respectively. ** ** This opcode must immediately follow an OP_Compare opcode. @@ -92858,6 +93376,12 @@ case OP_IsNull: { /* same as TK_ISNULL, jump, in1 */ ** (0x01) bit. SQLITE_FLOAT is the 0x02 bit. SQLITE_TEXT is 0x04. ** SQLITE_BLOB is 0x08. SQLITE_NULL is 0x10. ** +** WARNING: This opcode does not reliably distinguish between NULL and REAL +** when P1>=0. If the database contains a NaN value, this opcode will think +** that the datatype is REAL when it should be NULL. When P1<0 and the value +** is already stored in register P3, then this opcode does reliably +** distinguish between NULL and REAL. The problem only arises then P1>=0. +** ** Take the jump to address P2 if and only if the datatype of the ** value determined by P1 and P3 corresponds to one of the bits in the ** P5 bitmask. @@ -92971,7 +93495,7 @@ case OP_IfNullRow: { /* jump */ VdbeCursor *pC; assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; - if( ALWAYS(pC) && pC->nullRow ){ + if( pC && pC->nullRow ){ sqlite3VdbeMemSetNull(aMem + pOp->p3); goto jump_to_p2; } @@ -93466,7 +93990,7 @@ case OP_Affinity: { }else{ pIn1->u.r = (double)pIn1->u.i; pIn1->flags |= MEM_Real; - pIn1->flags &= ~MEM_Int; + pIn1->flags &= ~(MEM_Int|MEM_Str); } } REGISTER_TRACE((int)(pIn1-aMem), pIn1); @@ -95205,6 +95729,7 @@ case OP_SeekScan: { /* ncycle */ break; } nStep--; + pC->cacheStatus = CACHE_STALE; rc = sqlite3BtreeNext(pC->uc.pCursor, 0); if( rc ){ if( rc==SQLITE_DONE ){ @@ -97857,6 +98382,7 @@ case OP_AggFinal: { } sqlite3VdbeChangeEncoding(pMem, encoding); UPDATE_MAX_BLOBSIZE(pMem); + REGISTER_TRACE((int)(pMem-aMem), pMem); break; } @@ -98995,8 +99521,10 @@ default: { /* This is really OP_Noop, OP_Explain */ *pnCycle += sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime(); pnCycle = 0; #elif defined(SQLITE_ENABLE_STMT_SCANSTATUS) - *pnCycle += sqlite3Hwtime(); - pnCycle = 0; + if( pnCycle ){ + *pnCycle += sqlite3Hwtime(); + pnCycle = 0; + } #endif /* The following code adds nothing to the actual functionality @@ -99475,7 +100003,7 @@ SQLITE_API int sqlite3_blob_open( if( pBlob && pBlob->pStmt ) sqlite3VdbeFinalize((Vdbe *)pBlob->pStmt); sqlite3DbFree(db, pBlob); } - sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : 0), zErr); + sqlite3ErrorWithMsg(db, rc, (zErr ? 
"%s" : (char*)0), zErr); sqlite3DbFree(db, zErr); sqlite3ParseObjectReset(&sParse); rc = sqlite3ApiExit(db, rc); @@ -99634,7 +100162,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){ ((Vdbe*)p->pStmt)->rc = SQLITE_OK; rc = blobSeekToRow(p, iRow, &zErr); if( rc!=SQLITE_OK ){ - sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : 0), zErr); + sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : (char*)0), zErr); sqlite3DbFree(db, zErr); } assert( rc!=SQLITE_SCHEMA ); @@ -104022,7 +104550,8 @@ static int lookupName( assert( op==TK_DELETE || op==TK_UPDATE || op==TK_INSERT ); if( pParse->bReturning ){ if( (pNC->ncFlags & NC_UBaseReg)!=0 - && (zTab==0 || sqlite3StrICmp(zTab,pParse->pTriggerTab->zName)==0) + && ALWAYS(zTab==0 + || sqlite3StrICmp(zTab,pParse->pTriggerTab->zName)==0) ){ pExpr->iTable = op!=TK_DELETE; pTab = pParse->pTriggerTab; @@ -105996,11 +106525,10 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr){ }else{ Expr *pNext = p->pRight; /* The Expr.x union is never used at the same time as Expr.pRight */ - assert( ExprUseXList(p) ); - assert( p->x.pList==0 || p->pRight==0 ); - if( p->x.pList!=0 && !db->mallocFailed ){ + assert( !ExprUseXList(p) || p->x.pList==0 || p->pRight==0 ); + if( ExprUseXList(p) && p->x.pList!=0 && !db->mallocFailed ){ int i; - for(i=0; ALWAYS(ix.pList->nExpr); i++){ + for(i=0; ix.pList->nExpr; i++){ if( ExprHasProperty(p->x.pList->a[i].pExpr, EP_Collate) ){ pNext = p->x.pList->a[i].pExpr; break; @@ -106832,9 +107360,9 @@ SQLITE_PRIVATE Select *sqlite3ExprListToValues(Parse *pParse, int nElem, ExprLis ** Join two expressions using an AND operator. If either expression is ** NULL, then just return the other expression. ** -** If one side or the other of the AND is known to be false, then instead -** of returning an AND expression, just return a constant expression with -** a value of false. +** If one side or the other of the AND is known to be false, and neither side +** is part of an ON clause, then instead of returning an AND expression, +** just return a constant expression with a value of false. */ SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse *pParse, Expr *pLeft, Expr *pRight){ sqlite3 *db = pParse->db; @@ -106842,14 +107370,17 @@ SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse *pParse, Expr *pLeft, Expr *pRight){ return pRight; }else if( pRight==0 ){ return pLeft; - }else if( (ExprAlwaysFalse(pLeft) || ExprAlwaysFalse(pRight)) - && !IN_RENAME_OBJECT - ){ - sqlite3ExprDeferredDelete(pParse, pLeft); - sqlite3ExprDeferredDelete(pParse, pRight); - return sqlite3Expr(db, TK_INTEGER, "0"); }else{ - return sqlite3PExpr(pParse, TK_AND, pLeft, pRight); + u32 f = pLeft->flags | pRight->flags; + if( (f&(EP_OuterON|EP_InnerON|EP_IsFalse))==EP_IsFalse + && !IN_RENAME_OBJECT + ){ + sqlite3ExprDeferredDelete(pParse, pLeft); + sqlite3ExprDeferredDelete(pParse, pRight); + return sqlite3Expr(db, TK_INTEGER, "0"); + }else{ + return sqlite3PExpr(pParse, TK_AND, pLeft, pRight); + } } } @@ -108094,12 +108625,17 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ } /* -** Check pExpr to see if it is an invariant constraint on data source pSrc. +** Check pExpr to see if it is an constraint on the single data source +** pSrc = &pSrcList->a[iSrc]. In other words, check to see if pExpr +** constrains pSrc but does not depend on any other tables or data +** sources anywhere else in the query. Return true (non-zero) if pExpr +** is a constraint on pSrc only. +** ** This is an optimization. 
False negatives will perhaps cause slower ** queries, but false positives will yield incorrect answers. So when in ** doubt, return 0. ** -** To be an invariant constraint, the following must be true: +** To be an single-source constraint, the following must be true: ** ** (1) pExpr cannot refer to any table other than pSrc->iCursor. ** @@ -108110,13 +108646,31 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ ** ** (4) If pSrc is the right operand of a LEFT JOIN, then... ** (4a) pExpr must come from an ON clause.. - (4b) and specifically the ON clause associated with the LEFT JOIN. +** (4b) and specifically the ON clause associated with the LEFT JOIN. ** ** (5) If pSrc is not the right operand of a LEFT JOIN or the left ** operand of a RIGHT JOIN, then pExpr must be from the WHERE ** clause, not an ON clause. +** +** (6) Either: +** +** (6a) pExpr does not originate in an ON or USING clause, or +** +** (6b) The ON or USING clause from which pExpr is derived is +** not to the left of a RIGHT JOIN (or FULL JOIN). +** +** Without this restriction, accepting pExpr as a single-table +** constraint might move the the ON/USING filter expression +** from the left side of a RIGHT JOIN over to the right side, +** which leads to incorrect answers. See also restriction (9) +** on push-down. */ -SQLITE_PRIVATE int sqlite3ExprIsTableConstraint(Expr *pExpr, const SrcItem *pSrc){ +SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint( + Expr *pExpr, /* The constraint */ + const SrcList *pSrcList, /* Complete FROM clause */ + int iSrc /* Which element of pSrcList to use */ +){ + const SrcItem *pSrc = &pSrcList->a[iSrc]; if( pSrc->fg.jointype & JT_LTORJ ){ return 0; /* rule (3) */ } @@ -108126,6 +108680,19 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstraint(Expr *pExpr, const SrcItem *pSrc }else{ if( ExprHasProperty(pExpr, EP_OuterON) ) return 0; /* rule (5) */ } + if( ExprHasProperty(pExpr, EP_OuterON|EP_InnerON) /* (6a) */ + && (pSrcList->a[0].fg.jointype & JT_LTORJ)!=0 /* Fast pre-test of (6b) */ + ){ + int jj; + for(jj=0; jjw.iJoin==pSrcList->a[jj].iCursor ){ + if( (pSrcList->a[jj].fg.jointype & JT_LTORJ)!=0 ){ + return 0; /* restriction (6) */ + } + break; + } + } + } return sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor); /* rules (1), (2) */ } @@ -108368,7 +108935,7 @@ SQLITE_PRIVATE int sqlite3IsRowid(const char *z){ ** pX is the RHS of an IN operator. If pX is a SELECT statement ** that can be simplified to a direct table access, then return ** a pointer to the SELECT statement. If pX is not a SELECT statement, -** or if the SELECT statement needs to be manifested into a transient +** or if the SELECT statement needs to be materialized into a transient ** table, then return NULL. 
*/ #ifndef SQLITE_OMIT_SUBQUERY @@ -108654,7 +109221,6 @@ SQLITE_PRIVATE int sqlite3FindInIndex( CollSeq *pReq = sqlite3BinaryCompareCollSeq(pParse, pLhs, pRhs); int j; - assert( pReq!=0 || pRhs->iColumn==XN_ROWID || pParse->nErr ); for(j=0; jaiColumn[j]!=pRhs->iColumn ) continue; assert( pIdx->azColl[j] ); @@ -109940,7 +110506,19 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) AggInfo *pAggInfo = pExpr->pAggInfo; struct AggInfo_col *pCol; assert( pAggInfo!=0 ); - assert( pExpr->iAgg>=0 && pExpr->iAggnColumn ); + assert( pExpr->iAgg>=0 ); + if( pExpr->iAgg>=pAggInfo->nColumn ){ + /* Happens when the left table of a RIGHT JOIN is null and + ** is using an expression index */ + sqlite3VdbeAddOp2(v, OP_Null, 0, target); +#ifdef SQLITE_VDBE_COVERAGE + /* Verify that the OP_Null above is exercised by tests + ** tag-20230325-2 */ + sqlite3VdbeAddOp2(v, OP_NotNull, target, 1); + VdbeCoverageNeverTaken(v); +#endif + break; + } pCol = &pAggInfo->aCol[pExpr->iAgg]; if( !pAggInfo->directMode ){ return AggInfoColumnReg(pAggInfo, pExpr->iAgg); @@ -110115,11 +110693,8 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) #ifndef SQLITE_OMIT_CAST case TK_CAST: { /* Expressions of the form: CAST(pLeft AS token) */ - inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); - if( inReg!=target ){ - sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target); - inReg = target; - } + sqlite3ExprCode(pParse, pExpr->pLeft, target); + assert( inReg==target ); assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3VdbeAddOp2(v, OP_Cast, target, sqlite3AffinityType(pExpr->u.zToken, 0)); @@ -110458,13 +111033,9 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) ** Clear subtypes as subtypes may not cross a subquery boundary. */ assert( pExpr->pLeft ); - inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); - if( inReg!=target ){ - sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target); - inReg = target; - } - sqlite3VdbeAddOp1(v, OP_ClrSubtype, inReg); - return inReg; + sqlite3ExprCode(pParse, pExpr->pLeft, target); + sqlite3VdbeAddOp1(v, OP_ClrSubtype, target); + return target; }else{ pExpr = pExpr->pLeft; goto expr_code_doover; /* 2018-04-28: Prevent deep recursion. */ @@ -110574,12 +111145,9 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) ** "target" and not someplace else. 
*/ pParse->okConstFactor = 0; /* note (1) above */ - inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); + sqlite3ExprCode(pParse, pExpr->pLeft, target); + assert( target==inReg ); pParse->okConstFactor = okConstFactor; - if( inReg!=target ){ /* note (2) above */ - sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target); - inReg = target; - } sqlite3VdbeJumpHere(v, addrINR); break; } @@ -110817,7 +111385,9 @@ SQLITE_PRIVATE void sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){ inReg = sqlite3ExprCodeTarget(pParse, pExpr, target); if( inReg!=target ){ u8 op; - if( ALWAYS(pExpr) && ExprHasProperty(pExpr,EP_Subquery) ){ + if( ALWAYS(pExpr) + && (ExprHasProperty(pExpr,EP_Subquery) || pExpr->op==TK_REGISTER) + ){ op = OP_Copy; }else{ op = OP_SCopy; @@ -112002,9 +112572,11 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ int iAgg = pExpr->iAgg; Parse *pParse = pWalker->pParse; sqlite3 *db = pParse->db; + assert( iAgg>=0 ); if( pExpr->op!=TK_AGG_FUNCTION ){ - assert( iAgg>=0 && iAggnColumn ); - if( pAggInfo->aCol[iAgg].pCExpr==pExpr ){ + if( iAggnColumn + && pAggInfo->aCol[iAgg].pCExpr==pExpr + ){ pExpr = sqlite3ExprDup(db, pExpr, 0); if( pExpr ){ pAggInfo->aCol[iAgg].pCExpr = pExpr; @@ -112013,8 +112585,9 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ } }else{ assert( pExpr->op==TK_AGG_FUNCTION ); - assert( iAgg>=0 && iAggnFunc ); - if( pAggInfo->aFunc[iAgg].pFExpr==pExpr ){ + if( ALWAYS(iAggnFunc) + && pAggInfo->aFunc[iAgg].pFExpr==pExpr + ){ pExpr = sqlite3ExprDup(db, pExpr, 0); if( pExpr ){ pAggInfo->aFunc[iAgg].pFExpr = pExpr; @@ -112164,7 +112737,12 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ } if( pIEpr==0 ) break; if( NEVER(!ExprUseYTab(pExpr)) ) break; - if( pExpr->pAggInfo!=0 ) break; /* Already resolved by outer context */ + for(i=0; inSrc; i++){ + if( pSrcList->a[0].iCursor==pIEpr->iDataCur ) break; + } + if( i>=pSrcList->nSrc ) break; + if( NEVER(pExpr->pAggInfo!=0) ) break; /* Resolved by outer context */ + if( pParse->nErr ){ return WRC_Abort; } /* If we reach this point, it means that expression pExpr can be ** translated into a reference to an index column as described by @@ -112175,6 +112753,9 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ tmp.iTable = pIEpr->iIdxCur; tmp.iColumn = pIEpr->iIdxCol; findOrCreateAggInfoColumn(pParse, pAggInfo, &tmp); + if( pParse->nErr ){ return WRC_Abort; } + assert( pAggInfo->aCol!=0 ); + assert( tmp.iAggnColumn ); pAggInfo->aCol[tmp.iAgg].pCExpr = pExpr; pExpr->pAggInfo = pAggInfo; pExpr->iAgg = tmp.iAgg; @@ -112198,7 +112779,7 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ } /* endif pExpr->iTable==pItem->iCursor */ } /* end loop over pSrcList */ } - return WRC_Prune; + return WRC_Continue; } case TK_AGG_FUNCTION: { if( (pNC->ncFlags & NC_InAggFunc)==0 @@ -112351,6 +112932,37 @@ SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse *pParse){ pParse->nRangeReg = 0; } +/* +** Make sure sufficient registers have been allocated so that +** iReg is a valid register number. +*/ +SQLITE_PRIVATE void sqlite3TouchRegister(Parse *pParse, int iReg){ + if( pParse->nMemnMem = iReg; +} + +#if defined(SQLITE_ENABLE_STAT4) || defined(SQLITE_DEBUG) +/* +** Return the latest reusable register in the set of all registers. +** The value returned is no less than iMin. If any register iMin or +** greater is in permanent use, then return one more than that last +** permanent register. 
+*/ +SQLITE_PRIVATE int sqlite3FirstAvailableRegister(Parse *pParse, int iMin){ + const ExprList *pList = pParse->pConstExpr; + if( pList ){ + int i; + for(i=0; inExpr; i++){ + if( pList->a[i].u.iConstExprReg>=iMin ){ + iMin = pList->a[i].u.iConstExprReg + 1; + } + } + } + pParse->nTempReg = 0; + pParse->nRangeReg = 0; + return iMin; +} +#endif /* SQLITE_ENABLE_STAT4 || SQLITE_DEBUG */ + /* ** Validate that no temporary register falls within the range of ** iFirst..iLast, inclusive. This routine is only call from within assert() @@ -112370,6 +112982,14 @@ SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse *pParse, int iFirst, int iLast){ return 0; } } + if( pParse->pConstExpr ){ + ExprList *pList = pParse->pConstExpr; + for(i=0; inExpr; i++){ + int iReg = pList->a[i].u.iConstExprReg; + if( iReg==0 ) continue; + if( iReg>=iFirst && iReg<=iLast ) return 0; + } + } return 1; } #endif /* SQLITE_DEBUG */ @@ -113657,6 +114277,19 @@ static int renameEditSql( return rc; } +/* +** Set all pEList->a[].fg.eEName fields in the expression-list to val. +*/ +static void renameSetENames(ExprList *pEList, int val){ + if( pEList ){ + int i; + for(i=0; inExpr; i++){ + assert( val==ENAME_NAME || pEList->a[i].fg.eEName==ENAME_NAME ); + pEList->a[i].fg.eEName = val; + } + } +} + /* ** Resolve all symbols in the trigger at pParse->pNewTrigger, assuming ** it was read from the schema of database zDb. Return SQLITE_OK if @@ -113704,7 +114337,17 @@ static int renameResolveTrigger(Parse *pParse){ pSrc = 0; rc = SQLITE_NOMEM; }else{ + /* pStep->pExprList contains an expression-list used for an UPDATE + ** statement. So the a[].zEName values are the RHS of the + ** " = " clauses of the UPDATE statement. So, before + ** running SelectPrep(), change all the eEName values in + ** pStep->pExprList to ENAME_SPAN (from their current value of + ** ENAME_NAME). This is to prevent any ids in ON() clauses that are + ** part of pSrc from being incorrectly resolved against the + ** a[].zEName values as if they were column aliases. */ + renameSetENames(pStep->pExprList, ENAME_SPAN); sqlite3SelectPrep(pParse, pSel, 0); + renameSetENames(pStep->pExprList, ENAME_NAME); rc = pParse->nErr ? SQLITE_ERROR : SQLITE_OK; assert( pStep->pExprList==0 || pStep->pExprList==pSel->pEList ); assert( pSrc==pSel->pSrc ); @@ -115653,11 +116296,15 @@ static void analyzeOneTable( int regIdxname = iMem++; /* Register containing index name */ int regStat1 = iMem++; /* Value for the stat column of sqlite_stat1 */ int regPrev = iMem; /* MUST BE LAST (see below) */ +#ifdef SQLITE_ENABLE_STAT4 + int doOnce = 1; /* Flag for a one-time computation */ +#endif #ifdef SQLITE_ENABLE_PREUPDATE_HOOK Table *pStat1 = 0; #endif - pParse->nMem = MAX(pParse->nMem, iMem); + sqlite3TouchRegister(pParse, iMem); + assert( sqlite3NoTempsInRange(pParse, regNewRowid, iMem) ); v = sqlite3GetVdbe(pParse); if( v==0 || NEVER(pTab==0) ){ return; @@ -115763,7 +116410,7 @@ static void analyzeOneTable( ** the regPrev array and a trailing rowid (the rowid slot is required ** when building a record to insert into the sample column of ** the sqlite_stat4 table. */ - pParse->nMem = MAX(pParse->nMem, regPrev+nColTest); + sqlite3TouchRegister(pParse, regPrev+nColTest); /* Open a read-only cursor on the index being analyzed. */ assert( iDb==sqlite3SchemaToIndex(db, pIdx->pSchema) ); @@ -115935,7 +116582,35 @@ static void analyzeOneTable( int addrIsNull; u8 seekOp = HasRowid(pTab) ? 
OP_NotExists : OP_NotFound; - pParse->nMem = MAX(pParse->nMem, regCol+nCol); + if( doOnce ){ + int mxCol = nCol; + Index *pX; + + /* Compute the maximum number of columns in any index */ + for(pX=pTab->pIndex; pX; pX=pX->pNext){ + int nColX; /* Number of columns in pX */ + if( !HasRowid(pTab) && IsPrimaryKeyIndex(pX) ){ + nColX = pX->nKeyCol; + }else{ + nColX = pX->nColumn; + } + if( nColX>mxCol ) mxCol = nColX; + } + + /* Allocate space to compute results for the largest index */ + sqlite3TouchRegister(pParse, regCol+mxCol); + doOnce = 0; +#ifdef SQLITE_DEBUG + /* Verify that the call to sqlite3ClearTempRegCache() below + ** really is needed. + ** https://sqlite.org/forum/forumpost/83cb4a95a0 (2023-03-25) + */ + testcase( !sqlite3NoTempsInRange(pParse, regEq, regCol+mxCol) ); +#endif + sqlite3ClearTempRegCache(pParse); /* tag-20230325-1 */ + assert( sqlite3NoTempsInRange(pParse, regEq, regCol+mxCol) ); + } + assert( sqlite3NoTempsInRange(pParse, regEq, regCol+nCol) ); addrNext = sqlite3VdbeCurrentAddr(v); callStatGet(pParse, regStat, STAT_GET_ROWID, regSampleRowid); @@ -116016,6 +116691,11 @@ static void analyzeDatabase(Parse *pParse, int iDb){ for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){ Table *pTab = (Table*)sqliteHashData(k); analyzeOneTable(pParse, pTab, 0, iStatCur, iMem, iTab); +#ifdef SQLITE_ENABLE_STAT4 + iMem = sqlite3FirstAvailableRegister(pParse, iMem); +#else + assert( iMem==sqlite3FirstAvailableRegister(pParse,iMem) ); +#endif } loadAnalysis(pParse, iDb); } @@ -116403,6 +117083,10 @@ static int loadStatTbl( pIdx = findIndexOrPrimaryKey(db, zIndex, zDb); assert( pIdx==0 || pIdx->nSample==0 ); if( pIdx==0 ) continue; + if( pIdx->aSample!=0 ){ + /* The same index appears in sqlite_stat4 under multiple names */ + continue; + } assert( !HasRowid(pIdx->pTable) || pIdx->nColumn==pIdx->nKeyCol+1 ); if( !HasRowid(pIdx->pTable) && IsPrimaryKeyIndex(pIdx) ){ nIdxCol = pIdx->nKeyCol; @@ -116410,6 +117094,7 @@ static int loadStatTbl( nIdxCol = pIdx->nColumn; } pIdx->nSampleCol = nIdxCol; + pIdx->mxSample = nSample; nByte = sizeof(IndexSample) * nSample; nByte += sizeof(tRowcnt) * nIdxCol * 3 * nSample; nByte += nIdxCol * sizeof(tRowcnt); /* Space for Index.aAvgEq[] */ @@ -116449,6 +117134,11 @@ static int loadStatTbl( if( zIndex==0 ) continue; pIdx = findIndexOrPrimaryKey(db, zIndex, zDb); if( pIdx==0 ) continue; + if( pIdx->nSample>=pIdx->mxSample ){ + /* Too many slots used because the same index appears in + ** sqlite_stat4 using multiple names */ + continue; + } /* This next condition is true if data has already been loaded from ** the sqlite_stat4 table. 
*/ nCol = pIdx->nSampleCol; @@ -116492,11 +117182,12 @@ static int loadStat4(sqlite3 *db, const char *zDb){ const Table *pStat4; assert( db->lookaside.bDisable ); - if( (pStat4 = sqlite3FindTable(db, "sqlite_stat4", zDb))!=0 + if( OptimizationEnabled(db, SQLITE_Stat4) + && (pStat4 = sqlite3FindTable(db, "sqlite_stat4", zDb))!=0 && IsOrdinaryTable(pStat4) ){ rc = loadStatTbl(db, - "SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx", + "SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx COLLATE nocase", "SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4", zDb ); @@ -118340,7 +119031,7 @@ static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){ if( IsOrdinaryTable(pTable) ){ sqlite3FkDelete(db, pTable); } -#ifndef SQLITE_OMIT_VIRTUAL_TABLE +#ifndef SQLITE_OMIT_VIRTUALTABLE else if( IsVirtual(pTable) ){ sqlite3VtabClear(db, pTable); } @@ -123372,6 +124063,7 @@ SQLITE_PRIVATE void sqlite3SetTextEncoding(sqlite3 *db, u8 enc){ ** strings is BINARY. */ db->pDfltColl = sqlite3FindCollSeq(db, enc, sqlite3StrBINARY, 0); + sqlite3ExpirePreparedStatements(db, 1); } /* @@ -123843,13 +124535,15 @@ static int tabIsReadOnly(Parse *pParse, Table *pTab){ ** If pTab is writable but other errors have occurred -> return 1. ** If pTab is writable and no prior errors -> return 0; */ -SQLITE_PRIVATE int sqlite3IsReadOnly(Parse *pParse, Table *pTab, int viewOk){ +SQLITE_PRIVATE int sqlite3IsReadOnly(Parse *pParse, Table *pTab, Trigger *pTrigger){ if( tabIsReadOnly(pParse, pTab) ){ sqlite3ErrorMsg(pParse, "table %s may not be modified", pTab->zName); return 1; } #ifndef SQLITE_OMIT_VIEW - if( !viewOk && IsView(pTab) ){ + if( IsView(pTab) + && (pTrigger==0 || (pTrigger->bReturning && pTrigger->pNext==0)) + ){ sqlite3ErrorMsg(pParse,"cannot modify %s because it is a view",pTab->zName); return 1; } @@ -124103,7 +124797,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( goto delete_from_cleanup; } - if( sqlite3IsReadOnly(pParse, pTab, (pTrigger?1:0)) ){ + if( sqlite3IsReadOnly(pParse, pTab, pTrigger) ){ goto delete_from_cleanup; } iDb = sqlite3SchemaToIndex(db, pTab->pSchema); @@ -126266,7 +126960,7 @@ static void trimFunc( /* ** The "unknown" function is automatically substituted in place of ** any unrecognized function name when doing an EXPLAIN or EXPLAIN QUERY PLAN -** when the SQLITE_ENABLE_UNKNOWN_FUNCTION compile-time option is used. +** when the SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION compile-time option is used. 
** When the "sqlite3" command-line shell is built using this functionality, ** that allows an EXPLAIN or EXPLAIN QUERY PLAN for complex queries ** involving application-defined functions to be examined in a generic @@ -128569,22 +129263,22 @@ static Trigger *fkActionTrigger( if( action==OE_Restrict ){ int iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - Token tFrom; - Token tDb; + SrcList *pSrc; Expr *pRaise; - tFrom.z = zFrom; - tFrom.n = nFrom; - tDb.z = db->aDb[iDb].zDbSName; - tDb.n = sqlite3Strlen30(tDb.z); - pRaise = sqlite3Expr(db, TK_RAISE, "FOREIGN KEY constraint failed"); if( pRaise ){ pRaise->affExpr = OE_Abort; } + pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0); + if( pSrc ){ + assert( pSrc->nSrc==1 ); + pSrc->a[0].zName = sqlite3DbStrDup(db, zFrom); + pSrc->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); + } pSelect = sqlite3SelectNew(pParse, sqlite3ExprListAppend(pParse, 0, pRaise), - sqlite3SrcListAppend(pParse, 0, &tDb, &tFrom), + pSrc, pWhere, 0, 0, 0, 0, 0 ); @@ -128800,46 +129494,48 @@ SQLITE_PRIVATE void sqlite3OpenTable( ** is managed along with the rest of the Index structure. It will be ** released when sqlite3DeleteIndex() is called. */ -SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){ +static SQLITE_NOINLINE const char *computeIndexAffStr(sqlite3 *db, Index *pIdx){ + /* The first time a column affinity string for a particular index is + ** required, it is allocated and populated here. It is then stored as + ** a member of the Index structure for subsequent use. + ** + ** The column affinity string will eventually be deleted by + ** sqliteDeleteIndex() when the Index structure itself is cleaned + ** up. + */ + int n; + Table *pTab = pIdx->pTable; + pIdx->zColAff = (char *)sqlite3DbMallocRaw(0, pIdx->nColumn+1); if( !pIdx->zColAff ){ - /* The first time a column affinity string for a particular index is - ** required, it is allocated and populated here. It is then stored as - ** a member of the Index structure for subsequent use. - ** - ** The column affinity string will eventually be deleted by - ** sqliteDeleteIndex() when the Index structure itself is cleaned - ** up. - */ - int n; - Table *pTab = pIdx->pTable; - pIdx->zColAff = (char *)sqlite3DbMallocRaw(0, pIdx->nColumn+1); - if( !pIdx->zColAff ){ - sqlite3OomFault(db); - return 0; - } - for(n=0; nnColumn; n++){ - i16 x = pIdx->aiColumn[n]; - char aff; - if( x>=0 ){ - aff = pTab->aCol[x].affinity; - }else if( x==XN_ROWID ){ - aff = SQLITE_AFF_INTEGER; - }else{ - assert( x==XN_EXPR ); - assert( pIdx->bHasExpr ); - assert( pIdx->aColExpr!=0 ); - aff = sqlite3ExprAffinity(pIdx->aColExpr->a[n].pExpr); - } - if( affSQLITE_AFF_NUMERIC) aff = SQLITE_AFF_NUMERIC; - pIdx->zColAff[n] = aff; + sqlite3OomFault(db); + return 0; + } + for(n=0; nnColumn; n++){ + i16 x = pIdx->aiColumn[n]; + char aff; + if( x>=0 ){ + aff = pTab->aCol[x].affinity; + }else if( x==XN_ROWID ){ + aff = SQLITE_AFF_INTEGER; + }else{ + assert( x==XN_EXPR ); + assert( pIdx->bHasExpr ); + assert( pIdx->aColExpr!=0 ); + aff = sqlite3ExprAffinity(pIdx->aColExpr->a[n].pExpr); } - pIdx->zColAff[n] = 0; + if( affSQLITE_AFF_NUMERIC) aff = SQLITE_AFF_NUMERIC; + pIdx->zColAff[n] = aff; } - + pIdx->zColAff[n] = 0; + return pIdx->zColAff; +} +SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){ + if( !pIdx->zColAff ) return computeIndexAffStr(db, pIdx); return pIdx->zColAff; } + /* ** Compute an affinity string for a table. Space is obtained ** from sqlite3DbMalloc(). 
The caller is responsible for freeing @@ -129524,7 +130220,7 @@ SQLITE_PRIVATE void sqlite3Insert( /* Cannot insert into a read-only table. */ - if( sqlite3IsReadOnly(pParse, pTab, tmask) ){ + if( sqlite3IsReadOnly(pParse, pTab, pTrigger) ){ goto insert_cleanup; } @@ -129971,7 +130667,7 @@ SQLITE_PRIVATE void sqlite3Insert( } /* Copy the new data already generated. */ - assert( pTab->nNVCol>0 ); + assert( pTab->nNVCol>0 || pParse->nErr>0 ); sqlite3VdbeAddOp3(v, OP_Copy, regRowid+1, regCols+1, pTab->nNVCol-1); #ifndef SQLITE_OMIT_GENERATED_COLUMNS @@ -133334,7 +134030,11 @@ static int sqlite3LoadExtension( /* tag-20210611-1. Some dlopen() implementations will segfault if given ** an oversize filename. Most filesystems have a pathname limit of 4K, ** so limit the extension filename length to about twice that. - ** https://sqlite.org/forum/forumpost/08a0d6d9bf */ + ** https://sqlite.org/forum/forumpost/08a0d6d9bf + ** + ** Later (2023-03-25): Save an extra 6 bytes for the filename suffix. + ** See https://sqlite.org/forum/forumpost/24083b579d. + */ if( nMsg>SQLITE_MAX_PATHLEN ) goto extension_not_found; handle = sqlite3OsDlOpen(pVfs, zFile); @@ -133342,7 +134042,9 @@ static int sqlite3LoadExtension( for(ii=0; iiaDb[iDb].zDbSName; sqlite3CodeVerifySchema(pParse, iDb); sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); - if( pTab->nCol+regRow>pParse->nMem ) pParse->nMem = pTab->nCol + regRow; + sqlite3TouchRegister(pParse, pTab->nCol+regRow); sqlite3OpenTable(pParse, 0, iDb, pTab, OP_OpenRead); sqlite3VdbeLoadString(v, regResult, pTab->zName); assert( IsOrdinaryTable(pTab) ); @@ -135878,7 +136580,7 @@ SQLITE_PRIVATE void sqlite3Pragma( ** regRow..regRow+n. If any of the child key values are NULL, this ** row cannot cause an FK violation. Jump directly to addrOk in ** this case. */ - if( regRow+pFK->nCol>pParse->nMem ) pParse->nMem = regRow+pFK->nCol; + sqlite3TouchRegister(pParse, regRow + pFK->nCol); for(j=0; jnCol; j++){ int iCol = aiCols ? aiCols[j] : pFK->aCol[j].iFrom; sqlite3ExprCodeGetColumnOfTable(v, pTab, 0, iCol, regRow+j); @@ -136007,6 +136709,7 @@ SQLITE_PRIVATE void sqlite3Pragma( if( iDb>=0 && i!=iDb ) continue; sqlite3CodeVerifySchema(pParse, i); + pParse->okConstFactor = 0; /* tag-20230327-1 */ /* Do an integrity check of the B-Tree ** @@ -136042,7 +136745,7 @@ SQLITE_PRIVATE void sqlite3Pragma( aRoot[0] = cnt; /* Make sure sufficient number of registers have been allocated */ - pParse->nMem = MAX( pParse->nMem, 8+mxIdx ); + sqlite3TouchRegister(pParse, 8+mxIdx); sqlite3ClearTempRegCache(pParse); /* Do the b-tree integrity checks */ @@ -136192,15 +136895,29 @@ SQLITE_PRIVATE void sqlite3Pragma( labelOk = sqlite3VdbeMakeLabel(pParse); if( pCol->notNull ){ /* (1) NOT NULL columns may not contain a NULL */ + int jmp3; int jmp2 = sqlite3VdbeAddOp4Int(v, OP_IsType, p1, labelOk, p3, p4); - sqlite3VdbeChangeP5(v, 0x0f); VdbeCoverage(v); + if( p1<0 ){ + sqlite3VdbeChangeP5(v, 0x0f); /* INT, REAL, TEXT, or BLOB */ + jmp3 = jmp2; + }else{ + sqlite3VdbeChangeP5(v, 0x0d); /* INT, TEXT, or BLOB */ + /* OP_IsType does not detect NaN values in the database file + ** which should be treated as a NULL. So if the header type + ** is REAL, we have to load the actual data using OP_Column + ** to reliably determine if the value is a NULL. 
*/ + sqlite3VdbeAddOp3(v, OP_Column, p1, p3, 3); + jmp3 = sqlite3VdbeAddOp2(v, OP_NotNull, 3, labelOk); + VdbeCoverage(v); + } zErr = sqlite3MPrintf(db, "NULL value in %s.%s", pTab->zName, pCol->zCnName); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); if( doTypeCheck ){ sqlite3VdbeGoto(v, labelError); sqlite3VdbeJumpHere(v, jmp2); + sqlite3VdbeJumpHere(v, jmp3); }else{ /* VDBE byte code will fall thru */ } @@ -136308,7 +137025,7 @@ SQLITE_PRIVATE void sqlite3Pragma( int jmp7; sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur+j, 3); jmp7 = sqlite3VdbeAddOp3(v, OP_Eq, 3, 0, r1+pIdx->nColumn-1); - VdbeCoverage(v); + VdbeCoverageNeverNull(v); sqlite3VdbeLoadString(v, 3, "rowid not at end-of-record for row "); sqlite3VdbeAddOp3(v, OP_Concat, 7, 3, 3); @@ -137514,7 +138231,9 @@ SQLITE_PRIVATE int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg, u32 mFl #else encoding = SQLITE_UTF8; #endif - if( db->nVdbeActive>0 && encoding!=ENC(db) ){ + if( db->nVdbeActive>0 && encoding!=ENC(db) + && (db->mDbFlags & DBFLAG_Vacuum)==0 + ){ rc = SQLITE_LOCKED; goto initone_error_out; }else{ @@ -137908,7 +138627,11 @@ static int sqlite3Prepare( sParse.db = db; sParse.pReprepare = pReprepare; assert( ppStmt && *ppStmt==0 ); - if( db->mallocFailed ) sqlite3ErrorMsg(&sParse, "out of memory"); + if( db->mallocFailed ){ + sqlite3ErrorMsg(&sParse, "out of memory"); + db->errCode = rc = SQLITE_NOMEM; + goto end_prepare; + } assert( sqlite3_mutex_held(db->mutex) ); /* For a long-term use prepared statement avoid the use of @@ -138997,7 +139720,7 @@ static void pushOntoSorter( ** (2) All output columns are included in the sort record. In that ** case regData==regOrigData. ** (3) Some output columns are omitted from the sort record due to - ** the SQLITE_ENABLE_SORTER_REFERENCE optimization, or due to the + ** the SQLITE_ENABLE_SORTER_REFERENCES optimization, or due to the ** SQLITE_ECEL_OMITREF optimization, or due to the ** SortCtx.pDeferredRowLoad optimiation. In any of these cases ** regOrigData is 0 to prevent this routine from trying to copy @@ -140598,7 +141321,7 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( assert( (pSelect->selFlags & SF_Resolved)!=0 ); assert( pTab->nCol==pSelect->pEList->nExpr || pParse->nErr>0 ); assert( aff==SQLITE_AFF_NONE || aff==SQLITE_AFF_BLOB ); - if( db->mallocFailed ) return; + if( db->mallocFailed || IN_RENAME_OBJECT ) return; while( pSelect->pPrior ) pSelect = pSelect->pPrior; a = pSelect->pEList->a; memset(&sNC, 0, sizeof(sNC)); @@ -140643,18 +141366,16 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( break; } } - } - } - if( zType ){ - i64 m = sqlite3Strlen30(zType); - n = sqlite3Strlen30(pCol->zCnName); - pCol->zCnName = sqlite3DbReallocOrFree(db, pCol->zCnName, n+m+2); - if( pCol->zCnName ){ - memcpy(&pCol->zCnName[n+1], zType, m+1); - pCol->colFlags |= COLFLAG_HASTYPE; - }else{ - testcase( pCol->colFlags & COLFLAG_HASTYPE ); - pCol->colFlags &= ~(COLFLAG_HASTYPE|COLFLAG_HASCOLL); + } + } + if( zType ){ + i64 m = sqlite3Strlen30(zType); + n = sqlite3Strlen30(pCol->zCnName); + pCol->zCnName = sqlite3DbReallocOrFree(db, pCol->zCnName, n+m+2); + pCol->colFlags &= ~(COLFLAG_HASTYPE|COLFLAG_HASCOLL); + if( pCol->zCnName ){ + memcpy(&pCol->zCnName[n+1], zType, m+1); + pCol->colFlags |= COLFLAG_HASTYPE; } } pColl = sqlite3ExprCollSeq(pParse, p); @@ -142521,8 +143242,7 @@ static int compoundHasDifferentAffinities(Select *p){ ** query or there are no RIGHT or FULL JOINs in any arm ** of the subquery. (This is a duplicate of condition (27b).) 
** (17h) The corresponding result set expressions in all arms of the -** compound must have the same affinity. (See restriction (9) -** on the push-down optimization.) +** compound must have the same affinity. ** ** The parent and sub-query may contain WHERE clauses. Subject to ** rules (11), (13) and (14), they may also contain ORDER BY, @@ -143390,10 +144110,24 @@ static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){ ** or EXCEPT, then all of the result set columns for all arms of ** the compound must use the BINARY collating sequence. ** -** (9) If the subquery is a compound, then all arms of the compound must -** have the same affinity. (This is the same as restriction (17h) -** for query flattening.) +** (9) All three of the following are true: ** +** (9a) The WHERE clause expression originates in the ON or USING clause +** of a join (either an INNER or an OUTER join), and +** +** (9b) The subquery is to the right of the ON/USING clause +** +** (9c) There is a RIGHT JOIN (or FULL JOIN) in between the ON/USING +** clause and the subquery. +** +** Without this restriction, the push-down optimization might move +** the ON/USING filter expression from the left side of a RIGHT JOIN +** over to the right side, which leads to incorrect answers. See +** also restriction (6) in sqlite3ExprIsSingleTableConstraint(). +** +** (10) The inner query is not the right-hand table of a RIGHT JOIN. +** +** (11) The subquery is not a VALUES clause ** ** Return 0 if no changes are made and non-zero if one or more WHERE clause ** terms are duplicated into the subquery. @@ -143402,13 +144136,20 @@ static int pushDownWhereTerms( Parse *pParse, /* Parse context (for malloc() and error reporting) */ Select *pSubq, /* The subquery whose WHERE clause is to be augmented */ Expr *pWhere, /* The WHERE clause of the outer query */ - SrcItem *pSrc /* The subquery term of the outer FROM clause */ + SrcList *pSrcList, /* The complete from clause of the outer query */ + int iSrc /* Which FROM clause term to try to push into */ ){ Expr *pNew; + SrcItem *pSrc; /* The subquery FROM term into which WHERE is pushed */ int nChng = 0; + pSrc = &pSrcList->a[iSrc]; if( pWhere==0 ) return 0; - if( pSubq->selFlags & (SF_Recursive|SF_MultiPart) ) return 0; - if( pSrc->fg.jointype & (JT_LTORJ|JT_RIGHT) ) return 0; + if( pSubq->selFlags & (SF_Recursive|SF_MultiPart) ){ + return 0; /* restrictions (2) and (11) */ + } + if( pSrc->fg.jointype & (JT_LTORJ|JT_RIGHT) ){ + return 0; /* restrictions (10) */ + } if( pSubq->pPrior ){ Select *pSel; @@ -143424,9 +144165,6 @@ static int pushDownWhereTerms( if( pSel->pWin ) return 0; /* restriction (6b) */ #endif } - if( compoundHasDifferentAffinities(pSubq) ){ - return 0; /* restriction (9) */ - } if( notUnionAll ){ /* If any of the compound arms are connected using UNION, INTERSECT, ** or EXCEPT, then we must ensure that none of the columns use a @@ -143466,11 +144204,28 @@ static int pushDownWhereTerms( return 0; /* restriction (3) */ } while( pWhere->op==TK_AND ){ - nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight, pSrc); + nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight, pSrcList, iSrc); pWhere = pWhere->pLeft; } -#if 0 /* Legacy code. 
Checks now done by sqlite3ExprIsTableConstraint() */ +#if 0 /* These checks now done by sqlite3ExprIsSingleTableConstraint() */ + if( ExprHasProperty(pWhere, EP_OuterON|EP_InnerON) /* (9a) */ + && (pSrcList->a[0].fg.jointype & JT_LTORJ)!=0 /* Fast pre-test of (9c) */ + ){ + int jj; + for(jj=0; jjw.iJoin==pSrcList->a[jj].iCursor ){ + /* If we reach this point, both (9a) and (9b) are satisfied. + ** The following loop checks (9c): + */ + for(jj++; jja[jj].fg.jointype & JT_RIGHT)!=0 ){ + return 0; /* restriction (9) */ + } + } + } + } + } if( isLeftJoin && (ExprHasProperty(pWhere,EP_OuterON)==0 || pWhere->w.iJoin!=iCursor) @@ -143484,7 +144239,7 @@ static int pushDownWhereTerms( } #endif - if( sqlite3ExprIsTableConstraint(pWhere, pSrc) ){ + if( sqlite3ExprIsSingleTableConstraint(pWhere, pSrcList, iSrc) ){ nChng++; pSubq->selFlags |= SF_PushDown; while( pSubq ){ @@ -143518,6 +144273,78 @@ static int pushDownWhereTerms( } #endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ +/* +** Check to see if a subquery contains result-set columns that are +** never used. If it does, change the value of those result-set columns +** to NULL so that they do not cause unnecessary work to compute. +** +** Return the number of column that were changed to NULL. +*/ +static int disableUnusedSubqueryResultColumns(SrcItem *pItem){ + int nCol; + Select *pSub; /* The subquery to be simplified */ + Select *pX; /* For looping over compound elements of pSub */ + Table *pTab; /* The table that describes the subquery */ + int j; /* Column number */ + int nChng = 0; /* Number of columns converted to NULL */ + Bitmask colUsed; /* Columns that may not be NULLed out */ + + assert( pItem!=0 ); + if( pItem->fg.isCorrelated || pItem->fg.isCte ){ + return 0; + } + assert( pItem->pTab!=0 ); + pTab = pItem->pTab; + assert( pItem->pSelect!=0 ); + pSub = pItem->pSelect; + assert( pSub->pEList->nExpr==pTab->nCol ); + if( (pSub->selFlags & (SF_Distinct|SF_Aggregate))!=0 ){ + testcase( pSub->selFlags & SF_Distinct ); + testcase( pSub->selFlags & SF_Aggregate ); + return 0; + } + for(pX=pSub; pX; pX=pX->pPrior){ + if( pX->pPrior && pX->op!=TK_ALL ){ + /* This optimization does not work for compound subqueries that + ** use UNION, INTERSECT, or EXCEPT. Only UNION ALL is allowed. */ + return 0; + } +#ifndef SQLITE_OMIT_WINDOWFUNC + if( pX->pWin ){ + /* This optimization does not work for subqueries that use window + ** functions. */ + return 0; + } +#endif + } + colUsed = pItem->colUsed; + if( pSub->pOrderBy ){ + ExprList *pList = pSub->pOrderBy; + for(j=0; jnExpr; j++){ + u16 iCol = pList->a[j].u.x.iOrderByCol; + if( iCol>0 ){ + iCol--; + colUsed |= ((Bitmask)1)<<(iCol>=BMS ? BMS-1 : iCol); + } + } + } + nCol = pTab->nCol; + for(j=0; jpPrior) { + Expr *pY = pX->pEList->a[j].pExpr; + if( pY->op==TK_NULL ) continue; + pY->op = TK_NULL; + ExprClearProperty(pY, EP_Skip|EP_Unlikely); + pX->selFlags |= SF_PushDown; + nChng++; + } + } + return nChng; +} + + /* ** The pFunc is the only aggregate function in the query. Check to see ** if the query is a candidate for the min/max optimization. 
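The new disableUnusedSubqueryResultColumns() routine added above rewrites result-set columns of a FROM-clause subquery that the outer query never reads into NULL, so the unused expressions are not computed. A minimal sketch of the query shape it targets, using only the public C API; the schema and names are illustrative, and whether the new SQLITE_NullUnusedCols optimization actually fires depends on other planner decisions (for example, whether the subquery is flattened away first):

#include <sqlite3.h>

/* Sketch only: the outer SELECT reads just "a", so the costly "big"
** column of the inner SELECT can be turned into NULL internally by the
** optimization added above, assuming the subquery survives as a
** subquery in the final plan. */
static int demo_unused_subquery_column(void){
  sqlite3 *db;
  int rc = sqlite3_open(":memory:", &db);
  if( rc!=SQLITE_OK ) return rc;
  sqlite3_exec(db, "CREATE TABLE t(a,b); INSERT INTO t VALUES(1,2),(3,4);",
               0, 0, 0);
  rc = sqlite3_exec(db,
      "SELECT a FROM (SELECT a, length(hex(randomblob(100000))) AS big FROM t);",
      0, 0, 0);
  sqlite3_close(db);
  return rc;
}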
@@ -144664,12 +145491,13 @@ static void optimizeAggregateUseOfIndexedExpr( assert( pSelect->pGroupBy!=0 ); pAggInfo->nColumn = pAggInfo->nAccumulator; if( ALWAYS(pAggInfo->nSortingColumn>0) ){ - if( pAggInfo->nColumn==0 ){ - pAggInfo->nSortingColumn = pSelect->pGroupBy->nExpr; - }else{ - pAggInfo->nSortingColumn = - pAggInfo->aCol[pAggInfo->nColumn-1].iSorterColumn+1; + int mx = pSelect->pGroupBy->nExpr - 1; + int j, k; + for(j=0; jnColumn; j++){ + k = pAggInfo->aCol[j].iSorterColumn; + if( k>mx ) mx = k; } + pAggInfo->nSortingColumn = mx+1; } analyzeAggFuncArgs(pAggInfo, pNC); #if TREETRACE_ENABLED @@ -144703,11 +145531,13 @@ static int aggregateIdxEprRefToColCallback(Walker *pWalker, Expr *pExpr){ if( pExpr->op==TK_AGG_FUNCTION ) return WRC_Continue; if( pExpr->op==TK_IF_NULL_ROW ) return WRC_Continue; pAggInfo = pExpr->pAggInfo; - assert( pExpr->iAgg>=0 && pExpr->iAggnColumn ); + if( NEVER(pExpr->iAgg>=pAggInfo->nColumn) ) return WRC_Continue; + assert( pExpr->iAgg>=0 ); pCol = &pAggInfo->aCol[pExpr->iAgg]; pExpr->op = TK_AGG_COLUMN; pExpr->iTable = pCol->iTable; pExpr->iColumn = pCol->iColumn; + ExprClearProperty(pExpr, EP_Skip|EP_Collate); return WRC_Prune; } @@ -145061,7 +145891,6 @@ static void agginfoFree(sqlite3 *db, AggInfo *p){ sqlite3DbFreeNN(db, p); } -#ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION /* ** Attempt to transform a query of the form ** @@ -145089,6 +145918,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ if( (p->selFlags & SF_Aggregate)==0 ) return 0; /* This is an aggregate */ if( p->pEList->nExpr!=1 ) return 0; /* Single result column */ if( p->pWhere ) return 0; + if( p->pHaving ) return 0; if( p->pGroupBy ) return 0; if( p->pOrderBy ) return 0; pExpr = p->pEList->a[0].pExpr; @@ -145108,7 +145938,8 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ if( pSub->pWhere ) return 0; /* No WHERE clause */ if( pSub->pLimit ) return 0; /* No LIMIT clause */ if( pSub->selFlags & SF_Aggregate ) return 0; /* Not an aggregate */ - pSub = pSub->pPrior; /* Repeat over compound */ + assert( pSub->pHaving==0 ); /* Due to the previous */ + pSub = pSub->pPrior; /* Repeat over compound */ }while( pSub ); /* If we reach this point then it is OK to perform the transformation */ @@ -145151,7 +145982,6 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ #endif return 1; } -#endif /* SQLITE_COUNTOFVIEW_OPTIMIZATION */ /* ** If any term of pSrc, or any SF_NestedFrom sub-query, is not the same @@ -145407,7 +146237,7 @@ SQLITE_PRIVATE int sqlite3Select( pTabList->a[0].fg.jointype & JT_LTORJ); } - /* No futher action if this term of the FROM clause is no a subquery */ + /* No futher action if this term of the FROM clause is not a subquery */ if( pSub==0 ) continue; /* Catch mismatch in the declared columns of a view and the number of @@ -145540,14 +146370,12 @@ SQLITE_PRIVATE int sqlite3Select( TREETRACE(0x2000,pParse,p,("Constant propagation not helpful\n")); } -#ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION if( OptimizationEnabled(db, SQLITE_QueryFlattener|SQLITE_CountOfView) && countOfViewOptimization(pParse, p) ){ if( db->mallocFailed ) goto select_end; pTabList = p->pSrc; } -#endif /* For each term in the FROM clause, do two things: ** (1) Authorized unreferenced tables @@ -145606,7 +146434,7 @@ SQLITE_PRIVATE int sqlite3Select( if( OptimizationEnabled(db, SQLITE_PushDown) && (pItem->fg.isCte==0 || (pItem->u2.pCteUse->eM10d!=M10d_Yes && pItem->u2.pCteUse->nUse<2)) - && pushDownWhereTerms(pParse, pSub, p->pWhere, pItem) + && pushDownWhereTerms(pParse, pSub, 
p->pWhere, pTabList, i) ){ #if TREETRACE_ENABLED if( sqlite3TreeTrace & 0x4000 ){ @@ -145620,6 +146448,22 @@ SQLITE_PRIVATE int sqlite3Select( TREETRACE(0x4000,pParse,p,("Push-down not possible\n")); } + /* Convert unused result columns of the subquery into simple NULL + ** expressions, to avoid unneeded searching and computation. + */ + if( OptimizationEnabled(db, SQLITE_NullUnusedCols) + && disableUnusedSubqueryResultColumns(pItem) + ){ +#if TREETRACE_ENABLED + if( sqlite3TreeTrace & 0x4000 ){ + TREETRACE(0x4000,pParse,p, + ("Change unused result columns to NULL for subquery %d:\n", + pSub->selId)); + sqlite3TreeViewSelect(0, p, 0); + } +#endif + } + zSavedAuthContext = pParse->zAuthContext; pParse->zAuthContext = pItem->zName; @@ -148157,6 +149001,9 @@ SQLITE_PRIVATE u32 sqlite3TriggerColmask( Trigger *p; assert( isNew==1 || isNew==0 ); + if( IsView(pTab) ){ + return 0xffffffff; + } for(p=pTrigger; p; p=p->pNext){ if( p->op==op && (tr_tm&p->tr_tm) @@ -148591,7 +149438,7 @@ SQLITE_PRIVATE void sqlite3Update( if( sqlite3ViewGetColumnNames(pParse, pTab) ){ goto update_cleanup; } - if( sqlite3IsReadOnly(pParse, pTab, tmask) ){ + if( sqlite3IsReadOnly(pParse, pTab, pTrigger) ){ goto update_cleanup; } @@ -151382,7 +152229,10 @@ SQLITE_PRIVATE int sqlite3VtabSavepoint(sqlite3 *db, int op, int iSavepoint){ break; } if( xMethod && pVTab->iSavepoint>iSavepoint ){ + u64 savedFlags = (db->flags & SQLITE_Defensive); + db->flags &= ~(u64)SQLITE_Defensive; rc = xMethod(pVTab->pVtab, iSavepoint); + db->flags |= savedFlags; } sqlite3VtabUnlock(pVTab); } @@ -151611,6 +152461,10 @@ SQLITE_API int sqlite3_vtab_config(sqlite3 *db, int op, ...){ p->pVTable->eVtabRisk = SQLITE_VTABRISK_High; break; } + case SQLITE_VTAB_USES_ALL_SCHEMAS: { + p->pVTable->bAllSchemas = 1; + break; + } default: { rc = SQLITE_MISUSE_BKPT; break; @@ -152384,9 +153238,9 @@ static void explainIndexRange(StrAccum *pStr, WhereLoop *pLoop){ /* ** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN -** command, or if either SQLITE_DEBUG or SQLITE_ENABLE_STMT_SCANSTATUS was -** defined at compile-time. If it is not a no-op, a single OP_Explain opcode -** is added to the output to describe the table scan strategy in pLevel. +** command, or if stmt_scanstatus_v2() stats are enabled, or if SQLITE_DEBUG +** was defined at compile-time. If it is not a no-op, a single OP_Explain +** opcode is added to the output to describe the table scan strategy in pLevel. ** ** If an OP_Explain opcode is added to the VM, its address is returned. ** Otherwise, if no OP_Explain is coded, zero is returned. 
@@ -152398,8 +153252,8 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ ){ int ret = 0; -#if !defined(SQLITE_DEBUG) && !defined(SQLITE_ENABLE_STMT_SCANSTATUS) - if( sqlite3ParseToplevel(pParse)->explain==2 ) +#if !defined(SQLITE_DEBUG) + if( sqlite3ParseToplevel(pParse)->explain==2 || IS_STMT_SCANSTATUS(pParse->db) ) #endif { SrcItem *pItem = &pTabList->a[pLevel->iFrom]; @@ -152565,27 +153419,29 @@ SQLITE_PRIVATE void sqlite3WhereAddScanStatus( WhereLevel *pLvl, /* Level to add scanstatus() entry for */ int addrExplain /* Address of OP_Explain (or 0) */ ){ - const char *zObj = 0; - WhereLoop *pLoop = pLvl->pWLoop; - int wsFlags = pLoop->wsFlags; - int viaCoroutine = 0; - - if( (wsFlags & WHERE_VIRTUALTABLE)==0 && pLoop->u.btree.pIndex!=0 ){ - zObj = pLoop->u.btree.pIndex->zName; - }else{ - zObj = pSrclist->a[pLvl->iFrom].zName; - viaCoroutine = pSrclist->a[pLvl->iFrom].fg.viaCoroutine; - } - sqlite3VdbeScanStatus( - v, addrExplain, pLvl->addrBody, pLvl->addrVisit, pLoop->nOut, zObj - ); + if( IS_STMT_SCANSTATUS( sqlite3VdbeDb(v) ) ){ + const char *zObj = 0; + WhereLoop *pLoop = pLvl->pWLoop; + int wsFlags = pLoop->wsFlags; + int viaCoroutine = 0; - if( viaCoroutine==0 ){ - if( (wsFlags & (WHERE_MULTI_OR|WHERE_AUTO_INDEX))==0 ){ - sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iTabCur); + if( (wsFlags & WHERE_VIRTUALTABLE)==0 && pLoop->u.btree.pIndex!=0 ){ + zObj = pLoop->u.btree.pIndex->zName; + }else{ + zObj = pSrclist->a[pLvl->iFrom].zName; + viaCoroutine = pSrclist->a[pLvl->iFrom].fg.viaCoroutine; } - if( wsFlags & WHERE_INDEXED ){ - sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iIdxCur); + sqlite3VdbeScanStatus( + v, addrExplain, pLvl->addrBody, pLvl->addrVisit, pLoop->nOut, zObj + ); + + if( viaCoroutine==0 ){ + if( (wsFlags & (WHERE_MULTI_OR|WHERE_AUTO_INDEX))==0 ){ + sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iTabCur); + } + if( wsFlags & WHERE_INDEXED ){ + sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iIdxCur); + } } } } @@ -153282,11 +154138,12 @@ static int codeCursorHintIsOrFunction(Walker *pWalker, Expr *pExpr){ */ static int codeCursorHintFixExpr(Walker *pWalker, Expr *pExpr){ int rc = WRC_Continue; + int reg; struct CCurHint *pHint = pWalker->u.pCCurHint; if( pExpr->op==TK_COLUMN ){ if( pExpr->iTable!=pHint->iTabCur ){ - int reg = ++pWalker->pParse->nMem; /* Register for column value */ - sqlite3ExprCode(pWalker->pParse, pExpr, reg); + reg = ++pWalker->pParse->nMem; /* Register for column value */ + reg = sqlite3ExprCodeTarget(pWalker->pParse, pExpr, reg); pExpr->op = TK_REGISTER; pExpr->iTable = reg; }else if( pHint->pIdx!=0 ){ @@ -153294,15 +154151,15 @@ static int codeCursorHintFixExpr(Walker *pWalker, Expr *pExpr){ pExpr->iColumn = sqlite3TableColumnToIndex(pHint->pIdx, pExpr->iColumn); assert( pExpr->iColumn>=0 ); } - }else if( pExpr->op==TK_AGG_FUNCTION ){ - /* An aggregate function in the WHERE clause of a query means this must - ** be a correlated sub-query, and expression pExpr is an aggregate from - ** the parent context. Do not walk the function arguments in this case. - ** - ** todo: It should be possible to replace this node with a TK_REGISTER - ** expression, as the result of the expression must be stored in a - ** register at this point. The same holds for TK_AGG_COLUMN nodes. 
*/ + }else if( pExpr->pAggInfo ){ rc = WRC_Prune; + reg = ++pWalker->pParse->nMem; /* Register for column value */ + reg = sqlite3ExprCodeTarget(pWalker->pParse, pExpr, reg); + pExpr->op = TK_REGISTER; + pExpr->iTable = reg; + }else if( pExpr->op==TK_TRUEFALSE ){ + /* Do not walk disabled expressions. tag-20230504-1 */ + return WRC_Prune; } return rc; } @@ -153404,7 +154261,7 @@ static void codeCursorHint( } if( pExpr!=0 ){ sWalker.xExprCallback = codeCursorHintFixExpr; - sqlite3WalkExpr(&sWalker, pExpr); + if( pParse->nErr==0 ) sqlite3WalkExpr(&sWalker, pExpr); sqlite3VdbeAddOp4(v, OP_CursorHint, (sHint.pIdx ? sHint.iIdxCur : sHint.iTabCur), 0, 0, (const char*)pExpr, P4_EXPR); @@ -154198,7 +155055,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** guess. */ addrSeekScan = sqlite3VdbeAddOp1(v, OP_SeekScan, (pIdx->aiRowLogEst[0]+9)/10); - if( pRangeStart ){ + if( pRangeStart || pRangeEnd ){ sqlite3VdbeChangeP5(v, 1); sqlite3VdbeChangeP2(v, addrSeekScan, sqlite3VdbeCurrentAddr(v)+1); addrSeekScan = 0; @@ -154239,16 +155096,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( assert( pLevel->p2==0 ); if( pRangeEnd ){ Expr *pRight = pRangeEnd->pExpr->pRight; - if( addrSeekScan ){ - /* For a seek-scan that has a range on the lowest term of the index, - ** we have to make the top of the loop be code that sets the end - ** condition of the range. Otherwise, the OP_SeekScan might jump - ** over that initialization, leaving the range-end value set to the - ** range-start value, resulting in a wrong answer. - ** See ticket 5981a8c041a3c2f3 (2021-11-02). - */ - pLevel->p2 = sqlite3VdbeCurrentAddr(v); - } + assert( addrSeekScan==0 ); codeExprOrVector(pParse, pRight, regBase+nEq, nTop); whereLikeOptimizationStringFixup(v, pLevel, pRangeEnd); if( (pRangeEnd->wtFlags & TERM_VNULL)==0 @@ -154282,7 +155130,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( if( zEndAff ) sqlite3DbNNFreeNN(db, zEndAff); /* Top of the loop body */ - if( pLevel->p2==0 ) pLevel->p2 = sqlite3VdbeCurrentAddr(v); + pLevel->p2 = sqlite3VdbeCurrentAddr(v); /* Check if the index cursor is past the end of the range. 
*/ if( nConstraint ){ @@ -156279,7 +157127,7 @@ static void exprAnalyze( && 0==sqlite3ExprCanBeNull(pLeft) ){ assert( !ExprHasProperty(pExpr, EP_IntValue) ); - pExpr->op = TK_TRUEFALSE; + pExpr->op = TK_TRUEFALSE; /* See tag-20230504-1 */ pExpr->u.zToken = "false"; ExprSetProperty(pExpr, EP_IsFalse); pTerm->prereqAll = 0; @@ -156924,9 +157772,12 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs( pRhs = sqlite3PExpr(pParse, TK_UPLUS, sqlite3ExprDup(pParse->db, pArgs->a[j].pExpr, 0), 0); pTerm = sqlite3PExpr(pParse, TK_EQ, pColRef, pRhs); - if( pItem->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT) ){ + if( pItem->fg.jointype & (JT_LEFT|JT_RIGHT) ){ + testcase( pItem->fg.jointype & JT_LEFT ); /* testtag-20230227a */ + testcase( pItem->fg.jointype & JT_RIGHT ); /* testtag-20230227b */ joinType = EP_OuterON; }else{ + testcase( pItem->fg.jointype & JT_LTORJ ); /* testtag-20230227c */ joinType = EP_InnerON; } sqlite3SetJoinExpr(pTerm, pItem->iCursor, joinType); @@ -157769,7 +158620,7 @@ static void explainAutomaticIndex( int bPartial, /* True if pIdx is a partial index */ int *pAddrExplain /* OUT: Address of OP_Explain */ ){ - if( pParse->explain!=2 ){ + if( IS_STMT_SCANSTATUS(pParse->db) && pParse->explain!=2 ){ Table *pTab = pIdx->pTable; const char *zSep = ""; char *zText = 0; @@ -157808,8 +158659,7 @@ static void explainAutomaticIndex( */ static SQLITE_NOINLINE void constructAutomaticIndex( Parse *pParse, /* The parsing context */ - const WhereClause *pWC, /* The WHERE clause */ - const SrcItem *pSrc, /* The FROM clause term to get the next index */ + WhereClause *pWC, /* The WHERE clause */ const Bitmask notReady, /* Mask of cursors that are not available */ WhereLevel *pLevel /* Write new index here */ ){ @@ -157830,10 +158680,12 @@ static SQLITE_NOINLINE void constructAutomaticIndex( char *zNotUsed; /* Extra space on the end of pIdx */ Bitmask idxCols; /* Bitmap of columns used for indexing */ Bitmask extraCols; /* Bitmap of additional columns */ - u8 sentWarning = 0; /* True if a warnning has been issued */ + u8 sentWarning = 0; /* True if a warning has been issued */ + u8 useBloomFilter = 0; /* True to also add a Bloom filter */ Expr *pPartial = 0; /* Partial Index Expression */ int iContinue = 0; /* Jump here to skip excluded rows */ - SrcItem *pTabItem; /* FROM clause term being indexed */ + SrcList *pTabList; /* The complete FROM clause */ + SrcItem *pSrc; /* The FROM clause term to get the next index */ int addrCounter = 0; /* Address where integer counter is initialized */ int regBase; /* Array of registers where record is assembled */ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS @@ -157849,6 +158701,8 @@ static SQLITE_NOINLINE void constructAutomaticIndex( /* Count the number of columns that will be added to the index ** and used to match WHERE clause constraints */ nKeyCol = 0; + pTabList = pWC->pWInfo->pTabList; + pSrc = &pTabList->a[pLevel->iFrom]; pTable = pSrc->pTab; pWCEnd = &pWC->a[pWC->nTerm]; pLoop = pLevel->pWLoop; @@ -157859,7 +158713,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex( ** WHERE clause (or the ON clause of a LEFT join) that constrain which ** rows of the target table (pSrc) that can be used. 
*/ if( (pTerm->wtFlags & TERM_VIRTUAL)==0 - && sqlite3ExprIsTableConstraint(pExpr, pSrc) + && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, pLevel->iFrom) ){ pPartial = sqlite3ExprAnd(pParse, pPartial, sqlite3ExprDup(pParse->db, pExpr, 0)); @@ -157900,7 +158754,11 @@ static SQLITE_NOINLINE void constructAutomaticIndex( ** original table changes and the index and table cannot both be used ** if they go out of sync. */ - extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1)); + if( IsView(pTable) ){ + extraCols = ALLBITS; + }else{ + extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1)); + } mxBitCol = MIN(BMS-1,pTable->nCol); testcase( pTable->nCol==BMS-1 ); testcase( pTable->nCol==BMS-2 ); @@ -157936,6 +158794,16 @@ static SQLITE_NOINLINE void constructAutomaticIndex( assert( pColl!=0 || pParse->nErr>0 ); /* TH3 collate01.800 */ pIdx->azColl[n] = pColl ? pColl->zName : sqlite3StrBINARY; n++; + if( ALWAYS(pX->pLeft!=0) + && sqlite3ExprAffinity(pX->pLeft)!=SQLITE_AFF_TEXT + ){ + /* TUNING: only use a Bloom filter on an automatic index + ** if one or more key columns has the ability to hold numeric + ** values, since strings all have the same hash in the Bloom + ** filter implementation and hence a Bloom filter on a text column + ** is not usually helpful. */ + useBloomFilter = 1; + } } } } @@ -157968,20 +158836,21 @@ static SQLITE_NOINLINE void constructAutomaticIndex( sqlite3VdbeAddOp2(v, OP_OpenAutoindex, pLevel->iIdxCur, nKeyCol+1); sqlite3VdbeSetP4KeyInfo(pParse, pIdx); VdbeComment((v, "for %s", pTable->zName)); - if( OptimizationEnabled(pParse->db, SQLITE_BloomFilter) ){ + if( OptimizationEnabled(pParse->db, SQLITE_BloomFilter) && useBloomFilter ){ + sqlite3WhereExplainBloomFilter(pParse, pWC->pWInfo, pLevel); pLevel->regFilter = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Blob, 10000, pLevel->regFilter); } /* Fill the automatic index with content */ - pTabItem = &pWC->pWInfo->pTabList->a[pLevel->iFrom]; - if( pTabItem->fg.viaCoroutine ){ - int regYield = pTabItem->regReturn; + assert( pSrc == &pWC->pWInfo->pTabList->a[pLevel->iFrom] ); + if( pSrc->fg.viaCoroutine ){ + int regYield = pSrc->regReturn; addrCounter = sqlite3VdbeAddOp2(v, OP_Integer, 0, 0); - sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub); + sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pSrc->addrFillSub); addrTop = sqlite3VdbeAddOp1(v, OP_Yield, regYield); VdbeCoverage(v); - VdbeComment((v, "next row of %s", pTabItem->pTab->zName)); + VdbeComment((v, "next row of %s", pSrc->pTab->zName)); }else{ addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur); VdbeCoverage(v); } @@ -158002,14 +158871,14 @@ static SQLITE_NOINLINE void constructAutomaticIndex( sqlite3VdbeAddOp2(v, OP_IdxInsert, pLevel->iIdxCur, regRecord); sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); if( pPartial ) sqlite3VdbeResolveLabel(v, iContinue); - if( pTabItem->fg.viaCoroutine ){ + if( pSrc->fg.viaCoroutine ){ sqlite3VdbeChangeP2(v, addrCounter, regBase+n); testcase( pParse->db->mallocFailed ); assert( pLevel->iIdxCur>0 ); translateColumnToCopy(pParse, addrTop, pLevel->iTabCur, - pTabItem->regResult, pLevel->iIdxCur); + pSrc->regResult, pLevel->iIdxCur); sqlite3VdbeGoto(v, addrTop); - pTabItem->fg.viaCoroutine = 0; + pSrc->fg.viaCoroutine = 0; }else{ sqlite3VdbeAddOp2(v, OP_Next, pLevel->iTabCur, addrTop+1); VdbeCoverage(v); sqlite3VdbeChangeP5(v, SQLITE_STMTSTATUS_AUTOINDEX); @@ -158072,9 +158941,11 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); 
do{ + const SrcList *pTabList; const SrcItem *pItem; const Table *pTab; u64 sz; + int iSrc; sqlite3WhereExplainBloomFilter(pParse, pWInfo, pLevel); addrCont = sqlite3VdbeMakeLabel(pParse); iCur = pLevel->iTabCur; @@ -158088,7 +158959,9 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( ** testing complicated. By basing the blob size on the value in the ** sqlite_stat1 table, testing is much easier. */ - pItem = &pWInfo->pTabList->a[pLevel->iFrom]; + pTabList = pWInfo->pTabList; + iSrc = pLevel->iFrom; + pItem = &pTabList->a[iSrc]; assert( pItem!=0 ); pTab = pItem->pTab; assert( pTab!=0 ); @@ -158105,7 +158978,7 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( for(pTerm=pWInfo->sWC.a; pTermpExpr; if( (pTerm->wtFlags & TERM_VIRTUAL)==0 - && sqlite3ExprIsTableConstraint(pExpr, pItem) + && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, iSrc) ){ sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL); } @@ -158409,6 +159282,9 @@ static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){ sqlite3ErrorMsg(pParse, "%s", pVtab->zErrMsg); } } + if( pTab->u.vtab.p->bAllSchemas ){ + sqlite3VtabUsesAllSchemas(pParse); + } sqlite3_free(pVtab->zErrMsg); pVtab->zErrMsg = 0; return rc; @@ -158939,7 +159815,7 @@ static int whereRangeScanEst( UNUSED_PARAMETER(pBuilder); assert( pLower || pUpper ); #endif - assert( pUpper==0 || (pUpper->wtFlags & TERM_VNULL)==0 ); + assert( pUpper==0 || (pUpper->wtFlags & TERM_VNULL)==0 || pParse->nErr>0 ); nNew = whereRangeAdjust(pLower, nOut); nNew = whereRangeAdjust(pUpper, nNew); @@ -161040,8 +161916,6 @@ SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info *pIdxInfo){ return pHidden->eDistinct; } -#if (defined(SQLITE_ENABLE_DBPAGE_VTAB) || defined(SQLITE_TEST)) \ - && !defined(SQLITE_OMIT_VIRTUALTABLE) /* ** Cause the prepared statement that is associated with a call to ** xBestIndex to potentially use all schemas. If the statement being @@ -161051,9 +161925,7 @@ SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info *pIdxInfo){ ** ** This is used by the (built-in) sqlite_dbpage virtual table. */ -SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(sqlite3_index_info *pIdxInfo){ - HiddenIndexInfo *pHidden = (HiddenIndexInfo*)&pIdxInfo[1]; - Parse *pParse = pHidden->pParse; +SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(Parse *pParse){ int nDb = pParse->db->nDb; int i; for(i=0; inLevel>=2 ); @@ -162485,6 +163364,7 @@ static SQLITE_NOINLINE Bitmask whereOmitNoopJoin( if( pWInfo->pOrderBy ){ tabUsed |= sqlite3WhereExprListUsage(&pWInfo->sMaskSet, pWInfo->pOrderBy); } + hasRightJoin = (pWInfo->pTabList->a[0].fg.jointype & JT_LTORJ)!=0; for(i=pWInfo->nLevel-1; i>=1; i--){ WhereTerm *pTerm, *pEnd; SrcItem *pItem; @@ -162507,6 +163387,12 @@ static SQLITE_NOINLINE Bitmask whereOmitNoopJoin( break; } } + if( hasRightJoin + && ExprHasProperty(pTerm->pExpr, EP_InnerON) + && pTerm->pExpr->w.iJoin==pItem->iCursor + ){ + break; /* restriction (5) */ + } } if( pTerm drop loop %c not used\n", pLoop->cId)); @@ -162906,22 +163792,45 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( } if( pParse->nErr ) goto whereBeginError; - /* Special case: WHERE terms that do not refer to any tables in the join - ** (constant expressions). Evaluate each such term, and jump over all the - ** generated code if the result is not true. + /* The False-WHERE-Term-Bypass optimization: + ** + ** If there are WHERE terms that are false, then no rows will be output, + ** so skip over all of the code generated here. 
+ ** + ** Conditions: + ** + ** (1) The WHERE term must not refer to any tables in the join. + ** (2) The term must not come from an ON clause on the + ** right-hand side of a LEFT or FULL JOIN. + ** (3) The term must not come from an ON clause, or there must be + ** no RIGHT or FULL OUTER joins in pTabList. + ** (4) If the expression contains non-deterministic functions + ** that are not within a sub-select. This is not required + ** for correctness but rather to preserves SQLite's legacy + ** behaviour in the following two cases: ** - ** Do not do this if the expression contains non-deterministic functions - ** that are not within a sub-select. This is not strictly required, but - ** preserves SQLite's legacy behaviour in the following two cases: + ** WHERE random()>0; -- eval random() once per row + ** WHERE (SELECT random())>0; -- eval random() just once overall ** - ** FROM ... WHERE random()>0; -- eval random() once per row - ** FROM ... WHERE (SELECT random())>0; -- eval random() once overall + ** Note that the Where term need not be a constant in order for this + ** optimization to apply, though it does need to be constant relative to + ** the current subquery (condition 1). The term might include variables + ** from outer queries so that the value of the term changes from one + ** invocation of the current subquery to the next. */ for(ii=0; iinBase; ii++){ - WhereTerm *pT = &sWLB.pWC->a[ii]; + WhereTerm *pT = &sWLB.pWC->a[ii]; /* A term of the WHERE clause */ + Expr *pX; /* The expression of pT */ if( pT->wtFlags & TERM_VIRTUAL ) continue; - if( pT->prereqAll==0 && (nTabList==0 || exprIsDeterministic(pT->pExpr)) ){ - sqlite3ExprIfFalse(pParse, pT->pExpr, pWInfo->iBreak, SQLITE_JUMPIFNULL); + pX = pT->pExpr; + assert( pX!=0 ); + assert( pT->prereqAll!=0 || !ExprHasProperty(pX, EP_OuterON) ); + if( pT->prereqAll==0 /* Conditions (1) and (2) */ + && (nTabList==0 || exprIsDeterministic(pX)) /* Condition (4) */ + && !(ExprHasProperty(pX, EP_InnerON) /* Condition (3) */ + && (pTabList->a[0].fg.jointype & JT_LTORJ)!=0 ) + ){ + sqlite3ExprIfFalse(pParse, pX, pWInfo->iBreak, SQLITE_JUMPIFNULL); pT->wtFlags |= TERM_CODED; } } @@ -163164,7 +164073,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( assert( n<=pTab->nCol ); } #ifdef SQLITE_ENABLE_CURSOR_HINTS - if( pLoop->u.btree.pIndex!=0 ){ + if( pLoop->u.btree.pIndex!=0 && (pTab->tabFlags & TF_WithoutRowid)==0 ){ sqlite3VdbeChangeP5(v, OPFLAG_SEEKEQ|bFordelete); }else #endif @@ -163301,11 +164210,11 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( sqlite3VdbeJumpHere(v, iOnce); } } + assert( pTabList == pWInfo->pTabList ); if( (wsFlags & (WHERE_AUTO_INDEX|WHERE_BLOOMFILTER))!=0 ){ if( (wsFlags & WHERE_AUTO_INDEX)!=0 ){ #ifndef SQLITE_OMIT_AUTOMATIC_INDEX - constructAutomaticIndex(pParse, &pWInfo->sWC, - &pTabList->a[pLevel->iFrom], notReady, pLevel); + constructAutomaticIndex(pParse, &pWInfo->sWC, notReady, pLevel); #endif }else{ sqlite3ConstructBloomFilter(pWInfo, ii, pLevel, notReady); @@ -163622,7 +164531,8 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ k = pLevel->addrBody + 1; #ifdef SQLITE_DEBUG if( db->flags & SQLITE_VdbeAddopTrace ){ - printf("TRANSLATE opcodes in range %d..%d\n", k, last-1); + printf("TRANSLATE cursor %d->%d in opcode range %d..%d\n", + pLevel->iTabCur, pLevel->iIdxCur, k, last-1); } /* Proof that the "+1" on the k value above is safe */ pOp = sqlite3VdbeGetOp(v, k - 1); @@ -164497,6 +165407,7 @@ static int selectWindowRewriteExprCb(Walker *pWalker, Expr *pExpr){ } /* no break */ deliberate_fall_through + 
case TK_IF_NULL_ROW: case TK_AGG_FUNCTION: case TK_COLUMN: { int iCol = -1; @@ -167325,18 +168236,18 @@ typedef union { #define sqlite3ParserCTX_FETCH Parse *pParse=yypParser->pParse; #define sqlite3ParserCTX_STORE yypParser->pParse=pParse; #define YYFALLBACK 1 -#define YYNSTATE 580 -#define YYNRULE 405 -#define YYNRULE_WITH_ACTION 342 +#define YYNSTATE 579 +#define YYNRULE 403 +#define YYNRULE_WITH_ACTION 340 #define YYNTOKEN 185 -#define YY_MAX_SHIFT 579 -#define YY_MIN_SHIFTREDUCE 839 -#define YY_MAX_SHIFTREDUCE 1243 -#define YY_ERROR_ACTION 1244 -#define YY_ACCEPT_ACTION 1245 -#define YY_NO_ACTION 1246 -#define YY_MIN_REDUCE 1247 -#define YY_MAX_REDUCE 1651 +#define YY_MAX_SHIFT 578 +#define YY_MIN_SHIFTREDUCE 837 +#define YY_MAX_SHIFTREDUCE 1239 +#define YY_ERROR_ACTION 1240 +#define YY_ACCEPT_ACTION 1241 +#define YY_NO_ACTION 1242 +#define YY_MIN_REDUCE 1243 +#define YY_MAX_REDUCE 1645 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -167403,219 +168314,218 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (2101) +#define YY_ACTTAB_COUNT (2096) static const YYACTIONTYPE yy_action[] = { /* 0 */ 572, 208, 572, 118, 115, 229, 572, 118, 115, 229, - /* 10 */ 572, 1318, 381, 1297, 412, 566, 566, 566, 572, 413, - /* 20 */ 382, 1318, 1280, 41, 41, 41, 41, 208, 1530, 71, - /* 30 */ 71, 975, 423, 41, 41, 495, 303, 279, 303, 976, - /* 40 */ 401, 71, 71, 125, 126, 80, 1221, 1221, 1054, 1057, - /* 50 */ 1044, 1044, 123, 123, 124, 124, 124, 124, 480, 413, - /* 60 */ 1245, 1, 1, 579, 2, 1249, 554, 118, 115, 229, - /* 70 */ 317, 484, 146, 484, 528, 118, 115, 229, 533, 1331, - /* 80 */ 421, 527, 142, 125, 126, 80, 1221, 1221, 1054, 1057, - /* 90 */ 1044, 1044, 123, 123, 124, 124, 124, 124, 118, 115, + /* 10 */ 572, 1314, 381, 1293, 412, 566, 566, 566, 572, 413, + /* 20 */ 382, 1314, 1276, 41, 41, 41, 41, 208, 1524, 71, + /* 30 */ 71, 973, 423, 41, 41, 495, 303, 279, 303, 974, + /* 40 */ 401, 71, 71, 125, 126, 80, 1216, 1216, 1051, 1054, + /* 50 */ 1041, 1041, 123, 123, 124, 124, 124, 124, 480, 413, + /* 60 */ 1241, 1, 1, 578, 2, 1245, 554, 118, 115, 229, + /* 70 */ 317, 484, 146, 484, 528, 118, 115, 229, 533, 1327, + /* 80 */ 421, 527, 142, 125, 126, 80, 1216, 1216, 1051, 1054, + /* 90 */ 1041, 1041, 123, 123, 124, 124, 124, 124, 118, 115, /* 100 */ 229, 327, 122, 122, 122, 122, 121, 121, 120, 120, /* 110 */ 120, 119, 116, 448, 284, 284, 284, 284, 446, 446, - /* 120 */ 446, 1571, 380, 1573, 1196, 379, 1167, 569, 1167, 569, - /* 130 */ 413, 1571, 541, 259, 226, 448, 101, 145, 453, 316, + /* 120 */ 446, 1565, 380, 1567, 1192, 379, 1163, 569, 1163, 569, + /* 130 */ 413, 1565, 541, 259, 226, 448, 101, 145, 453, 316, /* 140 */ 563, 240, 122, 122, 122, 122, 121, 121, 120, 120, - /* 150 */ 120, 119, 116, 448, 125, 126, 80, 1221, 1221, 1054, - /* 160 */ 1057, 1044, 1044, 123, 123, 124, 124, 124, 124, 142, - /* 170 */ 294, 1196, 343, 452, 120, 120, 120, 119, 116, 448, - /* 180 */ 127, 1196, 1197, 1198, 148, 445, 444, 572, 119, 116, + /* 150 */ 120, 119, 116, 448, 125, 126, 80, 1216, 1216, 1051, + /* 160 */ 1054, 1041, 1041, 123, 123, 124, 124, 124, 124, 142, + /* 170 */ 294, 1192, 343, 452, 120, 120, 120, 119, 116, 448, + /* 180 */ 127, 1192, 1193, 1192, 148, 445, 444, 572, 119, 116, /* 190 */ 448, 124, 124, 124, 124, 117, 122, 122, 122, 122, /* 200 */ 121, 121, 120, 120, 120, 
119, 116, 448, 458, 113, /* 210 */ 13, 13, 550, 122, 122, 122, 122, 121, 121, 120, - /* 220 */ 120, 120, 119, 116, 448, 426, 316, 563, 1196, 1197, - /* 230 */ 1198, 149, 1228, 413, 1228, 124, 124, 124, 124, 122, + /* 220 */ 120, 120, 119, 116, 448, 426, 316, 563, 1192, 1193, + /* 230 */ 1192, 149, 1224, 413, 1224, 124, 124, 124, 124, 122, /* 240 */ 122, 122, 122, 121, 121, 120, 120, 120, 119, 116, - /* 250 */ 448, 469, 346, 1041, 1041, 1055, 1058, 125, 126, 80, - /* 260 */ 1221, 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, - /* 270 */ 124, 124, 1283, 526, 222, 1196, 572, 413, 224, 518, + /* 250 */ 448, 469, 346, 1038, 1038, 1052, 1055, 125, 126, 80, + /* 260 */ 1216, 1216, 1051, 1054, 1041, 1041, 123, 123, 124, 124, + /* 270 */ 124, 124, 1279, 526, 222, 1192, 572, 413, 224, 518, /* 280 */ 175, 82, 83, 122, 122, 122, 122, 121, 121, 120, - /* 290 */ 120, 120, 119, 116, 448, 1011, 16, 16, 1196, 133, - /* 300 */ 133, 125, 126, 80, 1221, 1221, 1054, 1057, 1044, 1044, + /* 290 */ 120, 120, 119, 116, 448, 1009, 16, 16, 1192, 133, + /* 300 */ 133, 125, 126, 80, 1216, 1216, 1051, 1054, 1041, 1041, /* 310 */ 123, 123, 124, 124, 124, 124, 122, 122, 122, 122, - /* 320 */ 121, 121, 120, 120, 120, 119, 116, 448, 1045, 550, - /* 330 */ 1196, 377, 1196, 1197, 1198, 252, 1438, 403, 508, 505, - /* 340 */ 504, 111, 564, 570, 4, 930, 930, 437, 503, 344, - /* 350 */ 464, 330, 364, 398, 1241, 1196, 1197, 1198, 567, 572, + /* 320 */ 121, 121, 120, 120, 120, 119, 116, 448, 1042, 550, + /* 330 */ 1192, 377, 1192, 1193, 1192, 252, 1433, 403, 508, 505, + /* 340 */ 504, 111, 564, 570, 4, 928, 928, 437, 503, 344, + /* 350 */ 464, 330, 364, 398, 1237, 1192, 1193, 1192, 567, 572, /* 360 */ 122, 122, 122, 122, 121, 121, 120, 120, 120, 119, - /* 370 */ 116, 448, 284, 284, 373, 1584, 1611, 445, 444, 154, - /* 380 */ 413, 449, 71, 71, 1290, 569, 1225, 1196, 1197, 1198, - /* 390 */ 85, 1227, 271, 561, 547, 519, 1565, 572, 98, 1226, - /* 400 */ 6, 1282, 476, 142, 125, 126, 80, 1221, 1221, 1054, - /* 410 */ 1057, 1044, 1044, 123, 123, 124, 124, 124, 124, 554, - /* 420 */ 13, 13, 1031, 511, 1228, 1196, 1228, 553, 109, 109, - /* 430 */ 222, 572, 1242, 175, 572, 431, 110, 197, 449, 574, - /* 440 */ 573, 434, 1556, 1021, 325, 555, 1196, 270, 287, 372, + /* 370 */ 116, 448, 284, 284, 373, 1578, 1604, 445, 444, 154, + /* 380 */ 413, 449, 71, 71, 1286, 569, 1221, 1192, 1193, 1192, + /* 390 */ 85, 1223, 271, 561, 547, 519, 1559, 572, 98, 1222, + /* 400 */ 6, 1278, 476, 142, 125, 126, 80, 1216, 1216, 1051, + /* 410 */ 1054, 1041, 1041, 123, 123, 124, 124, 124, 124, 554, + /* 420 */ 13, 13, 1028, 511, 1224, 1192, 1224, 553, 109, 109, + /* 430 */ 222, 572, 1238, 175, 572, 431, 110, 197, 449, 573, + /* 440 */ 449, 434, 1550, 1018, 325, 555, 1192, 270, 287, 372, /* 450 */ 514, 367, 513, 257, 71, 71, 547, 71, 71, 363, - /* 460 */ 316, 563, 1617, 122, 122, 122, 122, 121, 121, 120, - /* 470 */ 120, 120, 119, 116, 448, 1021, 1021, 1023, 1024, 27, - /* 480 */ 284, 284, 1196, 1197, 1198, 1162, 572, 1616, 413, 905, - /* 490 */ 190, 554, 360, 569, 554, 941, 537, 521, 1162, 520, - /* 500 */ 417, 1162, 556, 1196, 1197, 1198, 572, 548, 1558, 51, - /* 510 */ 51, 214, 125, 126, 80, 1221, 1221, 1054, 1057, 1044, - /* 520 */ 1044, 123, 123, 124, 124, 124, 124, 1196, 478, 135, - /* 530 */ 135, 413, 284, 284, 1494, 509, 121, 121, 120, 120, - /* 540 */ 120, 119, 116, 448, 1011, 569, 522, 217, 545, 1565, - /* 550 */ 316, 563, 142, 6, 536, 125, 126, 80, 1221, 1221, - /* 560 */ 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, 124, - /* 570 */ 1559, 122, 
122, 122, 122, 121, 121, 120, 120, 120, - /* 580 */ 119, 116, 448, 489, 1196, 1197, 1198, 486, 281, 1271, - /* 590 */ 961, 252, 1196, 377, 508, 505, 504, 1196, 344, 575, - /* 600 */ 1196, 575, 413, 292, 503, 961, 880, 191, 484, 316, + /* 460 */ 316, 563, 1610, 122, 122, 122, 122, 121, 121, 120, + /* 470 */ 120, 120, 119, 116, 448, 1018, 1018, 1020, 1021, 27, + /* 480 */ 284, 284, 1192, 1193, 1192, 1158, 572, 1609, 413, 903, + /* 490 */ 190, 554, 360, 569, 554, 939, 537, 521, 1158, 520, + /* 500 */ 417, 1158, 556, 1192, 1193, 1192, 572, 548, 1552, 51, + /* 510 */ 51, 214, 125, 126, 80, 1216, 1216, 1051, 1054, 1041, + /* 520 */ 1041, 123, 123, 124, 124, 124, 124, 1192, 478, 135, + /* 530 */ 135, 413, 284, 284, 1488, 509, 121, 121, 120, 120, + /* 540 */ 120, 119, 116, 448, 1009, 569, 522, 217, 545, 1559, + /* 550 */ 316, 563, 142, 6, 536, 125, 126, 80, 1216, 1216, + /* 560 */ 1051, 1054, 1041, 1041, 123, 123, 124, 124, 124, 124, + /* 570 */ 1553, 122, 122, 122, 122, 121, 121, 120, 120, 120, + /* 580 */ 119, 116, 448, 489, 1192, 1193, 1192, 486, 281, 1267, + /* 590 */ 959, 252, 1192, 377, 508, 505, 504, 1192, 344, 574, + /* 600 */ 1192, 574, 413, 292, 503, 959, 878, 191, 484, 316, /* 610 */ 563, 388, 290, 384, 122, 122, 122, 122, 121, 121, - /* 620 */ 120, 120, 120, 119, 116, 448, 125, 126, 80, 1221, - /* 630 */ 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, - /* 640 */ 124, 413, 398, 1140, 1196, 873, 100, 284, 284, 1196, - /* 650 */ 1197, 1198, 377, 1097, 1196, 1197, 1198, 1196, 1197, 1198, - /* 660 */ 569, 459, 32, 377, 233, 125, 126, 80, 1221, 1221, - /* 670 */ 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, 124, - /* 680 */ 1437, 963, 572, 228, 962, 122, 122, 122, 122, 121, - /* 690 */ 121, 120, 120, 120, 119, 116, 448, 1162, 228, 1196, - /* 700 */ 157, 1196, 1197, 1198, 1557, 13, 13, 301, 961, 1236, - /* 710 */ 1162, 153, 413, 1162, 377, 1587, 1180, 5, 373, 1584, - /* 720 */ 433, 1242, 3, 961, 122, 122, 122, 122, 121, 121, - /* 730 */ 120, 120, 120, 119, 116, 448, 125, 126, 80, 1221, - /* 740 */ 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, - /* 750 */ 124, 413, 208, 571, 1196, 1032, 1196, 1197, 1198, 1196, - /* 760 */ 392, 856, 155, 1556, 286, 406, 1102, 1102, 492, 572, - /* 770 */ 469, 346, 1323, 1323, 1556, 125, 126, 80, 1221, 1221, - /* 780 */ 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, 124, + /* 620 */ 120, 120, 120, 119, 116, 448, 125, 126, 80, 1216, + /* 630 */ 1216, 1051, 1054, 1041, 1041, 123, 123, 124, 124, 124, + /* 640 */ 124, 413, 398, 1136, 1192, 871, 100, 284, 284, 1192, + /* 650 */ 1193, 1192, 377, 1093, 1192, 1193, 1192, 1192, 1193, 1192, + /* 660 */ 569, 459, 32, 377, 233, 125, 126, 80, 1216, 1216, + /* 670 */ 1051, 1054, 1041, 1041, 123, 123, 124, 124, 124, 124, + /* 680 */ 1432, 961, 572, 228, 960, 122, 122, 122, 122, 121, + /* 690 */ 121, 120, 120, 120, 119, 116, 448, 1158, 228, 1192, + /* 700 */ 157, 1192, 1193, 1192, 1551, 13, 13, 301, 959, 1232, + /* 710 */ 1158, 153, 413, 1158, 377, 1581, 1176, 5, 373, 1578, + /* 720 */ 433, 1238, 3, 959, 122, 122, 122, 122, 121, 121, + /* 730 */ 120, 120, 120, 119, 116, 448, 125, 126, 80, 1216, + /* 740 */ 1216, 1051, 1054, 1041, 1041, 123, 123, 124, 124, 124, + /* 750 */ 124, 413, 208, 571, 1192, 1029, 1192, 1193, 1192, 1192, + /* 760 */ 392, 854, 155, 1550, 286, 406, 1098, 1098, 492, 572, + /* 770 */ 469, 346, 1319, 1319, 1550, 125, 126, 80, 1216, 1216, + /* 780 */ 1051, 1054, 1041, 1041, 123, 123, 124, 124, 124, 124, /* 790 */ 129, 572, 13, 13, 378, 122, 122, 122, 122, 121, /* 800 */ 121, 120, 120, 120, 
119, 116, 448, 302, 572, 457, - /* 810 */ 532, 1196, 1197, 1198, 13, 13, 1196, 1197, 1198, 1301, - /* 820 */ 467, 1271, 413, 1321, 1321, 1556, 1016, 457, 456, 200, - /* 830 */ 299, 71, 71, 1269, 122, 122, 122, 122, 121, 121, - /* 840 */ 120, 120, 120, 119, 116, 448, 125, 126, 80, 1221, - /* 850 */ 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, - /* 860 */ 124, 413, 227, 1077, 1162, 284, 284, 423, 312, 278, - /* 870 */ 278, 285, 285, 1423, 410, 409, 386, 1162, 569, 572, - /* 880 */ 1162, 1200, 569, 1604, 569, 125, 126, 80, 1221, 1221, - /* 890 */ 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, 124, - /* 900 */ 457, 1486, 13, 13, 1540, 122, 122, 122, 122, 121, + /* 810 */ 532, 1192, 1193, 1192, 13, 13, 1192, 1193, 1192, 1297, + /* 820 */ 467, 1267, 413, 1317, 1317, 1550, 1014, 457, 456, 200, + /* 830 */ 299, 71, 71, 1265, 122, 122, 122, 122, 121, 121, + /* 840 */ 120, 120, 120, 119, 116, 448, 125, 126, 80, 1216, + /* 850 */ 1216, 1051, 1054, 1041, 1041, 123, 123, 124, 124, 124, + /* 860 */ 124, 413, 227, 1073, 1158, 284, 284, 423, 312, 278, + /* 870 */ 278, 285, 285, 1419, 410, 409, 386, 1158, 569, 572, + /* 880 */ 1158, 1195, 569, 1598, 569, 125, 126, 80, 1216, 1216, + /* 890 */ 1051, 1054, 1041, 1041, 123, 123, 124, 124, 124, 124, + /* 900 */ 457, 1480, 13, 13, 1534, 122, 122, 122, 122, 121, /* 910 */ 121, 120, 120, 120, 119, 116, 448, 201, 572, 358, - /* 920 */ 1590, 579, 2, 1249, 844, 845, 846, 1566, 317, 1216, - /* 930 */ 146, 6, 413, 255, 254, 253, 206, 1331, 9, 1200, + /* 920 */ 1584, 578, 2, 1245, 842, 843, 844, 1560, 317, 1211, + /* 930 */ 146, 6, 413, 255, 254, 253, 206, 1327, 9, 1195, /* 940 */ 262, 71, 71, 428, 122, 122, 122, 122, 121, 121, - /* 950 */ 120, 120, 120, 119, 116, 448, 125, 126, 80, 1221, - /* 960 */ 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, - /* 970 */ 124, 572, 284, 284, 572, 1217, 413, 578, 313, 1249, - /* 980 */ 353, 1300, 356, 423, 317, 569, 146, 495, 529, 1647, - /* 990 */ 399, 375, 495, 1331, 70, 70, 1299, 71, 71, 240, - /* 1000 */ 1329, 104, 80, 1221, 1221, 1054, 1057, 1044, 1044, 123, + /* 950 */ 120, 120, 120, 119, 116, 448, 125, 126, 80, 1216, + /* 960 */ 1216, 1051, 1054, 1041, 1041, 123, 123, 124, 124, 124, + /* 970 */ 124, 572, 284, 284, 572, 1212, 413, 577, 313, 1245, + /* 980 */ 353, 1296, 356, 423, 317, 569, 146, 495, 529, 1641, + /* 990 */ 399, 375, 495, 1327, 70, 70, 1295, 71, 71, 240, + /* 1000 */ 1325, 104, 80, 1216, 1216, 1051, 1054, 1041, 1041, 123, /* 1010 */ 123, 124, 124, 124, 124, 122, 122, 122, 122, 121, - /* 1020 */ 121, 120, 120, 120, 119, 116, 448, 1118, 284, 284, - /* 1030 */ 432, 452, 1529, 1217, 443, 284, 284, 1493, 1356, 311, - /* 1040 */ 478, 569, 1119, 975, 495, 495, 217, 1267, 569, 1542, - /* 1050 */ 572, 976, 207, 572, 1031, 240, 387, 1120, 523, 122, + /* 1020 */ 121, 120, 120, 120, 119, 116, 448, 1114, 284, 284, + /* 1030 */ 432, 452, 1523, 1212, 443, 284, 284, 1487, 1352, 311, + /* 1040 */ 478, 569, 1115, 973, 495, 495, 217, 1263, 569, 1536, + /* 1050 */ 572, 974, 207, 572, 1028, 240, 387, 1116, 523, 122, /* 1060 */ 122, 122, 122, 121, 121, 120, 120, 120, 119, 116, - /* 1070 */ 448, 1022, 107, 71, 71, 1021, 13, 13, 916, 572, - /* 1080 */ 1499, 572, 284, 284, 97, 530, 495, 452, 917, 1330, - /* 1090 */ 1326, 549, 413, 284, 284, 569, 151, 209, 1499, 1501, - /* 1100 */ 262, 454, 55, 55, 56, 56, 569, 1021, 1021, 1023, - /* 1110 */ 447, 336, 413, 531, 12, 295, 125, 126, 80, 1221, - /* 1120 */ 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, - /* 1130 */ 124, 351, 413, 868, 1538, 1217, 125, 126, 80, 1221, 
- /* 1140 */ 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, - /* 1150 */ 124, 1141, 1645, 478, 1645, 375, 125, 114, 80, 1221, - /* 1160 */ 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, - /* 1170 */ 124, 1499, 333, 478, 335, 122, 122, 122, 122, 121, - /* 1180 */ 121, 120, 120, 120, 119, 116, 448, 203, 1423, 572, - /* 1190 */ 1298, 868, 468, 1217, 440, 122, 122, 122, 122, 121, - /* 1200 */ 121, 120, 120, 120, 119, 116, 448, 557, 1141, 1646, - /* 1210 */ 543, 1646, 15, 15, 896, 122, 122, 122, 122, 121, + /* 1070 */ 448, 1019, 107, 71, 71, 1018, 13, 13, 914, 572, + /* 1080 */ 1493, 572, 284, 284, 97, 530, 495, 452, 915, 1326, + /* 1090 */ 1322, 549, 413, 284, 284, 569, 151, 209, 1493, 1495, + /* 1100 */ 262, 454, 55, 55, 56, 56, 569, 1018, 1018, 1020, + /* 1110 */ 447, 336, 413, 531, 12, 295, 125, 126, 80, 1216, + /* 1120 */ 1216, 1051, 1054, 1041, 1041, 123, 123, 124, 124, 124, + /* 1130 */ 124, 351, 413, 866, 1532, 1212, 125, 126, 80, 1216, + /* 1140 */ 1216, 1051, 1054, 1041, 1041, 123, 123, 124, 124, 124, + /* 1150 */ 124, 1137, 1639, 478, 1639, 375, 125, 114, 80, 1216, + /* 1160 */ 1216, 1051, 1054, 1041, 1041, 123, 123, 124, 124, 124, + /* 1170 */ 124, 1493, 333, 478, 335, 122, 122, 122, 122, 121, + /* 1180 */ 121, 120, 120, 120, 119, 116, 448, 203, 1419, 572, + /* 1190 */ 1294, 866, 468, 1212, 440, 122, 122, 122, 122, 121, + /* 1200 */ 121, 120, 120, 120, 119, 116, 448, 557, 1137, 1640, + /* 1210 */ 543, 1640, 15, 15, 894, 122, 122, 122, 122, 121, /* 1220 */ 121, 120, 120, 120, 119, 116, 448, 572, 298, 542, - /* 1230 */ 1139, 1423, 1563, 1564, 1335, 413, 6, 6, 1173, 1272, - /* 1240 */ 419, 320, 284, 284, 1423, 512, 569, 529, 300, 461, - /* 1250 */ 43, 43, 572, 897, 12, 569, 334, 482, 429, 411, - /* 1260 */ 126, 80, 1221, 1221, 1054, 1057, 1044, 1044, 123, 123, - /* 1270 */ 124, 124, 124, 124, 572, 57, 57, 288, 1196, 1423, - /* 1280 */ 500, 462, 396, 396, 395, 273, 393, 1139, 1562, 853, - /* 1290 */ 1173, 411, 6, 572, 321, 1162, 474, 44, 44, 1561, - /* 1300 */ 1118, 430, 234, 6, 323, 256, 544, 256, 1162, 435, - /* 1310 */ 572, 1162, 322, 17, 491, 1119, 58, 58, 122, 122, + /* 1230 */ 1135, 1419, 1557, 1558, 1331, 413, 6, 6, 1169, 1268, + /* 1240 */ 419, 320, 284, 284, 1419, 512, 569, 529, 300, 461, + /* 1250 */ 43, 43, 572, 895, 12, 569, 334, 482, 429, 411, + /* 1260 */ 126, 80, 1216, 1216, 1051, 1054, 1041, 1041, 123, 123, + /* 1270 */ 124, 124, 124, 124, 572, 57, 57, 288, 1192, 1419, + /* 1280 */ 500, 462, 396, 396, 395, 273, 393, 1135, 1556, 851, + /* 1290 */ 1169, 411, 6, 572, 321, 1158, 474, 44, 44, 1555, + /* 1300 */ 1114, 430, 234, 6, 323, 256, 544, 256, 1158, 435, + /* 1310 */ 572, 1158, 322, 17, 491, 1115, 58, 58, 122, 122, /* 1320 */ 122, 122, 121, 121, 120, 120, 120, 119, 116, 448, - /* 1330 */ 1120, 216, 485, 59, 59, 1196, 1197, 1198, 111, 564, + /* 1330 */ 1116, 216, 485, 59, 59, 1192, 1193, 1192, 111, 564, /* 1340 */ 324, 4, 236, 460, 530, 572, 237, 460, 572, 441, - /* 1350 */ 168, 560, 424, 141, 483, 567, 572, 293, 572, 1099, - /* 1360 */ 572, 293, 572, 1099, 535, 572, 876, 8, 60, 60, + /* 1350 */ 168, 560, 424, 141, 483, 567, 572, 293, 572, 1095, + /* 1360 */ 572, 293, 572, 1095, 535, 572, 874, 8, 60, 60, /* 1370 */ 235, 61, 61, 572, 418, 572, 418, 572, 449, 62, /* 1380 */ 62, 45, 45, 46, 46, 47, 47, 199, 49, 49, /* 1390 */ 561, 572, 363, 572, 100, 490, 50, 50, 63, 63, - /* 1400 */ 64, 64, 565, 419, 539, 414, 572, 1031, 572, 538, - /* 1410 */ 316, 563, 316, 563, 65, 65, 14, 14, 572, 1031, - /* 1420 */ 572, 516, 936, 876, 1022, 109, 109, 935, 1021, 66, - /* 
1430 */ 66, 131, 131, 110, 455, 449, 574, 573, 420, 177, - /* 1440 */ 1021, 132, 132, 67, 67, 572, 471, 572, 936, 475, - /* 1450 */ 1368, 283, 226, 935, 315, 1367, 411, 572, 463, 411, - /* 1460 */ 1021, 1021, 1023, 239, 411, 86, 213, 1354, 52, 52, - /* 1470 */ 68, 68, 1021, 1021, 1023, 1024, 27, 1589, 1184, 451, - /* 1480 */ 69, 69, 288, 97, 108, 1545, 106, 396, 396, 395, - /* 1490 */ 273, 393, 572, 883, 853, 887, 572, 111, 564, 470, - /* 1500 */ 4, 572, 152, 30, 38, 572, 1136, 234, 400, 323, + /* 1400 */ 64, 64, 565, 419, 539, 414, 572, 1028, 572, 538, + /* 1410 */ 316, 563, 316, 563, 65, 65, 14, 14, 572, 1028, + /* 1420 */ 572, 516, 934, 874, 1019, 109, 109, 933, 1018, 66, + /* 1430 */ 66, 131, 131, 110, 455, 449, 573, 449, 420, 177, + /* 1440 */ 1018, 132, 132, 67, 67, 572, 471, 572, 934, 475, + /* 1450 */ 1364, 283, 226, 933, 315, 1363, 411, 572, 463, 411, + /* 1460 */ 1018, 1018, 1020, 239, 411, 86, 213, 1350, 52, 52, + /* 1470 */ 68, 68, 1018, 1018, 1020, 1021, 27, 1583, 1180, 451, + /* 1480 */ 69, 69, 288, 97, 108, 1539, 106, 396, 396, 395, + /* 1490 */ 273, 393, 572, 881, 851, 885, 572, 111, 564, 470, + /* 1500 */ 4, 572, 152, 30, 38, 572, 1132, 234, 400, 323, /* 1510 */ 111, 564, 531, 4, 567, 53, 53, 322, 572, 163, /* 1520 */ 163, 572, 341, 472, 164, 164, 337, 567, 76, 76, - /* 1530 */ 572, 289, 1518, 572, 31, 1517, 572, 449, 342, 487, - /* 1540 */ 100, 54, 54, 348, 72, 72, 296, 236, 1084, 561, - /* 1550 */ 449, 883, 1364, 134, 134, 168, 73, 73, 141, 161, - /* 1560 */ 161, 1578, 561, 539, 572, 319, 572, 352, 540, 1013, - /* 1570 */ 477, 261, 261, 895, 894, 235, 539, 572, 1031, 572, + /* 1530 */ 572, 289, 1512, 572, 31, 1511, 572, 449, 342, 487, + /* 1540 */ 100, 54, 54, 348, 72, 72, 296, 236, 1080, 561, + /* 1550 */ 449, 881, 1360, 134, 134, 168, 73, 73, 141, 161, + /* 1560 */ 161, 1572, 561, 539, 572, 319, 572, 352, 540, 1011, + /* 1570 */ 477, 261, 261, 893, 892, 235, 539, 572, 1028, 572, /* 1580 */ 479, 538, 261, 371, 109, 109, 525, 136, 136, 130, - /* 1590 */ 130, 1031, 110, 370, 449, 574, 573, 109, 109, 1021, - /* 1600 */ 162, 162, 156, 156, 572, 110, 1084, 449, 574, 573, - /* 1610 */ 414, 355, 1021, 572, 357, 316, 563, 572, 347, 572, - /* 1620 */ 100, 501, 361, 258, 100, 902, 903, 140, 140, 359, - /* 1630 */ 1314, 1021, 1021, 1023, 1024, 27, 139, 139, 366, 455, - /* 1640 */ 137, 137, 138, 138, 1021, 1021, 1023, 1024, 27, 1184, - /* 1650 */ 451, 572, 376, 288, 111, 564, 1025, 4, 396, 396, - /* 1660 */ 395, 273, 393, 572, 1145, 853, 572, 1080, 572, 258, - /* 1670 */ 496, 567, 572, 211, 75, 75, 559, 966, 234, 261, - /* 1680 */ 323, 111, 564, 933, 4, 113, 77, 77, 322, 74, - /* 1690 */ 74, 42, 42, 1377, 449, 48, 48, 1422, 567, 978, - /* 1700 */ 979, 1096, 1095, 1096, 1095, 866, 561, 150, 934, 1350, - /* 1710 */ 113, 1362, 558, 1428, 1025, 1279, 1270, 1258, 236, 1257, - /* 1720 */ 1259, 449, 1597, 1347, 308, 276, 168, 309, 11, 141, - /* 1730 */ 397, 310, 232, 561, 1409, 1031, 339, 291, 329, 219, - /* 1740 */ 340, 109, 109, 940, 297, 1414, 235, 345, 481, 110, - /* 1750 */ 506, 449, 574, 573, 332, 1413, 1021, 404, 1297, 369, - /* 1760 */ 223, 1490, 1031, 1489, 1359, 1360, 1358, 1357, 109, 109, - /* 1770 */ 204, 1600, 1236, 562, 265, 218, 110, 205, 449, 574, - /* 1780 */ 573, 414, 391, 1021, 1537, 179, 316, 563, 1021, 1021, - /* 1790 */ 1023, 1024, 27, 230, 1535, 1233, 79, 564, 85, 4, - /* 1800 */ 422, 215, 552, 81, 84, 188, 1410, 128, 1404, 550, - /* 1810 */ 455, 35, 328, 567, 173, 1021, 1021, 1023, 1024, 27, - /* 1820 */ 181, 1495, 1397, 331, 465, 183, 184, 185, 186, 466, - /* 1830 
*/ 499, 242, 98, 402, 1416, 1418, 449, 1415, 473, 36, - /* 1840 */ 192, 488, 405, 1506, 246, 91, 494, 196, 561, 1484, - /* 1850 */ 350, 497, 277, 354, 248, 249, 111, 564, 1260, 4, - /* 1860 */ 250, 407, 515, 436, 1317, 1308, 93, 1316, 1315, 887, - /* 1870 */ 1307, 224, 1583, 567, 438, 524, 439, 1031, 263, 264, - /* 1880 */ 442, 1615, 10, 109, 109, 1287, 408, 1614, 1286, 368, - /* 1890 */ 1285, 110, 1613, 449, 574, 573, 449, 306, 1021, 307, - /* 1900 */ 374, 1382, 1569, 1470, 1381, 385, 105, 314, 561, 99, - /* 1910 */ 1568, 534, 34, 576, 1190, 272, 1340, 551, 383, 274, - /* 1920 */ 1339, 210, 389, 390, 275, 577, 1255, 1250, 415, 165, - /* 1930 */ 1021, 1021, 1023, 1024, 27, 147, 1522, 1031, 166, 1523, - /* 1940 */ 416, 1521, 178, 109, 109, 1520, 304, 167, 840, 450, - /* 1950 */ 220, 110, 221, 449, 574, 573, 212, 78, 1021, 318, - /* 1960 */ 231, 1094, 1092, 144, 180, 326, 169, 1216, 241, 182, - /* 1970 */ 919, 338, 238, 1108, 187, 170, 171, 425, 427, 189, - /* 1980 */ 87, 88, 89, 90, 172, 1111, 243, 1107, 244, 158, - /* 1990 */ 1021, 1021, 1023, 1024, 27, 18, 245, 1230, 493, 349, - /* 2000 */ 1100, 261, 247, 193, 194, 37, 370, 855, 498, 251, - /* 2010 */ 195, 510, 92, 19, 174, 362, 502, 20, 507, 885, - /* 2020 */ 365, 898, 94, 305, 159, 95, 517, 96, 1178, 160, - /* 2030 */ 1060, 1147, 39, 1146, 225, 280, 282, 970, 198, 964, - /* 2040 */ 113, 1164, 1168, 260, 1166, 21, 1172, 7, 22, 1152, - /* 2050 */ 33, 23, 24, 25, 1171, 546, 26, 202, 100, 102, - /* 2060 */ 1075, 103, 1061, 1059, 1063, 1117, 1064, 1116, 266, 267, - /* 2070 */ 28, 40, 929, 1026, 867, 112, 29, 568, 394, 143, - /* 2080 */ 1186, 268, 176, 1185, 269, 1246, 1246, 1246, 1246, 1246, - /* 2090 */ 1246, 1246, 1246, 1246, 1246, 1606, 1246, 1246, 1246, 1246, - /* 2100 */ 1605, + /* 1590 */ 130, 1028, 110, 370, 449, 573, 449, 109, 109, 1018, + /* 1600 */ 162, 162, 156, 156, 572, 110, 1080, 449, 573, 449, + /* 1610 */ 414, 355, 1018, 572, 357, 316, 563, 572, 347, 572, + /* 1620 */ 100, 501, 361, 258, 100, 900, 901, 140, 140, 359, + /* 1630 */ 1310, 1018, 1018, 1020, 1021, 27, 139, 139, 366, 455, + /* 1640 */ 137, 137, 138, 138, 1018, 1018, 1020, 1021, 27, 1180, + /* 1650 */ 451, 572, 376, 288, 111, 564, 1022, 4, 396, 396, + /* 1660 */ 395, 273, 393, 572, 1141, 851, 572, 1076, 572, 258, + /* 1670 */ 496, 567, 572, 211, 75, 75, 559, 964, 234, 261, + /* 1680 */ 323, 111, 564, 931, 4, 113, 77, 77, 322, 74, + /* 1690 */ 74, 42, 42, 1373, 449, 48, 48, 1418, 567, 976, + /* 1700 */ 977, 1092, 1091, 1092, 1091, 864, 561, 150, 932, 1346, + /* 1710 */ 113, 1358, 558, 1423, 1022, 1275, 1266, 1254, 236, 1253, + /* 1720 */ 1255, 449, 1591, 1343, 308, 276, 168, 309, 11, 141, + /* 1730 */ 397, 310, 232, 561, 1405, 1028, 339, 291, 329, 219, + /* 1740 */ 340, 109, 109, 938, 297, 1410, 235, 345, 481, 110, + /* 1750 */ 506, 449, 573, 449, 332, 1409, 1018, 404, 1293, 369, + /* 1760 */ 223, 1484, 1028, 1483, 1355, 1356, 1354, 1353, 109, 109, + /* 1770 */ 204, 1594, 1232, 562, 265, 218, 110, 205, 449, 573, + /* 1780 */ 449, 414, 391, 1018, 1531, 179, 316, 563, 1018, 1018, + /* 1790 */ 1020, 1021, 27, 230, 1529, 1229, 79, 564, 85, 4, + /* 1800 */ 422, 215, 552, 81, 84, 188, 1406, 128, 1400, 550, + /* 1810 */ 455, 35, 328, 567, 173, 1018, 1018, 1020, 1021, 27, + /* 1820 */ 181, 1489, 1393, 331, 465, 183, 184, 185, 186, 466, + /* 1830 */ 499, 242, 98, 402, 1412, 1414, 449, 1411, 473, 36, + /* 1840 */ 192, 488, 405, 1500, 246, 91, 494, 196, 561, 1478, + /* 1850 */ 350, 497, 277, 354, 248, 249, 111, 564, 1256, 4, + /* 1860 */ 250, 407, 515, 436, 1313, 1304, 93, 
1312, 1311, 885, + /* 1870 */ 1303, 224, 1577, 567, 438, 524, 439, 1028, 263, 264, + /* 1880 */ 442, 1608, 10, 109, 109, 1283, 408, 1607, 1282, 368, + /* 1890 */ 1281, 110, 1606, 449, 573, 449, 449, 306, 1018, 307, + /* 1900 */ 374, 1378, 1563, 1465, 1377, 385, 105, 314, 561, 99, + /* 1910 */ 1562, 534, 34, 575, 1186, 272, 1336, 551, 383, 274, + /* 1920 */ 1335, 210, 389, 390, 275, 576, 1251, 1246, 415, 165, + /* 1930 */ 1018, 1018, 1020, 1021, 27, 147, 1516, 1028, 166, 1517, + /* 1940 */ 416, 1515, 178, 109, 109, 1514, 304, 167, 838, 450, + /* 1950 */ 220, 110, 221, 449, 573, 449, 212, 78, 1018, 318, + /* 1960 */ 231, 1090, 1088, 144, 180, 326, 169, 1211, 241, 182, + /* 1970 */ 917, 338, 238, 1104, 187, 170, 171, 425, 427, 189, + /* 1980 */ 87, 88, 89, 90, 172, 1107, 243, 1103, 244, 158, + /* 1990 */ 1018, 1018, 1020, 1021, 27, 18, 245, 1226, 493, 349, + /* 2000 */ 1096, 261, 247, 193, 194, 37, 370, 853, 498, 251, + /* 2010 */ 195, 510, 92, 19, 174, 362, 502, 20, 507, 883, + /* 2020 */ 365, 896, 94, 305, 159, 95, 517, 96, 1174, 160, + /* 2030 */ 1057, 1143, 39, 1142, 225, 280, 282, 968, 198, 962, + /* 2040 */ 113, 1160, 1164, 260, 1162, 21, 1168, 7, 22, 1148, + /* 2050 */ 33, 23, 24, 25, 1167, 546, 26, 202, 100, 102, + /* 2060 */ 1071, 103, 1058, 1056, 1060, 1113, 1061, 1112, 266, 267, + /* 2070 */ 28, 40, 927, 1023, 865, 112, 29, 568, 394, 143, + /* 2080 */ 1182, 268, 176, 1181, 269, 1242, 1242, 1242, 1242, 1242, + /* 2090 */ 1242, 1242, 1242, 1242, 1242, 1599, }; static const YYCODETYPE yy_lookahead[] = { /* 0 */ 193, 193, 193, 274, 275, 276, 193, 274, 275, 276, @@ -167828,7 +168738,7 @@ static const YYCODETYPE yy_lookahead[] = { /* 2070 */ 22, 22, 135, 23, 23, 22, 22, 25, 15, 23, /* 2080 */ 1, 141, 25, 1, 141, 319, 319, 319, 319, 319, /* 2090 */ 319, 319, 319, 319, 319, 141, 319, 319, 319, 319, - /* 2100 */ 141, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2100 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, /* 2110 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, /* 2120 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, /* 2130 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, @@ -167846,9 +168756,9 @@ static const YYCODETYPE yy_lookahead[] = { /* 2250 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, /* 2260 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, /* 2270 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2280 */ 319, 319, 319, 319, 319, 319, + /* 2280 */ 319, }; -#define YY_SHIFT_COUNT (579) +#define YY_SHIFT_COUNT (578) #define YY_SHIFT_MIN (0) #define YY_SHIFT_MAX (2082) static const unsigned short int yy_shift_ofst[] = { @@ -167868,12 +168778,12 @@ static const unsigned short int yy_shift_ofst[] = { /* 130 */ 137, 181, 181, 181, 181, 181, 181, 181, 94, 430, /* 140 */ 66, 65, 112, 366, 533, 533, 740, 1261, 533, 533, /* 150 */ 79, 79, 533, 412, 412, 412, 77, 412, 123, 113, - /* 160 */ 113, 22, 22, 2101, 2101, 328, 328, 328, 239, 468, + /* 160 */ 113, 22, 22, 2096, 2096, 328, 328, 328, 239, 468, /* 170 */ 468, 468, 468, 1015, 1015, 409, 366, 1129, 1186, 533, /* 180 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, /* 190 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 969, /* 200 */ 621, 621, 533, 642, 788, 788, 1228, 1228, 822, 822, - /* 210 */ 67, 1274, 2101, 2101, 2101, 2101, 2101, 2101, 2101, 1307, + /* 210 */ 67, 1274, 2096, 2096, 2096, 2096, 2096, 2096, 2096, 1307, /* 220 */ 954, 954, 585, 472, 640, 387, 695, 538, 541, 700, /* 230 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, /* 240 */ 222, 533, 533, 533, 533, 533, 533, 533, 
533, 533, @@ -167891,9 +168801,9 @@ static const unsigned short int yy_shift_ofst[] = { /* 360 */ 1747, 1747, 1747, 1799, 1844, 1844, 1825, 1747, 1743, 1747, /* 370 */ 1799, 1747, 1747, 1706, 1850, 1763, 1763, 1825, 1633, 1788, /* 380 */ 1788, 1798, 1798, 1659, 1664, 1860, 1633, 1748, 1659, 1762, - /* 390 */ 1765, 1683, 1887, 1901, 1901, 1918, 1918, 1918, 2101, 2101, - /* 400 */ 2101, 2101, 2101, 2101, 2101, 2101, 2101, 2101, 2101, 2101, - /* 410 */ 2101, 2101, 2101, 207, 1095, 331, 620, 903, 806, 1074, + /* 390 */ 1765, 1683, 1887, 1901, 1901, 1918, 1918, 1918, 2096, 2096, + /* 400 */ 2096, 2096, 2096, 2096, 2096, 2096, 2096, 2096, 2096, 2096, + /* 410 */ 2096, 2096, 2096, 207, 1095, 331, 620, 903, 806, 1074, /* 420 */ 1483, 1432, 1481, 1322, 1370, 1394, 1515, 1291, 1546, 1547, /* 430 */ 1557, 1595, 1598, 1599, 1434, 1453, 1618, 1462, 1567, 1489, /* 440 */ 1644, 1654, 1616, 1660, 1548, 1549, 1682, 1685, 1597, 742, @@ -167909,7 +168819,7 @@ static const unsigned short int yy_shift_ofst[] = { /* 540 */ 1958, 2003, 1971, 1961, 2019, 2026, 2028, 2031, 2032, 2033, /* 550 */ 2022, 1917, 1919, 2037, 2015, 2039, 2040, 2041, 2042, 2043, /* 560 */ 2044, 2047, 2055, 2048, 2049, 2050, 2051, 2053, 2054, 2052, - /* 570 */ 1937, 1940, 1943, 1954, 1959, 2057, 2056, 2063, 2079, 2082, + /* 570 */ 1937, 1940, 1943, 1954, 2057, 2056, 2063, 2079, 2082, }; #define YY_REDUCE_COUNT (412) #define YY_REDUCE_MIN (-271) @@ -167959,64 +168869,64 @@ static const short yy_reduce_ofst[] = { /* 410 */ 1738, 1744, 1740, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1651, 1651, 1651, 1479, 1244, 1355, 1244, 1244, 1244, 1479, - /* 10 */ 1479, 1479, 1244, 1385, 1385, 1532, 1277, 1244, 1244, 1244, - /* 20 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1478, 1244, 1244, - /* 30 */ 1244, 1244, 1567, 1567, 1244, 1244, 1244, 1244, 1244, 1244, - /* 40 */ 1244, 1244, 1394, 1244, 1401, 1244, 1244, 1244, 1244, 1244, - /* 50 */ 1480, 1481, 1244, 1244, 1244, 1531, 1533, 1496, 1408, 1407, - /* 60 */ 1406, 1405, 1514, 1373, 1399, 1392, 1396, 1474, 1475, 1473, - /* 70 */ 1477, 1481, 1480, 1244, 1395, 1442, 1458, 1441, 1244, 1244, - /* 80 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, - /* 90 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, - /* 100 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, - /* 110 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, - /* 120 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, - /* 130 */ 1450, 1457, 1456, 1455, 1464, 1454, 1451, 1444, 1443, 1445, - /* 140 */ 1446, 1244, 1244, 1268, 1244, 1244, 1265, 1319, 1244, 1244, - /* 150 */ 1244, 1244, 1244, 1551, 1550, 1244, 1447, 1244, 1277, 1436, - /* 160 */ 1435, 1461, 1448, 1460, 1459, 1539, 1603, 1602, 1497, 1244, - /* 170 */ 1244, 1244, 1244, 1244, 1244, 1567, 1244, 1244, 1244, 1244, - /* 180 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, - /* 190 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1375, - /* 200 */ 1567, 1567, 1244, 1277, 1567, 1567, 1376, 1376, 1273, 1273, - /* 210 */ 1379, 1244, 1546, 1346, 1346, 1346, 1346, 1355, 1346, 1244, - /* 220 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, - /* 230 */ 1244, 1244, 1244, 1244, 1536, 1534, 1244, 1244, 1244, 1244, - /* 240 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, - /* 250 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, - /* 260 */ 1244, 1244, 1244, 1351, 1244, 1244, 1244, 1244, 1244, 1244, - /* 270 */ 1244, 1244, 1244, 1244, 1244, 1596, 1244, 1509, 
1333, 1351, - /* 280 */ 1351, 1351, 1351, 1353, 1334, 1332, 1345, 1278, 1251, 1643, - /* 290 */ 1411, 1400, 1352, 1400, 1640, 1398, 1411, 1411, 1398, 1411, - /* 300 */ 1352, 1640, 1294, 1619, 1289, 1385, 1385, 1385, 1375, 1375, - /* 310 */ 1375, 1375, 1379, 1379, 1476, 1352, 1345, 1244, 1643, 1643, - /* 320 */ 1361, 1361, 1642, 1642, 1361, 1497, 1627, 1420, 1393, 1379, - /* 330 */ 1322, 1393, 1379, 1328, 1328, 1328, 1328, 1361, 1262, 1398, - /* 340 */ 1627, 1627, 1398, 1420, 1322, 1398, 1322, 1398, 1361, 1262, - /* 350 */ 1513, 1637, 1361, 1262, 1487, 1361, 1262, 1361, 1262, 1487, - /* 360 */ 1320, 1320, 1320, 1309, 1244, 1244, 1487, 1320, 1294, 1320, - /* 370 */ 1309, 1320, 1320, 1585, 1244, 1491, 1491, 1487, 1361, 1577, - /* 380 */ 1577, 1388, 1388, 1393, 1379, 1482, 1361, 1244, 1393, 1391, - /* 390 */ 1389, 1398, 1312, 1599, 1599, 1595, 1595, 1595, 1648, 1648, - /* 400 */ 1546, 1612, 1277, 1277, 1277, 1277, 1612, 1296, 1296, 1278, - /* 410 */ 1278, 1277, 1612, 1244, 1244, 1244, 1244, 1244, 1244, 1607, - /* 420 */ 1244, 1541, 1498, 1365, 1244, 1244, 1244, 1244, 1244, 1244, - /* 430 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1552, 1244, - /* 440 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1425, - /* 450 */ 1244, 1247, 1543, 1244, 1244, 1244, 1244, 1244, 1244, 1244, - /* 460 */ 1244, 1402, 1403, 1366, 1244, 1244, 1244, 1244, 1244, 1244, - /* 470 */ 1244, 1417, 1244, 1244, 1244, 1412, 1244, 1244, 1244, 1244, - /* 480 */ 1244, 1244, 1244, 1244, 1639, 1244, 1244, 1244, 1244, 1244, - /* 490 */ 1244, 1512, 1511, 1244, 1244, 1363, 1244, 1244, 1244, 1244, - /* 500 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1292, - /* 510 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, - /* 520 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, - /* 530 */ 1244, 1244, 1244, 1390, 1244, 1244, 1244, 1244, 1244, 1244, - /* 540 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1582, 1380, - /* 550 */ 1244, 1244, 1244, 1244, 1630, 1244, 1244, 1244, 1244, 1244, - /* 560 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1623, - /* 570 */ 1336, 1427, 1244, 1426, 1430, 1266, 1244, 1256, 1244, 1244, + /* 0 */ 1645, 1645, 1645, 1473, 1240, 1351, 1240, 1240, 1240, 1473, + /* 10 */ 1473, 1473, 1240, 1381, 1381, 1526, 1273, 1240, 1240, 1240, + /* 20 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1472, 1240, 1240, + /* 30 */ 1240, 1240, 1561, 1561, 1240, 1240, 1240, 1240, 1240, 1240, + /* 40 */ 1240, 1240, 1390, 1240, 1397, 1240, 1240, 1240, 1240, 1240, + /* 50 */ 1474, 1475, 1240, 1240, 1240, 1525, 1527, 1490, 1404, 1403, + /* 60 */ 1402, 1401, 1508, 1369, 1395, 1388, 1392, 1469, 1470, 1468, + /* 70 */ 1623, 1475, 1474, 1240, 1391, 1437, 1453, 1436, 1240, 1240, + /* 80 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, + /* 90 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, + /* 100 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, + /* 110 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, + /* 120 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, + /* 130 */ 1445, 1452, 1451, 1450, 1459, 1449, 1446, 1439, 1438, 1440, + /* 140 */ 1441, 1240, 1240, 1264, 1240, 1240, 1261, 1315, 1240, 1240, + /* 150 */ 1240, 1240, 1240, 1545, 1544, 1240, 1442, 1240, 1273, 1431, + /* 160 */ 1430, 1456, 1443, 1455, 1454, 1533, 1597, 1596, 1491, 1240, + /* 170 */ 1240, 1240, 1240, 1240, 1240, 1561, 1240, 1240, 1240, 1240, + /* 180 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, + /* 190 */ 1240, 1240, 
1240, 1240, 1240, 1240, 1240, 1240, 1240, 1371, + /* 200 */ 1561, 1561, 1240, 1273, 1561, 1561, 1372, 1372, 1269, 1269, + /* 210 */ 1375, 1240, 1540, 1342, 1342, 1342, 1342, 1351, 1342, 1240, + /* 220 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, + /* 230 */ 1240, 1240, 1240, 1240, 1530, 1528, 1240, 1240, 1240, 1240, + /* 240 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, + /* 250 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, + /* 260 */ 1240, 1240, 1240, 1347, 1240, 1240, 1240, 1240, 1240, 1240, + /* 270 */ 1240, 1240, 1240, 1240, 1240, 1590, 1240, 1503, 1329, 1347, + /* 280 */ 1347, 1347, 1347, 1349, 1330, 1328, 1341, 1274, 1247, 1637, + /* 290 */ 1407, 1396, 1348, 1396, 1634, 1394, 1407, 1407, 1394, 1407, + /* 300 */ 1348, 1634, 1290, 1612, 1285, 1381, 1381, 1381, 1371, 1371, + /* 310 */ 1371, 1371, 1375, 1375, 1471, 1348, 1341, 1240, 1637, 1637, + /* 320 */ 1357, 1357, 1636, 1636, 1357, 1491, 1620, 1416, 1389, 1375, + /* 330 */ 1318, 1389, 1375, 1324, 1324, 1324, 1324, 1357, 1258, 1394, + /* 340 */ 1620, 1620, 1394, 1416, 1318, 1394, 1318, 1394, 1357, 1258, + /* 350 */ 1507, 1631, 1357, 1258, 1481, 1357, 1258, 1357, 1258, 1481, + /* 360 */ 1316, 1316, 1316, 1305, 1240, 1240, 1481, 1316, 1290, 1316, + /* 370 */ 1305, 1316, 1316, 1579, 1240, 1485, 1485, 1481, 1357, 1571, + /* 380 */ 1571, 1384, 1384, 1389, 1375, 1476, 1357, 1240, 1389, 1387, + /* 390 */ 1385, 1394, 1308, 1593, 1593, 1589, 1589, 1589, 1642, 1642, + /* 400 */ 1540, 1605, 1273, 1273, 1273, 1273, 1605, 1292, 1292, 1274, + /* 410 */ 1274, 1273, 1605, 1240, 1240, 1240, 1240, 1240, 1240, 1600, + /* 420 */ 1240, 1535, 1492, 1361, 1240, 1240, 1240, 1240, 1240, 1240, + /* 430 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1546, 1240, + /* 440 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1421, + /* 450 */ 1240, 1243, 1537, 1240, 1240, 1240, 1240, 1240, 1240, 1240, + /* 460 */ 1240, 1398, 1399, 1362, 1240, 1240, 1240, 1240, 1240, 1240, + /* 470 */ 1240, 1413, 1240, 1240, 1240, 1408, 1240, 1240, 1240, 1240, + /* 480 */ 1240, 1240, 1240, 1240, 1633, 1240, 1240, 1240, 1240, 1240, + /* 490 */ 1240, 1506, 1505, 1240, 1240, 1359, 1240, 1240, 1240, 1240, + /* 500 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1288, + /* 510 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, + /* 520 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, + /* 530 */ 1240, 1240, 1240, 1386, 1240, 1240, 1240, 1240, 1240, 1240, + /* 540 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1576, 1376, + /* 550 */ 1240, 1240, 1240, 1240, 1624, 1240, 1240, 1240, 1240, 1240, + /* 560 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1616, + /* 570 */ 1332, 1422, 1240, 1425, 1262, 1240, 1252, 1240, 1240, }; /********** End of lemon-generated parsing tables *****************************/ @@ -168813,233 +169723,231 @@ static const char *const yyRuleName[] = { /* 175 */ "idlist ::= idlist COMMA nm", /* 176 */ "idlist ::= nm", /* 177 */ "expr ::= LP expr RP", - /* 178 */ "expr ::= ID|INDEXED", - /* 179 */ "expr ::= JOIN_KW", - /* 180 */ "expr ::= nm DOT nm", - /* 181 */ "expr ::= nm DOT nm DOT nm", - /* 182 */ "term ::= NULL|FLOAT|BLOB", - /* 183 */ "term ::= STRING", - /* 184 */ "term ::= INTEGER", - /* 185 */ "expr ::= VARIABLE", - /* 186 */ "expr ::= expr COLLATE ID|STRING", - /* 187 */ "expr ::= CAST LP expr AS typetoken RP", - /* 188 */ "expr ::= ID|INDEXED LP distinct exprlist RP", - /* 189 */ "expr ::= ID|INDEXED LP STAR RP", - /* 190 */ "expr ::= ID|INDEXED LP 
distinct exprlist RP filter_over", - /* 191 */ "expr ::= ID|INDEXED LP STAR RP filter_over", - /* 192 */ "term ::= CTIME_KW", - /* 193 */ "expr ::= LP nexprlist COMMA expr RP", - /* 194 */ "expr ::= expr AND expr", - /* 195 */ "expr ::= expr OR expr", - /* 196 */ "expr ::= expr LT|GT|GE|LE expr", - /* 197 */ "expr ::= expr EQ|NE expr", - /* 198 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", - /* 199 */ "expr ::= expr PLUS|MINUS expr", - /* 200 */ "expr ::= expr STAR|SLASH|REM expr", - /* 201 */ "expr ::= expr CONCAT expr", - /* 202 */ "likeop ::= NOT LIKE_KW|MATCH", - /* 203 */ "expr ::= expr likeop expr", - /* 204 */ "expr ::= expr likeop expr ESCAPE expr", - /* 205 */ "expr ::= expr ISNULL|NOTNULL", - /* 206 */ "expr ::= expr NOT NULL", - /* 207 */ "expr ::= expr IS expr", - /* 208 */ "expr ::= expr IS NOT expr", - /* 209 */ "expr ::= expr IS NOT DISTINCT FROM expr", - /* 210 */ "expr ::= expr IS DISTINCT FROM expr", - /* 211 */ "expr ::= NOT expr", - /* 212 */ "expr ::= BITNOT expr", - /* 213 */ "expr ::= PLUS|MINUS expr", - /* 214 */ "expr ::= expr PTR expr", - /* 215 */ "between_op ::= BETWEEN", - /* 216 */ "between_op ::= NOT BETWEEN", - /* 217 */ "expr ::= expr between_op expr AND expr", - /* 218 */ "in_op ::= IN", - /* 219 */ "in_op ::= NOT IN", - /* 220 */ "expr ::= expr in_op LP exprlist RP", - /* 221 */ "expr ::= LP select RP", - /* 222 */ "expr ::= expr in_op LP select RP", - /* 223 */ "expr ::= expr in_op nm dbnm paren_exprlist", - /* 224 */ "expr ::= EXISTS LP select RP", - /* 225 */ "expr ::= CASE case_operand case_exprlist case_else END", - /* 226 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", - /* 227 */ "case_exprlist ::= WHEN expr THEN expr", - /* 228 */ "case_else ::= ELSE expr", - /* 229 */ "case_else ::=", - /* 230 */ "case_operand ::= expr", - /* 231 */ "case_operand ::=", - /* 232 */ "exprlist ::=", - /* 233 */ "nexprlist ::= nexprlist COMMA expr", - /* 234 */ "nexprlist ::= expr", - /* 235 */ "paren_exprlist ::=", - /* 236 */ "paren_exprlist ::= LP exprlist RP", - /* 237 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", - /* 238 */ "uniqueflag ::= UNIQUE", - /* 239 */ "uniqueflag ::=", - /* 240 */ "eidlist_opt ::=", - /* 241 */ "eidlist_opt ::= LP eidlist RP", - /* 242 */ "eidlist ::= eidlist COMMA nm collate sortorder", - /* 243 */ "eidlist ::= nm collate sortorder", - /* 244 */ "collate ::=", - /* 245 */ "collate ::= COLLATE ID|STRING", - /* 246 */ "cmd ::= DROP INDEX ifexists fullname", - /* 247 */ "cmd ::= VACUUM vinto", - /* 248 */ "cmd ::= VACUUM nm vinto", - /* 249 */ "vinto ::= INTO expr", - /* 250 */ "vinto ::=", - /* 251 */ "cmd ::= PRAGMA nm dbnm", - /* 252 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", - /* 253 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", - /* 254 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", - /* 255 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", - /* 256 */ "plus_num ::= PLUS INTEGER|FLOAT", - /* 257 */ "minus_num ::= MINUS INTEGER|FLOAT", - /* 258 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", - /* 259 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", - /* 260 */ "trigger_time ::= BEFORE|AFTER", - /* 261 */ "trigger_time ::= INSTEAD OF", - /* 262 */ "trigger_time ::=", - /* 263 */ "trigger_event ::= DELETE|INSERT", - /* 264 */ "trigger_event ::= UPDATE", - /* 265 */ "trigger_event ::= UPDATE OF idlist", - /* 266 */ "when_clause ::=", - /* 267 */ "when_clause ::= WHEN expr", - /* 268 */ "trigger_cmd_list ::= 
trigger_cmd_list trigger_cmd SEMI", - /* 269 */ "trigger_cmd_list ::= trigger_cmd SEMI", - /* 270 */ "trnm ::= nm DOT nm", - /* 271 */ "tridxby ::= INDEXED BY nm", - /* 272 */ "tridxby ::= NOT INDEXED", - /* 273 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt", - /* 274 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt", - /* 275 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", - /* 276 */ "trigger_cmd ::= scanpt select scanpt", - /* 277 */ "expr ::= RAISE LP IGNORE RP", - /* 278 */ "expr ::= RAISE LP raisetype COMMA nm RP", - /* 279 */ "raisetype ::= ROLLBACK", - /* 280 */ "raisetype ::= ABORT", - /* 281 */ "raisetype ::= FAIL", - /* 282 */ "cmd ::= DROP TRIGGER ifexists fullname", - /* 283 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", - /* 284 */ "cmd ::= DETACH database_kw_opt expr", - /* 285 */ "key_opt ::=", - /* 286 */ "key_opt ::= KEY expr", - /* 287 */ "cmd ::= REINDEX", - /* 288 */ "cmd ::= REINDEX nm dbnm", - /* 289 */ "cmd ::= ANALYZE", - /* 290 */ "cmd ::= ANALYZE nm dbnm", - /* 291 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", - /* 292 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", - /* 293 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm", - /* 294 */ "add_column_fullname ::= fullname", - /* 295 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm", - /* 296 */ "cmd ::= create_vtab", - /* 297 */ "cmd ::= create_vtab LP vtabarglist RP", - /* 298 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", - /* 299 */ "vtabarg ::=", - /* 300 */ "vtabargtoken ::= ANY", - /* 301 */ "vtabargtoken ::= lp anylist RP", - /* 302 */ "lp ::= LP", - /* 303 */ "with ::= WITH wqlist", - /* 304 */ "with ::= WITH RECURSIVE wqlist", - /* 305 */ "wqas ::= AS", - /* 306 */ "wqas ::= AS MATERIALIZED", - /* 307 */ "wqas ::= AS NOT MATERIALIZED", - /* 308 */ "wqitem ::= nm eidlist_opt wqas LP select RP", - /* 309 */ "wqlist ::= wqitem", - /* 310 */ "wqlist ::= wqlist COMMA wqitem", - /* 311 */ "windowdefn_list ::= windowdefn", - /* 312 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", - /* 313 */ "windowdefn ::= nm AS LP window RP", - /* 314 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", - /* 315 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", - /* 316 */ "window ::= ORDER BY sortlist frame_opt", - /* 317 */ "window ::= nm ORDER BY sortlist frame_opt", - /* 318 */ "window ::= frame_opt", - /* 319 */ "window ::= nm frame_opt", - /* 320 */ "frame_opt ::=", - /* 321 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", - /* 322 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", - /* 323 */ "range_or_rows ::= RANGE|ROWS|GROUPS", - /* 324 */ "frame_bound_s ::= frame_bound", - /* 325 */ "frame_bound_s ::= UNBOUNDED PRECEDING", - /* 326 */ "frame_bound_e ::= frame_bound", - /* 327 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", - /* 328 */ "frame_bound ::= expr PRECEDING|FOLLOWING", - /* 329 */ "frame_bound ::= CURRENT ROW", - /* 330 */ "frame_exclude_opt ::=", - /* 331 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", - /* 332 */ "frame_exclude ::= NO OTHERS", - /* 333 */ "frame_exclude ::= CURRENT ROW", - /* 334 */ "frame_exclude ::= GROUP|TIES", - /* 335 */ "window_clause ::= WINDOW windowdefn_list", - /* 336 */ "filter_over ::= filter_clause over_clause", - /* 337 */ "filter_over ::= over_clause", - /* 338 */ "filter_over ::= filter_clause", - /* 339 */ 
"over_clause ::= OVER LP window RP", - /* 340 */ "over_clause ::= OVER nm", - /* 341 */ "filter_clause ::= FILTER LP WHERE expr RP", - /* 342 */ "input ::= cmdlist", - /* 343 */ "cmdlist ::= cmdlist ecmd", - /* 344 */ "cmdlist ::= ecmd", - /* 345 */ "ecmd ::= SEMI", - /* 346 */ "ecmd ::= cmdx SEMI", - /* 347 */ "ecmd ::= explain cmdx SEMI", - /* 348 */ "trans_opt ::=", - /* 349 */ "trans_opt ::= TRANSACTION", - /* 350 */ "trans_opt ::= TRANSACTION nm", - /* 351 */ "savepoint_opt ::= SAVEPOINT", - /* 352 */ "savepoint_opt ::=", - /* 353 */ "cmd ::= create_table create_table_args", - /* 354 */ "table_option_set ::= table_option", - /* 355 */ "columnlist ::= columnlist COMMA columnname carglist", - /* 356 */ "columnlist ::= columnname carglist", - /* 357 */ "nm ::= ID|INDEXED", - /* 358 */ "nm ::= STRING", - /* 359 */ "nm ::= JOIN_KW", - /* 360 */ "typetoken ::= typename", - /* 361 */ "typename ::= ID|STRING", - /* 362 */ "signed ::= plus_num", - /* 363 */ "signed ::= minus_num", - /* 364 */ "carglist ::= carglist ccons", - /* 365 */ "carglist ::=", - /* 366 */ "ccons ::= NULL onconf", - /* 367 */ "ccons ::= GENERATED ALWAYS AS generated", - /* 368 */ "ccons ::= AS generated", - /* 369 */ "conslist_opt ::= COMMA conslist", - /* 370 */ "conslist ::= conslist tconscomma tcons", - /* 371 */ "conslist ::= tcons", - /* 372 */ "tconscomma ::=", - /* 373 */ "defer_subclause_opt ::= defer_subclause", - /* 374 */ "resolvetype ::= raisetype", - /* 375 */ "selectnowith ::= oneselect", - /* 376 */ "oneselect ::= values", - /* 377 */ "sclp ::= selcollist COMMA", - /* 378 */ "as ::= ID|STRING", - /* 379 */ "indexed_opt ::= indexed_by", - /* 380 */ "returning ::=", - /* 381 */ "expr ::= term", - /* 382 */ "likeop ::= LIKE_KW|MATCH", - /* 383 */ "exprlist ::= nexprlist", - /* 384 */ "nmnum ::= plus_num", - /* 385 */ "nmnum ::= nm", - /* 386 */ "nmnum ::= ON", - /* 387 */ "nmnum ::= DELETE", - /* 388 */ "nmnum ::= DEFAULT", - /* 389 */ "plus_num ::= INTEGER|FLOAT", - /* 390 */ "foreach_clause ::=", - /* 391 */ "foreach_clause ::= FOR EACH ROW", - /* 392 */ "trnm ::= nm", - /* 393 */ "tridxby ::=", - /* 394 */ "database_kw_opt ::= DATABASE", - /* 395 */ "database_kw_opt ::=", - /* 396 */ "kwcolumn_opt ::=", - /* 397 */ "kwcolumn_opt ::= COLUMNKW", - /* 398 */ "vtabarglist ::= vtabarg", - /* 399 */ "vtabarglist ::= vtabarglist COMMA vtabarg", - /* 400 */ "vtabarg ::= vtabarg vtabargtoken", - /* 401 */ "anylist ::=", - /* 402 */ "anylist ::= anylist LP anylist RP", - /* 403 */ "anylist ::= anylist ANY", - /* 404 */ "with ::=", + /* 178 */ "expr ::= ID|INDEXED|JOIN_KW", + /* 179 */ "expr ::= nm DOT nm", + /* 180 */ "expr ::= nm DOT nm DOT nm", + /* 181 */ "term ::= NULL|FLOAT|BLOB", + /* 182 */ "term ::= STRING", + /* 183 */ "term ::= INTEGER", + /* 184 */ "expr ::= VARIABLE", + /* 185 */ "expr ::= expr COLLATE ID|STRING", + /* 186 */ "expr ::= CAST LP expr AS typetoken RP", + /* 187 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP", + /* 188 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP", + /* 189 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over", + /* 190 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over", + /* 191 */ "term ::= CTIME_KW", + /* 192 */ "expr ::= LP nexprlist COMMA expr RP", + /* 193 */ "expr ::= expr AND expr", + /* 194 */ "expr ::= expr OR expr", + /* 195 */ "expr ::= expr LT|GT|GE|LE expr", + /* 196 */ "expr ::= expr EQ|NE expr", + /* 197 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", + /* 198 */ "expr ::= expr PLUS|MINUS expr", + /* 199 */ "expr ::= expr 
STAR|SLASH|REM expr", + /* 200 */ "expr ::= expr CONCAT expr", + /* 201 */ "likeop ::= NOT LIKE_KW|MATCH", + /* 202 */ "expr ::= expr likeop expr", + /* 203 */ "expr ::= expr likeop expr ESCAPE expr", + /* 204 */ "expr ::= expr ISNULL|NOTNULL", + /* 205 */ "expr ::= expr NOT NULL", + /* 206 */ "expr ::= expr IS expr", + /* 207 */ "expr ::= expr IS NOT expr", + /* 208 */ "expr ::= expr IS NOT DISTINCT FROM expr", + /* 209 */ "expr ::= expr IS DISTINCT FROM expr", + /* 210 */ "expr ::= NOT expr", + /* 211 */ "expr ::= BITNOT expr", + /* 212 */ "expr ::= PLUS|MINUS expr", + /* 213 */ "expr ::= expr PTR expr", + /* 214 */ "between_op ::= BETWEEN", + /* 215 */ "between_op ::= NOT BETWEEN", + /* 216 */ "expr ::= expr between_op expr AND expr", + /* 217 */ "in_op ::= IN", + /* 218 */ "in_op ::= NOT IN", + /* 219 */ "expr ::= expr in_op LP exprlist RP", + /* 220 */ "expr ::= LP select RP", + /* 221 */ "expr ::= expr in_op LP select RP", + /* 222 */ "expr ::= expr in_op nm dbnm paren_exprlist", + /* 223 */ "expr ::= EXISTS LP select RP", + /* 224 */ "expr ::= CASE case_operand case_exprlist case_else END", + /* 225 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", + /* 226 */ "case_exprlist ::= WHEN expr THEN expr", + /* 227 */ "case_else ::= ELSE expr", + /* 228 */ "case_else ::=", + /* 229 */ "case_operand ::=", + /* 230 */ "exprlist ::=", + /* 231 */ "nexprlist ::= nexprlist COMMA expr", + /* 232 */ "nexprlist ::= expr", + /* 233 */ "paren_exprlist ::=", + /* 234 */ "paren_exprlist ::= LP exprlist RP", + /* 235 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", + /* 236 */ "uniqueflag ::= UNIQUE", + /* 237 */ "uniqueflag ::=", + /* 238 */ "eidlist_opt ::=", + /* 239 */ "eidlist_opt ::= LP eidlist RP", + /* 240 */ "eidlist ::= eidlist COMMA nm collate sortorder", + /* 241 */ "eidlist ::= nm collate sortorder", + /* 242 */ "collate ::=", + /* 243 */ "collate ::= COLLATE ID|STRING", + /* 244 */ "cmd ::= DROP INDEX ifexists fullname", + /* 245 */ "cmd ::= VACUUM vinto", + /* 246 */ "cmd ::= VACUUM nm vinto", + /* 247 */ "vinto ::= INTO expr", + /* 248 */ "vinto ::=", + /* 249 */ "cmd ::= PRAGMA nm dbnm", + /* 250 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", + /* 251 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", + /* 252 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", + /* 253 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", + /* 254 */ "plus_num ::= PLUS INTEGER|FLOAT", + /* 255 */ "minus_num ::= MINUS INTEGER|FLOAT", + /* 256 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", + /* 257 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", + /* 258 */ "trigger_time ::= BEFORE|AFTER", + /* 259 */ "trigger_time ::= INSTEAD OF", + /* 260 */ "trigger_time ::=", + /* 261 */ "trigger_event ::= DELETE|INSERT", + /* 262 */ "trigger_event ::= UPDATE", + /* 263 */ "trigger_event ::= UPDATE OF idlist", + /* 264 */ "when_clause ::=", + /* 265 */ "when_clause ::= WHEN expr", + /* 266 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", + /* 267 */ "trigger_cmd_list ::= trigger_cmd SEMI", + /* 268 */ "trnm ::= nm DOT nm", + /* 269 */ "tridxby ::= INDEXED BY nm", + /* 270 */ "tridxby ::= NOT INDEXED", + /* 271 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt", + /* 272 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt", + /* 273 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", + /* 274 */ "trigger_cmd ::= scanpt select scanpt", 
+ /* 275 */ "expr ::= RAISE LP IGNORE RP", + /* 276 */ "expr ::= RAISE LP raisetype COMMA nm RP", + /* 277 */ "raisetype ::= ROLLBACK", + /* 278 */ "raisetype ::= ABORT", + /* 279 */ "raisetype ::= FAIL", + /* 280 */ "cmd ::= DROP TRIGGER ifexists fullname", + /* 281 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", + /* 282 */ "cmd ::= DETACH database_kw_opt expr", + /* 283 */ "key_opt ::=", + /* 284 */ "key_opt ::= KEY expr", + /* 285 */ "cmd ::= REINDEX", + /* 286 */ "cmd ::= REINDEX nm dbnm", + /* 287 */ "cmd ::= ANALYZE", + /* 288 */ "cmd ::= ANALYZE nm dbnm", + /* 289 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", + /* 290 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", + /* 291 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm", + /* 292 */ "add_column_fullname ::= fullname", + /* 293 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm", + /* 294 */ "cmd ::= create_vtab", + /* 295 */ "cmd ::= create_vtab LP vtabarglist RP", + /* 296 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", + /* 297 */ "vtabarg ::=", + /* 298 */ "vtabargtoken ::= ANY", + /* 299 */ "vtabargtoken ::= lp anylist RP", + /* 300 */ "lp ::= LP", + /* 301 */ "with ::= WITH wqlist", + /* 302 */ "with ::= WITH RECURSIVE wqlist", + /* 303 */ "wqas ::= AS", + /* 304 */ "wqas ::= AS MATERIALIZED", + /* 305 */ "wqas ::= AS NOT MATERIALIZED", + /* 306 */ "wqitem ::= nm eidlist_opt wqas LP select RP", + /* 307 */ "wqlist ::= wqitem", + /* 308 */ "wqlist ::= wqlist COMMA wqitem", + /* 309 */ "windowdefn_list ::= windowdefn", + /* 310 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", + /* 311 */ "windowdefn ::= nm AS LP window RP", + /* 312 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", + /* 313 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", + /* 314 */ "window ::= ORDER BY sortlist frame_opt", + /* 315 */ "window ::= nm ORDER BY sortlist frame_opt", + /* 316 */ "window ::= frame_opt", + /* 317 */ "window ::= nm frame_opt", + /* 318 */ "frame_opt ::=", + /* 319 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", + /* 320 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", + /* 321 */ "range_or_rows ::= RANGE|ROWS|GROUPS", + /* 322 */ "frame_bound_s ::= frame_bound", + /* 323 */ "frame_bound_s ::= UNBOUNDED PRECEDING", + /* 324 */ "frame_bound_e ::= frame_bound", + /* 325 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", + /* 326 */ "frame_bound ::= expr PRECEDING|FOLLOWING", + /* 327 */ "frame_bound ::= CURRENT ROW", + /* 328 */ "frame_exclude_opt ::=", + /* 329 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", + /* 330 */ "frame_exclude ::= NO OTHERS", + /* 331 */ "frame_exclude ::= CURRENT ROW", + /* 332 */ "frame_exclude ::= GROUP|TIES", + /* 333 */ "window_clause ::= WINDOW windowdefn_list", + /* 334 */ "filter_over ::= filter_clause over_clause", + /* 335 */ "filter_over ::= over_clause", + /* 336 */ "filter_over ::= filter_clause", + /* 337 */ "over_clause ::= OVER LP window RP", + /* 338 */ "over_clause ::= OVER nm", + /* 339 */ "filter_clause ::= FILTER LP WHERE expr RP", + /* 340 */ "input ::= cmdlist", + /* 341 */ "cmdlist ::= cmdlist ecmd", + /* 342 */ "cmdlist ::= ecmd", + /* 343 */ "ecmd ::= SEMI", + /* 344 */ "ecmd ::= cmdx SEMI", + /* 345 */ "ecmd ::= explain cmdx SEMI", + /* 346 */ "trans_opt ::=", + /* 347 */ "trans_opt ::= TRANSACTION", + /* 348 */ "trans_opt ::= TRANSACTION nm", + /* 349 */ "savepoint_opt ::= SAVEPOINT", + /* 350 */ 
"savepoint_opt ::=", + /* 351 */ "cmd ::= create_table create_table_args", + /* 352 */ "table_option_set ::= table_option", + /* 353 */ "columnlist ::= columnlist COMMA columnname carglist", + /* 354 */ "columnlist ::= columnname carglist", + /* 355 */ "nm ::= ID|INDEXED|JOIN_KW", + /* 356 */ "nm ::= STRING", + /* 357 */ "typetoken ::= typename", + /* 358 */ "typename ::= ID|STRING", + /* 359 */ "signed ::= plus_num", + /* 360 */ "signed ::= minus_num", + /* 361 */ "carglist ::= carglist ccons", + /* 362 */ "carglist ::=", + /* 363 */ "ccons ::= NULL onconf", + /* 364 */ "ccons ::= GENERATED ALWAYS AS generated", + /* 365 */ "ccons ::= AS generated", + /* 366 */ "conslist_opt ::= COMMA conslist", + /* 367 */ "conslist ::= conslist tconscomma tcons", + /* 368 */ "conslist ::= tcons", + /* 369 */ "tconscomma ::=", + /* 370 */ "defer_subclause_opt ::= defer_subclause", + /* 371 */ "resolvetype ::= raisetype", + /* 372 */ "selectnowith ::= oneselect", + /* 373 */ "oneselect ::= values", + /* 374 */ "sclp ::= selcollist COMMA", + /* 375 */ "as ::= ID|STRING", + /* 376 */ "indexed_opt ::= indexed_by", + /* 377 */ "returning ::=", + /* 378 */ "expr ::= term", + /* 379 */ "likeop ::= LIKE_KW|MATCH", + /* 380 */ "case_operand ::= expr", + /* 381 */ "exprlist ::= nexprlist", + /* 382 */ "nmnum ::= plus_num", + /* 383 */ "nmnum ::= nm", + /* 384 */ "nmnum ::= ON", + /* 385 */ "nmnum ::= DELETE", + /* 386 */ "nmnum ::= DEFAULT", + /* 387 */ "plus_num ::= INTEGER|FLOAT", + /* 388 */ "foreach_clause ::=", + /* 389 */ "foreach_clause ::= FOR EACH ROW", + /* 390 */ "trnm ::= nm", + /* 391 */ "tridxby ::=", + /* 392 */ "database_kw_opt ::= DATABASE", + /* 393 */ "database_kw_opt ::=", + /* 394 */ "kwcolumn_opt ::=", + /* 395 */ "kwcolumn_opt ::= COLUMNKW", + /* 396 */ "vtabarglist ::= vtabarg", + /* 397 */ "vtabarglist ::= vtabarglist COMMA vtabarg", + /* 398 */ "vtabarg ::= vtabarg vtabargtoken", + /* 399 */ "anylist ::=", + /* 400 */ "anylist ::= anylist LP anylist RP", + /* 401 */ "anylist ::= anylist ANY", + /* 402 */ "with ::=", }; #endif /* NDEBUG */ @@ -169724,233 +170632,231 @@ static const YYCODETYPE yyRuleInfoLhs[] = { 263, /* (175) idlist ::= idlist COMMA nm */ 263, /* (176) idlist ::= nm */ 217, /* (177) expr ::= LP expr RP */ - 217, /* (178) expr ::= ID|INDEXED */ - 217, /* (179) expr ::= JOIN_KW */ - 217, /* (180) expr ::= nm DOT nm */ - 217, /* (181) expr ::= nm DOT nm DOT nm */ - 216, /* (182) term ::= NULL|FLOAT|BLOB */ - 216, /* (183) term ::= STRING */ - 216, /* (184) term ::= INTEGER */ - 217, /* (185) expr ::= VARIABLE */ - 217, /* (186) expr ::= expr COLLATE ID|STRING */ - 217, /* (187) expr ::= CAST LP expr AS typetoken RP */ - 217, /* (188) expr ::= ID|INDEXED LP distinct exprlist RP */ - 217, /* (189) expr ::= ID|INDEXED LP STAR RP */ - 217, /* (190) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ - 217, /* (191) expr ::= ID|INDEXED LP STAR RP filter_over */ - 216, /* (192) term ::= CTIME_KW */ - 217, /* (193) expr ::= LP nexprlist COMMA expr RP */ - 217, /* (194) expr ::= expr AND expr */ - 217, /* (195) expr ::= expr OR expr */ - 217, /* (196) expr ::= expr LT|GT|GE|LE expr */ - 217, /* (197) expr ::= expr EQ|NE expr */ - 217, /* (198) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - 217, /* (199) expr ::= expr PLUS|MINUS expr */ - 217, /* (200) expr ::= expr STAR|SLASH|REM expr */ - 217, /* (201) expr ::= expr CONCAT expr */ - 274, /* (202) likeop ::= NOT LIKE_KW|MATCH */ - 217, /* (203) expr ::= expr likeop expr */ - 217, /* (204) expr ::= expr likeop expr ESCAPE 
expr */ - 217, /* (205) expr ::= expr ISNULL|NOTNULL */ - 217, /* (206) expr ::= expr NOT NULL */ - 217, /* (207) expr ::= expr IS expr */ - 217, /* (208) expr ::= expr IS NOT expr */ - 217, /* (209) expr ::= expr IS NOT DISTINCT FROM expr */ - 217, /* (210) expr ::= expr IS DISTINCT FROM expr */ - 217, /* (211) expr ::= NOT expr */ - 217, /* (212) expr ::= BITNOT expr */ - 217, /* (213) expr ::= PLUS|MINUS expr */ - 217, /* (214) expr ::= expr PTR expr */ - 275, /* (215) between_op ::= BETWEEN */ - 275, /* (216) between_op ::= NOT BETWEEN */ - 217, /* (217) expr ::= expr between_op expr AND expr */ - 276, /* (218) in_op ::= IN */ - 276, /* (219) in_op ::= NOT IN */ - 217, /* (220) expr ::= expr in_op LP exprlist RP */ - 217, /* (221) expr ::= LP select RP */ - 217, /* (222) expr ::= expr in_op LP select RP */ - 217, /* (223) expr ::= expr in_op nm dbnm paren_exprlist */ - 217, /* (224) expr ::= EXISTS LP select RP */ - 217, /* (225) expr ::= CASE case_operand case_exprlist case_else END */ - 279, /* (226) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - 279, /* (227) case_exprlist ::= WHEN expr THEN expr */ - 280, /* (228) case_else ::= ELSE expr */ - 280, /* (229) case_else ::= */ - 278, /* (230) case_operand ::= expr */ - 278, /* (231) case_operand ::= */ - 261, /* (232) exprlist ::= */ - 253, /* (233) nexprlist ::= nexprlist COMMA expr */ - 253, /* (234) nexprlist ::= expr */ - 277, /* (235) paren_exprlist ::= */ - 277, /* (236) paren_exprlist ::= LP exprlist RP */ - 190, /* (237) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - 281, /* (238) uniqueflag ::= UNIQUE */ - 281, /* (239) uniqueflag ::= */ - 221, /* (240) eidlist_opt ::= */ - 221, /* (241) eidlist_opt ::= LP eidlist RP */ - 232, /* (242) eidlist ::= eidlist COMMA nm collate sortorder */ - 232, /* (243) eidlist ::= nm collate sortorder */ - 282, /* (244) collate ::= */ - 282, /* (245) collate ::= COLLATE ID|STRING */ - 190, /* (246) cmd ::= DROP INDEX ifexists fullname */ - 190, /* (247) cmd ::= VACUUM vinto */ - 190, /* (248) cmd ::= VACUUM nm vinto */ - 283, /* (249) vinto ::= INTO expr */ - 283, /* (250) vinto ::= */ - 190, /* (251) cmd ::= PRAGMA nm dbnm */ - 190, /* (252) cmd ::= PRAGMA nm dbnm EQ nmnum */ - 190, /* (253) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - 190, /* (254) cmd ::= PRAGMA nm dbnm EQ minus_num */ - 190, /* (255) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - 211, /* (256) plus_num ::= PLUS INTEGER|FLOAT */ - 212, /* (257) minus_num ::= MINUS INTEGER|FLOAT */ - 190, /* (258) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - 285, /* (259) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - 287, /* (260) trigger_time ::= BEFORE|AFTER */ - 287, /* (261) trigger_time ::= INSTEAD OF */ - 287, /* (262) trigger_time ::= */ - 288, /* (263) trigger_event ::= DELETE|INSERT */ - 288, /* (264) trigger_event ::= UPDATE */ - 288, /* (265) trigger_event ::= UPDATE OF idlist */ - 290, /* (266) when_clause ::= */ - 290, /* (267) when_clause ::= WHEN expr */ - 286, /* (268) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - 286, /* (269) trigger_cmd_list ::= trigger_cmd SEMI */ - 292, /* (270) trnm ::= nm DOT nm */ - 293, /* (271) tridxby ::= INDEXED BY nm */ - 293, /* (272) tridxby ::= NOT INDEXED */ - 291, /* (273) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - 291, /* (274) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ 
- 291, /* (275) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - 291, /* (276) trigger_cmd ::= scanpt select scanpt */ - 217, /* (277) expr ::= RAISE LP IGNORE RP */ - 217, /* (278) expr ::= RAISE LP raisetype COMMA nm RP */ - 236, /* (279) raisetype ::= ROLLBACK */ - 236, /* (280) raisetype ::= ABORT */ - 236, /* (281) raisetype ::= FAIL */ - 190, /* (282) cmd ::= DROP TRIGGER ifexists fullname */ - 190, /* (283) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - 190, /* (284) cmd ::= DETACH database_kw_opt expr */ - 295, /* (285) key_opt ::= */ - 295, /* (286) key_opt ::= KEY expr */ - 190, /* (287) cmd ::= REINDEX */ - 190, /* (288) cmd ::= REINDEX nm dbnm */ - 190, /* (289) cmd ::= ANALYZE */ - 190, /* (290) cmd ::= ANALYZE nm dbnm */ - 190, /* (291) cmd ::= ALTER TABLE fullname RENAME TO nm */ - 190, /* (292) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - 190, /* (293) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ - 296, /* (294) add_column_fullname ::= fullname */ - 190, /* (295) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - 190, /* (296) cmd ::= create_vtab */ - 190, /* (297) cmd ::= create_vtab LP vtabarglist RP */ - 298, /* (298) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 300, /* (299) vtabarg ::= */ - 301, /* (300) vtabargtoken ::= ANY */ - 301, /* (301) vtabargtoken ::= lp anylist RP */ - 302, /* (302) lp ::= LP */ - 266, /* (303) with ::= WITH wqlist */ - 266, /* (304) with ::= WITH RECURSIVE wqlist */ - 305, /* (305) wqas ::= AS */ - 305, /* (306) wqas ::= AS MATERIALIZED */ - 305, /* (307) wqas ::= AS NOT MATERIALIZED */ - 304, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */ - 241, /* (309) wqlist ::= wqitem */ - 241, /* (310) wqlist ::= wqlist COMMA wqitem */ - 306, /* (311) windowdefn_list ::= windowdefn */ - 306, /* (312) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - 307, /* (313) windowdefn ::= nm AS LP window RP */ - 308, /* (314) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - 308, /* (315) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - 308, /* (316) window ::= ORDER BY sortlist frame_opt */ - 308, /* (317) window ::= nm ORDER BY sortlist frame_opt */ - 308, /* (318) window ::= frame_opt */ - 308, /* (319) window ::= nm frame_opt */ - 309, /* (320) frame_opt ::= */ - 309, /* (321) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - 309, /* (322) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - 313, /* (323) range_or_rows ::= RANGE|ROWS|GROUPS */ - 315, /* (324) frame_bound_s ::= frame_bound */ - 315, /* (325) frame_bound_s ::= UNBOUNDED PRECEDING */ - 316, /* (326) frame_bound_e ::= frame_bound */ - 316, /* (327) frame_bound_e ::= UNBOUNDED FOLLOWING */ - 314, /* (328) frame_bound ::= expr PRECEDING|FOLLOWING */ - 314, /* (329) frame_bound ::= CURRENT ROW */ - 317, /* (330) frame_exclude_opt ::= */ - 317, /* (331) frame_exclude_opt ::= EXCLUDE frame_exclude */ - 318, /* (332) frame_exclude ::= NO OTHERS */ - 318, /* (333) frame_exclude ::= CURRENT ROW */ - 318, /* (334) frame_exclude ::= GROUP|TIES */ - 251, /* (335) window_clause ::= WINDOW windowdefn_list */ - 273, /* (336) filter_over ::= filter_clause over_clause */ - 273, /* (337) filter_over ::= over_clause */ - 273, /* (338) filter_over ::= filter_clause */ - 312, /* (339) over_clause ::= OVER LP window RP */ - 312, /* (340) over_clause ::= OVER nm */ - 311, /* (341) filter_clause ::= FILTER LP WHERE 
expr RP */ - 185, /* (342) input ::= cmdlist */ - 186, /* (343) cmdlist ::= cmdlist ecmd */ - 186, /* (344) cmdlist ::= ecmd */ - 187, /* (345) ecmd ::= SEMI */ - 187, /* (346) ecmd ::= cmdx SEMI */ - 187, /* (347) ecmd ::= explain cmdx SEMI */ - 192, /* (348) trans_opt ::= */ - 192, /* (349) trans_opt ::= TRANSACTION */ - 192, /* (350) trans_opt ::= TRANSACTION nm */ - 194, /* (351) savepoint_opt ::= SAVEPOINT */ - 194, /* (352) savepoint_opt ::= */ - 190, /* (353) cmd ::= create_table create_table_args */ - 203, /* (354) table_option_set ::= table_option */ - 201, /* (355) columnlist ::= columnlist COMMA columnname carglist */ - 201, /* (356) columnlist ::= columnname carglist */ - 193, /* (357) nm ::= ID|INDEXED */ - 193, /* (358) nm ::= STRING */ - 193, /* (359) nm ::= JOIN_KW */ - 208, /* (360) typetoken ::= typename */ - 209, /* (361) typename ::= ID|STRING */ - 210, /* (362) signed ::= plus_num */ - 210, /* (363) signed ::= minus_num */ - 207, /* (364) carglist ::= carglist ccons */ - 207, /* (365) carglist ::= */ - 215, /* (366) ccons ::= NULL onconf */ - 215, /* (367) ccons ::= GENERATED ALWAYS AS generated */ - 215, /* (368) ccons ::= AS generated */ - 202, /* (369) conslist_opt ::= COMMA conslist */ - 228, /* (370) conslist ::= conslist tconscomma tcons */ - 228, /* (371) conslist ::= tcons */ - 229, /* (372) tconscomma ::= */ - 233, /* (373) defer_subclause_opt ::= defer_subclause */ - 235, /* (374) resolvetype ::= raisetype */ - 239, /* (375) selectnowith ::= oneselect */ - 240, /* (376) oneselect ::= values */ - 254, /* (377) sclp ::= selcollist COMMA */ - 255, /* (378) as ::= ID|STRING */ - 264, /* (379) indexed_opt ::= indexed_by */ - 272, /* (380) returning ::= */ - 217, /* (381) expr ::= term */ - 274, /* (382) likeop ::= LIKE_KW|MATCH */ - 261, /* (383) exprlist ::= nexprlist */ - 284, /* (384) nmnum ::= plus_num */ - 284, /* (385) nmnum ::= nm */ - 284, /* (386) nmnum ::= ON */ - 284, /* (387) nmnum ::= DELETE */ - 284, /* (388) nmnum ::= DEFAULT */ - 211, /* (389) plus_num ::= INTEGER|FLOAT */ - 289, /* (390) foreach_clause ::= */ - 289, /* (391) foreach_clause ::= FOR EACH ROW */ - 292, /* (392) trnm ::= nm */ - 293, /* (393) tridxby ::= */ - 294, /* (394) database_kw_opt ::= DATABASE */ - 294, /* (395) database_kw_opt ::= */ - 297, /* (396) kwcolumn_opt ::= */ - 297, /* (397) kwcolumn_opt ::= COLUMNKW */ - 299, /* (398) vtabarglist ::= vtabarg */ - 299, /* (399) vtabarglist ::= vtabarglist COMMA vtabarg */ - 300, /* (400) vtabarg ::= vtabarg vtabargtoken */ - 303, /* (401) anylist ::= */ - 303, /* (402) anylist ::= anylist LP anylist RP */ - 303, /* (403) anylist ::= anylist ANY */ - 266, /* (404) with ::= */ + 217, /* (178) expr ::= ID|INDEXED|JOIN_KW */ + 217, /* (179) expr ::= nm DOT nm */ + 217, /* (180) expr ::= nm DOT nm DOT nm */ + 216, /* (181) term ::= NULL|FLOAT|BLOB */ + 216, /* (182) term ::= STRING */ + 216, /* (183) term ::= INTEGER */ + 217, /* (184) expr ::= VARIABLE */ + 217, /* (185) expr ::= expr COLLATE ID|STRING */ + 217, /* (186) expr ::= CAST LP expr AS typetoken RP */ + 217, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + 217, /* (188) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + 217, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + 217, /* (190) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + 216, /* (191) term ::= CTIME_KW */ + 217, /* (192) expr ::= LP nexprlist COMMA expr RP */ + 217, /* (193) expr ::= expr AND expr */ + 217, /* (194) expr ::= expr OR expr */ + 217, /* (195) expr ::= 
expr LT|GT|GE|LE expr */ + 217, /* (196) expr ::= expr EQ|NE expr */ + 217, /* (197) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + 217, /* (198) expr ::= expr PLUS|MINUS expr */ + 217, /* (199) expr ::= expr STAR|SLASH|REM expr */ + 217, /* (200) expr ::= expr CONCAT expr */ + 274, /* (201) likeop ::= NOT LIKE_KW|MATCH */ + 217, /* (202) expr ::= expr likeop expr */ + 217, /* (203) expr ::= expr likeop expr ESCAPE expr */ + 217, /* (204) expr ::= expr ISNULL|NOTNULL */ + 217, /* (205) expr ::= expr NOT NULL */ + 217, /* (206) expr ::= expr IS expr */ + 217, /* (207) expr ::= expr IS NOT expr */ + 217, /* (208) expr ::= expr IS NOT DISTINCT FROM expr */ + 217, /* (209) expr ::= expr IS DISTINCT FROM expr */ + 217, /* (210) expr ::= NOT expr */ + 217, /* (211) expr ::= BITNOT expr */ + 217, /* (212) expr ::= PLUS|MINUS expr */ + 217, /* (213) expr ::= expr PTR expr */ + 275, /* (214) between_op ::= BETWEEN */ + 275, /* (215) between_op ::= NOT BETWEEN */ + 217, /* (216) expr ::= expr between_op expr AND expr */ + 276, /* (217) in_op ::= IN */ + 276, /* (218) in_op ::= NOT IN */ + 217, /* (219) expr ::= expr in_op LP exprlist RP */ + 217, /* (220) expr ::= LP select RP */ + 217, /* (221) expr ::= expr in_op LP select RP */ + 217, /* (222) expr ::= expr in_op nm dbnm paren_exprlist */ + 217, /* (223) expr ::= EXISTS LP select RP */ + 217, /* (224) expr ::= CASE case_operand case_exprlist case_else END */ + 279, /* (225) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + 279, /* (226) case_exprlist ::= WHEN expr THEN expr */ + 280, /* (227) case_else ::= ELSE expr */ + 280, /* (228) case_else ::= */ + 278, /* (229) case_operand ::= */ + 261, /* (230) exprlist ::= */ + 253, /* (231) nexprlist ::= nexprlist COMMA expr */ + 253, /* (232) nexprlist ::= expr */ + 277, /* (233) paren_exprlist ::= */ + 277, /* (234) paren_exprlist ::= LP exprlist RP */ + 190, /* (235) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + 281, /* (236) uniqueflag ::= UNIQUE */ + 281, /* (237) uniqueflag ::= */ + 221, /* (238) eidlist_opt ::= */ + 221, /* (239) eidlist_opt ::= LP eidlist RP */ + 232, /* (240) eidlist ::= eidlist COMMA nm collate sortorder */ + 232, /* (241) eidlist ::= nm collate sortorder */ + 282, /* (242) collate ::= */ + 282, /* (243) collate ::= COLLATE ID|STRING */ + 190, /* (244) cmd ::= DROP INDEX ifexists fullname */ + 190, /* (245) cmd ::= VACUUM vinto */ + 190, /* (246) cmd ::= VACUUM nm vinto */ + 283, /* (247) vinto ::= INTO expr */ + 283, /* (248) vinto ::= */ + 190, /* (249) cmd ::= PRAGMA nm dbnm */ + 190, /* (250) cmd ::= PRAGMA nm dbnm EQ nmnum */ + 190, /* (251) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + 190, /* (252) cmd ::= PRAGMA nm dbnm EQ minus_num */ + 190, /* (253) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + 211, /* (254) plus_num ::= PLUS INTEGER|FLOAT */ + 212, /* (255) minus_num ::= MINUS INTEGER|FLOAT */ + 190, /* (256) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + 285, /* (257) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + 287, /* (258) trigger_time ::= BEFORE|AFTER */ + 287, /* (259) trigger_time ::= INSTEAD OF */ + 287, /* (260) trigger_time ::= */ + 288, /* (261) trigger_event ::= DELETE|INSERT */ + 288, /* (262) trigger_event ::= UPDATE */ + 288, /* (263) trigger_event ::= UPDATE OF idlist */ + 290, /* (264) when_clause ::= */ + 290, /* (265) when_clause ::= WHEN expr */ + 286, /* (266) trigger_cmd_list ::= trigger_cmd_list trigger_cmd 
SEMI */ + 286, /* (267) trigger_cmd_list ::= trigger_cmd SEMI */ + 292, /* (268) trnm ::= nm DOT nm */ + 293, /* (269) tridxby ::= INDEXED BY nm */ + 293, /* (270) tridxby ::= NOT INDEXED */ + 291, /* (271) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + 291, /* (272) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + 291, /* (273) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + 291, /* (274) trigger_cmd ::= scanpt select scanpt */ + 217, /* (275) expr ::= RAISE LP IGNORE RP */ + 217, /* (276) expr ::= RAISE LP raisetype COMMA nm RP */ + 236, /* (277) raisetype ::= ROLLBACK */ + 236, /* (278) raisetype ::= ABORT */ + 236, /* (279) raisetype ::= FAIL */ + 190, /* (280) cmd ::= DROP TRIGGER ifexists fullname */ + 190, /* (281) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + 190, /* (282) cmd ::= DETACH database_kw_opt expr */ + 295, /* (283) key_opt ::= */ + 295, /* (284) key_opt ::= KEY expr */ + 190, /* (285) cmd ::= REINDEX */ + 190, /* (286) cmd ::= REINDEX nm dbnm */ + 190, /* (287) cmd ::= ANALYZE */ + 190, /* (288) cmd ::= ANALYZE nm dbnm */ + 190, /* (289) cmd ::= ALTER TABLE fullname RENAME TO nm */ + 190, /* (290) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + 190, /* (291) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + 296, /* (292) add_column_fullname ::= fullname */ + 190, /* (293) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + 190, /* (294) cmd ::= create_vtab */ + 190, /* (295) cmd ::= create_vtab LP vtabarglist RP */ + 298, /* (296) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 300, /* (297) vtabarg ::= */ + 301, /* (298) vtabargtoken ::= ANY */ + 301, /* (299) vtabargtoken ::= lp anylist RP */ + 302, /* (300) lp ::= LP */ + 266, /* (301) with ::= WITH wqlist */ + 266, /* (302) with ::= WITH RECURSIVE wqlist */ + 305, /* (303) wqas ::= AS */ + 305, /* (304) wqas ::= AS MATERIALIZED */ + 305, /* (305) wqas ::= AS NOT MATERIALIZED */ + 304, /* (306) wqitem ::= nm eidlist_opt wqas LP select RP */ + 241, /* (307) wqlist ::= wqitem */ + 241, /* (308) wqlist ::= wqlist COMMA wqitem */ + 306, /* (309) windowdefn_list ::= windowdefn */ + 306, /* (310) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + 307, /* (311) windowdefn ::= nm AS LP window RP */ + 308, /* (312) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + 308, /* (313) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + 308, /* (314) window ::= ORDER BY sortlist frame_opt */ + 308, /* (315) window ::= nm ORDER BY sortlist frame_opt */ + 308, /* (316) window ::= frame_opt */ + 308, /* (317) window ::= nm frame_opt */ + 309, /* (318) frame_opt ::= */ + 309, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + 309, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + 313, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */ + 315, /* (322) frame_bound_s ::= frame_bound */ + 315, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */ + 316, /* (324) frame_bound_e ::= frame_bound */ + 316, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */ + 314, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */ + 314, /* (327) frame_bound ::= CURRENT ROW */ + 317, /* (328) frame_exclude_opt ::= */ + 317, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */ + 318, /* (330) frame_exclude ::= NO OTHERS */ + 318, /* (331) frame_exclude ::= CURRENT ROW */ + 318, /* (332) frame_exclude 
::= GROUP|TIES */ + 251, /* (333) window_clause ::= WINDOW windowdefn_list */ + 273, /* (334) filter_over ::= filter_clause over_clause */ + 273, /* (335) filter_over ::= over_clause */ + 273, /* (336) filter_over ::= filter_clause */ + 312, /* (337) over_clause ::= OVER LP window RP */ + 312, /* (338) over_clause ::= OVER nm */ + 311, /* (339) filter_clause ::= FILTER LP WHERE expr RP */ + 185, /* (340) input ::= cmdlist */ + 186, /* (341) cmdlist ::= cmdlist ecmd */ + 186, /* (342) cmdlist ::= ecmd */ + 187, /* (343) ecmd ::= SEMI */ + 187, /* (344) ecmd ::= cmdx SEMI */ + 187, /* (345) ecmd ::= explain cmdx SEMI */ + 192, /* (346) trans_opt ::= */ + 192, /* (347) trans_opt ::= TRANSACTION */ + 192, /* (348) trans_opt ::= TRANSACTION nm */ + 194, /* (349) savepoint_opt ::= SAVEPOINT */ + 194, /* (350) savepoint_opt ::= */ + 190, /* (351) cmd ::= create_table create_table_args */ + 203, /* (352) table_option_set ::= table_option */ + 201, /* (353) columnlist ::= columnlist COMMA columnname carglist */ + 201, /* (354) columnlist ::= columnname carglist */ + 193, /* (355) nm ::= ID|INDEXED|JOIN_KW */ + 193, /* (356) nm ::= STRING */ + 208, /* (357) typetoken ::= typename */ + 209, /* (358) typename ::= ID|STRING */ + 210, /* (359) signed ::= plus_num */ + 210, /* (360) signed ::= minus_num */ + 207, /* (361) carglist ::= carglist ccons */ + 207, /* (362) carglist ::= */ + 215, /* (363) ccons ::= NULL onconf */ + 215, /* (364) ccons ::= GENERATED ALWAYS AS generated */ + 215, /* (365) ccons ::= AS generated */ + 202, /* (366) conslist_opt ::= COMMA conslist */ + 228, /* (367) conslist ::= conslist tconscomma tcons */ + 228, /* (368) conslist ::= tcons */ + 229, /* (369) tconscomma ::= */ + 233, /* (370) defer_subclause_opt ::= defer_subclause */ + 235, /* (371) resolvetype ::= raisetype */ + 239, /* (372) selectnowith ::= oneselect */ + 240, /* (373) oneselect ::= values */ + 254, /* (374) sclp ::= selcollist COMMA */ + 255, /* (375) as ::= ID|STRING */ + 264, /* (376) indexed_opt ::= indexed_by */ + 272, /* (377) returning ::= */ + 217, /* (378) expr ::= term */ + 274, /* (379) likeop ::= LIKE_KW|MATCH */ + 278, /* (380) case_operand ::= expr */ + 261, /* (381) exprlist ::= nexprlist */ + 284, /* (382) nmnum ::= plus_num */ + 284, /* (383) nmnum ::= nm */ + 284, /* (384) nmnum ::= ON */ + 284, /* (385) nmnum ::= DELETE */ + 284, /* (386) nmnum ::= DEFAULT */ + 211, /* (387) plus_num ::= INTEGER|FLOAT */ + 289, /* (388) foreach_clause ::= */ + 289, /* (389) foreach_clause ::= FOR EACH ROW */ + 292, /* (390) trnm ::= nm */ + 293, /* (391) tridxby ::= */ + 294, /* (392) database_kw_opt ::= DATABASE */ + 294, /* (393) database_kw_opt ::= */ + 297, /* (394) kwcolumn_opt ::= */ + 297, /* (395) kwcolumn_opt ::= COLUMNKW */ + 299, /* (396) vtabarglist ::= vtabarg */ + 299, /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ + 300, /* (398) vtabarg ::= vtabarg vtabargtoken */ + 303, /* (399) anylist ::= */ + 303, /* (400) anylist ::= anylist LP anylist RP */ + 303, /* (401) anylist ::= anylist ANY */ + 266, /* (402) with ::= */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -170134,233 +171040,231 @@ static const signed char yyRuleInfoNRhs[] = { -3, /* (175) idlist ::= idlist COMMA nm */ -1, /* (176) idlist ::= nm */ -3, /* (177) expr ::= LP expr RP */ - -1, /* (178) expr ::= ID|INDEXED */ - -1, /* (179) expr ::= JOIN_KW */ - -3, /* (180) expr ::= nm DOT nm */ - -5, /* (181) expr ::= nm DOT nm DOT nm */ - -1, /* (182) term ::= NULL|FLOAT|BLOB */ - -1, /* (183) term ::= 
STRING */ - -1, /* (184) term ::= INTEGER */ - -1, /* (185) expr ::= VARIABLE */ - -3, /* (186) expr ::= expr COLLATE ID|STRING */ - -6, /* (187) expr ::= CAST LP expr AS typetoken RP */ - -5, /* (188) expr ::= ID|INDEXED LP distinct exprlist RP */ - -4, /* (189) expr ::= ID|INDEXED LP STAR RP */ - -6, /* (190) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ - -5, /* (191) expr ::= ID|INDEXED LP STAR RP filter_over */ - -1, /* (192) term ::= CTIME_KW */ - -5, /* (193) expr ::= LP nexprlist COMMA expr RP */ - -3, /* (194) expr ::= expr AND expr */ - -3, /* (195) expr ::= expr OR expr */ - -3, /* (196) expr ::= expr LT|GT|GE|LE expr */ - -3, /* (197) expr ::= expr EQ|NE expr */ - -3, /* (198) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - -3, /* (199) expr ::= expr PLUS|MINUS expr */ - -3, /* (200) expr ::= expr STAR|SLASH|REM expr */ - -3, /* (201) expr ::= expr CONCAT expr */ - -2, /* (202) likeop ::= NOT LIKE_KW|MATCH */ - -3, /* (203) expr ::= expr likeop expr */ - -5, /* (204) expr ::= expr likeop expr ESCAPE expr */ - -2, /* (205) expr ::= expr ISNULL|NOTNULL */ - -3, /* (206) expr ::= expr NOT NULL */ - -3, /* (207) expr ::= expr IS expr */ - -4, /* (208) expr ::= expr IS NOT expr */ - -6, /* (209) expr ::= expr IS NOT DISTINCT FROM expr */ - -5, /* (210) expr ::= expr IS DISTINCT FROM expr */ - -2, /* (211) expr ::= NOT expr */ - -2, /* (212) expr ::= BITNOT expr */ - -2, /* (213) expr ::= PLUS|MINUS expr */ - -3, /* (214) expr ::= expr PTR expr */ - -1, /* (215) between_op ::= BETWEEN */ - -2, /* (216) between_op ::= NOT BETWEEN */ - -5, /* (217) expr ::= expr between_op expr AND expr */ - -1, /* (218) in_op ::= IN */ - -2, /* (219) in_op ::= NOT IN */ - -5, /* (220) expr ::= expr in_op LP exprlist RP */ - -3, /* (221) expr ::= LP select RP */ - -5, /* (222) expr ::= expr in_op LP select RP */ - -5, /* (223) expr ::= expr in_op nm dbnm paren_exprlist */ - -4, /* (224) expr ::= EXISTS LP select RP */ - -5, /* (225) expr ::= CASE case_operand case_exprlist case_else END */ - -5, /* (226) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - -4, /* (227) case_exprlist ::= WHEN expr THEN expr */ - -2, /* (228) case_else ::= ELSE expr */ - 0, /* (229) case_else ::= */ - -1, /* (230) case_operand ::= expr */ - 0, /* (231) case_operand ::= */ - 0, /* (232) exprlist ::= */ - -3, /* (233) nexprlist ::= nexprlist COMMA expr */ - -1, /* (234) nexprlist ::= expr */ - 0, /* (235) paren_exprlist ::= */ - -3, /* (236) paren_exprlist ::= LP exprlist RP */ - -12, /* (237) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - -1, /* (238) uniqueflag ::= UNIQUE */ - 0, /* (239) uniqueflag ::= */ - 0, /* (240) eidlist_opt ::= */ - -3, /* (241) eidlist_opt ::= LP eidlist RP */ - -5, /* (242) eidlist ::= eidlist COMMA nm collate sortorder */ - -3, /* (243) eidlist ::= nm collate sortorder */ - 0, /* (244) collate ::= */ - -2, /* (245) collate ::= COLLATE ID|STRING */ - -4, /* (246) cmd ::= DROP INDEX ifexists fullname */ - -2, /* (247) cmd ::= VACUUM vinto */ - -3, /* (248) cmd ::= VACUUM nm vinto */ - -2, /* (249) vinto ::= INTO expr */ - 0, /* (250) vinto ::= */ - -3, /* (251) cmd ::= PRAGMA nm dbnm */ - -5, /* (252) cmd ::= PRAGMA nm dbnm EQ nmnum */ - -6, /* (253) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - -5, /* (254) cmd ::= PRAGMA nm dbnm EQ minus_num */ - -6, /* (255) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - -2, /* (256) plus_num ::= PLUS INTEGER|FLOAT */ - -2, /* (257) minus_num ::= MINUS INTEGER|FLOAT */ - -5, /* (258) cmd ::= createkw 
trigger_decl BEGIN trigger_cmd_list END */ - -11, /* (259) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - -1, /* (260) trigger_time ::= BEFORE|AFTER */ - -2, /* (261) trigger_time ::= INSTEAD OF */ - 0, /* (262) trigger_time ::= */ - -1, /* (263) trigger_event ::= DELETE|INSERT */ - -1, /* (264) trigger_event ::= UPDATE */ - -3, /* (265) trigger_event ::= UPDATE OF idlist */ - 0, /* (266) when_clause ::= */ - -2, /* (267) when_clause ::= WHEN expr */ - -3, /* (268) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - -2, /* (269) trigger_cmd_list ::= trigger_cmd SEMI */ - -3, /* (270) trnm ::= nm DOT nm */ - -3, /* (271) tridxby ::= INDEXED BY nm */ - -2, /* (272) tridxby ::= NOT INDEXED */ - -9, /* (273) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - -8, /* (274) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - -6, /* (275) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - -3, /* (276) trigger_cmd ::= scanpt select scanpt */ - -4, /* (277) expr ::= RAISE LP IGNORE RP */ - -6, /* (278) expr ::= RAISE LP raisetype COMMA nm RP */ - -1, /* (279) raisetype ::= ROLLBACK */ - -1, /* (280) raisetype ::= ABORT */ - -1, /* (281) raisetype ::= FAIL */ - -4, /* (282) cmd ::= DROP TRIGGER ifexists fullname */ - -6, /* (283) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - -3, /* (284) cmd ::= DETACH database_kw_opt expr */ - 0, /* (285) key_opt ::= */ - -2, /* (286) key_opt ::= KEY expr */ - -1, /* (287) cmd ::= REINDEX */ - -3, /* (288) cmd ::= REINDEX nm dbnm */ - -1, /* (289) cmd ::= ANALYZE */ - -3, /* (290) cmd ::= ANALYZE nm dbnm */ - -6, /* (291) cmd ::= ALTER TABLE fullname RENAME TO nm */ - -7, /* (292) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - -6, /* (293) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ - -1, /* (294) add_column_fullname ::= fullname */ - -8, /* (295) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - -1, /* (296) cmd ::= create_vtab */ - -4, /* (297) cmd ::= create_vtab LP vtabarglist RP */ - -8, /* (298) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 0, /* (299) vtabarg ::= */ - -1, /* (300) vtabargtoken ::= ANY */ - -3, /* (301) vtabargtoken ::= lp anylist RP */ - -1, /* (302) lp ::= LP */ - -2, /* (303) with ::= WITH wqlist */ - -3, /* (304) with ::= WITH RECURSIVE wqlist */ - -1, /* (305) wqas ::= AS */ - -2, /* (306) wqas ::= AS MATERIALIZED */ - -3, /* (307) wqas ::= AS NOT MATERIALIZED */ - -6, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */ - -1, /* (309) wqlist ::= wqitem */ - -3, /* (310) wqlist ::= wqlist COMMA wqitem */ - -1, /* (311) windowdefn_list ::= windowdefn */ - -3, /* (312) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - -5, /* (313) windowdefn ::= nm AS LP window RP */ - -5, /* (314) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - -6, /* (315) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - -4, /* (316) window ::= ORDER BY sortlist frame_opt */ - -5, /* (317) window ::= nm ORDER BY sortlist frame_opt */ - -1, /* (318) window ::= frame_opt */ - -2, /* (319) window ::= nm frame_opt */ - 0, /* (320) frame_opt ::= */ - -3, /* (321) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - -6, /* (322) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - -1, /* (323) range_or_rows ::= RANGE|ROWS|GROUPS */ - -1, /* (324) 
frame_bound_s ::= frame_bound */ - -2, /* (325) frame_bound_s ::= UNBOUNDED PRECEDING */ - -1, /* (326) frame_bound_e ::= frame_bound */ - -2, /* (327) frame_bound_e ::= UNBOUNDED FOLLOWING */ - -2, /* (328) frame_bound ::= expr PRECEDING|FOLLOWING */ - -2, /* (329) frame_bound ::= CURRENT ROW */ - 0, /* (330) frame_exclude_opt ::= */ - -2, /* (331) frame_exclude_opt ::= EXCLUDE frame_exclude */ - -2, /* (332) frame_exclude ::= NO OTHERS */ - -2, /* (333) frame_exclude ::= CURRENT ROW */ - -1, /* (334) frame_exclude ::= GROUP|TIES */ - -2, /* (335) window_clause ::= WINDOW windowdefn_list */ - -2, /* (336) filter_over ::= filter_clause over_clause */ - -1, /* (337) filter_over ::= over_clause */ - -1, /* (338) filter_over ::= filter_clause */ - -4, /* (339) over_clause ::= OVER LP window RP */ - -2, /* (340) over_clause ::= OVER nm */ - -5, /* (341) filter_clause ::= FILTER LP WHERE expr RP */ - -1, /* (342) input ::= cmdlist */ - -2, /* (343) cmdlist ::= cmdlist ecmd */ - -1, /* (344) cmdlist ::= ecmd */ - -1, /* (345) ecmd ::= SEMI */ - -2, /* (346) ecmd ::= cmdx SEMI */ - -3, /* (347) ecmd ::= explain cmdx SEMI */ - 0, /* (348) trans_opt ::= */ - -1, /* (349) trans_opt ::= TRANSACTION */ - -2, /* (350) trans_opt ::= TRANSACTION nm */ - -1, /* (351) savepoint_opt ::= SAVEPOINT */ - 0, /* (352) savepoint_opt ::= */ - -2, /* (353) cmd ::= create_table create_table_args */ - -1, /* (354) table_option_set ::= table_option */ - -4, /* (355) columnlist ::= columnlist COMMA columnname carglist */ - -2, /* (356) columnlist ::= columnname carglist */ - -1, /* (357) nm ::= ID|INDEXED */ - -1, /* (358) nm ::= STRING */ - -1, /* (359) nm ::= JOIN_KW */ - -1, /* (360) typetoken ::= typename */ - -1, /* (361) typename ::= ID|STRING */ - -1, /* (362) signed ::= plus_num */ - -1, /* (363) signed ::= minus_num */ - -2, /* (364) carglist ::= carglist ccons */ - 0, /* (365) carglist ::= */ - -2, /* (366) ccons ::= NULL onconf */ - -4, /* (367) ccons ::= GENERATED ALWAYS AS generated */ - -2, /* (368) ccons ::= AS generated */ - -2, /* (369) conslist_opt ::= COMMA conslist */ - -3, /* (370) conslist ::= conslist tconscomma tcons */ - -1, /* (371) conslist ::= tcons */ - 0, /* (372) tconscomma ::= */ - -1, /* (373) defer_subclause_opt ::= defer_subclause */ - -1, /* (374) resolvetype ::= raisetype */ - -1, /* (375) selectnowith ::= oneselect */ - -1, /* (376) oneselect ::= values */ - -2, /* (377) sclp ::= selcollist COMMA */ - -1, /* (378) as ::= ID|STRING */ - -1, /* (379) indexed_opt ::= indexed_by */ - 0, /* (380) returning ::= */ - -1, /* (381) expr ::= term */ - -1, /* (382) likeop ::= LIKE_KW|MATCH */ - -1, /* (383) exprlist ::= nexprlist */ - -1, /* (384) nmnum ::= plus_num */ - -1, /* (385) nmnum ::= nm */ - -1, /* (386) nmnum ::= ON */ - -1, /* (387) nmnum ::= DELETE */ - -1, /* (388) nmnum ::= DEFAULT */ - -1, /* (389) plus_num ::= INTEGER|FLOAT */ - 0, /* (390) foreach_clause ::= */ - -3, /* (391) foreach_clause ::= FOR EACH ROW */ - -1, /* (392) trnm ::= nm */ - 0, /* (393) tridxby ::= */ - -1, /* (394) database_kw_opt ::= DATABASE */ - 0, /* (395) database_kw_opt ::= */ - 0, /* (396) kwcolumn_opt ::= */ - -1, /* (397) kwcolumn_opt ::= COLUMNKW */ - -1, /* (398) vtabarglist ::= vtabarg */ - -3, /* (399) vtabarglist ::= vtabarglist COMMA vtabarg */ - -2, /* (400) vtabarg ::= vtabarg vtabargtoken */ - 0, /* (401) anylist ::= */ - -4, /* (402) anylist ::= anylist LP anylist RP */ - -2, /* (403) anylist ::= anylist ANY */ - 0, /* (404) with ::= */ + -1, /* (178) expr ::= ID|INDEXED|JOIN_KW */ + -3, 
/* (179) expr ::= nm DOT nm */ + -5, /* (180) expr ::= nm DOT nm DOT nm */ + -1, /* (181) term ::= NULL|FLOAT|BLOB */ + -1, /* (182) term ::= STRING */ + -1, /* (183) term ::= INTEGER */ + -1, /* (184) expr ::= VARIABLE */ + -3, /* (185) expr ::= expr COLLATE ID|STRING */ + -6, /* (186) expr ::= CAST LP expr AS typetoken RP */ + -5, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + -4, /* (188) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + -6, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + -5, /* (190) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + -1, /* (191) term ::= CTIME_KW */ + -5, /* (192) expr ::= LP nexprlist COMMA expr RP */ + -3, /* (193) expr ::= expr AND expr */ + -3, /* (194) expr ::= expr OR expr */ + -3, /* (195) expr ::= expr LT|GT|GE|LE expr */ + -3, /* (196) expr ::= expr EQ|NE expr */ + -3, /* (197) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + -3, /* (198) expr ::= expr PLUS|MINUS expr */ + -3, /* (199) expr ::= expr STAR|SLASH|REM expr */ + -3, /* (200) expr ::= expr CONCAT expr */ + -2, /* (201) likeop ::= NOT LIKE_KW|MATCH */ + -3, /* (202) expr ::= expr likeop expr */ + -5, /* (203) expr ::= expr likeop expr ESCAPE expr */ + -2, /* (204) expr ::= expr ISNULL|NOTNULL */ + -3, /* (205) expr ::= expr NOT NULL */ + -3, /* (206) expr ::= expr IS expr */ + -4, /* (207) expr ::= expr IS NOT expr */ + -6, /* (208) expr ::= expr IS NOT DISTINCT FROM expr */ + -5, /* (209) expr ::= expr IS DISTINCT FROM expr */ + -2, /* (210) expr ::= NOT expr */ + -2, /* (211) expr ::= BITNOT expr */ + -2, /* (212) expr ::= PLUS|MINUS expr */ + -3, /* (213) expr ::= expr PTR expr */ + -1, /* (214) between_op ::= BETWEEN */ + -2, /* (215) between_op ::= NOT BETWEEN */ + -5, /* (216) expr ::= expr between_op expr AND expr */ + -1, /* (217) in_op ::= IN */ + -2, /* (218) in_op ::= NOT IN */ + -5, /* (219) expr ::= expr in_op LP exprlist RP */ + -3, /* (220) expr ::= LP select RP */ + -5, /* (221) expr ::= expr in_op LP select RP */ + -5, /* (222) expr ::= expr in_op nm dbnm paren_exprlist */ + -4, /* (223) expr ::= EXISTS LP select RP */ + -5, /* (224) expr ::= CASE case_operand case_exprlist case_else END */ + -5, /* (225) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + -4, /* (226) case_exprlist ::= WHEN expr THEN expr */ + -2, /* (227) case_else ::= ELSE expr */ + 0, /* (228) case_else ::= */ + 0, /* (229) case_operand ::= */ + 0, /* (230) exprlist ::= */ + -3, /* (231) nexprlist ::= nexprlist COMMA expr */ + -1, /* (232) nexprlist ::= expr */ + 0, /* (233) paren_exprlist ::= */ + -3, /* (234) paren_exprlist ::= LP exprlist RP */ + -12, /* (235) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + -1, /* (236) uniqueflag ::= UNIQUE */ + 0, /* (237) uniqueflag ::= */ + 0, /* (238) eidlist_opt ::= */ + -3, /* (239) eidlist_opt ::= LP eidlist RP */ + -5, /* (240) eidlist ::= eidlist COMMA nm collate sortorder */ + -3, /* (241) eidlist ::= nm collate sortorder */ + 0, /* (242) collate ::= */ + -2, /* (243) collate ::= COLLATE ID|STRING */ + -4, /* (244) cmd ::= DROP INDEX ifexists fullname */ + -2, /* (245) cmd ::= VACUUM vinto */ + -3, /* (246) cmd ::= VACUUM nm vinto */ + -2, /* (247) vinto ::= INTO expr */ + 0, /* (248) vinto ::= */ + -3, /* (249) cmd ::= PRAGMA nm dbnm */ + -5, /* (250) cmd ::= PRAGMA nm dbnm EQ nmnum */ + -6, /* (251) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + -5, /* (252) cmd ::= PRAGMA nm dbnm EQ minus_num */ + -6, /* (253) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + -2, 
/* (254) plus_num ::= PLUS INTEGER|FLOAT */ + -2, /* (255) minus_num ::= MINUS INTEGER|FLOAT */ + -5, /* (256) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + -11, /* (257) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + -1, /* (258) trigger_time ::= BEFORE|AFTER */ + -2, /* (259) trigger_time ::= INSTEAD OF */ + 0, /* (260) trigger_time ::= */ + -1, /* (261) trigger_event ::= DELETE|INSERT */ + -1, /* (262) trigger_event ::= UPDATE */ + -3, /* (263) trigger_event ::= UPDATE OF idlist */ + 0, /* (264) when_clause ::= */ + -2, /* (265) when_clause ::= WHEN expr */ + -3, /* (266) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + -2, /* (267) trigger_cmd_list ::= trigger_cmd SEMI */ + -3, /* (268) trnm ::= nm DOT nm */ + -3, /* (269) tridxby ::= INDEXED BY nm */ + -2, /* (270) tridxby ::= NOT INDEXED */ + -9, /* (271) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + -8, /* (272) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + -6, /* (273) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + -3, /* (274) trigger_cmd ::= scanpt select scanpt */ + -4, /* (275) expr ::= RAISE LP IGNORE RP */ + -6, /* (276) expr ::= RAISE LP raisetype COMMA nm RP */ + -1, /* (277) raisetype ::= ROLLBACK */ + -1, /* (278) raisetype ::= ABORT */ + -1, /* (279) raisetype ::= FAIL */ + -4, /* (280) cmd ::= DROP TRIGGER ifexists fullname */ + -6, /* (281) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + -3, /* (282) cmd ::= DETACH database_kw_opt expr */ + 0, /* (283) key_opt ::= */ + -2, /* (284) key_opt ::= KEY expr */ + -1, /* (285) cmd ::= REINDEX */ + -3, /* (286) cmd ::= REINDEX nm dbnm */ + -1, /* (287) cmd ::= ANALYZE */ + -3, /* (288) cmd ::= ANALYZE nm dbnm */ + -6, /* (289) cmd ::= ALTER TABLE fullname RENAME TO nm */ + -7, /* (290) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + -6, /* (291) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + -1, /* (292) add_column_fullname ::= fullname */ + -8, /* (293) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + -1, /* (294) cmd ::= create_vtab */ + -4, /* (295) cmd ::= create_vtab LP vtabarglist RP */ + -8, /* (296) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 0, /* (297) vtabarg ::= */ + -1, /* (298) vtabargtoken ::= ANY */ + -3, /* (299) vtabargtoken ::= lp anylist RP */ + -1, /* (300) lp ::= LP */ + -2, /* (301) with ::= WITH wqlist */ + -3, /* (302) with ::= WITH RECURSIVE wqlist */ + -1, /* (303) wqas ::= AS */ + -2, /* (304) wqas ::= AS MATERIALIZED */ + -3, /* (305) wqas ::= AS NOT MATERIALIZED */ + -6, /* (306) wqitem ::= nm eidlist_opt wqas LP select RP */ + -1, /* (307) wqlist ::= wqitem */ + -3, /* (308) wqlist ::= wqlist COMMA wqitem */ + -1, /* (309) windowdefn_list ::= windowdefn */ + -3, /* (310) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + -5, /* (311) windowdefn ::= nm AS LP window RP */ + -5, /* (312) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + -6, /* (313) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + -4, /* (314) window ::= ORDER BY sortlist frame_opt */ + -5, /* (315) window ::= nm ORDER BY sortlist frame_opt */ + -1, /* (316) window ::= frame_opt */ + -2, /* (317) window ::= nm frame_opt */ + 0, /* (318) frame_opt ::= */ + -3, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + -6, /* (320) frame_opt ::= range_or_rows 
BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + -1, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */ + -1, /* (322) frame_bound_s ::= frame_bound */ + -2, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */ + -1, /* (324) frame_bound_e ::= frame_bound */ + -2, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */ + -2, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */ + -2, /* (327) frame_bound ::= CURRENT ROW */ + 0, /* (328) frame_exclude_opt ::= */ + -2, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */ + -2, /* (330) frame_exclude ::= NO OTHERS */ + -2, /* (331) frame_exclude ::= CURRENT ROW */ + -1, /* (332) frame_exclude ::= GROUP|TIES */ + -2, /* (333) window_clause ::= WINDOW windowdefn_list */ + -2, /* (334) filter_over ::= filter_clause over_clause */ + -1, /* (335) filter_over ::= over_clause */ + -1, /* (336) filter_over ::= filter_clause */ + -4, /* (337) over_clause ::= OVER LP window RP */ + -2, /* (338) over_clause ::= OVER nm */ + -5, /* (339) filter_clause ::= FILTER LP WHERE expr RP */ + -1, /* (340) input ::= cmdlist */ + -2, /* (341) cmdlist ::= cmdlist ecmd */ + -1, /* (342) cmdlist ::= ecmd */ + -1, /* (343) ecmd ::= SEMI */ + -2, /* (344) ecmd ::= cmdx SEMI */ + -3, /* (345) ecmd ::= explain cmdx SEMI */ + 0, /* (346) trans_opt ::= */ + -1, /* (347) trans_opt ::= TRANSACTION */ + -2, /* (348) trans_opt ::= TRANSACTION nm */ + -1, /* (349) savepoint_opt ::= SAVEPOINT */ + 0, /* (350) savepoint_opt ::= */ + -2, /* (351) cmd ::= create_table create_table_args */ + -1, /* (352) table_option_set ::= table_option */ + -4, /* (353) columnlist ::= columnlist COMMA columnname carglist */ + -2, /* (354) columnlist ::= columnname carglist */ + -1, /* (355) nm ::= ID|INDEXED|JOIN_KW */ + -1, /* (356) nm ::= STRING */ + -1, /* (357) typetoken ::= typename */ + -1, /* (358) typename ::= ID|STRING */ + -1, /* (359) signed ::= plus_num */ + -1, /* (360) signed ::= minus_num */ + -2, /* (361) carglist ::= carglist ccons */ + 0, /* (362) carglist ::= */ + -2, /* (363) ccons ::= NULL onconf */ + -4, /* (364) ccons ::= GENERATED ALWAYS AS generated */ + -2, /* (365) ccons ::= AS generated */ + -2, /* (366) conslist_opt ::= COMMA conslist */ + -3, /* (367) conslist ::= conslist tconscomma tcons */ + -1, /* (368) conslist ::= tcons */ + 0, /* (369) tconscomma ::= */ + -1, /* (370) defer_subclause_opt ::= defer_subclause */ + -1, /* (371) resolvetype ::= raisetype */ + -1, /* (372) selectnowith ::= oneselect */ + -1, /* (373) oneselect ::= values */ + -2, /* (374) sclp ::= selcollist COMMA */ + -1, /* (375) as ::= ID|STRING */ + -1, /* (376) indexed_opt ::= indexed_by */ + 0, /* (377) returning ::= */ + -1, /* (378) expr ::= term */ + -1, /* (379) likeop ::= LIKE_KW|MATCH */ + -1, /* (380) case_operand ::= expr */ + -1, /* (381) exprlist ::= nexprlist */ + -1, /* (382) nmnum ::= plus_num */ + -1, /* (383) nmnum ::= nm */ + -1, /* (384) nmnum ::= ON */ + -1, /* (385) nmnum ::= DELETE */ + -1, /* (386) nmnum ::= DEFAULT */ + -1, /* (387) plus_num ::= INTEGER|FLOAT */ + 0, /* (388) foreach_clause ::= */ + -3, /* (389) foreach_clause ::= FOR EACH ROW */ + -1, /* (390) trnm ::= nm */ + 0, /* (391) tridxby ::= */ + -1, /* (392) database_kw_opt ::= DATABASE */ + 0, /* (393) database_kw_opt ::= */ + 0, /* (394) kwcolumn_opt ::= */ + -1, /* (395) kwcolumn_opt ::= COLUMNKW */ + -1, /* (396) vtabarglist ::= vtabarg */ + -3, /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ + -2, /* (398) vtabarg ::= vtabarg vtabargtoken */ + 0, /* (399) anylist ::= */ + -4, /* (400) anylist ::= 
anylist LP anylist RP */ + -2, /* (401) anylist ::= anylist ANY */ + 0, /* (402) with ::= */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -170420,7 +171324,7 @@ static YYACTIONTYPE yy_reduce( case 5: /* transtype ::= DEFERRED */ case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6); case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7); - case 323: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==323); + case 321: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==321); {yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/} break; case 8: /* cmd ::= COMMIT|END trans_opt */ @@ -170457,7 +171361,7 @@ static YYACTIONTYPE yy_reduce( case 72: /* defer_subclause_opt ::= */ yytestcase(yyruleno==72); case 81: /* ifexists ::= */ yytestcase(yyruleno==81); case 98: /* distinct ::= */ yytestcase(yyruleno==98); - case 244: /* collate ::= */ yytestcase(yyruleno==244); + case 242: /* collate ::= */ yytestcase(yyruleno==242); {yymsp[1].minor.yy394 = 0;} break; case 16: /* ifnotexists ::= IF NOT EXISTS */ @@ -170641,9 +171545,9 @@ static YYACTIONTYPE yy_reduce( break; case 63: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ case 80: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==80); - case 216: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==216); - case 219: /* in_op ::= NOT IN */ yytestcase(yyruleno==219); - case 245: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==245); + case 215: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==215); + case 218: /* in_op ::= NOT IN */ yytestcase(yyruleno==218); + case 243: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==243); {yymsp[-1].minor.yy394 = 1;} break; case 64: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ @@ -170793,9 +171697,9 @@ static YYACTIONTYPE yy_reduce( case 99: /* sclp ::= */ case 132: /* orderby_opt ::= */ yytestcase(yyruleno==132); case 142: /* groupby_opt ::= */ yytestcase(yyruleno==142); - case 232: /* exprlist ::= */ yytestcase(yyruleno==232); - case 235: /* paren_exprlist ::= */ yytestcase(yyruleno==235); - case 240: /* eidlist_opt ::= */ yytestcase(yyruleno==240); + case 230: /* exprlist ::= */ yytestcase(yyruleno==230); + case 233: /* paren_exprlist ::= */ yytestcase(yyruleno==233); + case 238: /* eidlist_opt ::= */ yytestcase(yyruleno==238); {yymsp[1].minor.yy322 = 0;} break; case 100: /* selcollist ::= sclp scanpt expr scanpt as */ @@ -170821,8 +171725,8 @@ static YYACTIONTYPE yy_reduce( break; case 103: /* as ::= AS nm */ case 115: /* dbnm ::= DOT nm */ yytestcase(yyruleno==115); - case 256: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==256); - case 257: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==257); + case 254: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==254); + case 255: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==255); {yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;} break; case 105: /* from ::= */ @@ -170866,7 +171770,7 @@ static YYACTIONTYPE yy_reduce( { if( yymsp[-5].minor.yy131==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy561.pOn==0 && yymsp[0].minor.yy561.pUsing==0 ){ yymsp[-5].minor.yy131 = yymsp[-3].minor.yy131; - }else if( yymsp[-3].minor.yy131->nSrc==1 ){ + }else if( ALWAYS(yymsp[-3].minor.yy131!=0) && yymsp[-3].minor.yy131->nSrc==1 ){ yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561); if( yymsp[-5].minor.yy131 ){ SrcItem *pNew = 
&yymsp[-5].minor.yy131->a[yymsp[-5].minor.yy131->nSrc-1]; @@ -170994,16 +171898,16 @@ static YYACTIONTYPE yy_reduce( case 146: /* limit_opt ::= */ yytestcase(yyruleno==146); case 151: /* where_opt ::= */ yytestcase(yyruleno==151); case 153: /* where_opt_ret ::= */ yytestcase(yyruleno==153); - case 229: /* case_else ::= */ yytestcase(yyruleno==229); - case 231: /* case_operand ::= */ yytestcase(yyruleno==231); - case 250: /* vinto ::= */ yytestcase(yyruleno==250); + case 228: /* case_else ::= */ yytestcase(yyruleno==228); + case 229: /* case_operand ::= */ yytestcase(yyruleno==229); + case 248: /* vinto ::= */ yytestcase(yyruleno==248); {yymsp[1].minor.yy528 = 0;} break; case 145: /* having_opt ::= HAVING expr */ case 152: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==152); case 154: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==154); - case 228: /* case_else ::= ELSE expr */ yytestcase(yyruleno==228); - case 249: /* vinto ::= INTO expr */ yytestcase(yyruleno==249); + case 227: /* case_else ::= ELSE expr */ yytestcase(yyruleno==227); + case 247: /* vinto ::= INTO expr */ yytestcase(yyruleno==247); {yymsp[-1].minor.yy528 = yymsp[0].minor.yy528;} break; case 147: /* limit_opt ::= LIMIT expr */ @@ -171129,11 +172033,10 @@ static YYACTIONTYPE yy_reduce( case 177: /* expr ::= LP expr RP */ {yymsp[-2].minor.yy528 = yymsp[-1].minor.yy528;} break; - case 178: /* expr ::= ID|INDEXED */ - case 179: /* expr ::= JOIN_KW */ yytestcase(yyruleno==179); + case 178: /* expr ::= ID|INDEXED|JOIN_KW */ {yymsp[0].minor.yy528=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 180: /* expr ::= nm DOT nm */ + case 179: /* expr ::= nm DOT nm */ { Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0); Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); @@ -171141,7 +172044,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy528 = yylhsminor.yy528; break; - case 181: /* expr ::= nm DOT nm DOT nm */ + case 180: /* expr ::= nm DOT nm DOT nm */ { Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-4].minor.yy0); Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0); @@ -171154,18 +172057,18 @@ static YYACTIONTYPE yy_reduce( } yymsp[-4].minor.yy528 = yylhsminor.yy528; break; - case 182: /* term ::= NULL|FLOAT|BLOB */ - case 183: /* term ::= STRING */ yytestcase(yyruleno==183); + case 181: /* term ::= NULL|FLOAT|BLOB */ + case 182: /* term ::= STRING */ yytestcase(yyruleno==182); {yymsp[0].minor.yy528=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 184: /* term ::= INTEGER */ + case 183: /* term ::= INTEGER */ { yylhsminor.yy528 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); if( yylhsminor.yy528 ) yylhsminor.yy528->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail); } yymsp[0].minor.yy528 = yylhsminor.yy528; break; - case 185: /* expr ::= VARIABLE */ + case 184: /* expr ::= VARIABLE */ { if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){ u32 n = yymsp[0].minor.yy0.n; @@ -171187,50 +172090,50 @@ static YYACTIONTYPE yy_reduce( } } break; - case 186: /* expr ::= expr COLLATE ID|STRING */ + case 185: /* expr ::= expr COLLATE ID|STRING */ { yymsp[-2].minor.yy528 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy528, &yymsp[0].minor.yy0, 1); } break; - case 187: /* expr ::= CAST LP expr AS typetoken RP */ + case 186: /* expr ::= CAST LP expr AS typetoken RP */ { yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); 
sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy528, yymsp[-3].minor.yy528, 0); } break; - case 188: /* expr ::= ID|INDEXED LP distinct exprlist RP */ + case 187: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy394); } yymsp[-4].minor.yy528 = yylhsminor.yy528; break; - case 189: /* expr ::= ID|INDEXED LP STAR RP */ + case 188: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); } yymsp[-3].minor.yy528 = yylhsminor.yy528; break; - case 190: /* expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ + case 189: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy322, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy394); sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); } yymsp[-5].minor.yy528 = yylhsminor.yy528; break; - case 191: /* expr ::= ID|INDEXED LP STAR RP filter_over */ + case 190: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); } yymsp[-4].minor.yy528 = yylhsminor.yy528; break; - case 192: /* term ::= CTIME_KW */ + case 191: /* term ::= CTIME_KW */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); } yymsp[0].minor.yy528 = yylhsminor.yy528; break; - case 193: /* expr ::= LP nexprlist COMMA expr RP */ + case 192: /* expr ::= LP nexprlist COMMA expr RP */ { ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528); yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); @@ -171244,22 +172147,22 @@ static YYACTIONTYPE yy_reduce( } } break; - case 194: /* expr ::= expr AND expr */ + case 193: /* expr ::= expr AND expr */ {yymsp[-2].minor.yy528=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} break; - case 195: /* expr ::= expr OR expr */ - case 196: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==196); - case 197: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==197); - case 198: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==198); - case 199: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==199); - case 200: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==200); - case 201: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==201); + case 194: /* expr ::= expr OR expr */ + case 195: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==195); + case 196: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==196); + case 197: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==197); + case 198: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==198); + case 199: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==199); + case 200: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==200); {yymsp[-2].minor.yy528=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} break; - case 202: /* likeop ::= NOT LIKE_KW|MATCH */ + case 201: /* likeop ::= NOT LIKE_KW|MATCH */ {yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/} break; - case 203: /* expr ::= expr likeop expr */ + case 202: /* expr ::= expr likeop expr */ { ExprList *pList; int bNot = 
yymsp[-1].minor.yy0.n & 0x80000000; @@ -171271,7 +172174,7 @@ static YYACTIONTYPE yy_reduce( if( yymsp[-2].minor.yy528 ) yymsp[-2].minor.yy528->flags |= EP_InfixFunc; } break; - case 204: /* expr ::= expr likeop expr ESCAPE expr */ + case 203: /* expr ::= expr likeop expr ESCAPE expr */ { ExprList *pList; int bNot = yymsp[-3].minor.yy0.n & 0x80000000; @@ -171284,47 +172187,47 @@ static YYACTIONTYPE yy_reduce( if( yymsp[-4].minor.yy528 ) yymsp[-4].minor.yy528->flags |= EP_InfixFunc; } break; - case 205: /* expr ::= expr ISNULL|NOTNULL */ + case 204: /* expr ::= expr ISNULL|NOTNULL */ {yymsp[-1].minor.yy528 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy528,0);} break; - case 206: /* expr ::= expr NOT NULL */ + case 205: /* expr ::= expr NOT NULL */ {yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy528,0);} break; - case 207: /* expr ::= expr IS expr */ + case 206: /* expr ::= expr IS expr */ { yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy528,yymsp[0].minor.yy528); binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-2].minor.yy528, TK_ISNULL); } break; - case 208: /* expr ::= expr IS NOT expr */ + case 207: /* expr ::= expr IS NOT expr */ { yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy528,yymsp[0].minor.yy528); binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-3].minor.yy528, TK_NOTNULL); } break; - case 209: /* expr ::= expr IS NOT DISTINCT FROM expr */ + case 208: /* expr ::= expr IS NOT DISTINCT FROM expr */ { yymsp[-5].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy528,yymsp[0].minor.yy528); binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-5].minor.yy528, TK_ISNULL); } break; - case 210: /* expr ::= expr IS DISTINCT FROM expr */ + case 209: /* expr ::= expr IS DISTINCT FROM expr */ { yymsp[-4].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy528,yymsp[0].minor.yy528); binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-4].minor.yy528, TK_NOTNULL); } break; - case 211: /* expr ::= NOT expr */ - case 212: /* expr ::= BITNOT expr */ yytestcase(yyruleno==212); + case 210: /* expr ::= NOT expr */ + case 211: /* expr ::= BITNOT expr */ yytestcase(yyruleno==211); {yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy528, 0);/*A-overwrites-B*/} break; - case 213: /* expr ::= PLUS|MINUS expr */ + case 212: /* expr ::= PLUS|MINUS expr */ { yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? 
TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy528, 0); /*A-overwrites-B*/ } break; - case 214: /* expr ::= expr PTR expr */ + case 213: /* expr ::= expr PTR expr */ { ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy528); pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy528); @@ -171332,11 +172235,11 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy528 = yylhsminor.yy528; break; - case 215: /* between_op ::= BETWEEN */ - case 218: /* in_op ::= IN */ yytestcase(yyruleno==218); + case 214: /* between_op ::= BETWEEN */ + case 217: /* in_op ::= IN */ yytestcase(yyruleno==217); {yymsp[0].minor.yy394 = 0;} break; - case 217: /* expr ::= expr between_op expr AND expr */ + case 216: /* expr ::= expr between_op expr AND expr */ { ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy528); @@ -171349,7 +172252,7 @@ static YYACTIONTYPE yy_reduce( if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); } break; - case 220: /* expr ::= expr in_op LP exprlist RP */ + case 219: /* expr ::= expr in_op LP exprlist RP */ { if( yymsp[-1].minor.yy322==0 ){ /* Expressions of the form @@ -171395,20 +172298,20 @@ static YYACTIONTYPE yy_reduce( } } break; - case 221: /* expr ::= LP select RP */ + case 220: /* expr ::= LP select RP */ { yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy528, yymsp[-1].minor.yy47); } break; - case 222: /* expr ::= expr in_op LP select RP */ + case 221: /* expr ::= expr in_op LP select RP */ { yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, yymsp[-1].minor.yy47); if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); } break; - case 223: /* expr ::= expr in_op nm dbnm paren_exprlist */ + case 222: /* expr ::= expr in_op nm dbnm paren_exprlist */ { SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0); @@ -171418,14 +172321,14 @@ static YYACTIONTYPE yy_reduce( if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); } break; - case 224: /* expr ::= EXISTS LP select RP */ + case 223: /* expr ::= EXISTS LP select RP */ { Expr *p; p = yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy47); } break; - case 225: /* expr ::= CASE case_operand case_exprlist case_else END */ + case 224: /* expr ::= CASE case_operand case_exprlist case_else END */ { yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy528, 0); if( yymsp[-4].minor.yy528 ){ @@ -171437,32 +172340,29 @@ static YYACTIONTYPE yy_reduce( } } break; - case 226: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ + case 225: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ { yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[-2].minor.yy528); yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[0].minor.yy528); } break; - case 227: /* case_exprlist ::= WHEN expr THEN expr */ + case 226: /* case_exprlist ::= WHEN expr THEN expr */ { yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); yymsp[-3].minor.yy322 = 
sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy322, yymsp[0].minor.yy528); } break; - case 230: /* case_operand ::= expr */ -{yymsp[0].minor.yy528 = yymsp[0].minor.yy528; /*A-overwrites-X*/} - break; - case 233: /* nexprlist ::= nexprlist COMMA expr */ + case 231: /* nexprlist ::= nexprlist COMMA expr */ {yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[0].minor.yy528);} break; - case 234: /* nexprlist ::= expr */ + case 232: /* nexprlist ::= expr */ {yymsp[0].minor.yy322 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy528); /*A-overwrites-Y*/} break; - case 236: /* paren_exprlist ::= LP exprlist RP */ - case 241: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==241); + case 234: /* paren_exprlist ::= LP exprlist RP */ + case 239: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==239); {yymsp[-2].minor.yy322 = yymsp[-1].minor.yy322;} break; - case 237: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + case 235: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ { sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy322, yymsp[-10].minor.yy394, @@ -171472,48 +172372,48 @@ static YYACTIONTYPE yy_reduce( } } break; - case 238: /* uniqueflag ::= UNIQUE */ - case 280: /* raisetype ::= ABORT */ yytestcase(yyruleno==280); + case 236: /* uniqueflag ::= UNIQUE */ + case 278: /* raisetype ::= ABORT */ yytestcase(yyruleno==278); {yymsp[0].minor.yy394 = OE_Abort;} break; - case 239: /* uniqueflag ::= */ + case 237: /* uniqueflag ::= */ {yymsp[1].minor.yy394 = OE_None;} break; - case 242: /* eidlist ::= eidlist COMMA nm collate sortorder */ + case 240: /* eidlist ::= eidlist COMMA nm collate sortorder */ { yymsp[-4].minor.yy322 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy322, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394); } break; - case 243: /* eidlist ::= nm collate sortorder */ + case 241: /* eidlist ::= nm collate sortorder */ { yymsp[-2].minor.yy322 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394); /*A-overwrites-Y*/ } break; - case 246: /* cmd ::= DROP INDEX ifexists fullname */ + case 244: /* cmd ::= DROP INDEX ifexists fullname */ {sqlite3DropIndex(pParse, yymsp[0].minor.yy131, yymsp[-1].minor.yy394);} break; - case 247: /* cmd ::= VACUUM vinto */ + case 245: /* cmd ::= VACUUM vinto */ {sqlite3Vacuum(pParse,0,yymsp[0].minor.yy528);} break; - case 248: /* cmd ::= VACUUM nm vinto */ + case 246: /* cmd ::= VACUUM nm vinto */ {sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy528);} break; - case 251: /* cmd ::= PRAGMA nm dbnm */ + case 249: /* cmd ::= PRAGMA nm dbnm */ {sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);} break; - case 252: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ + case 250: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);} break; - case 253: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ + case 251: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);} break; - case 254: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ + case 252: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);} break; - case 255: /* cmd ::= PRAGMA nm dbnm LP minus_num 
RP */ + case 253: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */ {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);} break; - case 258: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + case 256: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ { Token all; all.z = yymsp[-3].minor.yy0.z; @@ -171521,50 +172421,50 @@ static YYACTIONTYPE yy_reduce( sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy33, &all); } break; - case 259: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + case 257: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ { sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy394, yymsp[-4].minor.yy180.a, yymsp[-4].minor.yy180.b, yymsp[-2].minor.yy131, yymsp[0].minor.yy528, yymsp[-10].minor.yy394, yymsp[-8].minor.yy394); yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/ } break; - case 260: /* trigger_time ::= BEFORE|AFTER */ + case 258: /* trigger_time ::= BEFORE|AFTER */ { yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/ } break; - case 261: /* trigger_time ::= INSTEAD OF */ + case 259: /* trigger_time ::= INSTEAD OF */ { yymsp[-1].minor.yy394 = TK_INSTEAD;} break; - case 262: /* trigger_time ::= */ + case 260: /* trigger_time ::= */ { yymsp[1].minor.yy394 = TK_BEFORE; } break; - case 263: /* trigger_event ::= DELETE|INSERT */ - case 264: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==264); + case 261: /* trigger_event ::= DELETE|INSERT */ + case 262: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==262); {yymsp[0].minor.yy180.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy180.b = 0;} break; - case 265: /* trigger_event ::= UPDATE OF idlist */ + case 263: /* trigger_event ::= UPDATE OF idlist */ {yymsp[-2].minor.yy180.a = TK_UPDATE; yymsp[-2].minor.yy180.b = yymsp[0].minor.yy254;} break; - case 266: /* when_clause ::= */ - case 285: /* key_opt ::= */ yytestcase(yyruleno==285); + case 264: /* when_clause ::= */ + case 283: /* key_opt ::= */ yytestcase(yyruleno==283); { yymsp[1].minor.yy528 = 0; } break; - case 267: /* when_clause ::= WHEN expr */ - case 286: /* key_opt ::= KEY expr */ yytestcase(yyruleno==286); + case 265: /* when_clause ::= WHEN expr */ + case 284: /* key_opt ::= KEY expr */ yytestcase(yyruleno==284); { yymsp[-1].minor.yy528 = yymsp[0].minor.yy528; } break; - case 268: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + case 266: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ { assert( yymsp[-2].minor.yy33!=0 ); yymsp[-2].minor.yy33->pLast->pNext = yymsp[-1].minor.yy33; yymsp[-2].minor.yy33->pLast = yymsp[-1].minor.yy33; } break; - case 269: /* trigger_cmd_list ::= trigger_cmd SEMI */ + case 267: /* trigger_cmd_list ::= trigger_cmd SEMI */ { assert( yymsp[-1].minor.yy33!=0 ); yymsp[-1].minor.yy33->pLast = yymsp[-1].minor.yy33; } break; - case 270: /* trnm ::= nm DOT nm */ + case 268: /* trnm ::= nm DOT nm */ { yymsp[-2].minor.yy0 = yymsp[0].minor.yy0; sqlite3ErrorMsg(pParse, @@ -171572,39 +172472,39 @@ static YYACTIONTYPE yy_reduce( "statements within triggers"); } break; - case 271: /* tridxby ::= INDEXED BY nm */ + case 269: /* tridxby ::= INDEXED BY nm */ { sqlite3ErrorMsg(pParse, "the INDEXED BY clause is not allowed on UPDATE or DELETE statements " "within triggers"); } break; - case 272: /* tridxby ::= 
NOT INDEXED */ + case 270: /* tridxby ::= NOT INDEXED */ { sqlite3ErrorMsg(pParse, "the NOT INDEXED clause is not allowed on UPDATE or DELETE statements " "within triggers"); } break; - case 273: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + case 271: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ {yylhsminor.yy33 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy131, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528, yymsp[-7].minor.yy394, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy522);} yymsp[-8].minor.yy33 = yylhsminor.yy33; break; - case 274: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + case 272: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ { yylhsminor.yy33 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy254,yymsp[-2].minor.yy47,yymsp[-6].minor.yy394,yymsp[-1].minor.yy444,yymsp[-7].minor.yy522,yymsp[0].minor.yy522);/*yylhsminor.yy33-overwrites-yymsp[-6].minor.yy394*/ } yymsp[-7].minor.yy33 = yylhsminor.yy33; break; - case 275: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + case 273: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ {yylhsminor.yy33 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy528, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy522);} yymsp[-5].minor.yy33 = yylhsminor.yy33; break; - case 276: /* trigger_cmd ::= scanpt select scanpt */ + case 274: /* trigger_cmd ::= scanpt select scanpt */ {yylhsminor.yy33 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy47, yymsp[-2].minor.yy522, yymsp[0].minor.yy522); /*yylhsminor.yy33-overwrites-yymsp[-1].minor.yy47*/} yymsp[-2].minor.yy33 = yylhsminor.yy33; break; - case 277: /* expr ::= RAISE LP IGNORE RP */ + case 275: /* expr ::= RAISE LP IGNORE RP */ { yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); if( yymsp[-3].minor.yy528 ){ @@ -171612,7 +172512,7 @@ static YYACTIONTYPE yy_reduce( } } break; - case 278: /* expr ::= RAISE LP raisetype COMMA nm RP */ + case 276: /* expr ::= RAISE LP raisetype COMMA nm RP */ { yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1); if( yymsp[-5].minor.yy528 ) { @@ -171620,118 +172520,118 @@ static YYACTIONTYPE yy_reduce( } } break; - case 279: /* raisetype ::= ROLLBACK */ + case 277: /* raisetype ::= ROLLBACK */ {yymsp[0].minor.yy394 = OE_Rollback;} break; - case 281: /* raisetype ::= FAIL */ + case 279: /* raisetype ::= FAIL */ {yymsp[0].minor.yy394 = OE_Fail;} break; - case 282: /* cmd ::= DROP TRIGGER ifexists fullname */ + case 280: /* cmd ::= DROP TRIGGER ifexists fullname */ { sqlite3DropTrigger(pParse,yymsp[0].minor.yy131,yymsp[-1].minor.yy394); } break; - case 283: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + case 281: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ { sqlite3Attach(pParse, yymsp[-3].minor.yy528, yymsp[-1].minor.yy528, yymsp[0].minor.yy528); } break; - case 284: /* cmd ::= DETACH database_kw_opt expr */ + case 282: /* cmd ::= DETACH database_kw_opt expr */ { sqlite3Detach(pParse, yymsp[0].minor.yy528); } break; - case 287: /* cmd ::= REINDEX */ + case 285: /* cmd ::= REINDEX */ {sqlite3Reindex(pParse, 0, 0);} break; - case 288: /* cmd ::= REINDEX nm dbnm */ + case 286: /* cmd ::= REINDEX nm dbnm */ {sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; - case 289: /* cmd ::= ANALYZE */ + case 287: /* cmd ::= ANALYZE */ 
{sqlite3Analyze(pParse, 0, 0);} break; - case 290: /* cmd ::= ANALYZE nm dbnm */ + case 288: /* cmd ::= ANALYZE nm dbnm */ {sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; - case 291: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ + case 289: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ { sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy131,&yymsp[0].minor.yy0); } break; - case 292: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + case 290: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ { yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n; sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0); } break; - case 293: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + case 291: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ { sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy131, &yymsp[0].minor.yy0); } break; - case 294: /* add_column_fullname ::= fullname */ + case 292: /* add_column_fullname ::= fullname */ { disableLookaside(pParse); sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy131); } break; - case 295: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + case 293: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ { sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy131, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; - case 296: /* cmd ::= create_vtab */ + case 294: /* cmd ::= create_vtab */ {sqlite3VtabFinishParse(pParse,0);} break; - case 297: /* cmd ::= create_vtab LP vtabarglist RP */ + case 295: /* cmd ::= create_vtab LP vtabarglist RP */ {sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);} break; - case 298: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + case 296: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ { sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy394); } break; - case 299: /* vtabarg ::= */ + case 297: /* vtabarg ::= */ {sqlite3VtabArgInit(pParse);} break; - case 300: /* vtabargtoken ::= ANY */ - case 301: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==301); - case 302: /* lp ::= LP */ yytestcase(yyruleno==302); + case 298: /* vtabargtoken ::= ANY */ + case 299: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==299); + case 300: /* lp ::= LP */ yytestcase(yyruleno==300); {sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);} break; - case 303: /* with ::= WITH wqlist */ - case 304: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==304); + case 301: /* with ::= WITH wqlist */ + case 302: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==302); { sqlite3WithPush(pParse, yymsp[0].minor.yy521, 1); } break; - case 305: /* wqas ::= AS */ + case 303: /* wqas ::= AS */ {yymsp[0].minor.yy516 = M10d_Any;} break; - case 306: /* wqas ::= AS MATERIALIZED */ + case 304: /* wqas ::= AS MATERIALIZED */ {yymsp[-1].minor.yy516 = M10d_Yes;} break; - case 307: /* wqas ::= AS NOT MATERIALIZED */ + case 305: /* wqas ::= AS NOT MATERIALIZED */ {yymsp[-2].minor.yy516 = M10d_No;} break; - case 308: /* wqitem ::= nm eidlist_opt wqas LP select RP */ + case 306: /* wqitem ::= nm eidlist_opt wqas LP select RP */ { yymsp[-5].minor.yy385 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy322, yymsp[-1].minor.yy47, yymsp[-3].minor.yy516); /*A-overwrites-X*/ } break; - case 309: /* wqlist ::= wqitem */ + case 307: /* wqlist ::= wqitem 
*/ { yymsp[0].minor.yy521 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy385); /*A-overwrites-X*/ } break; - case 310: /* wqlist ::= wqlist COMMA wqitem */ + case 308: /* wqlist ::= wqlist COMMA wqitem */ { yymsp[-2].minor.yy521 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy521, yymsp[0].minor.yy385); } break; - case 311: /* windowdefn_list ::= windowdefn */ + case 309: /* windowdefn_list ::= windowdefn */ { yylhsminor.yy41 = yymsp[0].minor.yy41; } yymsp[0].minor.yy41 = yylhsminor.yy41; break; - case 312: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ + case 310: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ { assert( yymsp[0].minor.yy41!=0 ); sqlite3WindowChain(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy41); @@ -171740,7 +172640,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy41 = yylhsminor.yy41; break; - case 313: /* windowdefn ::= nm AS LP window RP */ + case 311: /* windowdefn ::= nm AS LP window RP */ { if( ALWAYS(yymsp[-1].minor.yy41) ){ yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); @@ -171749,90 +172649,90 @@ static YYACTIONTYPE yy_reduce( } yymsp[-4].minor.yy41 = yylhsminor.yy41; break; - case 314: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + case 312: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ { yymsp[-4].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, 0); } break; - case 315: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + case 313: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ { yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, &yymsp[-5].minor.yy0); } yymsp[-5].minor.yy41 = yylhsminor.yy41; break; - case 316: /* window ::= ORDER BY sortlist frame_opt */ + case 314: /* window ::= ORDER BY sortlist frame_opt */ { yymsp[-3].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, 0); } break; - case 317: /* window ::= nm ORDER BY sortlist frame_opt */ + case 315: /* window ::= nm ORDER BY sortlist frame_opt */ { yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0); } yymsp[-4].minor.yy41 = yylhsminor.yy41; break; - case 318: /* window ::= frame_opt */ - case 337: /* filter_over ::= over_clause */ yytestcase(yyruleno==337); + case 316: /* window ::= frame_opt */ + case 335: /* filter_over ::= over_clause */ yytestcase(yyruleno==335); { yylhsminor.yy41 = yymsp[0].minor.yy41; } yymsp[0].minor.yy41 = yylhsminor.yy41; break; - case 319: /* window ::= nm frame_opt */ + case 317: /* window ::= nm frame_opt */ { yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, 0, &yymsp[-1].minor.yy0); } yymsp[-1].minor.yy41 = yylhsminor.yy41; break; - case 320: /* frame_opt ::= */ + case 318: /* frame_opt ::= */ { yymsp[1].minor.yy41 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); } break; - case 321: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + case 319: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ { yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy394, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy516); } yymsp[-2].minor.yy41 = yylhsminor.yy41; break; - case 322: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + case 320: /* 
frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ { yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy394, yymsp[-3].minor.yy595.eType, yymsp[-3].minor.yy595.pExpr, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, yymsp[0].minor.yy516); } yymsp[-5].minor.yy41 = yylhsminor.yy41; break; - case 324: /* frame_bound_s ::= frame_bound */ - case 326: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==326); + case 322: /* frame_bound_s ::= frame_bound */ + case 324: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==324); {yylhsminor.yy595 = yymsp[0].minor.yy595;} yymsp[0].minor.yy595 = yylhsminor.yy595; break; - case 325: /* frame_bound_s ::= UNBOUNDED PRECEDING */ - case 327: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==327); - case 329: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==329); + case 323: /* frame_bound_s ::= UNBOUNDED PRECEDING */ + case 325: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==325); + case 327: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==327); {yylhsminor.yy595.eType = yymsp[-1].major; yylhsminor.yy595.pExpr = 0;} yymsp[-1].minor.yy595 = yylhsminor.yy595; break; - case 328: /* frame_bound ::= expr PRECEDING|FOLLOWING */ + case 326: /* frame_bound ::= expr PRECEDING|FOLLOWING */ {yylhsminor.yy595.eType = yymsp[0].major; yylhsminor.yy595.pExpr = yymsp[-1].minor.yy528;} yymsp[-1].minor.yy595 = yylhsminor.yy595; break; - case 330: /* frame_exclude_opt ::= */ + case 328: /* frame_exclude_opt ::= */ {yymsp[1].minor.yy516 = 0;} break; - case 331: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ + case 329: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ {yymsp[-1].minor.yy516 = yymsp[0].minor.yy516;} break; - case 332: /* frame_exclude ::= NO OTHERS */ - case 333: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==333); + case 330: /* frame_exclude ::= NO OTHERS */ + case 331: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==331); {yymsp[-1].minor.yy516 = yymsp[-1].major; /*A-overwrites-X*/} break; - case 334: /* frame_exclude ::= GROUP|TIES */ + case 332: /* frame_exclude ::= GROUP|TIES */ {yymsp[0].minor.yy516 = yymsp[0].major; /*A-overwrites-X*/} break; - case 335: /* window_clause ::= WINDOW windowdefn_list */ + case 333: /* window_clause ::= WINDOW windowdefn_list */ { yymsp[-1].minor.yy41 = yymsp[0].minor.yy41; } break; - case 336: /* filter_over ::= filter_clause over_clause */ + case 334: /* filter_over ::= filter_clause over_clause */ { if( yymsp[0].minor.yy41 ){ yymsp[0].minor.yy41->pFilter = yymsp[-1].minor.yy528; @@ -171843,7 +172743,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-1].minor.yy41 = yylhsminor.yy41; break; - case 338: /* filter_over ::= filter_clause */ + case 336: /* filter_over ::= filter_clause */ { yylhsminor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); if( yylhsminor.yy41 ){ @@ -171855,13 +172755,13 @@ static YYACTIONTYPE yy_reduce( } yymsp[0].minor.yy41 = yylhsminor.yy41; break; - case 339: /* over_clause ::= OVER LP window RP */ + case 337: /* over_clause ::= OVER LP window RP */ { yymsp[-3].minor.yy41 = yymsp[-1].minor.yy41; assert( yymsp[-3].minor.yy41!=0 ); } break; - case 340: /* over_clause ::= OVER nm */ + case 338: /* over_clause ::= OVER nm */ { yymsp[-1].minor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); if( yymsp[-1].minor.yy41 ){ @@ -171869,73 +172769,73 @@ static YYACTIONTYPE yy_reduce( } } break; - case 341: /* filter_clause ::= FILTER LP WHERE expr 
RP */ + case 339: /* filter_clause ::= FILTER LP WHERE expr RP */ { yymsp[-4].minor.yy528 = yymsp[-1].minor.yy528; } break; default: - /* (342) input ::= cmdlist */ yytestcase(yyruleno==342); - /* (343) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==343); - /* (344) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=344); - /* (345) ecmd ::= SEMI */ yytestcase(yyruleno==345); - /* (346) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==346); - /* (347) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=347); - /* (348) trans_opt ::= */ yytestcase(yyruleno==348); - /* (349) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==349); - /* (350) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==350); - /* (351) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==351); - /* (352) savepoint_opt ::= */ yytestcase(yyruleno==352); - /* (353) cmd ::= create_table create_table_args */ yytestcase(yyruleno==353); - /* (354) table_option_set ::= table_option (OPTIMIZED OUT) */ assert(yyruleno!=354); - /* (355) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==355); - /* (356) columnlist ::= columnname carglist */ yytestcase(yyruleno==356); - /* (357) nm ::= ID|INDEXED */ yytestcase(yyruleno==357); - /* (358) nm ::= STRING */ yytestcase(yyruleno==358); - /* (359) nm ::= JOIN_KW */ yytestcase(yyruleno==359); - /* (360) typetoken ::= typename */ yytestcase(yyruleno==360); - /* (361) typename ::= ID|STRING */ yytestcase(yyruleno==361); - /* (362) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=362); - /* (363) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=363); - /* (364) carglist ::= carglist ccons */ yytestcase(yyruleno==364); - /* (365) carglist ::= */ yytestcase(yyruleno==365); - /* (366) ccons ::= NULL onconf */ yytestcase(yyruleno==366); - /* (367) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==367); - /* (368) ccons ::= AS generated */ yytestcase(yyruleno==368); - /* (369) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==369); - /* (370) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==370); - /* (371) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=371); - /* (372) tconscomma ::= */ yytestcase(yyruleno==372); - /* (373) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=373); - /* (374) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=374); - /* (375) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=375); - /* (376) oneselect ::= values */ yytestcase(yyruleno==376); - /* (377) sclp ::= selcollist COMMA */ yytestcase(yyruleno==377); - /* (378) as ::= ID|STRING */ yytestcase(yyruleno==378); - /* (379) indexed_opt ::= indexed_by (OPTIMIZED OUT) */ assert(yyruleno!=379); - /* (380) returning ::= */ yytestcase(yyruleno==380); - /* (381) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=381); - /* (382) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==382); - /* (383) exprlist ::= nexprlist */ yytestcase(yyruleno==383); - /* (384) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=384); - /* (385) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=385); - /* (386) nmnum ::= ON */ yytestcase(yyruleno==386); - /* (387) nmnum ::= DELETE */ yytestcase(yyruleno==387); - /* (388) nmnum ::= DEFAULT */ yytestcase(yyruleno==388); - /* (389) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==389); - /* (390) foreach_clause ::= */ yytestcase(yyruleno==390); - /* (391) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==391); - /* (392) trnm ::= nm */ 
yytestcase(yyruleno==392); - /* (393) tridxby ::= */ yytestcase(yyruleno==393); - /* (394) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==394); - /* (395) database_kw_opt ::= */ yytestcase(yyruleno==395); - /* (396) kwcolumn_opt ::= */ yytestcase(yyruleno==396); - /* (397) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==397); - /* (398) vtabarglist ::= vtabarg */ yytestcase(yyruleno==398); - /* (399) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==399); - /* (400) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==400); - /* (401) anylist ::= */ yytestcase(yyruleno==401); - /* (402) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==402); - /* (403) anylist ::= anylist ANY */ yytestcase(yyruleno==403); - /* (404) with ::= */ yytestcase(yyruleno==404); + /* (340) input ::= cmdlist */ yytestcase(yyruleno==340); + /* (341) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==341); + /* (342) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=342); + /* (343) ecmd ::= SEMI */ yytestcase(yyruleno==343); + /* (344) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==344); + /* (345) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=345); + /* (346) trans_opt ::= */ yytestcase(yyruleno==346); + /* (347) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==347); + /* (348) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==348); + /* (349) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==349); + /* (350) savepoint_opt ::= */ yytestcase(yyruleno==350); + /* (351) cmd ::= create_table create_table_args */ yytestcase(yyruleno==351); + /* (352) table_option_set ::= table_option (OPTIMIZED OUT) */ assert(yyruleno!=352); + /* (353) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==353); + /* (354) columnlist ::= columnname carglist */ yytestcase(yyruleno==354); + /* (355) nm ::= ID|INDEXED|JOIN_KW */ yytestcase(yyruleno==355); + /* (356) nm ::= STRING */ yytestcase(yyruleno==356); + /* (357) typetoken ::= typename */ yytestcase(yyruleno==357); + /* (358) typename ::= ID|STRING */ yytestcase(yyruleno==358); + /* (359) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=359); + /* (360) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=360); + /* (361) carglist ::= carglist ccons */ yytestcase(yyruleno==361); + /* (362) carglist ::= */ yytestcase(yyruleno==362); + /* (363) ccons ::= NULL onconf */ yytestcase(yyruleno==363); + /* (364) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==364); + /* (365) ccons ::= AS generated */ yytestcase(yyruleno==365); + /* (366) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==366); + /* (367) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==367); + /* (368) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=368); + /* (369) tconscomma ::= */ yytestcase(yyruleno==369); + /* (370) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=370); + /* (371) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=371); + /* (372) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=372); + /* (373) oneselect ::= values */ yytestcase(yyruleno==373); + /* (374) sclp ::= selcollist COMMA */ yytestcase(yyruleno==374); + /* (375) as ::= ID|STRING */ yytestcase(yyruleno==375); + /* (376) indexed_opt ::= indexed_by (OPTIMIZED OUT) */ assert(yyruleno!=376); + /* (377) returning ::= */ yytestcase(yyruleno==377); + /* (378) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=378); + /* (379) likeop ::= LIKE_KW|MATCH */ 
yytestcase(yyruleno==379); + /* (380) case_operand ::= expr */ yytestcase(yyruleno==380); + /* (381) exprlist ::= nexprlist */ yytestcase(yyruleno==381); + /* (382) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=382); + /* (383) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=383); + /* (384) nmnum ::= ON */ yytestcase(yyruleno==384); + /* (385) nmnum ::= DELETE */ yytestcase(yyruleno==385); + /* (386) nmnum ::= DEFAULT */ yytestcase(yyruleno==386); + /* (387) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==387); + /* (388) foreach_clause ::= */ yytestcase(yyruleno==388); + /* (389) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==389); + /* (390) trnm ::= nm */ yytestcase(yyruleno==390); + /* (391) tridxby ::= */ yytestcase(yyruleno==391); + /* (392) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==392); + /* (393) database_kw_opt ::= */ yytestcase(yyruleno==393); + /* (394) kwcolumn_opt ::= */ yytestcase(yyruleno==394); + /* (395) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==395); + /* (396) vtabarglist ::= vtabarg */ yytestcase(yyruleno==396); + /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==397); + /* (398) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==398); + /* (399) anylist ::= */ yytestcase(yyruleno==399); + /* (400) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==400); + /* (401) anylist ::= anylist ANY */ yytestcase(yyruleno==401); + /* (402) with ::= */ yytestcase(yyruleno==402); break; /********** End reduce actions ************************************************/ }; @@ -172511,7 +173411,7 @@ static const unsigned char aKWHash[127] = { /* aKWNext[] forms the hash collision chain. If aKWHash[i]==0 ** then the i-th keyword has no more hash collisions. Otherwise, ** the next keyword with the same hash is aKWHash[i]-1. */ -static const unsigned char aKWNext[147] = { +static const unsigned char aKWNext[148] = {0, 0, 0, 0, 0, 4, 0, 43, 0, 0, 106, 114, 0, 0, 0, 2, 0, 0, 143, 0, 0, 0, 13, 0, 0, 0, 0, 141, 0, 0, 119, 52, 0, 0, 137, 12, 0, 0, 62, 0, @@ -172526,7 +173426,7 @@ static const unsigned char aKWNext[147] = { 102, 0, 0, 87, }; /* aKWLen[i] is the length (in bytes) of the i-th keyword */ -static const unsigned char aKWLen[147] = { +static const unsigned char aKWLen[148] = {0, 7, 7, 5, 4, 6, 4, 5, 3, 6, 7, 3, 6, 6, 7, 7, 3, 8, 2, 6, 5, 4, 4, 3, 10, 4, 7, 6, 9, 4, 2, 6, 5, 9, 9, 4, 7, 3, 2, 4, @@ -172542,7 +173442,7 @@ static const unsigned char aKWLen[147] = { }; /* aKWOffset[i] is the index into zKWText[] of the start of ** the text for the i-th keyword. 
*/ -static const unsigned short int aKWOffset[147] = { +static const unsigned short int aKWOffset[148] = {0, 0, 2, 2, 8, 9, 14, 16, 20, 23, 25, 25, 29, 33, 36, 41, 46, 48, 53, 54, 59, 62, 65, 67, 69, 78, 81, 86, 90, 90, 94, 99, 101, 105, 111, 119, 123, 123, 123, 126, @@ -172557,7 +173457,7 @@ static const unsigned short int aKWOffset[147] = { 648, 650, 655, 659, }; /* aKWCode[i] is the parser symbol code for the i-th keyword */ -static const unsigned char aKWCode[147] = { +static const unsigned char aKWCode[148] = {0, TK_REINDEX, TK_INDEXED, TK_INDEX, TK_DESC, TK_ESCAPE, TK_EACH, TK_CHECK, TK_KEY, TK_BEFORE, TK_FOREIGN, TK_FOR, TK_IGNORE, TK_LIKE_KW, TK_EXPLAIN, TK_INSTEAD, @@ -172726,7 +173626,7 @@ static int keywordCode(const char *z, int n, int *pType){ const char *zKW; if( n>=2 ){ i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n*1) % 127; - for(i=((int)aKWHash[i])-1; i>=0; i=((int)aKWNext[i])-1){ + for(i=(int)aKWHash[i]; i>0; i=aKWNext[i]){ if( aKWLen[i]!=n ) continue; zKW = &zKWText[aKWOffset[i]]; #ifdef SQLITE_ASCII @@ -172742,153 +173642,153 @@ static int keywordCode(const char *z, int n, int *pType){ while( j=SQLITE_N_KEYWORD ) return SQLITE_ERROR; + i++; *pzName = zKWText + aKWOffset[i]; *pnName = aKWLen[i]; return SQLITE_OK; @@ -174441,9 +175342,21 @@ SQLITE_API int sqlite3_config(int op, ...){ va_list ap; int rc = SQLITE_OK; - /* sqlite3_config() shall return SQLITE_MISUSE if it is invoked while - ** the SQLite library is in use. */ - if( sqlite3GlobalConfig.isInit ) return SQLITE_MISUSE_BKPT; + /* sqlite3_config() normally returns SQLITE_MISUSE if it is invoked while + ** the SQLite library is in use. Except, a few selected opcodes + ** are allowed. + */ + if( sqlite3GlobalConfig.isInit ){ + static const u64 mAnytimeConfigOption = 0 + | MASKBIT64( SQLITE_CONFIG_LOG ) + | MASKBIT64( SQLITE_CONFIG_PCACHE_HDRSZ ) + ; + if( op<0 || op>63 || (MASKBIT64(op) & mAnytimeConfigOption)==0 ){ + return SQLITE_MISUSE_BKPT; + } + testcase( op==SQLITE_CONFIG_LOG ); + testcase( op==SQLITE_CONFIG_PCACHE_HDRSZ ); + } va_start(ap, op); switch( op ){ @@ -174512,6 +175425,7 @@ SQLITE_API int sqlite3_config(int op, ...){ break; } case SQLITE_CONFIG_MEMSTATUS: { + assert( !sqlite3GlobalConfig.isInit ); /* Cannot change at runtime */ /* EVIDENCE-OF: R-61275-35157 The SQLITE_CONFIG_MEMSTATUS option takes ** single argument of type int, interpreted as a boolean, which enables ** or disables the collection of memory allocation statistics. */ @@ -174635,8 +175549,10 @@ SQLITE_API int sqlite3_config(int op, ...){ ** sqlite3GlobalConfig.xLog = va_arg(ap, void(*)(void*,int,const char*)); */ typedef void(*LOGFUNC_t)(void*,int,const char*); - sqlite3GlobalConfig.xLog = va_arg(ap, LOGFUNC_t); - sqlite3GlobalConfig.pLogArg = va_arg(ap, void*); + LOGFUNC_t xLog = va_arg(ap, LOGFUNC_t); + void *pLogArg = va_arg(ap, void*); + AtomicStore(&sqlite3GlobalConfig.xLog, xLog); + AtomicStore(&sqlite3GlobalConfig.pLogArg, pLogArg); break; } @@ -174650,7 +175566,8 @@ SQLITE_API int sqlite3_config(int op, ...){ ** argument of type int. If non-zero, then URI handling is globally ** enabled. If the parameter is zero, then URI handling is globally ** disabled. 
*/ - sqlite3GlobalConfig.bOpenUri = va_arg(ap, int); + int bOpenUri = va_arg(ap, int); + AtomicStore(&sqlite3GlobalConfig.bOpenUri, bOpenUri); break; } @@ -174965,6 +175882,8 @@ SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){ { SQLITE_DBCONFIG_DQS_DML, SQLITE_DqsDML }, { SQLITE_DBCONFIG_LEGACY_FILE_FORMAT, SQLITE_LegacyFileFmt }, { SQLITE_DBCONFIG_TRUSTED_SCHEMA, SQLITE_TrustedSchema }, + { SQLITE_DBCONFIG_STMT_SCANSTATUS, SQLITE_StmtScanStatus }, + { SQLITE_DBCONFIG_REVERSE_SCANORDER, SQLITE_ReverseOrder }, }; unsigned int i; rc = SQLITE_ERROR; /* IMP: R-42790-23372 */ @@ -176950,9 +177869,9 @@ SQLITE_PRIVATE int sqlite3ParseUri( assert( *pzErrMsg==0 ); - if( ((flags & SQLITE_OPEN_URI) /* IMP: R-48725-32206 */ - || sqlite3GlobalConfig.bOpenUri) /* IMP: R-51689-46548 */ - && nUri>=5 && memcmp(zUri, "file:", 5)==0 /* IMP: R-57884-37496 */ + if( ((flags & SQLITE_OPEN_URI) /* IMP: R-48725-32206 */ + || AtomicLoad(&sqlite3GlobalConfig.bOpenUri)) /* IMP: R-51689-46548 */ + && nUri>=5 && memcmp(zUri, "file:", 5)==0 /* IMP: R-57884-37496 */ ){ char *zOpt; int eState; /* Parser state when parsing URI */ @@ -177358,6 +178277,9 @@ static int openDatabase( #endif #if defined(SQLITE_DEFAULT_LEGACY_ALTER_TABLE) | SQLITE_LegacyAlter +#endif +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) + | SQLITE_StmtScanStatus #endif ; sqlite3HashInit(&db->aCollSeq); @@ -177923,7 +178845,7 @@ SQLITE_API int sqlite3_sleep(int ms){ /* This function works in milliseconds, but the underlying OsSleep() ** API uses microseconds. Hence the 1000's. */ - rc = (sqlite3OsSleep(pVfs, 1000*ms)/1000); + rc = (sqlite3OsSleep(pVfs, ms<0 ? 0 : 1000*ms)/1000); return rc; } @@ -198927,6 +199849,7 @@ static const char * const jsonType[] = { #define JNODE_PATCH 0x10 /* Patch with JsonNode.u.pPatch */ #define JNODE_APPEND 0x20 /* More ARRAY/OBJECT entries at u.iAppend */ #define JNODE_LABEL 0x40 /* Is a label of an object */ +#define JNODE_JSON5 0x80 /* Node contains JSON5 enhancements */ /* A single node of parsed JSON @@ -198953,10 +199876,12 @@ struct JsonParse { JsonNode *aNode; /* Array of nodes containing the parse */ const char *zJson; /* Original JSON string */ u32 *aUp; /* Index of parent of each node */ - u8 oom; /* Set to true if out of memory */ - u8 nErr; /* Number of errors seen */ u16 iDepth; /* Nesting depth */ + u8 nErr; /* Number of errors seen */ + u8 oom; /* Set to true if out of memory */ + u8 hasNonstd; /* True if input uses non-standard features like JSON5 */ int nJson; /* Length of the zJson string in bytes */ + u32 iErr; /* Error location in zJson[] */ u32 iHold; /* Replace cache line with the lowest iHold value */ }; @@ -198964,10 +199889,10 @@ struct JsonParse { ** Maximum nesting depth of JSON for this implementation. ** ** This limit is needed to avoid a stack overflow in the recursive -** descent parser. A depth of 2000 is far deeper than any sane JSON -** should go. +** descent parser. A depth of 1000 is far deeper than any sane JSON +** should go. Historical note: This limit was 2000 prior to version 3.42.0 */ -#define JSON_MAX_DEPTH 2000 +#define JSON_MAX_DEPTH 1000 /************************************************************************** ** Utility routines for dealing with JsonString objects @@ -199117,6 +200042,129 @@ static void jsonAppendString(JsonString *p, const char *zIn, u32 N){ assert( p->nUsednAlloc ); } +/* +** The zIn[0..N] string is a JSON5 string literal. Append to p a translation +** of the string literal that standard JSON and that omits all JSON5 +** features. 
+*/ +static void jsonAppendNormalizedString(JsonString *p, const char *zIn, u32 N){ + u32 i; + jsonAppendChar(p, '"'); + zIn++; + N -= 2; + while( N>0 ){ + for(i=0; i0 ){ + jsonAppendRaw(p, zIn, i); + zIn += i; + N -= i; + if( N==0 ) break; + } + assert( zIn[0]=='\\' ); + switch( (u8)zIn[1] ){ + case '\'': + jsonAppendChar(p, '\''); + break; + case 'v': + jsonAppendRaw(p, "\\u0009", 6); + break; + case 'x': + jsonAppendRaw(p, "\\u00", 4); + jsonAppendRaw(p, &zIn[2], 2); + zIn += 2; + N -= 2; + break; + case '0': + jsonAppendRaw(p, "\\u0000", 6); + break; + case '\r': + if( zIn[2]=='\n' ){ + zIn++; + N--; + } + break; + case '\n': + break; + case 0xe2: + assert( N>=4 ); + assert( 0x80==(u8)zIn[2] ); + assert( 0xa8==(u8)zIn[3] || 0xa9==(u8)zIn[3] ); + zIn += 2; + N -= 2; + break; + default: + jsonAppendRaw(p, zIn, 2); + break; + } + zIn += 2; + N -= 2; + } + jsonAppendChar(p, '"'); +} + +/* +** The zIn[0..N] string is a JSON5 integer literal. Append to p a translation +** of the string literal that standard JSON and that omits all JSON5 +** features. +*/ +static void jsonAppendNormalizedInt(JsonString *p, const char *zIn, u32 N){ + if( zIn[0]=='+' ){ + zIn++; + N--; + }else if( zIn[0]=='-' ){ + jsonAppendChar(p, '-'); + zIn++; + N--; + } + if( zIn[0]=='0' && (zIn[1]=='x' || zIn[1]=='X') ){ + sqlite3_int64 i = 0; + int rc = sqlite3DecOrHexToI64(zIn, &i); + if( rc<=1 ){ + jsonPrintf(100,p,"%lld",i); + }else{ + assert( rc==2 ); + jsonAppendRaw(p, "9.0e999", 7); + } + return; + } + jsonAppendRaw(p, zIn, N); +} + +/* +** The zIn[0..N] string is a JSON5 real literal. Append to p a translation +** of the string literal that standard JSON and that omits all JSON5 +** features. +*/ +static void jsonAppendNormalizedReal(JsonString *p, const char *zIn, u32 N){ + u32 i; + if( zIn[0]=='+' ){ + zIn++; + N--; + }else if( zIn[0]=='-' ){ + jsonAppendChar(p, '-'); + zIn++; + N--; + } + if( zIn[0]=='.' ){ + jsonAppendChar(p, '0'); + } + for(i=0; i0 ){ + jsonAppendRaw(p, zIn, N); + } +} + + + /* ** Append a function parameter value to the JSON string under ** construction. 
@@ -199130,8 +200178,11 @@ static void jsonAppendValue( jsonAppendRaw(p, "null", 4); break; } - case SQLITE_INTEGER: case SQLITE_FLOAT: { + jsonPrintf(100, p, "%!0.15g", sqlite3_value_double(pValue)); + break; + } + case SQLITE_INTEGER: { const char *z = (const char*)sqlite3_value_text(pValue); u32 n = (u32)sqlite3_value_bytes(pValue); jsonAppendRaw(p, z, n); @@ -199244,17 +200295,38 @@ static void jsonRenderNode( break; } case JSON_STRING: { + assert( pNode->eU==1 ); if( pNode->jnFlags & JNODE_RAW ){ - assert( pNode->eU==1 ); - jsonAppendString(pOut, pNode->u.zJContent, pNode->n); - break; + if( pNode->jnFlags & JNODE_LABEL ){ + jsonAppendChar(pOut, '"'); + jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n); + jsonAppendChar(pOut, '"'); + }else{ + jsonAppendString(pOut, pNode->u.zJContent, pNode->n); + } + }else if( pNode->jnFlags & JNODE_JSON5 ){ + jsonAppendNormalizedString(pOut, pNode->u.zJContent, pNode->n); + }else{ + jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n); } - /* no break */ deliberate_fall_through + break; + } + case JSON_REAL: { + assert( pNode->eU==1 ); + if( pNode->jnFlags & JNODE_JSON5 ){ + jsonAppendNormalizedReal(pOut, pNode->u.zJContent, pNode->n); + }else{ + jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n); + } + break; } - case JSON_REAL: case JSON_INT: { assert( pNode->eU==1 ); - jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n); + if( pNode->jnFlags & JNODE_JSON5 ){ + jsonAppendNormalizedInt(pOut, pNode->u.zJContent, pNode->n); + }else{ + jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n); + } break; } case JSON_ARRAY: { @@ -199370,59 +200442,41 @@ static void jsonReturn( } case JSON_INT: { sqlite3_int64 i = 0; + int rc; + int bNeg = 0; const char *z; + + assert( pNode->eU==1 ); z = pNode->u.zJContent; - if( z[0]=='-' ){ z++; } - while( z[0]>='0' && z[0]<='9' ){ - unsigned v = *(z++) - '0'; - if( i>=LARGEST_INT64/10 ){ - if( i>LARGEST_INT64/10 ) goto int_as_real; - if( z[0]>='0' && z[0]<='9' ) goto int_as_real; - if( v==9 ) goto int_as_real; - if( v==8 ){ - if( pNode->u.zJContent[0]=='-' ){ - sqlite3_result_int64(pCtx, SMALLEST_INT64); - goto int_done; - }else{ - goto int_as_real; - } - } - } - i = i*10 + v; + if( z[0]=='-' ){ z++; bNeg = 1; } + else if( z[0]=='+' ){ z++; } + rc = sqlite3DecOrHexToI64(z, &i); + if( rc<=1 ){ + sqlite3_result_int64(pCtx, bNeg ? 
-i : i); + }else if( rc==3 && bNeg ){ + sqlite3_result_int64(pCtx, SMALLEST_INT64); + }else{ + goto to_double; } - if( pNode->u.zJContent[0]=='-' ){ i = -i; } - sqlite3_result_int64(pCtx, i); - int_done: break; - int_as_real: ; /* no break */ deliberate_fall_through } case JSON_REAL: { double r; -#ifdef SQLITE_AMALGAMATION const char *z; assert( pNode->eU==1 ); + to_double: z = pNode->u.zJContent; sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8); -#else - assert( pNode->eU==1 ); - r = strtod(pNode->u.zJContent, 0); -#endif sqlite3_result_double(pCtx, r); break; } case JSON_STRING: { -#if 0 /* Never happens because JNODE_RAW is only set by json_set(), - ** json_insert() and json_replace() and those routines do not - ** call jsonReturn() */ if( pNode->jnFlags & JNODE_RAW ){ assert( pNode->eU==1 ); sqlite3_result_text(pCtx, pNode->u.zJContent, pNode->n, SQLITE_TRANSIENT); - }else -#endif - assert( (pNode->jnFlags & JNODE_RAW)==0 ); - if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){ + }else if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){ /* JSON formatted without any backslash-escapes */ assert( pNode->eU==1 ); sqlite3_result_text(pCtx, pNode->u.zJContent+1, pNode->n-2, @@ -199434,18 +200488,17 @@ static void jsonReturn( const char *z; char *zOut; u32 j; + u32 nOut = n; assert( pNode->eU==1 ); z = pNode->u.zJContent; - zOut = sqlite3_malloc( n+1 ); + zOut = sqlite3_malloc( nOut+1 ); if( zOut==0 ){ sqlite3_result_error_nomem(pCtx); break; } for(i=1, j=0; iaNode[pParse->nNode]; - p->eType = (u8)eType; - p->jnFlags = 0; + p->eType = (u8)(eType & 0xff); + p->jnFlags = (u8)(eType >> 8); VVA( p->eU = zContent ? 1 : 0 ); p->n = n; p->u.zJContent = zContent; return pParse->nNode++; } +/* +** Return true if z[] begins with 2 (or more) hexadecimal digits +*/ +static int jsonIs2Hex(const char *z){ + return sqlite3Isxdigit(z[0]) && sqlite3Isxdigit(z[1]); +} + /* ** Return true if z[] begins with 4 (or more) hexadecimal digits */ static int jsonIs4Hex(const char *z){ - int i; - for(i=0; i<4; i++) if( !sqlite3Isxdigit(z[i]) ) return 0; - return 1; + return jsonIs2Hex(z) && jsonIs2Hex(&z[2]); +} + +/* +** Return the number of bytes of JSON5 whitespace at the beginning of +** the input string z[]. +** +** JSON5 whitespace consists of any of the following characters: +** +** Unicode UTF-8 Name +** U+0009 09 horizontal tab +** U+000a 0a line feed +** U+000b 0b vertical tab +** U+000c 0c form feed +** U+000d 0d carriage return +** U+0020 20 space +** U+00a0 c2 a0 non-breaking space +** U+1680 e1 9a 80 ogham space mark +** U+2000 e2 80 80 en quad +** U+2001 e2 80 81 em quad +** U+2002 e2 80 82 en space +** U+2003 e2 80 83 em space +** U+2004 e2 80 84 three-per-em space +** U+2005 e2 80 85 four-per-em space +** U+2006 e2 80 86 six-per-em space +** U+2007 e2 80 87 figure space +** U+2008 e2 80 88 punctuation space +** U+2009 e2 80 89 thin space +** U+200a e2 80 8a hair space +** U+2028 e2 80 a8 line separator +** U+2029 e2 80 a9 paragraph separator +** U+202f e2 80 af narrow no-break space (NNBSP) +** U+205f e2 81 9f medium mathematical space (MMSP) +** U+3000 e3 80 80 ideographical space +** U+FEFF ef bb bf byte order mark +** +** In addition, comments between '/', '*' and '*', '/' and +** from '/', '/' to end-of-line are also considered to be whitespace. 
+*/ +static int json5Whitespace(const char *zIn){ + int n = 0; + const u8 *z = (u8*)zIn; + while( 1 /*exit by "goto whitespace_done"*/ ){ + switch( z[n] ){ + case 0x09: + case 0x0a: + case 0x0b: + case 0x0c: + case 0x0d: + case 0x20: { + n++; + break; + } + case '/': { + if( z[n+1]=='*' && z[n+2]!=0 ){ + int j; + for(j=n+3; z[j]!='/' || z[j-1]!='*'; j++){ + if( z[j]==0 ) goto whitespace_done; + } + n = j+1; + break; + }else if( z[n+1]=='/' ){ + int j; + char c; + for(j=n+2; (c = z[j])!=0; j++){ + if( c=='\n' || c=='\r' ) break; + if( 0xe2==(u8)c && 0x80==(u8)z[j+1] + && (0xa8==(u8)z[j+2] || 0xa9==(u8)z[j+2]) + ){ + j += 2; + break; + } + } + n = j; + if( z[n] ) n++; + break; + } + goto whitespace_done; + } + case 0xc2: { + if( z[n+1]==0xa0 ){ + n += 2; + break; + } + goto whitespace_done; + } + case 0xe1: { + if( z[n+1]==0x9a && z[n+2]==0x80 ){ + n += 3; + break; + } + goto whitespace_done; + } + case 0xe2: { + if( z[n+1]==0x80 ){ + u8 c = z[n+2]; + if( c<0x80 ) goto whitespace_done; + if( c<=0x8a || c==0xa8 || c==0xa9 || c==0xaf ){ + n += 3; + break; + } + }else if( z[n+1]==0x81 && z[n+2]==0x9f ){ + n += 3; + break; + } + goto whitespace_done; + } + case 0xe3: { + if( z[n+1]==0x80 && z[n+2]==0x80 ){ + n += 3; + break; + } + goto whitespace_done; + } + case 0xef: { + if( z[n+1]==0xbb && z[n+2]==0xbf ){ + n += 3; + break; + } + goto whitespace_done; + } + default: { + goto whitespace_done; + } + } + } + whitespace_done: + return n; } +/* +** Extra floating-point literals to allow in JSON. +*/ +static const struct NanInfName { + char c1; + char c2; + char n; + char eType; + char nRepl; + char *zMatch; + char *zRepl; +} aNanInfName[] = { + { 'i', 'I', 3, JSON_REAL, 7, "inf", "9.0e999" }, + { 'i', 'I', 8, JSON_REAL, 7, "infinity", "9.0e999" }, + { 'n', 'N', 3, JSON_NULL, 4, "NaN", "null" }, + { 'q', 'Q', 4, JSON_NULL, 4, "QNaN", "null" }, + { 's', 'S', 4, JSON_NULL, 4, "SNaN", "null" }, +}; + /* ** Parse a single JSON value which begins at pParse->zJson[i]. Return the ** index of the first character past the end of the value parsed. ** -** Return negative for a syntax error. Special cases: return -2 if the -** first non-whitespace character is '}' and return -3 if the first -** non-whitespace character is ']'. 
+** Special return values: +** +** 0 End if input +** -1 Syntax error +** -2 '}' seen +** -3 ']' seen +** -4 ',' seen +** -5 ':' seen */ static int jsonParseValue(JsonParse *pParse, u32 i){ char c; @@ -199592,151 +200819,430 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ int x; JsonNode *pNode; const char *z = pParse->zJson; - while( fast_isspace(z[i]) ){ i++; } - if( (c = z[i])=='{' ){ +json_parse_restart: + switch( (u8)z[i] ){ + case '{': { /* Parse object */ iThis = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0); if( iThis<0 ) return -1; + if( ++pParse->iDepth > JSON_MAX_DEPTH ){ + pParse->iErr = i; + return -1; + } for(j=i+1;;j++){ - while( fast_isspace(z[j]) ){ j++; } - if( ++pParse->iDepth > JSON_MAX_DEPTH ) return -1; + u32 nNode = pParse->nNode; x = jsonParseValue(pParse, j); - if( x<0 ){ - pParse->iDepth--; - if( x==(-2) && pParse->nNode==(u32)iThis+1 ) return j+1; - return -1; + if( x<=0 ){ + if( x==(-2) ){ + j = pParse->iErr; + if( pParse->nNode!=(u32)iThis+1 ) pParse->hasNonstd = 1; + break; + } + j += json5Whitespace(&z[j]); + if( sqlite3JsonId1(z[j]) + || (z[j]=='\\' && z[j+1]=='u' && jsonIs4Hex(&z[j+2])) + ){ + int k = j+1; + while( (sqlite3JsonId2(z[k]) && json5Whitespace(&z[k])==0) + || (z[k]=='\\' && z[k+1]=='u' && jsonIs4Hex(&z[k+2])) + ){ + k++; + } + jsonParseAddNode(pParse, JSON_STRING | (JNODE_RAW<<8), k-j, &z[j]); + pParse->hasNonstd = 1; + x = k; + }else{ + if( x!=-1 ) pParse->iErr = j; + return -1; + } } if( pParse->oom ) return -1; - pNode = &pParse->aNode[pParse->nNode-1]; - if( pNode->eType!=JSON_STRING ) return -1; + pNode = &pParse->aNode[nNode]; + if( pNode->eType!=JSON_STRING ){ + pParse->iErr = j; + return -1; + } pNode->jnFlags |= JNODE_LABEL; j = x; - while( fast_isspace(z[j]) ){ j++; } - if( z[j]!=':' ) return -1; - j++; + if( z[j]==':' ){ + j++; + }else{ + if( fast_isspace(z[j]) ){ + do{ j++; }while( fast_isspace(z[j]) ); + if( z[j]==':' ){ + j++; + goto parse_object_value; + } + } + x = jsonParseValue(pParse, j); + if( x!=(-5) ){ + if( x!=(-1) ) pParse->iErr = j; + return -1; + } + j = pParse->iErr+1; + } + parse_object_value: x = jsonParseValue(pParse, j); - pParse->iDepth--; - if( x<0 ) return -1; + if( x<=0 ){ + if( x!=(-1) ) pParse->iErr = j; + return -1; + } j = x; - while( fast_isspace(z[j]) ){ j++; } - c = z[j]; - if( c==',' ) continue; - if( c!='}' ) return -1; - break; + if( z[j]==',' ){ + continue; + }else if( z[j]=='}' ){ + break; + }else{ + if( fast_isspace(z[j]) ){ + do{ j++; }while( fast_isspace(z[j]) ); + if( z[j]==',' ){ + continue; + }else if( z[j]=='}' ){ + break; + } + } + x = jsonParseValue(pParse, j); + if( x==(-4) ){ + j = pParse->iErr; + continue; + } + if( x==(-2) ){ + j = pParse->iErr; + break; + } + } + pParse->iErr = j; + return -1; } pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1; + pParse->iDepth--; return j+1; - }else if( c=='[' ){ + } + case '[': { /* Parse array */ iThis = jsonParseAddNode(pParse, JSON_ARRAY, 0, 0); if( iThis<0 ) return -1; + if( ++pParse->iDepth > JSON_MAX_DEPTH ){ + pParse->iErr = i; + return -1; + } memset(&pParse->aNode[iThis].u, 0, sizeof(pParse->aNode[iThis].u)); for(j=i+1;;j++){ - while( fast_isspace(z[j]) ){ j++; } - if( ++pParse->iDepth > JSON_MAX_DEPTH ) return -1; x = jsonParseValue(pParse, j); - pParse->iDepth--; - if( x<0 ){ - if( x==(-3) && pParse->nNode==(u32)iThis+1 ) return j+1; + if( x<=0 ){ + if( x==(-3) ){ + j = pParse->iErr; + if( pParse->nNode!=(u32)iThis+1 ) pParse->hasNonstd = 1; + break; + } + if( x!=(-1) ) pParse->iErr = j; return -1; } j = x; - while( 
fast_isspace(z[j]) ){ j++; } - c = z[j]; - if( c==',' ) continue; - if( c!=']' ) return -1; - break; + if( z[j]==',' ){ + continue; + }else if( z[j]==']' ){ + break; + }else{ + if( fast_isspace(z[j]) ){ + do{ j++; }while( fast_isspace(z[j]) ); + if( z[j]==',' ){ + continue; + }else if( z[j]==']' ){ + break; + } + } + x = jsonParseValue(pParse, j); + if( x==(-4) ){ + j = pParse->iErr; + continue; + } + if( x==(-3) ){ + j = pParse->iErr; + break; + } + } + pParse->iErr = j; + return -1; } pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1; + pParse->iDepth--; return j+1; - }else if( c=='"' ){ + } + case '\'': { + u8 jnFlags; + char cDelim; + pParse->hasNonstd = 1; + jnFlags = JNODE_JSON5; + goto parse_string; + case '"': /* Parse string */ - u8 jnFlags = 0; + jnFlags = 0; + parse_string: + cDelim = z[i]; j = i+1; for(;;){ c = z[j]; if( (c & ~0x1f)==0 ){ /* Control characters are not allowed in strings */ + pParse->iErr = j; return -1; } if( c=='\\' ){ c = z[++j]; if( c=='"' || c=='\\' || c=='/' || c=='b' || c=='f' || c=='n' || c=='r' || c=='t' - || (c=='u' && jsonIs4Hex(z+j+1)) ){ - jnFlags = JNODE_ESCAPE; + || (c=='u' && jsonIs4Hex(&z[j+1])) ){ + jnFlags |= JNODE_ESCAPE; + }else if( c=='\'' || c=='0' || c=='v' || c=='\n' + || (0xe2==(u8)c && 0x80==(u8)z[j+1] + && (0xa8==(u8)z[j+2] || 0xa9==(u8)z[j+2])) + || (c=='x' && jsonIs2Hex(&z[j+1])) ){ + jnFlags |= (JNODE_ESCAPE|JNODE_JSON5); + pParse->hasNonstd = 1; + }else if( c=='\r' ){ + if( z[j+1]=='\n' ) j++; + jnFlags |= (JNODE_ESCAPE|JNODE_JSON5); + pParse->hasNonstd = 1; }else{ + pParse->iErr = j; return -1; } - }else if( c=='"' ){ + }else if( c==cDelim ){ break; } j++; } - jsonParseAddNode(pParse, JSON_STRING, j+1-i, &z[i]); - if( !pParse->oom ) pParse->aNode[pParse->nNode-1].jnFlags = jnFlags; + jsonParseAddNode(pParse, JSON_STRING | (jnFlags<<8), j+1-i, &z[i]); return j+1; - }else if( c=='n' - && strncmp(z+i,"null",4)==0 - && !sqlite3Isalnum(z[i+4]) ){ - jsonParseAddNode(pParse, JSON_NULL, 0, 0); - return i+4; - }else if( c=='t' - && strncmp(z+i,"true",4)==0 - && !sqlite3Isalnum(z[i+4]) ){ - jsonParseAddNode(pParse, JSON_TRUE, 0, 0); - return i+4; - }else if( c=='f' - && strncmp(z+i,"false",5)==0 - && !sqlite3Isalnum(z[i+5]) ){ - jsonParseAddNode(pParse, JSON_FALSE, 0, 0); - return i+5; - }else if( c=='-' || (c>='0' && c<='9') ){ + } + case 't': { + if( strncmp(z+i,"true",4)==0 && !sqlite3Isalnum(z[i+4]) ){ + jsonParseAddNode(pParse, JSON_TRUE, 0, 0); + return i+4; + } + pParse->iErr = i; + return -1; + } + case 'f': { + if( strncmp(z+i,"false",5)==0 && !sqlite3Isalnum(z[i+5]) ){ + jsonParseAddNode(pParse, JSON_FALSE, 0, 0); + return i+5; + } + pParse->iErr = i; + return -1; + } + case '+': { + u8 seenDP, seenE, jnFlags; + pParse->hasNonstd = 1; + jnFlags = JNODE_JSON5; + goto parse_number; + case '.': + if( sqlite3Isdigit(z[i+1]) ){ + pParse->hasNonstd = 1; + jnFlags = JNODE_JSON5; + seenE = 0; + seenDP = JSON_REAL; + goto parse_number_2; + } + pParse->iErr = i; + return -1; + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': /* Parse number */ - u8 seenDP = 0; - u8 seenE = 0; + jnFlags = 0; + parse_number: + seenDP = JSON_INT; + seenE = 0; assert( '-' < '0' ); + assert( '+' < '0' ); + assert( '.' < '0' ); + c = z[i]; + if( c<='0' ){ - j = c=='-' ? 
i+1 : i; - if( z[j]=='0' && z[j+1]>='0' && z[j+1]<='9' ) return -1; + if( c=='0' ){ + if( (z[i+1]=='x' || z[i+1]=='X') && sqlite3Isxdigit(z[i+2]) ){ + assert( seenDP==JSON_INT ); + pParse->hasNonstd = 1; + jnFlags |= JNODE_JSON5; + for(j=i+3; sqlite3Isxdigit(z[j]); j++){} + goto parse_number_finish; + }else if( sqlite3Isdigit(z[i+1]) ){ + pParse->iErr = i+1; + return -1; + } + }else{ + if( !sqlite3Isdigit(z[i+1]) ){ + /* JSON5 allows for "+Infinity" and "-Infinity" using exactly + ** that case. SQLite also allows these in any case and it allows + ** "+inf" and "-inf". */ + if( (z[i+1]=='I' || z[i+1]=='i') + && sqlite3StrNICmp(&z[i+1], "inf",3)==0 + ){ + pParse->hasNonstd = 1; + if( z[i]=='-' ){ + jsonParseAddNode(pParse, JSON_REAL, 8, "-9.0e999"); + }else{ + jsonParseAddNode(pParse, JSON_REAL, 7, "9.0e999"); + } + return i + (sqlite3StrNICmp(&z[i+4],"inity",5)==0 ? 9 : 4); + } + if( z[i+1]=='.' ){ + pParse->hasNonstd = 1; + jnFlags |= JNODE_JSON5; + goto parse_number_2; + } + pParse->iErr = i; + return -1; + } + if( z[i+1]=='0' ){ + if( sqlite3Isdigit(z[i+2]) ){ + pParse->iErr = i+1; + return -1; + }else if( (z[i+2]=='x' || z[i+2]=='X') && sqlite3Isxdigit(z[i+3]) ){ + pParse->hasNonstd = 1; + jnFlags |= JNODE_JSON5; + for(j=i+4; sqlite3Isxdigit(z[j]); j++){} + goto parse_number_finish; + } + } + } } - j = i+1; - for(;; j++){ + parse_number_2: + for(j=i+1;; j++){ c = z[j]; - if( c>='0' && c<='9' ) continue; + if( sqlite3Isdigit(c) ) continue; if( c=='.' ){ - if( z[j-1]=='-' ) return -1; - if( seenDP ) return -1; - seenDP = 1; + if( seenDP==JSON_REAL ){ + pParse->iErr = j; + return -1; + } + seenDP = JSON_REAL; continue; } if( c=='e' || c=='E' ){ - if( z[j-1]<'0' ) return -1; - if( seenE ) return -1; - seenDP = seenE = 1; + if( z[j-1]<'0' ){ + if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){ + pParse->hasNonstd = 1; + jnFlags |= JNODE_JSON5; + }else{ + pParse->iErr = j; + return -1; + } + } + if( seenE ){ + pParse->iErr = j; + return -1; + } + seenDP = JSON_REAL; + seenE = 1; c = z[j+1]; if( c=='+' || c=='-' ){ j++; c = z[j+1]; } - if( c<'0' || c>'9' ) return -1; + if( c<'0' || c>'9' ){ + pParse->iErr = j; + return -1; + } continue; } break; } - if( z[j-1]<'0' ) return -1; - jsonParseAddNode(pParse, seenDP ? JSON_REAL : JSON_INT, - j - i, &z[i]); + if( z[j-1]<'0' ){ + if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){ + pParse->hasNonstd = 1; + jnFlags |= JNODE_JSON5; + }else{ + pParse->iErr = j; + return -1; + } + } + parse_number_finish: + jsonParseAddNode(pParse, seenDP | (jnFlags<<8), j - i, &z[i]); return j; - }else if( c=='}' ){ + } + case '}': { + pParse->iErr = i; return -2; /* End of {...} */ - }else if( c==']' ){ + } + case ']': { + pParse->iErr = i; return -3; /* End of [...] 
*/ - }else if( c==0 ){ + } + case ',': { + pParse->iErr = i; + return -4; /* List separator */ + } + case ':': { + pParse->iErr = i; + return -5; /* Object label/value separator */ + } + case 0: { return 0; /* End of file */ - }else{ + } + case 0x09: + case 0x0a: + case 0x0d: + case 0x20: { + do{ + i++; + }while( fast_isspace(z[i]) ); + goto json_parse_restart; + } + case 0x0b: + case 0x0c: + case '/': + case 0xc2: + case 0xe1: + case 0xe2: + case 0xe3: + case 0xef: { + j = json5Whitespace(&z[i]); + if( j>0 ){ + i += j; + pParse->hasNonstd = 1; + goto json_parse_restart; + } + pParse->iErr = i; + return -1; + } + case 'n': { + if( strncmp(z+i,"null",4)==0 && !sqlite3Isalnum(z[i+4]) ){ + jsonParseAddNode(pParse, JSON_NULL, 0, 0); + return i+4; + } + /* fall-through into the default case that checks for NaN */ + } + default: { + u32 k; + int nn; + c = z[i]; + for(k=0; khasNonstd = 1; + return i + nn; + } + pParse->iErr = i; return -1; /* Syntax error */ } + } /* End switch(z[i]) */ } /* @@ -199760,7 +201266,14 @@ static int jsonParse( if( i>0 ){ assert( pParse->iDepth==0 ); while( fast_isspace(zJson[i]) ) i++; - if( zJson[i] ) i = -1; + if( zJson[i] ){ + i += json5Whitespace(&zJson[i]); + if( zJson[i] ){ + jsonParseReset(pParse); + return 1; + } + pParse->hasNonstd = 1; + } } if( i<=0 ){ if( pCtx!=0 ){ @@ -199831,6 +201344,15 @@ static int jsonParseFindParents(JsonParse *pParse){ ** is no longer valid, parse the JSON again and return the new parse, ** and also register the new parse so that it will be available for ** future sqlite3_get_auxdata() calls. +** +** If an error occurs and pErrCtx!=0 then report the error on pErrCtx +** and return NULL. +** +** If an error occurs and pErrCtx==0 then return the Parse object with +** JsonParse.nErr non-zero. If the caller invokes this routine with +** pErrCtx==0 and it gets back a JsonParse with nErr!=0, then the caller +** is responsible for invoking jsonParseFree() on the returned value. +** But the caller may invoke jsonParseFree() *only* if pParse->nErr!=0. */ static JsonParse *jsonParseCached( sqlite3_context *pCtx, @@ -199880,6 +201402,10 @@ static JsonParse *jsonParseCached( p->zJson = (char*)&p[1]; memcpy((char*)p->zJson, zJson, nJson+1); if( jsonParse(p, pErrCtx, p->zJson) ){ + if( pErrCtx==0 ){ + p->nErr = 1; + return p; + } sqlite3_free(p); return 0; } @@ -199894,7 +201420,7 @@ static JsonParse *jsonParseCached( ** Compare the OBJECT label at pNode against zKey,nKey. Return true on ** a match. */ -static int jsonLabelCompare(JsonNode *pNode, const char *zKey, u32 nKey){ +static int jsonLabelCompare(const JsonNode *pNode, const char *zKey, u32 nKey){ assert( pNode->eU==1 ); if( pNode->jnFlags & JNODE_RAW ){ if( pNode->n!=nKey ) return 0; @@ -199904,6 +201430,15 @@ static int jsonLabelCompare(JsonNode *pNode, const char *zKey, u32 nKey){ return strncmp(pNode->u.zJContent+1, zKey, nKey)==0; } } +static int jsonSameLabel(const JsonNode *p1, const JsonNode *p2){ + if( p1->jnFlags & JNODE_RAW ){ + return jsonLabelCompare(p2, p1->u.zJContent, p1->n); + }else if( p2->jnFlags & JNODE_RAW ){ + return jsonLabelCompare(p1, p2->u.zJContent, p2->n); + }else{ + return p1->n==p2->n && strncmp(p1->u.zJContent,p2->u.zJContent,p1->n)==0; + } +} /* forward declaration */ static JsonNode *jsonLookupAppend(JsonParse*,const char*,int*,const char**); @@ -200374,7 +201909,7 @@ static void jsonExtractFunc( zPath = (const char*)sqlite3_value_text(argv[1]); if( zPath==0 ) return; if( flags & JSON_ABPATH ){ - if( zPath[0]!='$' ){ + if( zPath[0]!='$' || (zPath[1]!='.' 
&& zPath[1]!='[' && zPath[1]!=0) ){ /* The -> and ->> operators accept abbreviated PATH arguments. This ** is mostly for compatibility with PostgreSQL, but also for ** convenience. @@ -200465,12 +202000,10 @@ static JsonNode *jsonMergePatch( assert( pPatch[i].eU==1 ); nKey = pPatch[i].n; zKey = pPatch[i].u.zJContent; - assert( (pPatch[i].jnFlags & JNODE_RAW)==0 ); for(j=1; jn; j += jsonNodeSize(&pTarget[j+1])+1 ){ assert( pTarget[j].eType==JSON_STRING ); assert( pTarget[j].jnFlags & JNODE_LABEL ); - assert( (pPatch[i].jnFlags & JNODE_RAW)==0 ); - if( pTarget[j].n==nKey && strncmp(pTarget[j].u.zJContent,zKey,nKey)==0 ){ + if( jsonSameLabel(&pPatch[i], &pTarget[j]) ){ if( pTarget[j+1].jnFlags & (JNODE_REMOVE|JNODE_PATCH) ) break; if( pPatch[i+1].eType==JSON_NULL ){ pTarget[j+1].jnFlags |= JNODE_REMOVE; @@ -200757,8 +202290,8 @@ static void jsonTypeFunc( /* ** json_valid(JSON) ** -** Return 1 if JSON is a well-formed JSON string according to RFC-7159. -** Return 0 otherwise. +** Return 1 if JSON is a well-formed canonical JSON string according +** to RFC-7159. Return 0 otherwise. */ static void jsonValidFunc( sqlite3_context *ctx, @@ -200767,8 +202300,69 @@ static void jsonValidFunc( ){ JsonParse *p; /* The parse */ UNUSED_PARAMETER(argc); + if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; + p = jsonParseCached(ctx, argv, 0); + if( p==0 || p->oom ){ + sqlite3_result_error_nomem(ctx); + sqlite3_free(p); + }else{ + sqlite3_result_int(ctx, p->nErr==0 && p->hasNonstd==0); + if( p->nErr ) jsonParseFree(p); + } +} + +/* +** json_error_position(JSON) +** +** If the argument is not an interpretable JSON string, then return the 1-based +** character position at which the parser first recognized that the input +** was in error. The left-most character is 1. If the string is valid +** JSON, then return 0. +** +** Note that json_valid() is only true for strictly conforming canonical JSON. +** But this routine returns zero if the input contains extension. 
Thus: +** +** (1) If the input X is strictly conforming canonical JSON: +** +** json_valid(X) returns true +** json_error_position(X) returns 0 +** +** (2) If the input X is JSON but it includes extension (such as JSON5) that +** are not part of RFC-8259: +** +** json_valid(X) returns false +** json_error_position(X) return 0 +** +** (3) If the input X cannot be interpreted as JSON even taking extensions +** into account: +** +** json_valid(X) return false +** json_error_position(X) returns 1 or more +*/ +static void jsonErrorFunc( + sqlite3_context *ctx, + int argc, + sqlite3_value **argv +){ + JsonParse *p; /* The parse */ + UNUSED_PARAMETER(argc); + if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; p = jsonParseCached(ctx, argv, 0); - sqlite3_result_int(ctx, p!=0); + if( p==0 || p->oom ){ + sqlite3_result_error_nomem(ctx); + sqlite3_free(p); + }else if( p->nErr==0 ){ + sqlite3_result_int(ctx, 0); + }else{ + int n = 1; + u32 i; + const char *z = p->zJson; + for(i=0; iiErr && ALWAYS(z[i]); i++){ + if( (z[i]&0xc0)!=0x80 ) n++; + } + sqlite3_result_int(ctx, n); + jsonParseFree(p); + } } @@ -201112,14 +202706,16 @@ static void jsonAppendObjectPathElement( assert( pNode->eU==1 ); z = pNode->u.zJContent; nn = pNode->n; - assert( nn>=2 ); - assert( z[0]=='"' ); - assert( z[nn-1]=='"' ); - if( nn>2 && sqlite3Isalpha(z[1]) ){ - for(jj=2; jjjnFlags & JNODE_RAW)==0 ){ + assert( nn>=2 ); + assert( z[0]=='"' || z[0]=='\'' ); + assert( z[nn-1]=='"' || z[0]=='\'' ); + if( nn>2 && sqlite3Isalpha(z[1]) ){ + for(jj=2; jj, 2, JSON_JSON, jsonExtractFunc), JFUNCTION(->>, 2, JSON_SQL, jsonExtractFunc), @@ -202003,16 +203600,17 @@ struct RtreeMatchArg { ** at run-time. */ #ifndef SQLITE_BYTEORDER -#if defined(i386) || defined(__i386__) || defined(_M_IX86) || \ - defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \ - defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \ - defined(__arm__) -# define SQLITE_BYTEORDER 1234 -#elif defined(sparc) || defined(__ppc__) -# define SQLITE_BYTEORDER 4321 -#else -# define SQLITE_BYTEORDER 0 /* 0 means "unknown at compile-time" */ -#endif +# if defined(i386) || defined(__i386__) || defined(_M_IX86) || \ + defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \ + defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \ + defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64) +# define SQLITE_BYTEORDER 1234 +# elif defined(sparc) || defined(__ppc__) || \ + defined(__ARMEB__) || defined(__AARCH64EB__) +# define SQLITE_BYTEORDER 4321 +# else +# define SQLITE_BYTEORDER 0 +# endif #endif @@ -212557,6 +214155,11 @@ static void rbuCheckpointFrame(sqlite3rbu *p, RbuFrame *pFrame){ p->rc = pDb->pMethods->xWrite(pDb, p->aBuf, p->pgsz, iOff); } +/* +** This value is copied from the definition of ZIPVFS_CTRL_FILE_POINTER +** in zipvfs.h. +*/ +#define RBU_ZIPVFS_CTRL_FILE_POINTER 230439 /* ** Take an EXCLUSIVE lock on the database file. 
Return SQLITE_OK if @@ -212565,9 +214168,20 @@ static void rbuCheckpointFrame(sqlite3rbu *p, RbuFrame *pFrame){ static int rbuLockDatabase(sqlite3 *db){ int rc = SQLITE_OK; sqlite3_file *fd = 0; - sqlite3_file_control(db, "main", SQLITE_FCNTL_FILE_POINTER, &fd); - if( fd->pMethods ){ + sqlite3_file_control(db, "main", RBU_ZIPVFS_CTRL_FILE_POINTER, &fd); + if( fd ){ + sqlite3_file_control(db, "main", SQLITE_FCNTL_FILE_POINTER, &fd); + rc = fd->pMethods->xLock(fd, SQLITE_LOCK_SHARED); + if( rc==SQLITE_OK ){ + rc = fd->pMethods->xUnlock(fd, SQLITE_LOCK_NONE); + } + sqlite3_file_control(db, "main", RBU_ZIPVFS_CTRL_FILE_POINTER, &fd); + }else{ + sqlite3_file_control(db, "main", SQLITE_FCNTL_FILE_POINTER, &fd); + } + + if( rc==SQLITE_OK && fd->pMethods ){ rc = fd->pMethods->xLock(fd, SQLITE_LOCK_SHARED); if( rc==SQLITE_OK ){ rc = fd->pMethods->xLock(fd, SQLITE_LOCK_EXCLUSIVE); @@ -215804,6 +217418,7 @@ static int dbpageConnect( (void)pzErr; sqlite3_vtab_config(db, SQLITE_VTAB_DIRECTONLY); + sqlite3_vtab_config(db, SQLITE_VTAB_USES_ALL_SCHEMAS); rc = sqlite3_declare_vtab(db, "CREATE TABLE x(pgno INTEGER PRIMARY KEY, data BLOB, schema HIDDEN)"); if( rc==SQLITE_OK ){ @@ -215887,7 +217502,6 @@ static int dbpageBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ ){ pIdxInfo->orderByConsumed = 1; } - sqlite3VtabUsesAllSchemas(pIdxInfo); return SQLITE_OK; } @@ -216188,6 +217802,8 @@ typedef struct SessionInput SessionInput; # endif #endif +#define SESSIONS_ROWID "_rowid_" + static int sessions_strm_chunk_size = SESSIONS_STRM_CHUNK_SIZE; typedef struct SessionHook SessionHook; @@ -216209,6 +217825,7 @@ struct sqlite3_session { int bEnable; /* True if currently recording */ int bIndirect; /* True if all changes are indirect */ int bAutoAttach; /* True to auto-attach tables */ + int bImplicitPK; /* True to handle tables with implicit PK */ int rc; /* Non-zero if an error has occurred */ void *pFilterCtx; /* First argument to pass to xTableFilter */ int (*xTableFilter)(void *pCtx, const char *zTab); @@ -216285,6 +217902,7 @@ struct SessionTable { char *zName; /* Local name of table */ int nCol; /* Number of columns in table zName */ int bStat1; /* True if this is sqlite_stat1 */ + int bRowid; /* True if this table uses rowid for PK */ const char **azCol; /* Column names */ u8 *abPK; /* Array of primary key flags */ int nEntry; /* Total number of entries in hash table */ @@ -216677,6 +218295,7 @@ static unsigned int sessionHashAppendType(unsigned int h, int eType){ */ static int sessionPreupdateHash( sqlite3_session *pSession, /* Session object that owns pTab */ + i64 iRowid, SessionTable *pTab, /* Session table handle */ int bNew, /* True to hash the new.* PK */ int *piHash, /* OUT: Hash value */ @@ -216685,48 +218304,53 @@ static int sessionPreupdateHash( unsigned int h = 0; /* Hash value to return */ int i; /* Used to iterate through columns */ - assert( *pbNullPK==0 ); - assert( pTab->nCol==pSession->hook.xCount(pSession->hook.pCtx) ); - for(i=0; inCol; i++){ - if( pTab->abPK[i] ){ - int rc; - int eType; - sqlite3_value *pVal; - - if( bNew ){ - rc = pSession->hook.xNew(pSession->hook.pCtx, i, &pVal); - }else{ - rc = pSession->hook.xOld(pSession->hook.pCtx, i, &pVal); - } - if( rc!=SQLITE_OK ) return rc; + if( pTab->bRowid ){ + assert( pTab->nCol-1==pSession->hook.xCount(pSession->hook.pCtx) ); + h = sessionHashAppendI64(h, iRowid); + }else{ + assert( *pbNullPK==0 ); + assert( pTab->nCol==pSession->hook.xCount(pSession->hook.pCtx) ); + for(i=0; inCol; i++){ + if( pTab->abPK[i] ){ + int rc; + int 
eType; + sqlite3_value *pVal; - eType = sqlite3_value_type(pVal); - h = sessionHashAppendType(h, eType); - if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){ - i64 iVal; - if( eType==SQLITE_INTEGER ){ - iVal = sqlite3_value_int64(pVal); + if( bNew ){ + rc = pSession->hook.xNew(pSession->hook.pCtx, i, &pVal); }else{ - double rVal = sqlite3_value_double(pVal); - assert( sizeof(iVal)==8 && sizeof(rVal)==8 ); - memcpy(&iVal, &rVal, 8); + rc = pSession->hook.xOld(pSession->hook.pCtx, i, &pVal); } - h = sessionHashAppendI64(h, iVal); - }else if( eType==SQLITE_TEXT || eType==SQLITE_BLOB ){ - const u8 *z; - int n; - if( eType==SQLITE_TEXT ){ - z = (const u8 *)sqlite3_value_text(pVal); + if( rc!=SQLITE_OK ) return rc; + + eType = sqlite3_value_type(pVal); + h = sessionHashAppendType(h, eType); + if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){ + i64 iVal; + if( eType==SQLITE_INTEGER ){ + iVal = sqlite3_value_int64(pVal); + }else{ + double rVal = sqlite3_value_double(pVal); + assert( sizeof(iVal)==8 && sizeof(rVal)==8 ); + memcpy(&iVal, &rVal, 8); + } + h = sessionHashAppendI64(h, iVal); + }else if( eType==SQLITE_TEXT || eType==SQLITE_BLOB ){ + const u8 *z; + int n; + if( eType==SQLITE_TEXT ){ + z = (const u8 *)sqlite3_value_text(pVal); + }else{ + z = (const u8 *)sqlite3_value_blob(pVal); + } + n = sqlite3_value_bytes(pVal); + if( !z && (eType!=SQLITE_BLOB || n>0) ) return SQLITE_NOMEM; + h = sessionHashAppendBlob(h, n, z); }else{ - z = (const u8 *)sqlite3_value_blob(pVal); + assert( eType==SQLITE_NULL ); + assert( pTab->bStat1==0 || i!=1 ); + *pbNullPK = 1; } - n = sqlite3_value_bytes(pVal); - if( !z && (eType!=SQLITE_BLOB || n>0) ) return SQLITE_NOMEM; - h = sessionHashAppendBlob(h, n, z); - }else{ - assert( eType==SQLITE_NULL ); - assert( pTab->bStat1==0 || i!=1 ); - *pbNullPK = 1; } } } @@ -217009,6 +218633,7 @@ static int sessionMergeUpdate( */ static int sessionPreupdateEqual( sqlite3_session *pSession, /* Session object that owns SessionTable */ + i64 iRowid, /* Rowid value if pTab->bRowid */ SessionTable *pTab, /* Table associated with change */ SessionChange *pChange, /* Change to compare to */ int op /* Current pre-update operation */ @@ -217016,6 +218641,11 @@ static int sessionPreupdateEqual( int iCol; /* Used to iterate through columns */ u8 *a = pChange->aRecord; /* Cursor used to scan change record */ + if( pTab->bRowid ){ + if( a[0]!=SQLITE_INTEGER ) return 0; + return sessionGetI64(&a[1])==iRowid; + } + assert( op==SQLITE_INSERT || op==SQLITE_UPDATE || op==SQLITE_DELETE ); for(iCol=0; iColnCol; iCol++){ if( !pTab->abPK[iCol] ){ @@ -217160,7 +218790,8 @@ static int sessionTableInfo( int *pnCol, /* OUT: number of columns */ const char **pzTab, /* OUT: Copy of zThis */ const char ***pazCol, /* OUT: Array of column names for table */ - u8 **pabPK /* OUT: Array of booleans - true for PK col */ + u8 **pabPK, /* OUT: Array of booleans - true for PK col */ + int *pbRowid /* OUT: True if only PK is a rowid */ ){ char *zPragma; sqlite3_stmt *pStmt; @@ -217172,6 +218803,7 @@ static int sessionTableInfo( u8 *pAlloc = 0; char **azCol = 0; u8 *abPK = 0; + int bRowid = 0; /* Set to true to use rowid as PK */ assert( pazCol && pabPK ); @@ -217216,10 +218848,15 @@ static int sessionTableInfo( } nByte = nThis + 1; + bRowid = (pbRowid!=0); while( SQLITE_ROW==sqlite3_step(pStmt) ){ nByte += sqlite3_column_bytes(pStmt, 1); nDbCol++; + if( sqlite3_column_int(pStmt, 5) ) bRowid = 0; } + if( nDbCol==0 ) bRowid = 0; + nDbCol += bRowid; + nByte += strlen(SESSIONS_ROWID); rc = sqlite3_reset(pStmt); if( 
rc==SQLITE_OK ){ @@ -217241,6 +218878,14 @@ static int sessionTableInfo( } i = 0; + if( bRowid ){ + size_t nName = strlen(SESSIONS_ROWID); + memcpy(pAlloc, SESSIONS_ROWID, nName+1); + azCol[i] = (char*)pAlloc; + pAlloc += nName+1; + abPK[i] = 1; + i++; + } while( SQLITE_ROW==sqlite3_step(pStmt) ){ int nName = sqlite3_column_bytes(pStmt, 1); const unsigned char *zName = sqlite3_column_text(pStmt, 1); @@ -217252,7 +218897,6 @@ static int sessionTableInfo( i++; } rc = sqlite3_reset(pStmt); - } /* If successful, populate the output variables. Otherwise, zero them and @@ -217269,6 +218913,7 @@ static int sessionTableInfo( if( pzTab ) *pzTab = 0; sessionFree(pSession, azCol); } + if( pbRowid ) *pbRowid = bRowid; sqlite3_finalize(pStmt); return rc; } @@ -217290,7 +218935,8 @@ static int sessionInitTable(sqlite3_session *pSession, SessionTable *pTab){ u8 *abPK; assert( pTab->azCol==0 || pTab->abPK==0 ); pSession->rc = sessionTableInfo(pSession, pSession->db, pSession->zDb, - pTab->zName, &pTab->nCol, 0, &pTab->azCol, &abPK + pTab->zName, &pTab->nCol, 0, &pTab->azCol, &abPK, + (pSession->bImplicitPK ? &pTab->bRowid : 0) ); if( pSession->rc==SQLITE_OK ){ int i; @@ -217362,6 +219008,7 @@ static int sessionUpdateMaxSize( ){ i64 nNew = 2; if( pC->op==SQLITE_INSERT ){ + if( pTab->bRowid ) nNew += 9; if( op!=SQLITE_DELETE ){ int ii; for(ii=0; iinCol; ii++){ @@ -217378,12 +219025,16 @@ static int sessionUpdateMaxSize( }else{ int ii; u8 *pCsr = pC->aRecord; - for(ii=0; iinCol; ii++){ + if( pTab->bRowid ){ + nNew += 9 + 1; + pCsr += 9; + } + for(ii=pTab->bRowid; iinCol; ii++){ int bChanged = 1; int nOld = 0; int eType; sqlite3_value *p = 0; - pSession->hook.xNew(pSession->hook.pCtx, ii, &p); + pSession->hook.xNew(pSession->hook.pCtx, ii-pTab->bRowid, &p); if( p==0 ){ return SQLITE_NOMEM; } @@ -217462,6 +219113,7 @@ static int sessionUpdateMaxSize( */ static void sessionPreupdateOneChange( int op, /* One of SQLITE_UPDATE, INSERT, DELETE */ + i64 iRowid, sqlite3_session *pSession, /* Session object pTab is attached to */ SessionTable *pTab /* Table that change applies to */ ){ @@ -217477,7 +219129,7 @@ static void sessionPreupdateOneChange( /* Check the number of columns in this xPreUpdate call matches the ** number of columns in the table. */ - if( pTab->nCol!=pSession->hook.xCount(pSession->hook.pCtx) ){ + if( (pTab->nCol-pTab->bRowid)!=pSession->hook.xCount(pSession->hook.pCtx) ){ pSession->rc = SQLITE_SCHEMA; return; } @@ -217510,14 +219162,16 @@ static void sessionPreupdateOneChange( /* Calculate the hash-key for this change. If the primary key of the row ** includes a NULL value, exit early. Such changes are ignored by the ** session module. */ - rc = sessionPreupdateHash(pSession, pTab, op==SQLITE_INSERT, &iHash, &bNull); + rc = sessionPreupdateHash( + pSession, iRowid, pTab, op==SQLITE_INSERT, &iHash, &bNull + ); if( rc!=SQLITE_OK ) goto error_out; if( bNull==0 ){ /* Search the hash table for an existing record for this row. 
*/ SessionChange *pC; for(pC=pTab->apChange[iHash]; pC; pC=pC->pNext){ - if( sessionPreupdateEqual(pSession, pTab, pC, op) ) break; + if( sessionPreupdateEqual(pSession, iRowid, pTab, pC, op) ) break; } if( pC==0 ){ @@ -217532,7 +219186,7 @@ static void sessionPreupdateOneChange( /* Figure out how large an allocation is required */ nByte = sizeof(SessionChange); - for(i=0; inCol; i++){ + for(i=0; i<(pTab->nCol-pTab->bRowid); i++){ sqlite3_value *p = 0; if( op!=SQLITE_INSERT ){ TESTONLY(int trc = ) pSession->hook.xOld(pSession->hook.pCtx, i, &p); @@ -217547,6 +219201,9 @@ static void sessionPreupdateOneChange( rc = sessionSerializeValue(0, p, &nByte); if( rc!=SQLITE_OK ) goto error_out; } + if( pTab->bRowid ){ + nByte += 9; /* Size of rowid field - an integer */ + } /* Allocate the change object */ pC = (SessionChange *)sessionMalloc64(pSession, nByte); @@ -217563,7 +219220,12 @@ static void sessionPreupdateOneChange( ** required values and encodings have already been cached in memory. ** It is not possible for an OOM to occur in this block. */ nByte = 0; - for(i=0; inCol; i++){ + if( pTab->bRowid ){ + pC->aRecord[0] = SQLITE_INTEGER; + sessionPutI64(&pC->aRecord[1], iRowid); + nByte = 9; + } + for(i=0; i<(pTab->nCol-pTab->bRowid); i++){ sqlite3_value *p = 0; if( op!=SQLITE_INSERT ){ pSession->hook.xOld(pSession->hook.pCtx, i, &p); @@ -217678,9 +219340,10 @@ static void xPreUpdate( pSession->rc = sessionFindTable(pSession, zName, &pTab); if( pTab ){ assert( pSession->rc==SQLITE_OK ); - sessionPreupdateOneChange(op, pSession, pTab); + assert( op==SQLITE_UPDATE || iKey1==iKey2 ); + sessionPreupdateOneChange(op, iKey1, pSession, pTab); if( op==SQLITE_UPDATE ){ - sessionPreupdateOneChange(SQLITE_INSERT, pSession, pTab); + sessionPreupdateOneChange(SQLITE_INSERT, iKey2, pSession, pTab); } } } @@ -217719,6 +219382,7 @@ static void sessionPreupdateHooks( typedef struct SessionDiffCtx SessionDiffCtx; struct SessionDiffCtx { sqlite3_stmt *pStmt; + int bRowid; int nOldOff; }; @@ -217727,17 +219391,17 @@ struct SessionDiffCtx { */ static int sessionDiffOld(void *pCtx, int iVal, sqlite3_value **ppVal){ SessionDiffCtx *p = (SessionDiffCtx*)pCtx; - *ppVal = sqlite3_column_value(p->pStmt, iVal+p->nOldOff); + *ppVal = sqlite3_column_value(p->pStmt, iVal+p->nOldOff+p->bRowid); return SQLITE_OK; } static int sessionDiffNew(void *pCtx, int iVal, sqlite3_value **ppVal){ SessionDiffCtx *p = (SessionDiffCtx*)pCtx; - *ppVal = sqlite3_column_value(p->pStmt, iVal); + *ppVal = sqlite3_column_value(p->pStmt, iVal+p->bRowid); return SQLITE_OK; } static int sessionDiffCount(void *pCtx){ SessionDiffCtx *p = (SessionDiffCtx*)pCtx; - return p->nOldOff ? p->nOldOff : sqlite3_column_count(p->pStmt); + return (p->nOldOff ? p->nOldOff : sqlite3_column_count(p->pStmt)) - p->bRowid; } static int sessionDiffDepth(void *pCtx){ (void)pCtx; @@ -217816,14 +219480,16 @@ static char *sessionExprCompareOther( static char *sessionSelectFindNew( const char *zDb1, /* Pick rows in this db only */ const char *zDb2, /* But not in this one */ + int bRowid, const char *zTbl, /* Table name */ const char *zExpr ){ + const char *zSel = (bRowid ? 
SESSIONS_ROWID ", *" : "*"); char *zRet = sqlite3_mprintf( - "SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS (" + "SELECT %s FROM \"%w\".\"%w\" WHERE NOT EXISTS (" " SELECT 1 FROM \"%w\".\"%w\" WHERE %s" ")", - zDb1, zTbl, zDb2, zTbl, zExpr + zSel, zDb1, zTbl, zDb2, zTbl, zExpr ); return zRet; } @@ -217837,7 +219503,9 @@ static int sessionDiffFindNew( char *zExpr ){ int rc = SQLITE_OK; - char *zStmt = sessionSelectFindNew(zDb1, zDb2, pTab->zName,zExpr); + char *zStmt = sessionSelectFindNew( + zDb1, zDb2, pTab->bRowid, pTab->zName, zExpr + ); if( zStmt==0 ){ rc = SQLITE_NOMEM; @@ -217848,8 +219516,10 @@ static int sessionDiffFindNew( SessionDiffCtx *pDiffCtx = (SessionDiffCtx*)pSession->hook.pCtx; pDiffCtx->pStmt = pStmt; pDiffCtx->nOldOff = 0; + pDiffCtx->bRowid = pTab->bRowid; while( SQLITE_ROW==sqlite3_step(pStmt) ){ - sessionPreupdateOneChange(op, pSession, pTab); + i64 iRowid = (pTab->bRowid ? sqlite3_column_int64(pStmt, 0) : 0); + sessionPreupdateOneChange(op, iRowid, pSession, pTab); } rc = sqlite3_finalize(pStmt); } @@ -217859,6 +219529,27 @@ static int sessionDiffFindNew( return rc; } +/* +** Return a comma-separated list of the fully-qualified (with both database +** and table name) column names from table pTab. e.g. +** +** "main"."t1"."a", "main"."t1"."b", "main"."t1"."c" +*/ +static char *sessionAllCols( + const char *zDb, + SessionTable *pTab +){ + int ii; + char *zRet = 0; + for(ii=0; iinCol; ii++){ + zRet = sqlite3_mprintf("%z%s\"%w\".\"%w\".\"%w\"", + zRet, (zRet ? ", " : ""), zDb, pTab->zName, pTab->azCol[ii] + ); + if( !zRet ) break; + } + return zRet; +} + static int sessionDiffFindModified( sqlite3_session *pSession, SessionTable *pTab, @@ -217873,11 +219564,13 @@ static int sessionDiffFindModified( if( zExpr2==0 ){ rc = SQLITE_NOMEM; }else{ + char *z1 = sessionAllCols(pSession->zDb, pTab); + char *z2 = sessionAllCols(zFrom, pTab); char *zStmt = sqlite3_mprintf( - "SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)", - pSession->zDb, pTab->zName, zFrom, pTab->zName, zExpr, zExpr2 + "SELECT %s,%s FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)", + z1, z2, pSession->zDb, pTab->zName, zFrom, pTab->zName, zExpr, zExpr2 ); - if( zStmt==0 ){ + if( zStmt==0 || z1==0 || z2==0 ){ rc = SQLITE_NOMEM; }else{ sqlite3_stmt *pStmt; @@ -217888,12 +219581,15 @@ static int sessionDiffFindModified( pDiffCtx->pStmt = pStmt; pDiffCtx->nOldOff = pTab->nCol; while( SQLITE_ROW==sqlite3_step(pStmt) ){ - sessionPreupdateOneChange(SQLITE_UPDATE, pSession, pTab); + i64 iRowid = (pTab->bRowid ? sqlite3_column_int64(pStmt, 0) : 0); + sessionPreupdateOneChange(SQLITE_UPDATE, iRowid, pSession, pTab); } rc = sqlite3_finalize(pStmt); } - sqlite3_free(zStmt); } + sqlite3_free(zStmt); + sqlite3_free(z1); + sqlite3_free(z2); } return rc; @@ -217932,9 +219628,12 @@ SQLITE_API int sqlite3session_diff( int bHasPk = 0; int bMismatch = 0; int nCol; /* Columns in zFrom.zTbl */ + int bRowid = 0; u8 *abPK; const char **azCol = 0; - rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, &abPK); + rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, &abPK, + pSession->bImplicitPK ? 
&bRowid : 0 + ); if( rc==SQLITE_OK ){ if( pTo->nCol!=nCol ){ bMismatch = 1; @@ -218276,9 +219975,10 @@ static void sessionAppendStr( int *pRc ){ int nStr = sqlite3Strlen30(zStr); - if( 0==sessionBufferGrow(p, nStr, pRc) ){ + if( 0==sessionBufferGrow(p, nStr+1, pRc) ){ memcpy(&p->aBuf[p->nBuf], zStr, nStr); p->nBuf += nStr; + p->aBuf[p->nBuf] = 0x00; } } @@ -218300,6 +220000,27 @@ static void sessionAppendInteger( sessionAppendStr(p, aBuf, pRc); } +static void sessionAppendPrintf( + SessionBuffer *p, /* Buffer to append to */ + int *pRc, + const char *zFmt, + ... +){ + if( *pRc==SQLITE_OK ){ + char *zApp = 0; + va_list ap; + va_start(ap, zFmt); + zApp = sqlite3_vmprintf(zFmt, ap); + if( zApp==0 ){ + *pRc = SQLITE_NOMEM; + }else{ + sessionAppendStr(p, zApp, pRc); + } + va_end(ap); + sqlite3_free(zApp); + } +} + /* ** This function is a no-op if *pRc is other than SQLITE_OK when it is ** called. Otherwise, append the string zStr enclosed in quotes (") and @@ -218314,7 +220035,7 @@ static void sessionAppendIdent( const char *zStr, /* String to quote, escape and append */ int *pRc /* IN/OUT: Error code */ ){ - int nStr = sqlite3Strlen30(zStr)*2 + 2 + 1; + int nStr = sqlite3Strlen30(zStr)*2 + 2 + 2; if( 0==sessionBufferGrow(p, nStr, pRc) ){ char *zOut = (char *)&p->aBuf[p->nBuf]; const char *zIn = zStr; @@ -218325,6 +220046,7 @@ static void sessionAppendIdent( } *zOut++ = '"'; p->nBuf = (int)((u8 *)zOut - p->aBuf); + p->aBuf[p->nBuf] = 0x00; } } @@ -218460,7 +220182,7 @@ static int sessionAppendUpdate( /* If at least one field has been modified, this is not a no-op. */ if( bChanged ) bNoop = 0; - /* Add a field to the old.* record. This is omitted if this modules is + /* Add a field to the old.* record. This is omitted if this module is ** currently generating a patchset. */ if( bPatchset==0 ){ if( bChanged || abPK[i] ){ @@ -218549,12 +220271,20 @@ static int sessionAppendDelete( ** Formulate and prepare a SELECT statement to retrieve a row from table ** zTab in database zDb based on its primary key. i.e. ** -** SELECT * FROM zDb.zTab WHERE pk1 = ? AND pk2 = ? AND ... +** SELECT *, FROM zDb.zTab WHERE (pk1, pk2,...) IS (?1, ?2,...) +** +** where is: +** +** 1 AND (?A OR ?1 IS ) AND ... +** +** for each non-pk . */ static int sessionSelectStmt( sqlite3 *db, /* Database handle */ + int bIgnoreNoop, const char *zDb, /* Database name */ const char *zTab, /* Table name */ + int bRowid, int nCol, /* Number of columns in table */ const char **azCol, /* Names of table columns */ u8 *abPK, /* PRIMARY KEY array */ @@ -218562,8 +220292,50 @@ static int sessionSelectStmt( ){ int rc = SQLITE_OK; char *zSql = 0; + const char *zSep = ""; + const char *zCols = bRowid ? SESSIONS_ROWID ", *" : "*"; int nSql = -1; + int i; + + SessionBuffer nooptest = {0, 0, 0}; + SessionBuffer pkfield = {0, 0, 0}; + SessionBuffer pkvar = {0, 0, 0}; + + sessionAppendStr(&nooptest, ", 1", &rc); + if( 0==sqlite3_stricmp("sqlite_stat1", zTab) ){ + sessionAppendStr(&nooptest, " AND (?6 OR ?3 IS stat)", &rc); + sessionAppendStr(&pkfield, "tbl, idx", &rc); + sessionAppendStr(&pkvar, + "?1, (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)", &rc + ); + zCols = "tbl, ?2, stat"; + }else{ + for(i=0; izDb, zName, &nCol, 0,&azCol,&abPK); - if( !rc && (pTab->nCol!=nCol || memcmp(abPK, pTab->abPK, nCol)) ){ + rc = sessionTableInfo( + 0, db, pSession->zDb, zName, &nCol, 0, &azCol, &abPK, + (pSession->bImplicitPK ? 
&bRowid : 0) + ); + if( rc==SQLITE_OK && ( + pTab->nCol!=nCol + || pTab->bRowid!=bRowid + || memcmp(abPK, pTab->abPK, nCol) + )){ rc = SQLITE_SCHEMA; } @@ -218756,7 +220539,8 @@ static int sessionGenerateChangeset( /* Build and compile a statement to execute: */ if( rc==SQLITE_OK ){ rc = sessionSelectStmt( - db, pSession->zDb, zName, nCol, azCol, abPK, &pSel); + db, 0, pSession->zDb, zName, bRowid, nCol, azCol, abPK, &pSel + ); } nNoop = buf.nBuf; @@ -218839,7 +220623,7 @@ SQLITE_API int sqlite3session_changeset( int rc; if( pnChangeset==0 || ppChangeset==0 ) return SQLITE_MISUSE; - rc = sessionGenerateChangeset(pSession, 0, 0, 0, pnChangeset,ppChangeset); + rc = sessionGenerateChangeset(pSession, 0, 0, 0, pnChangeset, ppChangeset); assert( rc || pnChangeset==0 || pSession->bEnableSize==0 || *pnChangeset<=pSession->nMaxChangesetSize ); @@ -218957,6 +220741,19 @@ SQLITE_API int sqlite3session_object_config(sqlite3_session *pSession, int op, v break; } + case SQLITE_SESSION_OBJCONFIG_ROWID: { + int iArg = *(int*)pArg; + if( iArg>=0 ){ + if( pSession->pTable ){ + rc = SQLITE_MISUSE; + }else{ + pSession->bImplicitPK = (iArg!=0); + } + } + *(int*)pArg = pSession->bImplicitPK; + break; + } + default: rc = SQLITE_MISUSE; } @@ -219945,6 +221742,8 @@ struct SessionApplyCtx { SessionBuffer rebase; /* Rebase information (if any) here */ u8 bRebaseStarted; /* If table header is already in rebase */ u8 bRebase; /* True to collect rebase information */ + u8 bIgnoreNoop; /* True to ignore no-op conflicts */ + int bRowid; }; /* Number of prepared UPDATE statements to cache. */ @@ -220195,8 +221994,10 @@ static int sessionSelectRow( const char *zTab, /* Table name */ SessionApplyCtx *p /* Session changeset-apply context */ ){ - return sessionSelectStmt( - db, "main", zTab, p->nCol, p->azCol, p->abPK, &p->pSelect); + /* TODO */ + return sessionSelectStmt(db, p->bIgnoreNoop, + "main", zTab, p->bRowid, p->nCol, p->azCol, p->abPK, &p->pSelect + ); } /* @@ -220355,20 +222156,33 @@ static int sessionBindRow( */ static int sessionSeekToRow( sqlite3_changeset_iter *pIter, /* Changeset iterator */ - u8 *abPK, /* Primary key flags array */ - sqlite3_stmt *pSelect /* SELECT statement from sessionSelectRow() */ + SessionApplyCtx *p ){ + sqlite3_stmt *pSelect = p->pSelect; int rc; /* Return code */ int nCol; /* Number of columns in table */ int op; /* Changset operation (SQLITE_UPDATE etc.) */ const char *zDummy; /* Unused */ + sqlite3_clear_bindings(pSelect); sqlite3changeset_op(pIter, &zDummy, &nCol, &op, 0); rc = sessionBindRow(pIter, op==SQLITE_INSERT ? sqlite3changeset_new : sqlite3changeset_old, - nCol, abPK, pSelect + nCol, p->abPK, pSelect ); + if( op!=SQLITE_DELETE && p->bIgnoreNoop ){ + int ii; + for(ii=0; rc==SQLITE_OK && iiabPK[ii]==0 ){ + sqlite3_value *pVal = 0; + sqlite3changeset_new(pIter, ii, &pVal); + sqlite3_bind_int(pSelect, ii+1+nCol, (pVal==0)); + if( pVal ) rc = sessionBindValue(pSelect, ii+1, pVal); + } + } + } + if( rc==SQLITE_OK ){ rc = sqlite3_step(pSelect); if( rc!=SQLITE_ROW ) rc = sqlite3_reset(pSelect); @@ -220483,16 +222297,22 @@ static int sessionConflictHandler( /* Bind the new.* PRIMARY KEY values to the SELECT statement. */ if( pbReplace ){ - rc = sessionSeekToRow(pIter, p->abPK, p->pSelect); + rc = sessionSeekToRow(pIter, p); }else{ rc = SQLITE_OK; } if( rc==SQLITE_ROW ){ /* There exists another row with the new.* primary key. 
*/ - pIter->pConflict = p->pSelect; - res = xConflict(pCtx, eType, pIter); - pIter->pConflict = 0; + if( p->bIgnoreNoop + && sqlite3_column_int(p->pSelect, sqlite3_column_count(p->pSelect)-1) + ){ + res = SQLITE_CHANGESET_OMIT; + }else{ + pIter->pConflict = p->pSelect; + res = xConflict(pCtx, eType, pIter); + pIter->pConflict = 0; + } rc = sqlite3_reset(p->pSelect); }else if( rc==SQLITE_OK ){ if( p->bDeferConstraints && eType==SQLITE_CHANGESET_CONFLICT ){ @@ -220600,7 +222420,7 @@ static int sessionApplyOneOp( sqlite3_step(p->pDelete); rc = sqlite3_reset(p->pDelete); - if( rc==SQLITE_OK && sqlite3_changes(p->db)==0 ){ + if( rc==SQLITE_OK && sqlite3_changes(p->db)==0 && p->bIgnoreNoop==0 ){ rc = sessionConflictHandler( SQLITE_CHANGESET_DATA, p, pIter, xConflict, pCtx, pbRetry ); @@ -220657,7 +222477,7 @@ static int sessionApplyOneOp( /* Check if there is a conflicting row. For sqlite_stat1, this needs ** to be done using a SELECT, as there is no PRIMARY KEY in the ** database schema to throw an exception if a duplicate is inserted. */ - rc = sessionSeekToRow(pIter, p->abPK, p->pSelect); + rc = sessionSeekToRow(pIter, p); if( rc==SQLITE_ROW ){ rc = SQLITE_CONSTRAINT; sqlite3_reset(p->pSelect); @@ -220834,6 +222654,7 @@ static int sessionChangesetApply( memset(&sApply, 0, sizeof(sApply)); sApply.bRebase = (ppRebase && pnRebase); sApply.bInvertConstraints = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); + sApply.bIgnoreNoop = !!(flags & SQLITE_CHANGESETAPPLY_IGNORENOOP); sqlite3_mutex_enter(sqlite3_db_mutex(db)); if( (flags & SQLITE_CHANGESETAPPLY_NOSAVEPOINT)==0 ){ rc = sqlite3_exec(db, "SAVEPOINT changeset_apply", 0, 0, 0); @@ -220871,6 +222692,7 @@ static int sessionChangesetApply( sApply.bStat1 = 0; sApply.bDeferConstraints = 1; sApply.bRebaseStarted = 0; + sApply.bRowid = 0; memset(&sApply.constraints, 0, sizeof(SessionBuffer)); /* If an xFilter() callback was specified, invoke it now. 
If the @@ -220890,8 +222712,8 @@ static int sessionChangesetApply( int i; sqlite3changeset_pk(pIter, &abPK, 0); - rc = sessionTableInfo(0, - db, "main", zNew, &sApply.nCol, &zTab, &sApply.azCol, &sApply.abPK + rc = sessionTableInfo(0, db, "main", zNew, + &sApply.nCol, &zTab, &sApply.azCol, &sApply.abPK, &sApply.bRowid ); if( rc!=SQLITE_OK ) break; for(i=0; iiPos++; - if( p->iRangeEnd>0 ){ + if( p->iRangeEnd>=0 ){ if( iPosiRangeStart || iPos>p->iRangeEnd ) return SQLITE_OK; if( p->iRangeStart && iPos==p->iRangeStart ) p->iOff = iStartOff; } @@ -225102,7 +226930,7 @@ static int fts5HighlightCb( } if( iPos==p->iter.iEnd ){ - if( p->iRangeEnd && p->iter.iStartiRangeStart ){ + if( p->iRangeEnd>=0 && p->iter.iStartiRangeStart ){ fts5HighlightAppend(&rc, p, p->zOpen, -1); } fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); @@ -225113,7 +226941,7 @@ static int fts5HighlightCb( } } - if( p->iRangeEnd>0 && iPos==p->iRangeEnd ){ + if( p->iRangeEnd>=0 && iPos==p->iRangeEnd ){ fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); p->iOff = iEndOff; if( iPos>=p->iter.iStart && iPositer.iEnd ){ @@ -225148,6 +226976,7 @@ static void fts5HighlightFunction( memset(&ctx, 0, sizeof(HighlightContext)); ctx.zOpen = (const char*)sqlite3_value_text(apVal[1]); ctx.zClose = (const char*)sqlite3_value_text(apVal[2]); + ctx.iRangeEnd = -1; rc = pApi->xColumnText(pFts, iCol, &ctx.zIn, &ctx.nIn); if( ctx.zIn ){ @@ -225333,6 +227162,7 @@ static void fts5SnippetFunction( iCol = sqlite3_value_int(apVal[0]); ctx.zOpen = fts5ValueToText(apVal[1]); ctx.zClose = fts5ValueToText(apVal[2]); + ctx.iRangeEnd = -1; zEllips = fts5ValueToText(apVal[3]); nToken = sqlite3_value_int(apVal[4]); @@ -226601,6 +228431,7 @@ static int sqlite3Fts5ConfigParse( rc = SQLITE_ERROR; } + assert( (pRet->abUnindexed && pRet->azCol) || rc!=SQLITE_OK ); for(i=3; rc==SQLITE_OK && ibSecureDelete = (bVal ? 1 : 0); + } }else{ *pbBadkey = 1; } @@ -226998,15 +228841,20 @@ static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){ rc = sqlite3_finalize(p); } - if( rc==SQLITE_OK && iVersion!=FTS5_CURRENT_VERSION ){ + if( rc==SQLITE_OK + && iVersion!=FTS5_CURRENT_VERSION + && iVersion!=FTS5_CURRENT_VERSION_SECUREDELETE + ){ rc = SQLITE_ERROR; if( pConfig->pzErrmsg ){ assert( 0==*pConfig->pzErrmsg ); - *pConfig->pzErrmsg = sqlite3_mprintf( - "invalid fts5 file format (found %d, expected %d) - run 'rebuild'", - iVersion, FTS5_CURRENT_VERSION + *pConfig->pzErrmsg = sqlite3_mprintf("invalid fts5 file format " + "(found %d, expected %d or %d) - run 'rebuild'", + iVersion, FTS5_CURRENT_VERSION, FTS5_CURRENT_VERSION_SECUREDELETE ); } + }else{ + pConfig->iVersion = iVersion; } if( rc==SQLITE_OK ){ @@ -227034,6 +228882,10 @@ static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){ /* #include "fts5Int.h" */ /* #include "fts5parse.h" */ +#ifndef SQLITE_FTS5_MAX_EXPR_DEPTH +# define SQLITE_FTS5_MAX_EXPR_DEPTH 256 +#endif + /* ** All token types in the generated fts5parse.h file are greater than 0. */ @@ -227074,11 +228926,17 @@ struct Fts5Expr { ** FTS5_NOT (nChild, apChild valid) ** FTS5_STRING (pNear valid) ** FTS5_TERM (pNear valid) +** +** iHeight: +** Distance from this node to furthest leaf. This is always 0 for nodes +** of type FTS5_STRING and FTS5_TERM. For all other nodes it is one +** greater than the largest child value. 
*/ struct Fts5ExprNode { int eType; /* Node type */ int bEof; /* True at EOF */ int bNomatch; /* True if entry is not a match */ + int iHeight; /* Distance to tree leaf nodes */ /* Next method for this node. */ int (*xNext)(Fts5Expr*, Fts5ExprNode*, int, i64); @@ -227148,6 +229006,31 @@ struct Fts5Parse { int bPhraseToAnd; /* Convert "a+b" to "a AND b" */ }; +/* +** Check that the Fts5ExprNode.iHeight variables are set correctly in +** the expression tree passed as the only argument. +*/ +#ifndef NDEBUG +static void assert_expr_depth_ok(int rc, Fts5ExprNode *p){ + if( rc==SQLITE_OK ){ + if( p->eType==FTS5_TERM || p->eType==FTS5_STRING || p->eType==0 ){ + assert( p->iHeight==0 ); + }else{ + int ii; + int iMaxChild = 0; + for(ii=0; iinChild; ii++){ + Fts5ExprNode *pChild = p->apChild[ii]; + iMaxChild = MAX(iMaxChild, pChild->iHeight); + assert_expr_depth_ok(SQLITE_OK, pChild); + } + assert( p->iHeight==iMaxChild+1 ); + } + } +} +#else +# define assert_expr_depth_ok(rc, p) +#endif + static void sqlite3Fts5ParseError(Fts5Parse *pParse, const char *zFmt, ...){ va_list ap; va_start(ap, zFmt); @@ -227262,6 +229145,8 @@ static int sqlite3Fts5ExprNew( }while( sParse.rc==SQLITE_OK && t!=FTS5_EOF ); sqlite3Fts5ParserFree(pEngine, fts5ParseFree); + assert_expr_depth_ok(sParse.rc, sParse.pExpr); + /* If the LHS of the MATCH expression was a user column, apply the ** implicit column-filter. */ if( iColnCol && sParse.pExpr && sParse.rc==SQLITE_OK ){ @@ -227424,7 +229309,7 @@ static int sqlite3Fts5ExprAnd(Fts5Expr **pp1, Fts5Expr *p2){ Fts5Parse sParse; memset(&sParse, 0, sizeof(sParse)); - if( *pp1 ){ + if( *pp1 && p2 ){ Fts5Expr *p1 = *pp1; int nPhrase = p1->nPhrase + p2->nPhrase; @@ -227449,7 +229334,7 @@ static int sqlite3Fts5ExprAnd(Fts5Expr **pp1, Fts5Expr *p2){ } sqlite3_free(p2->apExprPhrase); sqlite3_free(p2); - }else{ + }else if( p2 ){ *pp1 = p2; } @@ -229223,6 +231108,7 @@ static void fts5ExprAssignXNext(Fts5ExprNode *pNode){ } static void fts5ExprAddChildren(Fts5ExprNode *p, Fts5ExprNode *pSub){ + int ii = p->nChild; if( p->eType!=FTS5_NOT && pSub->eType==p->eType ){ int nByte = sizeof(Fts5ExprNode*) * pSub->nChild; memcpy(&p->apChild[p->nChild], pSub->apChild, nByte); @@ -229231,6 +231117,9 @@ static void fts5ExprAddChildren(Fts5ExprNode *p, Fts5ExprNode *pSub){ }else{ p->apChild[p->nChild++] = pSub; } + for( ; iinChild; ii++){ + p->iHeight = MAX(p->iHeight, p->apChild[ii]->iHeight + 1); + } } /* @@ -229261,6 +231150,7 @@ static Fts5ExprNode *fts5ParsePhraseToAnd( if( pRet ){ pRet->eType = FTS5_AND; pRet->nChild = nTerm; + pRet->iHeight = 1; fts5ExprAssignXNext(pRet); pParse->nPhrase--; for(ii=0; iiiHeight>SQLITE_FTS5_MAX_EXPR_DEPTH ){ + sqlite3Fts5ParseError(pParse, + "fts5 expression tree is too large (maximum depth %d)", + SQLITE_FTS5_MAX_EXPR_DEPTH + ); + sqlite3_free(pRet); + pRet = 0; + } } } } @@ -230966,6 +232864,8 @@ struct Fts5Index { sqlite3_stmt *pIdxSelect; int nRead; /* Total number of blocks read */ + sqlite3_stmt *pDeleteFromIdx; + sqlite3_stmt *pDataVersion; i64 iStructVersion; /* data_version when pStruct read */ Fts5Structure *pStruct; /* Current db structure (or NULL) */ @@ -231058,9 +232958,6 @@ struct Fts5CResult { ** iLeafOffset: ** Byte offset within the current leaf that is the first byte of the ** position list data (one byte passed the position-list size field). -** rowid field of the current entry. Usually this is the size field of the -** position list data. The exception is if the rowid for the current entry -** is the last thing on the leaf page. 
** ** pLeaf: ** Buffer containing current leaf page data. Set to NULL at EOF. @@ -231619,6 +233516,7 @@ static int fts5StructureDecode( rc = FTS5_CORRUPT; break; } + assert( pSeg!=0 ); i += fts5GetVarint32(&pData[i], pSeg->iSegid); i += fts5GetVarint32(&pData[i], pSeg->pgnoFirst); i += fts5GetVarint32(&pData[i], pSeg->pgnoLast); @@ -231649,6 +233547,7 @@ static int fts5StructureDecode( */ static void fts5StructureAddLevel(int *pRc, Fts5Structure **ppStruct){ fts5StructureMakeWritable(pRc, ppStruct); + assert( (ppStruct!=0 && (*ppStruct)!=0) || (*pRc)!=SQLITE_OK ); if( *pRc==SQLITE_OK ){ Fts5Structure *pStruct = *ppStruct; int nLevel = pStruct->nLevel; @@ -232107,42 +234006,25 @@ static int fts5DlidxLvlPrev(Fts5DlidxLvl *pLvl){ pLvl->bEof = 1; }else{ u8 *a = pLvl->pData->p; - i64 iVal; - int iLimit; - int ii; - int nZero = 0; - - /* Currently iOff points to the first byte of a varint. This block - ** decrements iOff until it points to the first byte of the previous - ** varint. Taking care not to read any memory locations that occur - ** before the buffer in memory. */ - iLimit = (iOff>9 ? iOff-9 : 0); - for(iOff--; iOff>iLimit; iOff--){ - if( (a[iOff-1] & 0x80)==0 ) break; - } - - fts5GetVarint(&a[iOff], (u64*)&iVal); - pLvl->iRowid -= iVal; - pLvl->iLeafPgno--; - - /* Skip backwards past any 0x00 varints. */ - for(ii=iOff-1; ii>=pLvl->iFirstOff && a[ii]==0x00; ii--){ - nZero++; - } - if( ii>=pLvl->iFirstOff && (a[ii] & 0x80) ){ - /* The byte immediately before the last 0x00 byte has the 0x80 bit - ** set. So the last 0x00 is only a varint 0 if there are 8 more 0x80 - ** bytes before a[ii]. */ - int bZero = 0; /* True if last 0x00 counts */ - if( (ii-8)>=pLvl->iFirstOff ){ - int j; - for(j=1; j<=8 && (a[ii-j] & 0x80); j++); - bZero = (j>8); + + pLvl->iOff = 0; + fts5DlidxLvlNext(pLvl); + while( 1 ){ + int nZero = 0; + int ii = pLvl->iOff; + u64 delta = 0; + + while( a[ii]==0 ){ + nZero++; + ii++; } - if( bZero==0 ) nZero--; + ii += sqlite3Fts5GetVarint(&a[ii], &delta); + + if( ii>=iOff ) break; + pLvl->iLeafPgno += nZero+1; + pLvl->iRowid += delta; + pLvl->iOff = ii; } - pLvl->iLeafPgno -= nZero; - pLvl->iOff = iOff - nZero; } return pLvl->bEof; @@ -232338,7 +234220,7 @@ static void fts5SegIterLoadRowid(Fts5Index *p, Fts5SegIter *pIter){ i64 iOff = pIter->iLeafOffset; ASSERT_SZLEAF_OK(pIter->pLeaf); - if( iOff>=pIter->pLeaf->szLeaf ){ + while( iOff>=pIter->pLeaf->szLeaf ){ fts5SegIterNextPage(p, pIter); if( pIter->pLeaf==0 ){ if( p->rc==SQLITE_OK ) p->rc = FTS5_CORRUPT; @@ -232437,10 +234319,12 @@ static void fts5SegIterInit( fts5SegIterSetNext(p, pIter); pIter->pSeg = pSeg; pIter->iLeafPgno = pSeg->pgnoFirst-1; - fts5SegIterNextPage(p, pIter); + do { + fts5SegIterNextPage(p, pIter); + }while( p->rc==SQLITE_OK && pIter->pLeaf && pIter->pLeaf->nn==4 ); } - if( p->rc==SQLITE_OK ){ + if( p->rc==SQLITE_OK && pIter->pLeaf ){ pIter->iLeafOffset = 4; assert( pIter->pLeaf!=0 ); assert_nc( pIter->pLeaf->nn>4 ); @@ -232634,7 +234518,7 @@ static void fts5SegIterNext_None( iOff = pIter->iLeafOffset; /* Next entry is on the next page */ - if( pIter->pSeg && iOff>=pIter->pLeaf->szLeaf ){ + while( pIter->pSeg && iOff>=pIter->pLeaf->szLeaf ){ fts5SegIterNextPage(p, pIter); if( p->rc || pIter->pLeaf==0 ) return; pIter->iRowid = 0; @@ -232827,7 +234711,7 @@ static void fts5SegIterReverse(Fts5Index *p, Fts5SegIter *pIter){ Fts5Data *pLast = 0; int pgnoLast = 0; - if( pDlidx ){ + if( pDlidx && p->pConfig->iVersion==FTS5_CURRENT_VERSION ){ int iSegid = pIter->pSeg->iSegid; pgnoLast = fts5DlidxIterPgno(pDlidx); 
pLast = fts5LeafRead(p, FTS5_SEGMENT_ROWID(iSegid, pgnoLast)); @@ -233388,7 +235272,8 @@ static int fts5MultiIterDoCompare(Fts5Iter *pIter, int iOut){ /* ** Move the seg-iter so that it points to the first rowid on page iLeafPgno. -** It is an error if leaf iLeafPgno does not exist or contains no rowids. +** It is an error if leaf iLeafPgno does not exist. Unless the db is +** a 'secure-delete' db, if it contains no rowids then this is also an error. */ static void fts5SegIterGotoPage( Fts5Index *p, /* FTS5 backend object */ @@ -233403,21 +235288,23 @@ static void fts5SegIterGotoPage( fts5DataRelease(pIter->pNextLeaf); pIter->pNextLeaf = 0; pIter->iLeafPgno = iLeafPgno-1; - fts5SegIterNextPage(p, pIter); - assert( p->rc!=SQLITE_OK || pIter->iLeafPgno==iLeafPgno ); - if( p->rc==SQLITE_OK && ALWAYS(pIter->pLeaf!=0) ){ + while( p->rc==SQLITE_OK ){ int iOff; - u8 *a = pIter->pLeaf->p; - int n = pIter->pLeaf->szLeaf; - + fts5SegIterNextPage(p, pIter); + if( pIter->pLeaf==0 ) break; iOff = fts5LeafFirstRowidOff(pIter->pLeaf); - if( iOff<4 || iOff>=n ){ - p->rc = FTS5_CORRUPT; - }else{ - iOff += fts5GetVarint(&a[iOff], (u64*)&pIter->iRowid); - pIter->iLeafOffset = iOff; - fts5SegIterLoadNPos(p, pIter); + if( iOff>0 ){ + u8 *a = pIter->pLeaf->p; + int n = pIter->pLeaf->szLeaf; + if( iOff<4 || iOff>=n ){ + p->rc = FTS5_CORRUPT; + }else{ + iOff += fts5GetVarint(&a[iOff], (u64*)&pIter->iRowid); + pIter->iLeafOffset = iOff; + fts5SegIterLoadNPos(p, pIter); + } + break; } } } @@ -234132,7 +236019,7 @@ static void fts5MultiIterNew( if( iLevel<0 ){ assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) ); nSeg = pStruct->nSegment; - nSeg += (p->pHash ? 1 : 0); + nSeg += (p->pHash && 0==(flags & FTS5INDEX_QUERY_SKIPHASH)); }else{ nSeg = MIN(pStruct->aLevel[iLevel].nSeg, nSegment); } @@ -234153,7 +236040,7 @@ static void fts5MultiIterNew( if( p->rc==SQLITE_OK ){ if( iLevel<0 ){ Fts5StructureLevel *pEnd = &pStruct->aLevel[pStruct->nLevel]; - if( p->pHash ){ + if( p->pHash && 0==(flags & FTS5INDEX_QUERY_SKIPHASH) ){ /* Add a segment iterator for the current contents of the hash table. 
*/ Fts5SegIter *pIter = &pNew->aSeg[iIter++]; fts5SegIterHashInit(p, pTerm, nTerm, flags, pIter); @@ -234908,7 +236795,7 @@ static void fts5TrimSegments(Fts5Index *p, Fts5Iter *pIter){ fts5BufferAppendBlob(&p->rc, &buf, sizeof(aHdr), aHdr); fts5BufferAppendVarint(&p->rc, &buf, pSeg->term.n); fts5BufferAppendBlob(&p->rc, &buf, pSeg->term.n, pSeg->term.p); - fts5BufferAppendBlob(&p->rc, &buf, pData->szLeaf-iOff,&pData->p[iOff]); + fts5BufferAppendBlob(&p->rc, &buf,pData->szLeaf-iOff,&pData->p[iOff]); if( p->rc==SQLITE_OK ){ /* Set the szLeaf field */ fts5PutU16(&buf.p[2], (u16)buf.n); @@ -235186,16 +237073,16 @@ static void fts5IndexCrisismerge( ){ const int nCrisis = p->pConfig->nCrisisMerge; Fts5Structure *pStruct = *ppStruct; - int iLvl = 0; - - assert( p->rc!=SQLITE_OK || pStruct->nLevel>0 ); - while( p->rc==SQLITE_OK && pStruct->aLevel[iLvl].nSeg>=nCrisis ){ - fts5IndexMergeLevel(p, &pStruct, iLvl, 0); - assert( p->rc!=SQLITE_OK || pStruct->nLevel>(iLvl+1) ); - fts5StructurePromote(p, iLvl+1, pStruct); - iLvl++; + if( pStruct && pStruct->nLevel>0 ){ + int iLvl = 0; + while( p->rc==SQLITE_OK && pStruct->aLevel[iLvl].nSeg>=nCrisis ){ + fts5IndexMergeLevel(p, &pStruct, iLvl, 0); + assert( p->rc!=SQLITE_OK || pStruct->nLevel>(iLvl+1) ); + fts5StructurePromote(p, iLvl+1, pStruct); + iLvl++; + } + *ppStruct = pStruct; } - *ppStruct = pStruct; } static int fts5IndexReturn(Fts5Index *p){ @@ -235229,6 +237116,413 @@ static int fts5PoslistPrefix(const u8 *aBuf, int nMax){ return ret; } +/* +** Execute the SQL statement: +** +** DELETE FROM %_idx WHERE (segid, (pgno/2)) = ($iSegid, $iPgno); +** +** This is used when a secure-delete operation removes the last term +** from a segment leaf page. In that case the %_idx entry is removed +** too. This is done to ensure that if all instances of a token are +** removed from an fts5 database in secure-delete mode, no trace of +** the token itself remains in the database. +*/ +static void fts5SecureDeleteIdxEntry( + Fts5Index *p, /* FTS5 backend object */ + int iSegid, /* Id of segment to delete entry for */ + int iPgno /* Page number within segment */ +){ + if( iPgno!=1 ){ + assert( p->pConfig->iVersion==FTS5_CURRENT_VERSION_SECUREDELETE ); + if( p->pDeleteFromIdx==0 ){ + fts5IndexPrepareStmt(p, &p->pDeleteFromIdx, sqlite3_mprintf( + "DELETE FROM '%q'.'%q_idx' WHERE (segid, (pgno/2)) = (?1, ?2)", + p->pConfig->zDb, p->pConfig->zName + )); + } + if( p->rc==SQLITE_OK ){ + sqlite3_bind_int(p->pDeleteFromIdx, 1, iSegid); + sqlite3_bind_int(p->pDeleteFromIdx, 2, iPgno); + sqlite3_step(p->pDeleteFromIdx); + p->rc = sqlite3_reset(p->pDeleteFromIdx); + } + } +} + +/* +** This is called when a secure-delete operation removes a position-list +** that overflows onto segment page iPgno of segment pSeg. This function +** rewrites node iPgno, and possibly one or more of its right-hand peers, +** to remove this portion of the position list. +** +** Output variable (*pbLastInDoclist) is set to true if the position-list +** removed is followed by a new term or the end-of-segment, or false if +** it is followed by another rowid/position list. 
+*/ +static void fts5SecureDeleteOverflow( + Fts5Index *p, + Fts5StructureSegment *pSeg, + int iPgno, + int *pbLastInDoclist +){ + const int bDetailNone = (p->pConfig->eDetail==FTS5_DETAIL_NONE); + int pgno; + Fts5Data *pLeaf = 0; + assert( iPgno!=1 ); + + *pbLastInDoclist = 1; + for(pgno=iPgno; p->rc==SQLITE_OK && pgno<=pSeg->pgnoLast; pgno++){ + i64 iRowid = FTS5_SEGMENT_ROWID(pSeg->iSegid, pgno); + int iNext = 0; + u8 *aPg = 0; + + pLeaf = fts5DataRead(p, iRowid); + if( pLeaf==0 ) break; + aPg = pLeaf->p; + + iNext = fts5GetU16(&aPg[0]); + if( iNext!=0 ){ + *pbLastInDoclist = 0; + } + if( iNext==0 && pLeaf->szLeaf!=pLeaf->nn ){ + fts5GetVarint32(&aPg[pLeaf->szLeaf], iNext); + } + + if( iNext==0 ){ + /* The page contains no terms or rowids. Replace it with an empty + ** page and move on to the right-hand peer. */ + const u8 aEmpty[] = {0x00, 0x00, 0x00, 0x04}; + assert_nc( bDetailNone==0 || pLeaf->nn==4 ); + if( bDetailNone==0 ) fts5DataWrite(p, iRowid, aEmpty, sizeof(aEmpty)); + fts5DataRelease(pLeaf); + pLeaf = 0; + }else if( bDetailNone ){ + break; + }else if( iNext>=pLeaf->szLeaf || iNext<4 ){ + p->rc = FTS5_CORRUPT; + break; + }else{ + int nShift = iNext - 4; + int nPg; + + int nIdx = 0; + u8 *aIdx = 0; + + /* Unless the current page footer is 0 bytes in size (in which case + ** the new page footer will be as well), allocate and populate a + ** buffer containing the new page footer. Set stack variables aIdx + ** and nIdx accordingly. */ + if( pLeaf->nn>pLeaf->szLeaf ){ + int iFirst = 0; + int i1 = pLeaf->szLeaf; + int i2 = 0; + + aIdx = sqlite3Fts5MallocZero(&p->rc, (pLeaf->nn-pLeaf->szLeaf)+2); + if( aIdx==0 ) break; + i1 += fts5GetVarint32(&aPg[i1], iFirst); + i2 = sqlite3Fts5PutVarint(aIdx, iFirst-nShift); + if( i1nn ){ + memcpy(&aIdx[i2], &aPg[i1], pLeaf->nn-i1); + i2 += (pLeaf->nn-i1); + } + nIdx = i2; + } + + /* Modify the contents of buffer aPg[]. Set nPg to the new size + ** in bytes. The new page is always smaller than the old. */ + nPg = pLeaf->szLeaf - nShift; + memmove(&aPg[4], &aPg[4+nShift], nPg-4); + fts5PutU16(&aPg[2], nPg); + if( fts5GetU16(&aPg[0]) ) fts5PutU16(&aPg[0], 4); + if( nIdx>0 ){ + memcpy(&aPg[nPg], aIdx, nIdx); + nPg += nIdx; + } + sqlite3_free(aIdx); + + /* Write the new page to disk and exit the loop */ + assert( nPg>4 || fts5GetU16(aPg)==0 ); + fts5DataWrite(p, iRowid, aPg, nPg); + break; + } + } + fts5DataRelease(pLeaf); +} + +/* +** Completely remove the entry that pSeg currently points to from +** the database. +*/ +static void fts5DoSecureDelete( + Fts5Index *p, + Fts5SegIter *pSeg +){ + const int bDetailNone = (p->pConfig->eDetail==FTS5_DETAIL_NONE); + int iSegid = pSeg->pSeg->iSegid; + u8 *aPg = pSeg->pLeaf->p; + int nPg = pSeg->pLeaf->nn; + int iPgIdx = pSeg->pLeaf->szLeaf; + + u64 iDelta = 0; + u64 iNextDelta = 0; + int iNextOff = 0; + int iOff = 0; + int nIdx = 0; + u8 *aIdx = 0; + int bLastInDoclist = 0; + int iIdx = 0; + int iStart = 0; + int iKeyOff = 0; + int iPrevKeyOff = 0; + int iDelKeyOff = 0; /* Offset of deleted key, if any */ + + nIdx = nPg-iPgIdx; + aIdx = sqlite3Fts5MallocZero(&p->rc, nIdx+16); + if( p->rc ) return; + memcpy(aIdx, &aPg[iPgIdx], nIdx); + + /* At this point segment iterator pSeg points to the entry + ** this function should remove from the b-tree segment. + ** + ** In detail=full or detail=column mode, pSeg->iLeafOffset is the + ** offset of the first byte in the position-list for the entry to + ** remove. 
Immediately before this comes two varints that will also + ** need to be removed: + ** + ** + the rowid or delta rowid value for the entry, and + ** + the size of the position list in bytes. + ** + ** Or, in detail=none mode, there is a single varint prior to + ** pSeg->iLeafOffset - the rowid or delta rowid value. + ** + ** This block sets the following variables: + ** + ** iStart: + ** iDelta: + */ + { + int iSOP; + if( pSeg->iLeafPgno==pSeg->iTermLeafPgno ){ + iStart = pSeg->iTermLeafOffset; + }else{ + iStart = fts5GetU16(&aPg[0]); + } + + iSOP = iStart + fts5GetVarint(&aPg[iStart], &iDelta); + assert_nc( iSOP<=pSeg->iLeafOffset ); + + if( bDetailNone ){ + while( iSOPiLeafOffset ){ + if( aPg[iSOP]==0x00 ) iSOP++; + if( aPg[iSOP]==0x00 ) iSOP++; + iStart = iSOP; + iSOP = iStart + fts5GetVarint(&aPg[iStart], &iDelta); + } + + iNextOff = iSOP; + if( iNextOffiEndofDoclist && aPg[iNextOff]==0x00 ) iNextOff++; + if( iNextOffiEndofDoclist && aPg[iNextOff]==0x00 ) iNextOff++; + + }else{ + int nPos = 0; + iSOP += fts5GetVarint32(&aPg[iSOP], nPos); + while( iSOPiLeafOffset ){ + iStart = iSOP + (nPos/2); + iSOP = iStart + fts5GetVarint(&aPg[iStart], &iDelta); + iSOP += fts5GetVarint32(&aPg[iSOP], nPos); + } + assert_nc( iSOP==pSeg->iLeafOffset ); + iNextOff = pSeg->iLeafOffset + pSeg->nPos; + } + } + + iOff = iStart; + if( iNextOff>=iPgIdx ){ + int pgno = pSeg->iLeafPgno+1; + fts5SecureDeleteOverflow(p, pSeg->pSeg, pgno, &bLastInDoclist); + iNextOff = iPgIdx; + }else{ + /* Set bLastInDoclist to true if the entry being removed is the last + ** in its doclist. */ + for(iIdx=0, iKeyOff=0; iIdxiTermLeafOffset && pSeg->iLeafPgno==pSeg->iTermLeafPgno + ){ + /* The entry being removed was the only position list in its + ** doclist. Therefore the term needs to be removed as well. */ + int iKey = 0; + for(iIdx=0, iKeyOff=0; iIdx(u32)iStart ) break; + iKeyOff += iVal; + } + + iDelKeyOff = iOff = iKeyOff; + if( iNextOff!=iPgIdx ){ + int nPrefix = 0; + int nSuffix = 0; + int nPrefix2 = 0; + int nSuffix2 = 0; + + iDelKeyOff = iNextOff; + iNextOff += fts5GetVarint32(&aPg[iNextOff], nPrefix2); + iNextOff += fts5GetVarint32(&aPg[iNextOff], nSuffix2); + + if( iKey!=1 ){ + iKeyOff += fts5GetVarint32(&aPg[iKeyOff], nPrefix); + } + iKeyOff += fts5GetVarint32(&aPg[iKeyOff], nSuffix); + + nPrefix = MIN(nPrefix, nPrefix2); + nSuffix = (nPrefix2 + nSuffix2) - nPrefix; + + if( (iKeyOff+nSuffix)>iPgIdx || (iNextOff+nSuffix2)>iPgIdx ){ + p->rc = FTS5_CORRUPT; + }else{ + if( iKey!=1 ){ + iOff += sqlite3Fts5PutVarint(&aPg[iOff], nPrefix); + } + iOff += sqlite3Fts5PutVarint(&aPg[iOff], nSuffix); + if( nPrefix2>nPrefix ){ + memcpy(&aPg[iOff], &pSeg->term.p[nPrefix], nPrefix2-nPrefix); + iOff += (nPrefix2-nPrefix); + } + memmove(&aPg[iOff], &aPg[iNextOff], nSuffix2); + iOff += nSuffix2; + iNextOff += nSuffix2; + } + } + }else if( iStart==4 ){ + int iPgno; + + assert_nc( pSeg->iLeafPgno>pSeg->iTermLeafPgno ); + /* The entry being removed may be the only position list in + ** its doclist. 
*/ + for(iPgno=pSeg->iLeafPgno-1; iPgno>pSeg->iTermLeafPgno; iPgno-- ){ + Fts5Data *pPg = fts5DataRead(p, FTS5_SEGMENT_ROWID(iSegid, iPgno)); + int bEmpty = (pPg && pPg->nn==4); + fts5DataRelease(pPg); + if( bEmpty==0 ) break; + } + + if( iPgno==pSeg->iTermLeafPgno ){ + i64 iId = FTS5_SEGMENT_ROWID(iSegid, pSeg->iTermLeafPgno); + Fts5Data *pTerm = fts5DataRead(p, iId); + if( pTerm && pTerm->szLeaf==pSeg->iTermLeafOffset ){ + u8 *aTermIdx = &pTerm->p[pTerm->szLeaf]; + int nTermIdx = pTerm->nn - pTerm->szLeaf; + int iTermIdx = 0; + int iTermOff = 0; + + while( 1 ){ + u32 iVal = 0; + int nByte = fts5GetVarint32(&aTermIdx[iTermIdx], iVal); + iTermOff += iVal; + if( (iTermIdx+nByte)>=nTermIdx ) break; + iTermIdx += nByte; + } + nTermIdx = iTermIdx; + + memmove(&pTerm->p[iTermOff], &pTerm->p[pTerm->szLeaf], nTermIdx); + fts5PutU16(&pTerm->p[2], iTermOff); + + fts5DataWrite(p, iId, pTerm->p, iTermOff+nTermIdx); + if( nTermIdx==0 ){ + fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iTermLeafPgno); + } + } + fts5DataRelease(pTerm); + } + } + + if( p->rc==SQLITE_OK ){ + const int nMove = nPg - iNextOff; + int nShift = 0; + + memmove(&aPg[iOff], &aPg[iNextOff], nMove); + iPgIdx -= (iNextOff - iOff); + nPg = iPgIdx; + fts5PutU16(&aPg[2], iPgIdx); + + nShift = iNextOff - iOff; + for(iIdx=0, iKeyOff=0, iPrevKeyOff=0; iIdxiOff ){ + iKeyOff -= nShift; + nShift = 0; + } + nPg += sqlite3Fts5PutVarint(&aPg[nPg], iKeyOff - iPrevKeyOff); + iPrevKeyOff = iKeyOff; + } + } + + if( iPgIdx==nPg && nIdx>0 && pSeg->iLeafPgno!=1 ){ + fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iLeafPgno); + } + + assert_nc( nPg>4 || fts5GetU16(aPg)==0 ); + fts5DataWrite(p, FTS5_SEGMENT_ROWID(iSegid,pSeg->iLeafPgno), aPg,nPg); + } + sqlite3_free(aIdx); +} + +/* +** This is called as part of flushing a delete to disk in 'secure-delete' +** mode. It edits the segments within the database described by argument +** pStruct to remove the entries for term zTerm, rowid iRowid. +*/ +static void fts5FlushSecureDelete( + Fts5Index *p, + Fts5Structure *pStruct, + const char *zTerm, + i64 iRowid +){ + const int f = FTS5INDEX_QUERY_SKIPHASH; + int nTerm = (int)strlen(zTerm); + Fts5Iter *pIter = 0; /* Used to find term instance */ + + fts5MultiIterNew(p, pStruct, f, 0, (const u8*)zTerm, nTerm, -1, 0, &pIter); + if( fts5MultiIterEof(p, pIter)==0 ){ + i64 iThis = fts5MultiIterRowid(pIter); + if( iThisrc==SQLITE_OK + && fts5MultiIterEof(p, pIter)==0 + && iRowid==fts5MultiIterRowid(pIter) + ){ + Fts5SegIter *pSeg = &pIter->aSeg[pIter->aFirst[1].iFirst]; + fts5DoSecureDelete(p, pSeg); + } + } + + fts5MultiIterFree(pIter); +} + + /* ** Flush the contents of in-memory hash table iHash to a new level-0 ** segment on disk. Also update the corresponding structure record. @@ -235251,6 +237545,7 @@ static void fts5FlushOneHash(Fts5Index *p){ if( iSegid ){ const int pgsz = p->pConfig->pgsz; int eDetail = p->pConfig->eDetail; + int bSecureDelete = p->pConfig->bSecureDelete; Fts5StructureSegment *pSeg; /* New segment within pStruct */ Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */ Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */ @@ -235273,40 +237568,77 @@ static void fts5FlushOneHash(Fts5Index *p){ } while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){ const char *zTerm; /* Buffer containing term */ + int nTerm; /* Size of zTerm in bytes */ const u8 *pDoclist; /* Pointer to doclist for this term */ int nDoclist; /* Size of doclist in bytes */ - /* Write the term for this entry to disk. */ + /* Get the term and doclist for this entry. 
*/ sqlite3Fts5HashScanEntry(pHash, &zTerm, &pDoclist, &nDoclist); - fts5WriteAppendTerm(p, &writer, (int)strlen(zTerm), (const u8*)zTerm); - if( p->rc!=SQLITE_OK ) break; + nTerm = (int)strlen(zTerm); + if( bSecureDelete==0 ){ + fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm); + if( p->rc!=SQLITE_OK ) break; + assert( writer.bFirstRowidInPage==0 ); + } - assert( writer.bFirstRowidInPage==0 ); - if( pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){ + if( !bSecureDelete && pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){ /* The entire doclist will fit on the current leaf. */ fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist); }else{ + int bTermWritten = !bSecureDelete; i64 iRowid = 0; - u64 iDelta = 0; + i64 iPrev = 0; int iOff = 0; /* The entire doclist will not fit on this leaf. The following ** loop iterates through the poslists that make up the current ** doclist. */ while( p->rc==SQLITE_OK && iOffrc!=SQLITE_OK || pDoclist[iOff]==0x01 ){ + iOff++; + continue; + } + } + } + + if( p->rc==SQLITE_OK && bTermWritten==0 ){ + fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm); + bTermWritten = 1; + assert( p->rc!=SQLITE_OK || writer.bFirstRowidInPage==0 ); + } + if( writer.bFirstRowidInPage ){ fts5PutU16(&pBuf->p[0], (u16)pBuf->n); /* first rowid on page */ pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid); writer.bFirstRowidInPage = 0; fts5WriteDlidxAppend(p, &writer, iRowid); - if( p->rc!=SQLITE_OK ) break; }else{ - pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iDelta); + pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid-iPrev); } + if( p->rc!=SQLITE_OK ) break; assert( pBuf->n<=pBuf->nSpace ); + iPrev = iRowid; if( eDetail==FTS5_DETAIL_NONE ){ if( iOffnLevel==0 ){ - fts5StructureAddLevel(&p->rc, &pStruct); - } - fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0); - if( p->rc==SQLITE_OK ){ - pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ]; - pSeg->iSegid = iSegid; - pSeg->pgnoFirst = 1; - pSeg->pgnoLast = pgnoLast; - pStruct->nSegment++; + assert( p->rc!=SQLITE_OK || bSecureDelete || pgnoLast>0 ); + if( pgnoLast>0 ){ + /* Update the Fts5Structure. It is written back to the database by the + ** fts5StructureRelease() call below. */ + if( pStruct->nLevel==0 ){ + fts5StructureAddLevel(&p->rc, &pStruct); + } + fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0); + if( p->rc==SQLITE_OK ){ + pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ]; + pSeg->iSegid = iSegid; + pSeg->pgnoFirst = 1; + pSeg->pgnoLast = pgnoLast; + pStruct->nSegment++; + } + fts5StructurePromote(p, 0, pStruct); } - fts5StructurePromote(p, 0, pStruct); } fts5IndexAutomerge(p, &pStruct, pgnoLast); @@ -236119,6 +238454,7 @@ static int sqlite3Fts5IndexClose(Fts5Index *p){ sqlite3_finalize(p->pIdxDeleter); sqlite3_finalize(p->pIdxSelect); sqlite3_finalize(p->pDataVersion); + sqlite3_finalize(p->pDeleteFromIdx); sqlite3Fts5HashFree(p->pHash); sqlite3_free(p->zDataTbl); sqlite3_free(p); @@ -236749,6 +239085,7 @@ static void fts5IndexIntegrityCheckSegment( Fts5StructureSegment *pSeg /* Segment to check internal consistency */ ){ Fts5Config *pConfig = p->pConfig; + int bSecureDelete = (pConfig->iVersion==FTS5_CURRENT_VERSION_SECUREDELETE); sqlite3_stmt *pStmt = 0; int rc2; int iIdxPrevLeaf = pSeg->pgnoFirst-1; @@ -236784,7 +239121,19 @@ static void fts5IndexIntegrityCheckSegment( ** is also a rowid pointer within the leaf page header, it points to a ** location before the term. 
*/ if( pLeaf->nn<=pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + + if( nIdxTerm==0 + && pConfig->iVersion==FTS5_CURRENT_VERSION_SECUREDELETE + && pLeaf->nn==pLeaf->szLeaf + && pLeaf->nn==4 + ){ + /* special case - the very first page in a segment keeps its %_idx + ** entry even if all the terms are removed from it by secure-delete + ** operations. */ + }else{ + p->rc = FTS5_CORRUPT; + } + }else{ int iOff; /* Offset of first term on leaf */ int iRowidOff; /* Offset of first rowid on leaf */ @@ -236848,9 +239197,12 @@ static void fts5IndexIntegrityCheckSegment( ASSERT_SZLEAF_OK(pLeaf); if( iRowidOff>=pLeaf->szLeaf ){ p->rc = FTS5_CORRUPT; - }else{ + }else if( bSecureDelete==0 || iRowidOff>0 ){ + i64 iDlRowid = fts5DlidxIterRowid(pDlidx); fts5GetVarint(&pLeaf->p[iRowidOff], (u64*)&iRowid); - if( iRowid!=fts5DlidxIterRowid(pDlidx) ) p->rc = FTS5_CORRUPT; + if( iRowidrc = FTS5_CORRUPT; + } } fts5DataRelease(pLeaf); } @@ -239112,6 +241464,8 @@ static int fts5UpdateMethod( Fts5Config *pConfig = pTab->p.pConfig; int eType0; /* value_type() of apVal[0] */ int rc = SQLITE_OK; /* Return code */ + int bUpdateOrDelete = 0; + /* A transaction must be open when this is called. */ assert( pTab->ts.eState==1 || pTab->ts.eState==2 ); @@ -239122,6 +241476,11 @@ static int fts5UpdateMethod( || sqlite3_value_type(apVal[0])==SQLITE_NULL ); assert( pTab->p.pConfig->pzErrmsg==0 ); + if( pConfig->pgsz==0 ){ + rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + if( rc!=SQLITE_OK ) return rc; + } + pTab->p.pConfig->pzErrmsg = &pTab->p.base.zErrMsg; /* Put any active cursors into REQUIRE_SEEK state. */ @@ -239174,6 +241533,7 @@ static int fts5UpdateMethod( else if( nArg==1 ){ i64 iDel = sqlite3_value_int64(apVal[0]); /* Rowid to delete */ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0); + bUpdateOrDelete = 1; } /* INSERT or UPDATE */ @@ -239189,6 +241549,7 @@ static int fts5UpdateMethod( if( eConflict==SQLITE_REPLACE && eType1==SQLITE_INTEGER ){ i64 iNew = sqlite3_value_int64(apVal[1]); /* Rowid to delete */ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0); + bUpdateOrDelete = 1; } fts5StorageInsert(&rc, pTab, apVal, pRowid); } @@ -239217,10 +241578,24 @@ static int fts5UpdateMethod( rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); fts5StorageInsert(&rc, pTab, apVal, pRowid); } + bUpdateOrDelete = 1; } } } + if( rc==SQLITE_OK + && bUpdateOrDelete + && pConfig->bSecureDelete + && pConfig->iVersion==FTS5_CURRENT_VERSION + ){ + rc = sqlite3Fts5StorageConfigValue( + pTab->pStorage, "version", 0, FTS5_CURRENT_VERSION_SECUREDELETE + ); + if( rc==SQLITE_OK ){ + pConfig->iVersion = FTS5_CURRENT_VERSION_SECUREDELETE; + } + } + pTab->p.pConfig->pzErrmsg = 0; return rc; } @@ -240080,6 +242455,7 @@ static int fts5RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){ UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */ fts5CheckTransactionState(pTab, FTS5_ROLLBACKTO, iSavepoint); fts5TripCursors(pTab); + pTab->p.pConfig->pgsz = 0; return sqlite3Fts5StorageRollback(pTab->pStorage); } @@ -240282,7 +242658,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2023-05-16 12:36:15 831d0fb2836b71c9bc51067c49fee4b8f18047814f2ff22d817d25195cf350b0", -1, SQLITE_TRANSIENT); } /* @@ -245274,3 +247650,4 @@ SQLITE_API int sqlite3_stmt_init( /* Return the source-id for this library */ SQLITE_API 
const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; } /************************** End of sqlite3.c ******************************/ +#pragma GCC diagnostic pop diff --git a/database/sqlite/sqlite3.h b/database/sqlite/sqlite3.h index 7e43e1f1b4dcd7..48effe20216f29 100644 --- a/database/sqlite/sqlite3.h +++ b/database/sqlite/sqlite3.h @@ -146,9 +146,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.41.2" -#define SQLITE_VERSION_NUMBER 3041002 -#define SQLITE_SOURCE_ID "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da" +#define SQLITE_VERSION "3.42.0" +#define SQLITE_VERSION_NUMBER 3042000 +#define SQLITE_SOURCE_ID "2023-05-16 12:36:15 831d0fb2836b71c9bc51067c49fee4b8f18047814f2ff22d817d25195cf350b0" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -1655,20 +1655,23 @@ SQLITE_API int sqlite3_os_end(void); ** must ensure that no other SQLite interfaces are invoked by other ** threads while sqlite3_config() is running. ** -** The sqlite3_config() interface -** may only be invoked prior to library initialization using -** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. -** ^If sqlite3_config() is called after [sqlite3_initialize()] and before -** [sqlite3_shutdown()] then it will return SQLITE_MISUSE. -** Note, however, that ^sqlite3_config() can be called as part of the -** implementation of an application-defined [sqlite3_os_init()]. -** ** The first argument to sqlite3_config() is an integer ** [configuration option] that determines ** what property of SQLite is to be configured. Subsequent arguments ** vary depending on the [configuration option] ** in the first argument. ** +** For most configuration options, the sqlite3_config() interface +** may only be invoked prior to library initialization using +** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. +** The exceptional configuration options that may be invoked at any time +** are called "anytime configuration options". +** ^If sqlite3_config() is called after [sqlite3_initialize()] and before +** [sqlite3_shutdown()] with a first argument that is not an anytime +** configuration option, then the sqlite3_config() call will return SQLITE_MISUSE. +** Note, however, that ^sqlite3_config() can be called as part of the +** implementation of an application-defined [sqlite3_os_init()]. +** ** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. ** ^If the option is unknown or SQLite is unable to set the option ** then this routine returns a non-zero [error code]. @@ -1776,6 +1779,23 @@ struct sqlite3_mem_methods { ** These constants are the available integer configuration options that ** can be passed as the first argument to the [sqlite3_config()] interface. ** +** Most of the configuration options for sqlite3_config() +** will only work if invoked prior to [sqlite3_initialize()] or after +** [sqlite3_shutdown()]. The few exceptions to this rule are called +** "anytime configuration options". +** ^Calling [sqlite3_config()] with a first argument that is not an +** anytime configuration option in between calls to [sqlite3_initialize()] and +** [sqlite3_shutdown()] is a no-op that returns SQLITE_MISUSE. +** +** The set of anytime configuration options can change (by insertions +** and/or deletions) from one release of SQLite to the next. +** As of SQLite version 3.42.0, the complete set of anytime configuration +** options is: +**
<ul>
+** <li> SQLITE_CONFIG_LOG
+** <li> SQLITE_CONFIG_PCACHE_HDRSZ
+** </ul>
+** ** New configuration options may be added in future releases of SQLite. ** Existing configuration options might be discontinued. Applications ** should check the return code from [sqlite3_config()] to make sure that @@ -2122,28 +2142,28 @@ struct sqlite3_mem_methods { ** compile-time option is not set, then the default maximum is 1073741824. ** */ -#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ -#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ -#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ -#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ -#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ -#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ -#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ -#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ -#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ -/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ -#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ -#define SQLITE_CONFIG_PCACHE 14 /* no-op */ -#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ -#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ -#define SQLITE_CONFIG_URI 17 /* int */ -#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ -#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ +#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ +#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ +#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ +#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ +#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ +#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ +#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ +#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ +/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ +#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ +#define SQLITE_CONFIG_PCACHE 14 /* no-op */ +#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ +#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ +#define SQLITE_CONFIG_URI 17 /* int */ +#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ #define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */ -#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ -#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ +#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ +#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ #define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */ #define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */ #define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */ @@ -2378,7 +2398,7 @@ struct sqlite3_mem_methods { **
** ** [[SQLITE_DBCONFIG_DQS_DML]] -** <dt>SQLITE_DBCONFIG_DQS_DML
+** <dt>SQLITE_DBCONFIG_DQS_DML</dt>
** <dd>
The SQLITE_DBCONFIG_DQS_DML option activates or deactivates ** the legacy [double-quoted string literal] misfeature for DML statements ** only, that is DELETE, INSERT, SELECT, and UPDATE statements. The @@ -2387,7 +2407,7 @@ struct sqlite3_mem_methods { **
** ** [[SQLITE_DBCONFIG_DQS_DDL]] -** <dt>SQLITE_DBCONFIG_DQS_DDL
+** <dt>SQLITE_DBCONFIG_DQS_DDL</dt>
** <dd>
The SQLITE_DBCONFIG_DQS option activates or deactivates ** the legacy [double-quoted string literal] misfeature for DDL statements, ** such as CREATE TABLE and CREATE INDEX. The @@ -2396,7 +2416,7 @@ struct sqlite3_mem_methods { **
** ** [[SQLITE_DBCONFIG_TRUSTED_SCHEMA]] -** <dt>SQLITE_DBCONFIG_TRUSTED_SCHEMA
+** <dt>SQLITE_DBCONFIG_TRUSTED_SCHEMA</dt>
** <dd>
The SQLITE_DBCONFIG_TRUSTED_SCHEMA option tells SQLite to ** assume that database schemas are untainted by malicious content. ** When the SQLITE_DBCONFIG_TRUSTED_SCHEMA option is disabled, SQLite @@ -2416,7 +2436,7 @@ struct sqlite3_mem_methods { **
** ** [[SQLITE_DBCONFIG_LEGACY_FILE_FORMAT]] -** <dt>SQLITE_DBCONFIG_LEGACY_FILE_FORMAT
+** <dt>SQLITE_DBCONFIG_LEGACY_FILE_FORMAT</dt>
** <dd>
The SQLITE_DBCONFIG_LEGACY_FILE_FORMAT option activates or deactivates ** the legacy file format flag. When activated, this flag causes all newly ** created database file to have a schema format version number (the 4-byte @@ -2425,7 +2445,7 @@ struct sqlite3_mem_methods { ** any SQLite version back to 3.0.0 ([dateof:3.0.0]). Without this setting, ** newly created databases are generally not understandable by SQLite versions ** prior to 3.3.0 ([dateof:3.3.0]). As these words are written, there -** is now scarcely any need to generated database files that are compatible +** is now scarcely any need to generate database files that are compatible ** all the way back to version 3.0.0, and so this setting is of little ** practical use, but is provided so that SQLite can continue to claim the ** ability to generate new database files that are compatible with version @@ -2436,6 +2456,38 @@ struct sqlite3_mem_methods { ** not considered a bug since SQLite versions 3.3.0 and earlier do not support ** either generated columns or decending indexes. **
+** ** [[SQLITE_DBCONFIG_STMT_SCANSTATUS]] +** <dt>SQLITE_DBCONFIG_STMT_SCANSTATUS</dt>
+** <dd>
The SQLITE_DBCONFIG_STMT_SCANSTATUS option is only useful in +** SQLITE_ENABLE_STMT_SCANSTATUS builds. In this case, it sets or clears +** a flag that enables collection of the sqlite3_stmt_scanstatus_v2() +** statistics. For statistics to be collected, the flag must be set on +** the database handle both when the SQL statement is prepared and when it +** is stepped. The flag is set (collection of statistics is enabled) +** by default. This option takes two arguments: an integer and a pointer to +** an integer.. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the statement scanstatus option. If the second argument +** is not NULL, then the value of the statement scanstatus setting after +** processing the first argument is written into the integer that the second +** argument points to. +**
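For reference, the option described above is set with sqlite3_db_config(). A minimal sketch, assuming an open handle named db and a build compiled with SQLITE_ENABLE_STMT_SCANSTATUS (the snippet is illustrative and is not part of the vendored diff):

    /* Disable collection of sqlite3_stmt_scanstatus_v2() statistics on this
    ** connection and read back the effective setting. */
    int cur = -1;
    sqlite3_db_config(db, SQLITE_DBCONFIG_STMT_SCANSTATUS, 0, &cur);
    /* cur is now 0: newly prepared statements on this handle do not
    ** gather scanstatus data. */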
+** +** [[SQLITE_DBCONFIG_REVERSE_SCANORDER]] +** <dt>SQLITE_DBCONFIG_REVERSE_SCANORDER</dt>
+** <dd>
The SQLITE_DBCONFIG_REVERSE_SCANORDER option changes the default order +** in which tables and indexes are scanned so that the scans start at the end +** and work toward the beginning rather than starting at the beginning and +** working toward the end. Setting SQLITE_DBCONFIG_REVERSE_SCANORDER is the +** same as setting [PRAGMA reverse_unordered_selects]. This option takes +** two arguments which are an integer and a pointer to an integer. The first +** argument is 1, 0, or -1 to enable, disable, or leave unchanged the +** reverse scan order flag, respectively. If the second argument is not NULL, +** then 0 or 1 is written into the integer that the second argument points to +** depending on if the reverse scan order flag is set after processing the +** first argument. +**
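Likewise, a minimal sketch for the reverse-scan-order flag, again assuming an open handle db (illustrative only, not part of the diff):

    /* Turn on reverse scan order and confirm the flag took effect. */
    int isSet = 0;
    sqlite3_db_config(db, SQLITE_DBCONFIG_REVERSE_SCANORDER, 1, &isSet);
    /* isSet is now 1; same effect as PRAGMA reverse_unordered_selects = ON. */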
+** ** */ #define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */ @@ -2456,7 +2508,9 @@ struct sqlite3_mem_methods { #define SQLITE_DBCONFIG_ENABLE_VIEW 1015 /* int int* */ #define SQLITE_DBCONFIG_LEGACY_FILE_FORMAT 1016 /* int int* */ #define SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017 /* int int* */ -#define SQLITE_DBCONFIG_MAX 1017 /* Largest DBCONFIG */ +#define SQLITE_DBCONFIG_STMT_SCANSTATUS 1018 /* int int* */ +#define SQLITE_DBCONFIG_REVERSE_SCANORDER 1019 /* int int* */ +#define SQLITE_DBCONFIG_MAX 1019 /* Largest DBCONFIG */ /* ** CAPI3REF: Enable Or Disable Extended Result Codes @@ -6201,6 +6255,13 @@ SQLITE_API void sqlite3_activate_cerod( ** of the default VFS is not implemented correctly, or not implemented at ** all, then the behavior of sqlite3_sleep() may deviate from the description ** in the previous paragraphs. +** +** If a negative argument is passed to sqlite3_sleep() the results vary by +** VFS and operating system. Some system treat a negative argument as an +** instruction to sleep forever. Others understand it to mean do not sleep +** at all. ^In SQLite version 3.42.0 and later, a negative +** argument passed into sqlite3_sleep() is changed to zero before it is relayed +** down into the xSleep method of the VFS. */ SQLITE_API int sqlite3_sleep(int); @@ -7828,9 +7889,9 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); ** is undefined if the mutex is not currently entered by the ** calling thread or is not currently allocated. ** -** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or -** sqlite3_mutex_leave() is a NULL pointer, then all three routines -** behave as no-ops. +** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), +** sqlite3_mutex_leave(), or sqlite3_mutex_free() is a NULL pointer, +** then any of the four routines behaves as a no-op. ** ** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()]. */ @@ -9564,18 +9625,28 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); ** [[SQLITE_VTAB_INNOCUOUS]]
<dt>SQLITE_VTAB_INNOCUOUS</dt>
** <dd>
Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_INNOCUOUS) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implmentation +** the [xConnect] or [xCreate] methods of a [virtual table] implementation ** identify that virtual table as being safe to use from within triggers ** and views. Conceptually, the SQLITE_VTAB_INNOCUOUS tag means that the ** virtual table can do no serious harm even if it is controlled by a ** malicious hacker. Developers should avoid setting the SQLITE_VTAB_INNOCUOUS ** flag unless absolutely necessary. **
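As a hedged illustration of the paragraph above (not taken from these sources), the tag is applied from inside a virtual table's xCreate or xConnect method, after the sqlite3_vtab object has been allocated and sqlite3_declare_vtab() has succeeded:

    /* Mark this virtual table as safe to use from views and triggers. */
    sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS);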
+** +** [[SQLITE_VTAB_USES_ALL_SCHEMAS]]<dt>SQLITE_VTAB_USES_ALL_SCHEMAS</dt>
+** <dd>
Calls of the form +** [sqlite3_vtab_config](db,SQLITE_VTAB_USES_ALL_SCHEMA) from within the +** the [xConnect] or [xCreate] methods of a [virtual table] implementation +** instruct the query planner to begin at least a read transaction on +** all schemas ("main", "temp", and any ATTACH-ed databases) whenever the +** virtual table is used. +**
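A similar hedged sketch for the new tag, assuming the call is made from within xCreate or xConnect of a virtual table implementation that reads from ATTACH-ed databases (illustrative only):

    /* Ask the planner to open a read transaction on every schema whenever
    ** this virtual table is used. */
    sqlite3_vtab_config(db, SQLITE_VTAB_USES_ALL_SCHEMAS);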
** */ #define SQLITE_VTAB_CONSTRAINT_SUPPORT 1 #define SQLITE_VTAB_INNOCUOUS 2 #define SQLITE_VTAB_DIRECTONLY 3 +#define SQLITE_VTAB_USES_ALL_SCHEMAS 4 /* ** CAPI3REF: Determine The Virtual Table Conflict Policy @@ -10750,16 +10821,20 @@ SQLITE_API int sqlite3session_create( SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); /* -** CAPIREF: Conigure a Session Object +** CAPI3REF: Configure a Session Object ** METHOD: sqlite3_session ** ** This method is used to configure a session object after it has been -** created. At present the only valid value for the second parameter is -** [SQLITE_SESSION_OBJCONFIG_SIZE]. +** created. At present the only valid values for the second parameter are +** [SQLITE_SESSION_OBJCONFIG_SIZE] and [SQLITE_SESSION_OBJCONFIG_ROWID]. ** -** Arguments for sqlite3session_object_config() +*/ +SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); + +/* +** CAPI3REF: Options for sqlite3session_object_config ** -** The following values may passed as the the 4th parameter to +** The following values may passed as the the 2nd parameter to ** sqlite3session_object_config(). ** **
<dt>SQLITE_SESSION_OBJCONFIG_SIZE <dd>
@@ -10775,12 +10850,21 @@ SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); ** ** It is an error (SQLITE_MISUSE) to attempt to modify this setting after ** the first table has been attached to the session object. +** +**
<dt>SQLITE_SESSION_OBJCONFIG_ROWID <dd>
+** This option is used to set, clear or query the flag that enables +** collection of data for tables with no explicit PRIMARY KEY. +** +** Normally, tables with no explicit PRIMARY KEY are simply ignored +** by the sessions module. However, if this flag is set, it behaves +** as if such tables have a column "_rowid_ INTEGER PRIMARY KEY" inserted +** as their leftmost columns. +** +** It is an error (SQLITE_MISUSE) to attempt to modify this setting after +** the first table has been attached to the session object. */ -SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); - -/* -*/ -#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_ROWID 2 /* ** CAPI3REF: Enable Or Disable A Session Object @@ -11913,9 +11997,23 @@ SQLITE_API int sqlite3changeset_apply_v2( ** Invert the changeset before applying it. This is equivalent to inverting ** a changeset using sqlite3changeset_invert() before applying it. It is ** an error to specify this flag with a patchset. +** +**
<dt>SQLITE_CHANGESETAPPLY_IGNORENOOP <dd>
+** Do not invoke the conflict handler callback for any changes that +** would not actually modify the database even if they were applied. +** Specifically, this means that the conflict handler is not invoked +** for: +**
<ul>
+** <li>a delete change if the row being deleted cannot be found,
+** <li>an update change if the modified fields are already set to
+** their new values in the conflicting row, or
+** <li>an insert change if all fields of the conflicting row match
+** the row being inserted.
+** </ul>
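To illustrate the flag documented above, a hedged sketch of an sqlite3changeset_apply_v2() call; db, nChangeset, pChangeset and xConflictHandler are assumed to exist in the calling code and are not defined in this diff:

    /* Apply a changeset, skipping the conflict callback for changes that
    ** would not modify the database anyway. */
    int rc = sqlite3changeset_apply_v2(
        db, nChangeset, pChangeset,
        0,                   /* xFilter: apply changes to all tables */
        xConflictHandler,    /* conflict handler callback */
        0,                   /* first argument passed to the callbacks */
        0, 0,                /* ppRebase, pnRebase: no rebase data needed */
        SQLITE_CHANGESETAPPLY_IGNORENOOP
    );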
*/ #define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001 #define SQLITE_CHANGESETAPPLY_INVERT 0x0002 +#define SQLITE_CHANGESETAPPLY_IGNORENOOP 0x0004 /* ** CAPI3REF: Constants Passed To The Conflict Handler diff --git a/database/sqlite/sqlite3recover.c b/database/sqlite/sqlite3recover.c new file mode 100644 index 00000000000000..3dae0b7a94b4ad --- /dev/null +++ b/database/sqlite/sqlite3recover.c @@ -0,0 +1,2872 @@ +/* +** 2022-08-27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +*/ + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wsign-compare" +#include "sqlite3recover.h" +#include +#include + +#ifndef SQLITE_OMIT_VIRTUALTABLE + +/* +** Declaration for public API function in file dbdata.c. This may be called +** with NULL as the final two arguments to register the sqlite_dbptr and +** sqlite_dbdata virtual tables with a database handle. +*/ +#ifdef _WIN32 +__declspec(dllexport) +#endif +int sqlite3_dbdata_init(sqlite3*, char**, const sqlite3_api_routines*); + +typedef unsigned int u32; +typedef unsigned char u8; +typedef sqlite3_int64 i64; + +typedef struct RecoverTable RecoverTable; +typedef struct RecoverColumn RecoverColumn; + +/* +** When recovering rows of data that can be associated with table +** definitions recovered from the sqlite_schema table, each table is +** represented by an instance of the following object. +** +** iRoot: +** The root page in the original database. Not necessarily (and usually +** not) the same in the recovered database. +** +** zTab: +** Name of the table. +** +** nCol/aCol[]: +** aCol[] is an array of nCol columns. In the order in which they appear +** in the table. +** +** bIntkey: +** Set to true for intkey tables, false for WITHOUT ROWID. +** +** iRowidBind: +** Each column in the aCol[] array has associated with it the index of +** the bind parameter its values will be bound to in the INSERT statement +** used to construct the output database. If the table does has a rowid +** but not an INTEGER PRIMARY KEY column, then iRowidBind contains the +** index of the bind paramater to which the rowid value should be bound. +** Otherwise, it contains -1. If the table does contain an INTEGER PRIMARY +** KEY column, then the rowid value should be bound to the index associated +** with the column. +** +** pNext: +** All RecoverTable objects used by the recovery operation are allocated +** and populated as part of creating the recovered database schema in +** the output database, before any non-schema data are recovered. They +** are then stored in a singly-linked list linked by this variable beginning +** at sqlite3_recover.pTblList. +*/ +struct RecoverTable { + u32 iRoot; /* Root page in original database */ + char *zTab; /* Name of table */ + int nCol; /* Number of columns in table */ + RecoverColumn *aCol; /* Array of columns */ + int bIntkey; /* True for intkey, false for without rowid */ + int iRowidBind; /* If >0, bind rowid to INSERT here */ + RecoverTable *pNext; +}; + +/* +** Each database column is represented by an instance of the following object +** stored in the RecoverTable.aCol[] array of the associated table. +** +** iField: +** The index of the associated field within database records. 
Or -1 if +** there is no associated field (e.g. for virtual generated columns). +** +** iBind: +** The bind index of the INSERT statement to bind this columns values +** to. Or 0 if there is no such index (iff (iField<0)). +** +** bIPK: +** True if this is the INTEGER PRIMARY KEY column. +** +** zCol: +** Name of column. +** +** eHidden: +** A RECOVER_EHIDDEN_* constant value (see below for interpretation of each). +*/ +struct RecoverColumn { + int iField; /* Field in record on disk */ + int iBind; /* Binding to use in INSERT */ + int bIPK; /* True for IPK column */ + char *zCol; + int eHidden; +}; + +#define RECOVER_EHIDDEN_NONE 0 /* Normal database column */ +#define RECOVER_EHIDDEN_HIDDEN 1 /* Column is __HIDDEN__ */ +#define RECOVER_EHIDDEN_VIRTUAL 2 /* Virtual generated column */ +#define RECOVER_EHIDDEN_STORED 3 /* Stored generated column */ + +/* +** Bitmap object used to track pages in the input database. Allocated +** and manipulated only by the following functions: +** +** recoverBitmapAlloc() +** recoverBitmapFree() +** recoverBitmapSet() +** recoverBitmapQuery() +** +** nPg: +** Largest page number that may be stored in the bitmap. The range +** of valid keys is 1 to nPg, inclusive. +** +** aElem[]: +** Array large enough to contain a bit for each key. For key value +** iKey, the associated bit is the bit (iKey%32) of aElem[iKey/32]. +** In other words, the following is true if bit iKey is set, or +** false if it is clear: +** +** (aElem[iKey/32] & (1 << (iKey%32))) ? 1 : 0 +*/ +typedef struct RecoverBitmap RecoverBitmap; +struct RecoverBitmap { + i64 nPg; /* Size of bitmap */ + u32 aElem[1]; /* Array of 32-bit bitmasks */ +}; + +/* +** State variables (part of the sqlite3_recover structure) used while +** recovering data for tables identified in the recovered schema (state +** RECOVER_STATE_WRITING). +*/ +typedef struct RecoverStateW1 RecoverStateW1; +struct RecoverStateW1 { + sqlite3_stmt *pTbls; + sqlite3_stmt *pSel; + sqlite3_stmt *pInsert; + int nInsert; + + RecoverTable *pTab; /* Table currently being written */ + int nMax; /* Max column count in any schema table */ + sqlite3_value **apVal; /* Array of nMax values */ + int nVal; /* Number of valid entries in apVal[] */ + int bHaveRowid; + i64 iRowid; + i64 iPrevPage; + int iPrevCell; +}; + +/* +** State variables (part of the sqlite3_recover structure) used while +** recovering data destined for the lost and found table (states +** RECOVER_STATE_LOSTANDFOUND[123]). +*/ +typedef struct RecoverStateLAF RecoverStateLAF; +struct RecoverStateLAF { + RecoverBitmap *pUsed; + i64 nPg; /* Size of db in pages */ + sqlite3_stmt *pAllAndParent; + sqlite3_stmt *pMapInsert; + sqlite3_stmt *pMaxField; + sqlite3_stmt *pUsedPages; + sqlite3_stmt *pFindRoot; + sqlite3_stmt *pInsert; /* INSERT INTO lost_and_found ... */ + sqlite3_stmt *pAllPage; + sqlite3_stmt *pPageData; + sqlite3_value **apVal; + int nMaxField; +}; + +/* +** Main recover handle structure. +*/ +struct sqlite3_recover { + /* Copies of sqlite3_recover_init[_sql]() parameters */ + sqlite3 *dbIn; /* Input database */ + char *zDb; /* Name of input db ("main" etc.) 
*/ + char *zUri; /* URI for output database */ + void *pSqlCtx; /* SQL callback context */ + int (*xSql)(void*,const char*); /* Pointer to SQL callback function */ + + /* Values configured by sqlite3_recover_config() */ + char *zStateDb; /* State database to use (or NULL) */ + char *zLostAndFound; /* Name of lost-and-found table (or NULL) */ + int bFreelistCorrupt; /* SQLITE_RECOVER_FREELIST_CORRUPT setting */ + int bRecoverRowid; /* SQLITE_RECOVER_ROWIDS setting */ + int bSlowIndexes; /* SQLITE_RECOVER_SLOWINDEXES setting */ + + int pgsz; + int detected_pgsz; + int nReserve; + u8 *pPage1Disk; + u8 *pPage1Cache; + + /* Error code and error message */ + int errCode; /* For sqlite3_recover_errcode() */ + char *zErrMsg; /* For sqlite3_recover_errmsg() */ + + int eState; + int bCloseTransaction; + + /* Variables used with eState==RECOVER_STATE_WRITING */ + RecoverStateW1 w1; + + /* Variables used with states RECOVER_STATE_LOSTANDFOUND[123] */ + RecoverStateLAF laf; + + /* Fields used within sqlite3_recover_run() */ + sqlite3 *dbOut; /* Output database */ + sqlite3_stmt *pGetPage; /* SELECT against input db sqlite_dbdata */ + RecoverTable *pTblList; /* List of tables recovered from schema */ +}; + +/* +** The various states in which an sqlite3_recover object may exist: +** +** RECOVER_STATE_INIT: +** The object is initially created in this state. sqlite3_recover_step() +** has yet to be called. This is the only state in which it is permitted +** to call sqlite3_recover_config(). +** +** RECOVER_STATE_WRITING: +** +** RECOVER_STATE_LOSTANDFOUND1: +** State to populate the bitmap of pages used by other tables or the +** database freelist. +** +** RECOVER_STATE_LOSTANDFOUND2: +** Populate the recovery.map table - used to figure out a "root" page +** for each lost page from in the database from which records are +** extracted. +** +** RECOVER_STATE_LOSTANDFOUND3: +** Populate the lost-and-found table itself. +*/ +#define RECOVER_STATE_INIT 0 +#define RECOVER_STATE_WRITING 1 +#define RECOVER_STATE_LOSTANDFOUND1 2 +#define RECOVER_STATE_LOSTANDFOUND2 3 +#define RECOVER_STATE_LOSTANDFOUND3 4 +#define RECOVER_STATE_SCHEMA2 5 +#define RECOVER_STATE_DONE 6 + + +/* +** Global variables used by this extension. +*/ +typedef struct RecoverGlobal RecoverGlobal; +struct RecoverGlobal { + const sqlite3_io_methods *pMethods; + sqlite3_recover *p; +}; +static RecoverGlobal recover_g; + +/* +** Use this static SQLite mutex to protect the globals during the +** first call to sqlite3_recover_step(). +*/ +#define RECOVER_MUTEX_ID SQLITE_MUTEX_STATIC_APP2 + + +/* +** Default value for SQLITE_RECOVER_ROWIDS (sqlite3_recover.bRecoverRowid). +*/ +#define RECOVER_ROWID_DEFAULT 1 + +/* +** Mutex handling: +** +** recoverEnterMutex() - Enter the recovery mutex +** recoverLeaveMutex() - Leave the recovery mutex +** recoverAssertMutexHeld() - Assert that the recovery mutex is held +*/ +#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE==0 +# define recoverEnterMutex() +# define recoverLeaveMutex() +#else +static void recoverEnterMutex(void){ + sqlite3_mutex_enter(sqlite3_mutex_alloc(RECOVER_MUTEX_ID)); +} +static void recoverLeaveMutex(void){ + sqlite3_mutex_leave(sqlite3_mutex_alloc(RECOVER_MUTEX_ID)); +} +#endif +#if SQLITE_THREADSAFE+0>=1 && defined(SQLITE_DEBUG) +static void recoverAssertMutexHeld(void){ + assert( sqlite3_mutex_held(sqlite3_mutex_alloc(RECOVER_MUTEX_ID)) ); +} +#else +# define recoverAssertMutexHeld() +#endif + + +/* +** Like strlen(). But handles NULL pointer arguments. 
+*/ +static int recoverStrlen(const char *zStr){ + if( zStr==0 ) return 0; + return (int)(strlen(zStr)&0x7fffffff); +} + +/* +** This function is a no-op if the recover handle passed as the first +** argument already contains an error (if p->errCode!=SQLITE_OK). +** +** Otherwise, an attempt is made to allocate, zero and return a buffer nByte +** bytes in size. If successful, a pointer to the new buffer is returned. Or, +** if an OOM error occurs, NULL is returned and the handle error code +** (p->errCode) set to SQLITE_NOMEM. +*/ +static void *recoverMalloc(sqlite3_recover *p, i64 nByte){ + void *pRet = 0; + assert( nByte>0 ); + if( p->errCode==SQLITE_OK ){ + pRet = sqlite3_malloc64(nByte); + if( pRet ){ + memset(pRet, 0, nByte); + }else{ + p->errCode = SQLITE_NOMEM; + } + } + return pRet; +} + +/* +** Set the error code and error message for the recover handle passed as +** the first argument. The error code is set to the value of parameter +** errCode. +** +** Parameter zFmt must be a printf() style formatting string. The handle +** error message is set to the result of using any trailing arguments for +** parameter substitutions in the formatting string. +** +** For example: +** +** recoverError(p, SQLITE_ERROR, "no such table: %s", zTablename); +*/ +static int recoverError( + sqlite3_recover *p, + int errCode, + const char *zFmt, ... +){ + char *z = 0; + va_list ap; + va_start(ap, zFmt); + if( zFmt ){ + z = sqlite3_vmprintf(zFmt, ap); + va_end(ap); + } + sqlite3_free(p->zErrMsg); + p->zErrMsg = z; + p->errCode = errCode; + return errCode; +} + + +/* +** This function is a no-op if p->errCode is initially other than SQLITE_OK. +** In this case it returns NULL. +** +** Otherwise, an attempt is made to allocate and return a bitmap object +** large enough to store a bit for all page numbers between 1 and nPg, +** inclusive. The bitmap is initially zeroed. +*/ +static RecoverBitmap *recoverBitmapAlloc(sqlite3_recover *p, i64 nPg){ + int nElem = (nPg+1+31) / 32; + int nByte = sizeof(RecoverBitmap) + nElem*sizeof(u32); + RecoverBitmap *pRet = (RecoverBitmap*)recoverMalloc(p, nByte); + + if( pRet ){ + pRet->nPg = nPg; + } + return pRet; +} + +/* +** Free a bitmap object allocated by recoverBitmapAlloc(). +*/ +static void recoverBitmapFree(RecoverBitmap *pMap){ + sqlite3_free(pMap); +} + +/* +** Set the bit associated with page iPg in bitvec pMap. +*/ +static void recoverBitmapSet(RecoverBitmap *pMap, i64 iPg){ + if( iPg<=pMap->nPg ){ + int iElem = (iPg / 32); + int iBit = (iPg % 32); + pMap->aElem[iElem] |= (((u32)1) << iBit); + } +} + +/* +** Query bitmap object pMap for the state of the bit associated with page +** iPg. Return 1 if it is set, or 0 otherwise. +*/ +static int recoverBitmapQuery(RecoverBitmap *pMap, i64 iPg){ + int ret = 1; + if( iPg<=pMap->nPg && iPg>0 ){ + int iElem = (iPg / 32); + int iBit = (iPg % 32); + ret = (pMap->aElem[iElem] & (((u32)1) << iBit)) ? 1 : 0; + } + return ret; +} + +/* +** Set the recover handle error to the error code and message returned by +** calling sqlite3_errcode() and sqlite3_errmsg(), respectively, on database +** handle db. +*/ +static int recoverDbError(sqlite3_recover *p, sqlite3 *db){ + return recoverError(p, sqlite3_errcode(db), "%s", sqlite3_errmsg(db)); +} + +/* +** This function is a no-op if recover handle p already contains an error +** (if p->errCode!=SQLITE_OK). +** +** Otherwise, it attempts to prepare the SQL statement in zSql against +** database handle db. If successful, the statement handle is returned. 
+** Or, if an error occurs, NULL is returned and an error left in the +** recover handle. +*/ +static sqlite3_stmt *recoverPrepare( + sqlite3_recover *p, + sqlite3 *db, + const char *zSql +){ + sqlite3_stmt *pStmt = 0; + if( p->errCode==SQLITE_OK ){ + if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0) ){ + recoverDbError(p, db); + } + } + return pStmt; +} + +/* +** This function is a no-op if recover handle p already contains an error +** (if p->errCode!=SQLITE_OK). +** +** Otherwise, argument zFmt is used as a printf() style format string, +** along with any trailing arguments, to create an SQL statement. This +** SQL statement is prepared against database handle db and, if successful, +** the statment handle returned. Or, if an error occurs - either during +** the printf() formatting or when preparing the resulting SQL - an +** error code and message are left in the recover handle. +*/ +static sqlite3_stmt *recoverPreparePrintf( + sqlite3_recover *p, + sqlite3 *db, + const char *zFmt, ... +){ + sqlite3_stmt *pStmt = 0; + if( p->errCode==SQLITE_OK ){ + va_list ap; + char *z; + va_start(ap, zFmt); + z = sqlite3_vmprintf(zFmt, ap); + va_end(ap); + if( z==0 ){ + p->errCode = SQLITE_NOMEM; + }else{ + pStmt = recoverPrepare(p, db, z); + sqlite3_free(z); + } + } + return pStmt; +} + +/* +** Reset SQLite statement handle pStmt. If the call to sqlite3_reset() +** indicates that an error occurred, and there is not already an error +** in the recover handle passed as the first argument, set the error +** code and error message appropriately. +** +** This function returns a copy of the statement handle pointer passed +** as the second argument. +*/ +static sqlite3_stmt *recoverReset(sqlite3_recover *p, sqlite3_stmt *pStmt){ + int rc = sqlite3_reset(pStmt); + if( rc!=SQLITE_OK && rc!=SQLITE_CONSTRAINT && p->errCode==SQLITE_OK ){ + recoverDbError(p, sqlite3_db_handle(pStmt)); + } + return pStmt; +} + +/* +** Finalize SQLite statement handle pStmt. If the call to sqlite3_reset() +** indicates that an error occurred, and there is not already an error +** in the recover handle passed as the first argument, set the error +** code and error message appropriately. +*/ +static void recoverFinalize(sqlite3_recover *p, sqlite3_stmt *pStmt){ + sqlite3 *db = sqlite3_db_handle(pStmt); + int rc = sqlite3_finalize(pStmt); + if( rc!=SQLITE_OK && p->errCode==SQLITE_OK ){ + recoverDbError(p, db); + } +} + +/* +** This function is a no-op if recover handle p already contains an error +** (if p->errCode!=SQLITE_OK). A copy of p->errCode is returned in this +** case. +** +** Otherwise, execute SQL script zSql. If successful, return SQLITE_OK. +** Or, if an error occurs, leave an error code and message in the recover +** handle and return a copy of the error code. +*/ +static int recoverExec(sqlite3_recover *p, sqlite3 *db, const char *zSql){ + if( p->errCode==SQLITE_OK ){ + int rc = sqlite3_exec(db, zSql, 0, 0, 0); + if( rc ){ + recoverDbError(p, db); + } + } + return p->errCode; +} + +/* +** Bind the value pVal to parameter iBind of statement pStmt. Leave an +** error in the recover handle passed as the first argument if an error +** (e.g. an OOM) occurs. +*/ +static void recoverBindValue( + sqlite3_recover *p, + sqlite3_stmt *pStmt, + int iBind, + sqlite3_value *pVal +){ + if( p->errCode==SQLITE_OK ){ + int rc = sqlite3_bind_value(pStmt, iBind, pVal); + if( rc ) recoverError(p, rc, 0); + } +} + +/* +** This function is a no-op if recover handle p already contains an error +** (if p->errCode!=SQLITE_OK). 
NULL is returned in this case. +** +** Otherwise, an attempt is made to interpret zFmt as a printf() style +** formatting string and the result of using the trailing arguments for +** parameter substitution with it written into a buffer obtained from +** sqlite3_malloc(). If successful, a pointer to the buffer is returned. +** It is the responsibility of the caller to eventually free the buffer +** using sqlite3_free(). +** +** Or, if an error occurs, an error code and message is left in the recover +** handle and NULL returned. +*/ +static char *recoverMPrintf(sqlite3_recover *p, const char *zFmt, ...){ + va_list ap; + char *z; + va_start(ap, zFmt); + z = sqlite3_vmprintf(zFmt, ap); + va_end(ap); + if( p->errCode==SQLITE_OK ){ + if( z==0 ) p->errCode = SQLITE_NOMEM; + }else{ + sqlite3_free(z); + z = 0; + } + return z; +} + +/* +** This function is a no-op if recover handle p already contains an error +** (if p->errCode!=SQLITE_OK). Zero is returned in this case. +** +** Otherwise, execute "PRAGMA page_count" against the input database. If +** successful, return the integer result. Or, if an error occurs, leave an +** error code and error message in the sqlite3_recover handle and return +** zero. +*/ +static i64 recoverPageCount(sqlite3_recover *p){ + i64 nPg = 0; + if( p->errCode==SQLITE_OK ){ + sqlite3_stmt *pStmt = 0; + pStmt = recoverPreparePrintf(p, p->dbIn, "PRAGMA %Q.page_count", p->zDb); + if( pStmt ){ + sqlite3_step(pStmt); + nPg = sqlite3_column_int64(pStmt, 0); + } + recoverFinalize(p, pStmt); + } + return nPg; +} + +/* +** Implementation of SQL scalar function "read_i32". The first argument to +** this function must be a blob. The second a non-negative integer. This +** function reads and returns a 32-bit big-endian integer from byte +** offset (4*) of the blob. +** +** SELECT read_i32(, ) +*/ +static void recoverReadI32( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const unsigned char *pBlob; + int nBlob; + int iInt; + + assert( argc==2 ); + nBlob = sqlite3_value_bytes(argv[0]); + pBlob = (const unsigned char*)sqlite3_value_blob(argv[0]); + iInt = sqlite3_value_int(argv[1]) & 0xFFFF; + + if( (iInt+1)*4<=nBlob ){ + const unsigned char *a = &pBlob[iInt*4]; + i64 iVal = ((i64)a[0]<<24) + + ((i64)a[1]<<16) + + ((i64)a[2]<< 8) + + ((i64)a[3]<< 0); + sqlite3_result_int64(context, iVal); + } +} + +/* +** Implementation of SQL scalar function "page_is_used". This function +** is used as part of the procedure for locating orphan rows for the +** lost-and-found table, and it depends on those routines having populated +** the sqlite3_recover.laf.pUsed variable. +** +** The only argument to this function is a page-number. It returns true +** if the page has already been used somehow during data recovery, or false +** otherwise. +** +** SELECT page_is_used(); +*/ +static void recoverPageIsUsed( + sqlite3_context *pCtx, + int nArg, + sqlite3_value **apArg +){ + sqlite3_recover *p = (sqlite3_recover*)sqlite3_user_data(pCtx); + i64 pgno = sqlite3_value_int64(apArg[0]); + assert( nArg==1 ); + sqlite3_result_int(pCtx, recoverBitmapQuery(p->laf.pUsed, pgno)); +} + +/* +** The implementation of a user-defined SQL function invoked by the +** sqlite_dbdata and sqlite_dbptr virtual table modules to access pages +** of the database being recovered. +** +** This function always takes a single integer argument. If the argument +** is zero, then the value returned is the number of pages in the db being +** recovered. If the argument is greater than zero, it is a page number. 
+** The value returned in this case is an SQL blob containing the data for +** the identified page of the db being recovered. e.g. +** +** SELECT getpage(0); -- return number of pages in db +** SELECT getpage(4); -- return page 4 of db as a blob of data +*/ +static void recoverGetPage( + sqlite3_context *pCtx, + int nArg, + sqlite3_value **apArg +){ + sqlite3_recover *p = (sqlite3_recover*)sqlite3_user_data(pCtx); + i64 pgno = sqlite3_value_int64(apArg[0]); + sqlite3_stmt *pStmt = 0; + + assert( nArg==1 ); + if( pgno==0 ){ + i64 nPg = recoverPageCount(p); + sqlite3_result_int64(pCtx, nPg); + return; + }else{ + if( p->pGetPage==0 ){ + pStmt = p->pGetPage = recoverPreparePrintf( + p, p->dbIn, "SELECT data FROM sqlite_dbpage(%Q) WHERE pgno=?", p->zDb + ); + }else if( p->errCode==SQLITE_OK ){ + pStmt = p->pGetPage; + } + + if( pStmt ){ + sqlite3_bind_int64(pStmt, 1, pgno); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + const u8 *aPg; + int nPg; + assert( p->errCode==SQLITE_OK ); + aPg = sqlite3_column_blob(pStmt, 0); + nPg = sqlite3_column_bytes(pStmt, 0); + if( pgno==1 && nPg==p->pgsz && 0==memcmp(p->pPage1Cache, aPg, nPg) ){ + aPg = p->pPage1Disk; + } + sqlite3_result_blob(pCtx, aPg, nPg-p->nReserve, SQLITE_TRANSIENT); + } + recoverReset(p, pStmt); + } + } + + if( p->errCode ){ + if( p->zErrMsg ) sqlite3_result_error(pCtx, p->zErrMsg, -1); + sqlite3_result_error_code(pCtx, p->errCode); + } +} + +/* +** Find a string that is not found anywhere in z[]. Return a pointer +** to that string. +** +** Try to use zA and zB first. If both of those are already found in z[] +** then make up some string and store it in the buffer zBuf. +*/ +static const char *recoverUnusedString( + const char *z, /* Result must not appear anywhere in z */ + const char *zA, const char *zB, /* Try these first */ + char *zBuf /* Space to store a generated string */ +){ + unsigned i = 0; + if( strstr(z, zA)==0 ) return zA; + if( strstr(z, zB)==0 ) return zB; + do{ + sqlite3_snprintf(20,zBuf,"(%s%u)", zA, i++); + }while( strstr(z,zBuf)!=0 ); + return zBuf; +} + +/* +** Implementation of scalar SQL function "escape_crnl". The argument passed to +** this function is the output of built-in function quote(). If the first +** character of the input is "'", indicating that the value passed to quote() +** was a text value, then this function searches the input for "\n" and "\r" +** characters and adds a wrapper similar to the following: +** +** replace(replace(, '\n', char(10), '\r', char(13)); +** +** Or, if the first character of the input is not "'", then a copy of the input +** is returned. +*/ +static void recoverEscapeCrnl( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const char *zText = (const char*)sqlite3_value_text(argv[0]); + (void)argc; + if( zText && zText[0]=='\'' ){ + int nText = sqlite3_value_bytes(argv[0]); + int i; + char zBuf1[20]; + char zBuf2[20]; + const char *zNL = 0; + const char *zCR = 0; + int nCR = 0; + int nNL = 0; + + for(i=0; zText[i]; i++){ + if( zNL==0 && zText[i]=='\n' ){ + zNL = recoverUnusedString(zText, "\\n", "\\012", zBuf1); + nNL = (int)strlen(zNL); + } + if( zCR==0 && zText[i]=='\r' ){ + zCR = recoverUnusedString(zText, "\\r", "\\015", zBuf2); + nCR = (int)strlen(zCR); + } + } + + if( zNL || zCR ){ + int iOut = 0; + i64 nMax = (nNL > nCR) ? 
nNL : nCR; + i64 nAlloc = nMax * nText + (nMax+64)*2; + char *zOut = (char*)sqlite3_malloc64(nAlloc); + if( zOut==0 ){ + sqlite3_result_error_nomem(context); + return; + } + + if( zNL && zCR ){ + memcpy(&zOut[iOut], "replace(replace(", 16); + iOut += 16; + }else{ + memcpy(&zOut[iOut], "replace(", 8); + iOut += 8; + } + for(i=0; zText[i]; i++){ + if( zText[i]=='\n' ){ + memcpy(&zOut[iOut], zNL, nNL); + iOut += nNL; + }else if( zText[i]=='\r' ){ + memcpy(&zOut[iOut], zCR, nCR); + iOut += nCR; + }else{ + zOut[iOut] = zText[i]; + iOut++; + } + } + + if( zNL ){ + memcpy(&zOut[iOut], ",'", 2); iOut += 2; + memcpy(&zOut[iOut], zNL, nNL); iOut += nNL; + memcpy(&zOut[iOut], "', char(10))", 12); iOut += 12; + } + if( zCR ){ + memcpy(&zOut[iOut], ",'", 2); iOut += 2; + memcpy(&zOut[iOut], zCR, nCR); iOut += nCR; + memcpy(&zOut[iOut], "', char(13))", 12); iOut += 12; + } + + sqlite3_result_text(context, zOut, iOut, SQLITE_TRANSIENT); + sqlite3_free(zOut); + return; + } + } + + sqlite3_result_value(context, argv[0]); +} + +/* +** This function is a no-op if recover handle p already contains an error +** (if p->errCode!=SQLITE_OK). A copy of the error code is returned in +** this case. +** +** Otherwise, attempt to populate temporary table "recovery.schema" with the +** parts of the database schema that can be extracted from the input database. +** +** If no error occurs, SQLITE_OK is returned. Otherwise, an error code +** and error message are left in the recover handle and a copy of the +** error code returned. It is not considered an error if part of all of +** the database schema cannot be recovered due to corruption. +*/ +static int recoverCacheSchema(sqlite3_recover *p){ + return recoverExec(p, p->dbOut, + "WITH RECURSIVE pages(p) AS (" + " SELECT 1" + " UNION" + " SELECT child FROM sqlite_dbptr('getpage()'), pages WHERE pgno=p" + ")" + "INSERT INTO recovery.schema SELECT" + " max(CASE WHEN field=0 THEN value ELSE NULL END)," + " max(CASE WHEN field=1 THEN value ELSE NULL END)," + " max(CASE WHEN field=2 THEN value ELSE NULL END)," + " max(CASE WHEN field=3 THEN value ELSE NULL END)," + " max(CASE WHEN field=4 THEN value ELSE NULL END)" + "FROM sqlite_dbdata('getpage()') WHERE pgno IN (" + " SELECT p FROM pages" + ") GROUP BY pgno, cell" + ); +} + +/* +** If this recover handle is not in SQL callback mode (i.e. was not created +** using sqlite3_recover_init_sql()) of if an error has already occurred, +** this function is a no-op. Otherwise, issue a callback with SQL statement +** zSql as the parameter. +** +** If the callback returns non-zero, set the recover handle error code to +** the value returned (so that the caller will abandon processing). +*/ +static void recoverSqlCallback(sqlite3_recover *p, const char *zSql){ + if( p->errCode==SQLITE_OK && p->xSql ){ + int res = p->xSql(p->pSqlCtx, zSql); + if( res ){ + recoverError(p, SQLITE_ERROR, "callback returned an error - %d", res); + } + } +} + +/* +** Transfer the following settings from the input database to the output +** database: +** +** + page-size, +** + auto-vacuum settings, +** + database encoding, +** + user-version (PRAGMA user_version), and +** + application-id (PRAGMA application_id), and +*/ +static void recoverTransferSettings(sqlite3_recover *p){ + const char *aPragma[] = { + "encoding", + "page_size", + "auto_vacuum", + "user_version", + "application_id" + }; + int ii; + + /* Truncate the output database to 0 pages in size. 
This is done by + ** opening a new, empty, temp db, then using the backup API to clobber + ** any existing output db with a copy of it. */ + if( p->errCode==SQLITE_OK ){ + sqlite3 *db2 = 0; + int rc = sqlite3_open("", &db2); + if( rc!=SQLITE_OK ){ + recoverDbError(p, db2); + return; + } + + for(ii=0; ii<(int)(sizeof(aPragma)/sizeof(aPragma[0])); ii++){ + const char *zPrag = aPragma[ii]; + sqlite3_stmt *p1 = 0; + p1 = recoverPreparePrintf(p, p->dbIn, "PRAGMA %Q.%s", p->zDb, zPrag); + if( p->errCode==SQLITE_OK && sqlite3_step(p1)==SQLITE_ROW ){ + const char *zArg = (const char*)sqlite3_column_text(p1, 0); + char *z2 = recoverMPrintf(p, "PRAGMA %s = %Q", zPrag, zArg); + recoverSqlCallback(p, z2); + recoverExec(p, db2, z2); + sqlite3_free(z2); + if( zArg==0 ){ + recoverError(p, SQLITE_NOMEM, 0); + } + } + recoverFinalize(p, p1); + } + recoverExec(p, db2, "CREATE TABLE t1(a); DROP TABLE t1;"); + + if( p->errCode==SQLITE_OK ){ + sqlite3 *db = p->dbOut; + sqlite3_backup *pBackup = sqlite3_backup_init(db, "main", db2, "main"); + if( pBackup ){ + sqlite3_backup_step(pBackup, -1); + p->errCode = sqlite3_backup_finish(pBackup); + }else{ + recoverDbError(p, db); + } + } + + sqlite3_close(db2); + } +} + +/* +** This function is a no-op if recover handle p already contains an error +** (if p->errCode!=SQLITE_OK). A copy of the error code is returned in +** this case. +** +** Otherwise, an attempt is made to open the output database, attach +** and create the schema of the temporary database used to store +** intermediate data, and to register all required user functions and +** virtual table modules with the output handle. +** +** If no error occurs, SQLITE_OK is returned. Otherwise, an error code +** and error message are left in the recover handle and a copy of the +** error code returned. +*/ +static int recoverOpenOutput(sqlite3_recover *p){ + struct Func { + const char *zName; + int nArg; + void (*xFunc)(sqlite3_context*,int,sqlite3_value **); + } aFunc[] = { + { "getpage", 1, recoverGetPage }, + { "page_is_used", 1, recoverPageIsUsed }, + { "read_i32", 2, recoverReadI32 }, + { "escape_crnl", 1, recoverEscapeCrnl }, + }; + + const int flags = SQLITE_OPEN_URI|SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE; + sqlite3 *db = 0; /* New database handle */ + int ii; /* For iterating through aFunc[] */ + + assert( p->dbOut==0 ); + + if( sqlite3_open_v2(p->zUri, &db, flags, 0) ){ + recoverDbError(p, db); + } + + /* Register the sqlite_dbdata and sqlite_dbptr virtual table modules. + ** These two are registered with the output database handle - this + ** module depends on the input handle supporting the sqlite_dbpage + ** virtual table only. */ + if( p->errCode==SQLITE_OK ){ + p->errCode = sqlite3_dbdata_init(db, 0, 0); + } + + /* Register the custom user-functions with the output handle. */ + for(ii=0; + p->errCode==SQLITE_OK && ii<(int)(sizeof(aFunc)/sizeof(aFunc[0])); + ii++){ + p->errCode = sqlite3_create_function(db, aFunc[ii].zName, + aFunc[ii].nArg, SQLITE_UTF8, (void*)p, aFunc[ii].xFunc, 0, 0 + ); + } + + p->dbOut = db; + return p->errCode; +} + +/* +** Attach the auxiliary database 'recovery' to the output database handle. +** This temporary database is used during the recovery process and then +** discarded. 
+*/ +static void recoverOpenRecovery(sqlite3_recover *p){ + char *zSql = recoverMPrintf(p, "ATTACH %Q AS recovery;", p->zStateDb); + recoverExec(p, p->dbOut, zSql); + recoverExec(p, p->dbOut, + "PRAGMA writable_schema = 1;" + "CREATE TABLE recovery.map(pgno INTEGER PRIMARY KEY, parent INT);" + "CREATE TABLE recovery.schema(type, name, tbl_name, rootpage, sql);" + ); + sqlite3_free(zSql); +} + + +/* +** This function is a no-op if recover handle p already contains an error +** (if p->errCode!=SQLITE_OK). +** +** Otherwise, argument zName must be the name of a table that has just been +** created in the output database. This function queries the output db +** for the schema of said table, and creates a RecoverTable object to +** store the schema in memory. The new RecoverTable object is linked into +** the list at sqlite3_recover.pTblList. +** +** Parameter iRoot must be the root page of table zName in the INPUT +** database. +*/ +static void recoverAddTable( + sqlite3_recover *p, + const char *zName, /* Name of table created in output db */ + i64 iRoot /* Root page of same table in INPUT db */ +){ + sqlite3_stmt *pStmt = recoverPreparePrintf(p, p->dbOut, + "PRAGMA table_xinfo(%Q)", zName + ); + + if( pStmt ){ + int iPk = -1; + int iBind = 1; + RecoverTable *pNew = 0; + int nCol = 0; + int nName = recoverStrlen(zName); + int nByte = 0; + while( sqlite3_step(pStmt)==SQLITE_ROW ){ + nCol++; + nByte += (sqlite3_column_bytes(pStmt, 1)+1); + } + nByte += sizeof(RecoverTable) + nCol*sizeof(RecoverColumn) + nName+1; + recoverReset(p, pStmt); + + pNew = recoverMalloc(p, nByte); + if( pNew ){ + int i = 0; + int iField = 0; + char *csr = 0; + pNew->aCol = (RecoverColumn*)&pNew[1]; + pNew->zTab = csr = (char*)&pNew->aCol[nCol]; + pNew->nCol = nCol; + pNew->iRoot = iRoot; + memcpy(csr, zName, nName); + csr += nName+1; + + for(i=0; sqlite3_step(pStmt)==SQLITE_ROW; i++){ + int iPKF = sqlite3_column_int(pStmt, 5); + int n = sqlite3_column_bytes(pStmt, 1); + const char *z = (const char*)sqlite3_column_text(pStmt, 1); + const char *zType = (const char*)sqlite3_column_text(pStmt, 2); + int eHidden = sqlite3_column_int(pStmt, 6); + + if( iPk==-1 && iPKF==1 && !sqlite3_stricmp("integer", zType) ) iPk = i; + if( iPKF>1 ) iPk = -2; + pNew->aCol[i].zCol = csr; + pNew->aCol[i].eHidden = eHidden; + if( eHidden==RECOVER_EHIDDEN_VIRTUAL ){ + pNew->aCol[i].iField = -1; + }else{ + pNew->aCol[i].iField = iField++; + } + if( eHidden!=RECOVER_EHIDDEN_VIRTUAL + && eHidden!=RECOVER_EHIDDEN_STORED + ){ + pNew->aCol[i].iBind = iBind++; + } + memcpy(csr, z, n); + csr += (n+1); + } + + pNew->pNext = p->pTblList; + p->pTblList = pNew; + pNew->bIntkey = 1; + } + + recoverFinalize(p, pStmt); + + pStmt = recoverPreparePrintf(p, p->dbOut, "PRAGMA index_xinfo(%Q)", zName); + while( pStmt && sqlite3_step(pStmt)==SQLITE_ROW ){ + int iField = sqlite3_column_int(pStmt, 0); + int iCol = sqlite3_column_int(pStmt, 1); + + assert( iColnCol ); + pNew->aCol[iCol].iField = iField; + + pNew->bIntkey = 0; + iPk = -2; + } + recoverFinalize(p, pStmt); + + if( p->errCode==SQLITE_OK ){ + if( iPk>=0 ){ + pNew->aCol[iPk].bIPK = 1; + }else if( pNew->bIntkey ){ + pNew->iRowidBind = iBind++; + } + } + } +} + +/* +** This function is called after recoverCacheSchema() has cached those parts +** of the input database schema that could be recovered in temporary table +** "recovery.schema". This function creates in the output database copies +** of all parts of that schema that must be created before the tables can +** be populated. 
Specifically, this means: +** +** * all tables that are not VIRTUAL, and +** * UNIQUE indexes. +** +** If the recovery handle uses SQL callbacks, then callbacks containing +** the associated "CREATE TABLE" and "CREATE INDEX" statements are made. +** +** Additionally, records are added to the sqlite_schema table of the +** output database for any VIRTUAL tables. The CREATE VIRTUAL TABLE +** records are written directly to sqlite_schema, not actually executed. +** If the handle is in SQL callback mode, then callbacks are invoked +** with equivalent SQL statements. +*/ +static int recoverWriteSchema1(sqlite3_recover *p){ + sqlite3_stmt *pSelect = 0; + sqlite3_stmt *pTblname = 0; + + pSelect = recoverPrepare(p, p->dbOut, + "WITH dbschema(rootpage, name, sql, tbl, isVirtual, isIndex) AS (" + " SELECT rootpage, name, sql, " + " type='table', " + " sql LIKE 'create virtual%'," + " (type='index' AND (sql LIKE '%unique%' OR ?1))" + " FROM recovery.schema" + ")" + "SELECT rootpage, tbl, isVirtual, name, sql" + " FROM dbschema " + " WHERE tbl OR isIndex" + " ORDER BY tbl DESC, name=='sqlite_sequence' DESC" + ); + + pTblname = recoverPrepare(p, p->dbOut, + "SELECT name FROM sqlite_schema " + "WHERE type='table' ORDER BY rowid DESC LIMIT 1" + ); + + if( pSelect ){ + sqlite3_bind_int(pSelect, 1, p->bSlowIndexes); + while( sqlite3_step(pSelect)==SQLITE_ROW ){ + i64 iRoot = sqlite3_column_int64(pSelect, 0); + int bTable = sqlite3_column_int(pSelect, 1); + int bVirtual = sqlite3_column_int(pSelect, 2); + const char *zName = (const char*)sqlite3_column_text(pSelect, 3); + const char *zSql = (const char*)sqlite3_column_text(pSelect, 4); + char *zFree = 0; + int rc = SQLITE_OK; + + if( bVirtual ){ + zSql = (const char*)(zFree = recoverMPrintf(p, + "INSERT INTO sqlite_schema VALUES('table', %Q, %Q, 0, %Q)", + zName, zName, zSql + )); + } + rc = sqlite3_exec(p->dbOut, zSql, 0, 0, 0); + if( rc==SQLITE_OK ){ + recoverSqlCallback(p, zSql); + if( bTable && !bVirtual ){ + if( SQLITE_ROW==sqlite3_step(pTblname) ){ + const char *zTbl = (const char*)sqlite3_column_text(pTblname, 0); + recoverAddTable(p, zTbl, iRoot); + } + recoverReset(p, pTblname); + } + }else if( rc!=SQLITE_ERROR ){ + recoverDbError(p, p->dbOut); + } + sqlite3_free(zFree); + } + } + recoverFinalize(p, pSelect); + recoverFinalize(p, pTblname); + + return p->errCode; +} + +/* +** This function is called after the output database has been populated. It +** adds all recovered schema elements that were not created in the output +** database by recoverWriteSchema1() - everything except for tables and +** UNIQUE indexes. Specifically: +** +** * views, +** * triggers, +** * non-UNIQUE indexes. +** +** If the recover handle is in SQL callback mode, then equivalent callbacks +** are issued to create the schema elements. +*/ +static int recoverWriteSchema2(sqlite3_recover *p){ + sqlite3_stmt *pSelect = 0; + + pSelect = recoverPrepare(p, p->dbOut, + p->bSlowIndexes ? 
+ "SELECT rootpage, sql FROM recovery.schema " + " WHERE type!='table' AND type!='index'" + : + "SELECT rootpage, sql FROM recovery.schema " + " WHERE type!='table' AND (type!='index' OR sql NOT LIKE '%unique%')" + ); + + if( pSelect ){ + while( sqlite3_step(pSelect)==SQLITE_ROW ){ + const char *zSql = (const char*)sqlite3_column_text(pSelect, 1); + int rc = sqlite3_exec(p->dbOut, zSql, 0, 0, 0); + if( rc==SQLITE_OK ){ + recoverSqlCallback(p, zSql); + }else if( rc!=SQLITE_ERROR ){ + recoverDbError(p, p->dbOut); + } + } + } + recoverFinalize(p, pSelect); + + return p->errCode; +} + +/* +** This function is a no-op if recover handle p already contains an error +** (if p->errCode!=SQLITE_OK). In this case it returns NULL. +** +** Otherwise, if the recover handle is configured to create an output +** database (was created by sqlite3_recover_init()), then this function +** prepares and returns an SQL statement to INSERT a new record into table +** pTab, assuming the first nField fields of a record extracted from disk +** are valid. +** +** For example, if table pTab is: +** +** CREATE TABLE name(a, b GENERATED ALWAYS AS (a+1) STORED, c, d, e); +** +** And nField is 4, then the SQL statement prepared and returned is: +** +** INSERT INTO (a, c, d) VALUES (?1, ?2, ?3); +** +** In this case even though 4 values were extracted from the input db, +** only 3 are written to the output, as the generated STORED column +** cannot be written. +** +** If the recover handle is in SQL callback mode, then the SQL statement +** prepared is such that evaluating it returns a single row containing +** a single text value - itself an SQL statement similar to the above, +** except with SQL literals in place of the variables. For example: +** +** SELECT 'INSERT INTO (a, c, d) VALUES (' +** || quote(?1) || ', ' +** || quote(?2) || ', ' +** || quote(?3) || ')'; +** +** In either case, it is the responsibility of the caller to eventually +** free the statement handle using sqlite3_finalize(). +*/ +static sqlite3_stmt *recoverInsertStmt( + sqlite3_recover *p, + RecoverTable *pTab, + int nField +){ + sqlite3_stmt *pRet = 0; + const char *zSep = ""; + const char *zSqlSep = ""; + char *zSql = 0; + char *zFinal = 0; + char *zBind = 0; + int ii; + int bSql = p->xSql ? 
1 : 0; + + if( nField<=0 ) return 0; + + assert( nField<=pTab->nCol ); + + zSql = recoverMPrintf(p, "INSERT OR IGNORE INTO %Q(", pTab->zTab); + + if( pTab->iRowidBind ){ + assert( pTab->bIntkey ); + zSql = recoverMPrintf(p, "%z_rowid_", zSql); + if( bSql ){ + zBind = recoverMPrintf(p, "%zquote(?%d)", zBind, pTab->iRowidBind); + }else{ + zBind = recoverMPrintf(p, "%z?%d", zBind, pTab->iRowidBind); + } + zSqlSep = "||', '||"; + zSep = ", "; + } + + for(ii=0; iiaCol[ii].eHidden; + if( eHidden!=RECOVER_EHIDDEN_VIRTUAL + && eHidden!=RECOVER_EHIDDEN_STORED + ){ + assert( pTab->aCol[ii].iField>=0 && pTab->aCol[ii].iBind>=1 ); + zSql = recoverMPrintf(p, "%z%s%Q", zSql, zSep, pTab->aCol[ii].zCol); + + if( bSql ){ + zBind = recoverMPrintf(p, + "%z%sescape_crnl(quote(?%d))", zBind, zSqlSep, pTab->aCol[ii].iBind + ); + zSqlSep = "||', '||"; + }else{ + zBind = recoverMPrintf(p, "%z%s?%d", zBind, zSep, pTab->aCol[ii].iBind); + } + zSep = ", "; + } + } + + if( bSql ){ + zFinal = recoverMPrintf(p, "SELECT %Q || ') VALUES (' || %s || ')'", + zSql, zBind + ); + }else{ + zFinal = recoverMPrintf(p, "%s) VALUES (%s)", zSql, zBind); + } + + pRet = recoverPrepare(p, p->dbOut, zFinal); + sqlite3_free(zSql); + sqlite3_free(zBind); + sqlite3_free(zFinal); + + return pRet; +} + + +/* +** Search the list of RecoverTable objects at p->pTblList for one that +** has root page iRoot in the input database. If such an object is found, +** return a pointer to it. Otherwise, return NULL. +*/ +static RecoverTable *recoverFindTable(sqlite3_recover *p, u32 iRoot){ + RecoverTable *pRet = 0; + for(pRet=p->pTblList; pRet && pRet->iRoot!=iRoot; pRet=pRet->pNext); + return pRet; +} + +/* +** This function attempts to create a lost and found table within the +** output db. If successful, it returns a pointer to a buffer containing +** the name of the new table. It is the responsibility of the caller to +** eventually free this buffer using sqlite3_free(). +** +** If an error occurs, NULL is returned and an error code and error +** message left in the recover handle. +*/ +static char *recoverLostAndFoundCreate( + sqlite3_recover *p, /* Recover object */ + int nField /* Number of column fields in new table */ +){ + char *zTbl = 0; + sqlite3_stmt *pProbe = 0; + int ii = 0; + + pProbe = recoverPrepare(p, p->dbOut, + "SELECT 1 FROM sqlite_schema WHERE name=?" + ); + for(ii=-1; zTbl==0 && p->errCode==SQLITE_OK && ii<1000; ii++){ + int bFail = 0; + if( ii<0 ){ + zTbl = recoverMPrintf(p, "%s", p->zLostAndFound); + }else{ + zTbl = recoverMPrintf(p, "%s_%d", p->zLostAndFound, ii); + } + + if( p->errCode==SQLITE_OK ){ + sqlite3_bind_text(pProbe, 1, zTbl, -1, SQLITE_STATIC); + if( SQLITE_ROW==sqlite3_step(pProbe) ){ + bFail = 1; + } + recoverReset(p, pProbe); + } + + if( bFail ){ + sqlite3_clear_bindings(pProbe); + sqlite3_free(zTbl); + zTbl = 0; + } + } + recoverFinalize(p, pProbe); + + if( zTbl ){ + const char *zSep = 0; + char *zField = 0; + char *zSql = 0; + + zSep = "rootpgno INTEGER, pgno INTEGER, nfield INTEGER, id INTEGER, "; + for(ii=0; p->errCode==SQLITE_OK && iidbOut, zSql); + recoverSqlCallback(p, zSql); + sqlite3_free(zSql); + }else if( p->errCode==SQLITE_OK ){ + recoverError( + p, SQLITE_ERROR, "failed to create %s output table", p->zLostAndFound + ); + } + + return zTbl; +} + +/* +** Synthesize and prepare an INSERT statement to write to the lost_and_found +** table in the output database. The name of the table is zTab, and it has +** nField c* fields. 
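+**
+** For example (an illustrative sketch only, assuming the table was created
+** by recoverLostAndFoundCreate() under the name "lost_and_found" and that
+** nField==2), the table columns are (rootpgno, pgno, nfield, id, c0, c1)
+** and the statement prepared here is expected to be of the form:
+**
+**   INSERT INTO lost_and_found VALUES(?, ?, ?, ?, ?, ?)
+**
+** or, if the handle is in SQL callback mode, a SELECT that formats an
+** equivalent INSERT statement as text.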
+*/ +static sqlite3_stmt *recoverLostAndFoundInsert( + sqlite3_recover *p, + const char *zTab, + int nField +){ + int nTotal = nField + 4; + int ii; + char *zBind = 0; + sqlite3_stmt *pRet = 0; + + if( p->xSql==0 ){ + for(ii=0; iidbOut, "INSERT INTO %s VALUES(%s)", zTab, zBind + ); + }else{ + const char *zSep = ""; + for(ii=0; iidbOut, "SELECT 'INSERT INTO %s VALUES(' || %s || ')'", zTab, zBind + ); + } + + sqlite3_free(zBind); + return pRet; +} + +/* +** Input database page iPg contains data that will be written to the +** lost-and-found table of the output database. This function attempts +** to identify the root page of the tree that page iPg belonged to. +** If successful, it sets output variable (*piRoot) to the page number +** of the root page and returns SQLITE_OK. Otherwise, if an error occurs, +** an SQLite error code is returned and the final value of *piRoot +** undefined. +*/ +static int recoverLostAndFoundFindRoot( + sqlite3_recover *p, + i64 iPg, + i64 *piRoot +){ + RecoverStateLAF *pLaf = &p->laf; + + if( pLaf->pFindRoot==0 ){ + pLaf->pFindRoot = recoverPrepare(p, p->dbOut, + "WITH RECURSIVE p(pgno) AS (" + " SELECT ?" + " UNION" + " SELECT parent FROM recovery.map AS m, p WHERE m.pgno=p.pgno" + ") " + "SELECT p.pgno FROM p, recovery.map m WHERE m.pgno=p.pgno " + " AND m.parent IS NULL" + ); + } + if( p->errCode==SQLITE_OK ){ + sqlite3_bind_int64(pLaf->pFindRoot, 1, iPg); + if( sqlite3_step(pLaf->pFindRoot)==SQLITE_ROW ){ + *piRoot = sqlite3_column_int64(pLaf->pFindRoot, 0); + }else{ + *piRoot = iPg; + } + recoverReset(p, pLaf->pFindRoot); + } + return p->errCode; +} + +/* +** Recover data from page iPage of the input database and write it to +** the lost-and-found table in the output database. +*/ +static void recoverLostAndFoundOnePage(sqlite3_recover *p, i64 iPage){ + RecoverStateLAF *pLaf = &p->laf; + sqlite3_value **apVal = pLaf->apVal; + sqlite3_stmt *pPageData = pLaf->pPageData; + sqlite3_stmt *pInsert = pLaf->pInsert; + + int nVal = -1; + int iPrevCell = 0; + i64 iRoot = 0; + int bHaveRowid = 0; + i64 iRowid = 0; + int ii = 0; + + if( recoverLostAndFoundFindRoot(p, iPage, &iRoot) ) return; + sqlite3_bind_int64(pPageData, 1, iPage); + while( p->errCode==SQLITE_OK && SQLITE_ROW==sqlite3_step(pPageData) ){ + int iCell = sqlite3_column_int64(pPageData, 0); + int iField = sqlite3_column_int64(pPageData, 1); + + if( iPrevCell!=iCell && nVal>=0 ){ + /* Insert the new row */ + sqlite3_bind_int64(pInsert, 1, iRoot); /* rootpgno */ + sqlite3_bind_int64(pInsert, 2, iPage); /* pgno */ + sqlite3_bind_int(pInsert, 3, nVal); /* nfield */ + if( bHaveRowid ){ + sqlite3_bind_int64(pInsert, 4, iRowid); /* id */ + } + for(ii=0; iinMaxField ){ + sqlite3_value *pVal = sqlite3_column_value(pPageData, 2); + apVal[iField] = sqlite3_value_dup(pVal); + assert( iField==nVal || (nVal==-1 && iField==0) ); + nVal = iField+1; + if( apVal[iField]==0 ){ + recoverError(p, SQLITE_NOMEM, 0); + } + } + + iPrevCell = iCell; + } + recoverReset(p, pPageData); + + for(ii=0; iilaf; + if( p->errCode==SQLITE_OK ){ + if( pLaf->pInsert==0 ){ + return SQLITE_DONE; + }else{ + if( p->errCode==SQLITE_OK ){ + int res = sqlite3_step(pLaf->pAllPage); + if( res==SQLITE_ROW ){ + i64 iPage = sqlite3_column_int64(pLaf->pAllPage, 0); + if( recoverBitmapQuery(pLaf->pUsed, iPage)==0 ){ + recoverLostAndFoundOnePage(p, iPage); + } + }else{ + recoverReset(p, pLaf->pAllPage); + return SQLITE_DONE; + } + } + } + } + return SQLITE_OK; +} + +/* +** Initialize resources required in RECOVER_STATE_LOSTANDFOUND3 +** state - during which 
the lost-and-found table of the output database +** is populated with recovered data that can not be assigned to any +** recovered schema object. +*/ +static void recoverLostAndFound3Init(sqlite3_recover *p){ + RecoverStateLAF *pLaf = &p->laf; + + if( pLaf->nMaxField>0 ){ + char *zTab = 0; /* Name of lost_and_found table */ + + zTab = recoverLostAndFoundCreate(p, pLaf->nMaxField); + pLaf->pInsert = recoverLostAndFoundInsert(p, zTab, pLaf->nMaxField); + sqlite3_free(zTab); + + pLaf->pAllPage = recoverPreparePrintf(p, p->dbOut, + "WITH RECURSIVE seq(ii) AS (" + " SELECT 1 UNION ALL SELECT ii+1 FROM seq WHERE ii<%lld" + ")" + "SELECT ii FROM seq" , p->laf.nPg + ); + pLaf->pPageData = recoverPrepare(p, p->dbOut, + "SELECT cell, field, value " + "FROM sqlite_dbdata('getpage()') d WHERE d.pgno=? " + "UNION ALL " + "SELECT -1, -1, -1" + ); + + pLaf->apVal = (sqlite3_value**)recoverMalloc(p, + pLaf->nMaxField*sizeof(sqlite3_value*) + ); + } +} + +/* +** Initialize resources required in RECOVER_STATE_WRITING state - during which +** tables recovered from the schema of the input database are populated with +** recovered data. +*/ +static int recoverWriteDataInit(sqlite3_recover *p){ + RecoverStateW1 *p1 = &p->w1; + RecoverTable *pTbl = 0; + int nByte = 0; + + /* Figure out the maximum number of columns for any table in the schema */ + assert( p1->nMax==0 ); + for(pTbl=p->pTblList; pTbl; pTbl=pTbl->pNext){ + if( pTbl->nCol>p1->nMax ) p1->nMax = pTbl->nCol; + } + + /* Allocate an array of (sqlite3_value*) in which to accumulate the values + ** that will be written to the output database in a single row. */ + nByte = sizeof(sqlite3_value*) * (p1->nMax+1); + p1->apVal = (sqlite3_value**)recoverMalloc(p, nByte); + if( p1->apVal==0 ) return p->errCode; + + /* Prepare the SELECT to loop through schema tables (pTbls) and the SELECT + ** to loop through cells that appear to belong to a single table (pSel). */ + p1->pTbls = recoverPrepare(p, p->dbOut, + "SELECT rootpage FROM recovery.schema " + " WHERE type='table' AND (sql NOT LIKE 'create virtual%')" + " ORDER BY (tbl_name='sqlite_sequence') ASC" + ); + p1->pSel = recoverPrepare(p, p->dbOut, + "WITH RECURSIVE pages(page) AS (" + " SELECT ?1" + " UNION" + " SELECT child FROM sqlite_dbptr('getpage()'), pages " + " WHERE pgno=page" + ") " + "SELECT page, cell, field, value " + "FROM sqlite_dbdata('getpage()') d, pages p WHERE p.page=d.pgno " + "UNION ALL " + "SELECT 0, 0, 0, 0" + ); + + return p->errCode; +} + +/* +** Clean up resources allocated by recoverWriteDataInit() (stuff in +** sqlite3_recover.w1). +*/ +static void recoverWriteDataCleanup(sqlite3_recover *p){ + RecoverStateW1 *p1 = &p->w1; + int ii; + for(ii=0; iinVal; ii++){ + sqlite3_value_free(p1->apVal[ii]); + } + sqlite3_free(p1->apVal); + recoverFinalize(p, p1->pInsert); + recoverFinalize(p, p1->pTbls); + recoverFinalize(p, p1->pSel); + memset(p1, 0, sizeof(*p1)); +} + +/* +** Perform one step (sqlite3_recover_step()) of work for the connection +** passed as the only argument, which is guaranteed to be in +** RECOVER_STATE_WRITING state - during which tables recovered from the +** schema of the input database are populated with recovered data. 
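+**
+** As a sketch of the data flow (the page and cell numbers below are
+** invented for illustration): pSel returns one row per (page, cell, field,
+** value) tuple, with a field number of -1 carrying the rowid. Rows such as
+** (5,0,-1,7), (5,0,0,'x'), (5,0,1,'y') followed by a row for cell 1 cause
+** a single record with rowid 7 and two field values to be written out,
+** because the accumulated values are flushed whenever the (page, cell)
+** pair changes.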
+*/ +static int recoverWriteDataStep(sqlite3_recover *p){ + RecoverStateW1 *p1 = &p->w1; + sqlite3_stmt *pSel = p1->pSel; + sqlite3_value **apVal = p1->apVal; + + if( p->errCode==SQLITE_OK && p1->pTab==0 ){ + if( sqlite3_step(p1->pTbls)==SQLITE_ROW ){ + i64 iRoot = sqlite3_column_int64(p1->pTbls, 0); + p1->pTab = recoverFindTable(p, iRoot); + + recoverFinalize(p, p1->pInsert); + p1->pInsert = 0; + + /* If this table is unknown, return early. The caller will invoke this + ** function again and it will move on to the next table. */ + if( p1->pTab==0 ) return p->errCode; + + /* If this is the sqlite_sequence table, delete any rows added by + ** earlier INSERT statements on tables with AUTOINCREMENT primary + ** keys before recovering its contents. The p1->pTbls SELECT statement + ** is rigged to deliver "sqlite_sequence" last of all, so we don't + ** worry about it being modified after it is recovered. */ + if( sqlite3_stricmp("sqlite_sequence", p1->pTab->zTab)==0 ){ + recoverExec(p, p->dbOut, "DELETE FROM sqlite_sequence"); + recoverSqlCallback(p, "DELETE FROM sqlite_sequence"); + } + + /* Bind the root page of this table within the original database to + ** SELECT statement p1->pSel. The SELECT statement will then iterate + ** through cells that look like they belong to table pTab. */ + sqlite3_bind_int64(pSel, 1, iRoot); + + p1->nVal = 0; + p1->bHaveRowid = 0; + p1->iPrevPage = -1; + p1->iPrevCell = -1; + }else{ + return SQLITE_DONE; + } + } + assert( p->errCode!=SQLITE_OK || p1->pTab ); + + if( p->errCode==SQLITE_OK && sqlite3_step(pSel)==SQLITE_ROW ){ + RecoverTable *pTab = p1->pTab; + + i64 iPage = sqlite3_column_int64(pSel, 0); + int iCell = sqlite3_column_int(pSel, 1); + int iField = sqlite3_column_int(pSel, 2); + sqlite3_value *pVal = sqlite3_column_value(pSel, 3); + int bNewCell = (p1->iPrevPage!=iPage || p1->iPrevCell!=iCell); + + assert( bNewCell==0 || (iField==-1 || iField==0) ); + assert( bNewCell || iField==p1->nVal || p1->nVal==pTab->nCol ); + + if( bNewCell ){ + int ii = 0; + if( p1->nVal>=0 ){ + if( p1->pInsert==0 || p1->nVal!=p1->nInsert ){ + recoverFinalize(p, p1->pInsert); + p1->pInsert = recoverInsertStmt(p, pTab, p1->nVal); + p1->nInsert = p1->nVal; + } + if( p1->nVal>0 ){ + sqlite3_stmt *pInsert = p1->pInsert; + for(ii=0; iinCol; ii++){ + RecoverColumn *pCol = &pTab->aCol[ii]; + int iBind = pCol->iBind; + if( iBind>0 ){ + if( pCol->bIPK ){ + sqlite3_bind_int64(pInsert, iBind, p1->iRowid); + }else if( pCol->iFieldnVal ){ + recoverBindValue(p, pInsert, iBind, apVal[pCol->iField]); + } + } + } + if( p->bRecoverRowid && pTab->iRowidBind>0 && p1->bHaveRowid ){ + sqlite3_bind_int64(pInsert, pTab->iRowidBind, p1->iRowid); + } + if( SQLITE_ROW==sqlite3_step(pInsert) ){ + const char *z = (const char*)sqlite3_column_text(pInsert, 0); + recoverSqlCallback(p, z); + } + recoverReset(p, pInsert); + assert( p->errCode || pInsert ); + if( pInsert ) sqlite3_clear_bindings(pInsert); + } + } + + for(ii=0; iinVal; ii++){ + sqlite3_value_free(apVal[ii]); + apVal[ii] = 0; + } + p1->nVal = -1; + p1->bHaveRowid = 0; + } + + if( iPage!=0 ){ + if( iField<0 ){ + p1->iRowid = sqlite3_column_int64(pSel, 3); + assert( p1->nVal==-1 ); + p1->nVal = 0; + p1->bHaveRowid = 1; + }else if( iFieldnCol ){ + assert( apVal[iField]==0 ); + apVal[iField] = sqlite3_value_dup( pVal ); + if( apVal[iField]==0 ){ + recoverError(p, SQLITE_NOMEM, 0); + } + p1->nVal = iField+1; + } + p1->iPrevCell = iCell; + p1->iPrevPage = iPage; + } + }else{ + recoverReset(p, pSel); + p1->pTab = 0; + } + + return p->errCode; +} + +/* 
+** Initialize resources required by sqlite3_recover_step() in +** RECOVER_STATE_LOSTANDFOUND1 state - during which the set of pages not +** already allocated to a recovered schema element is determined. +*/ +static void recoverLostAndFound1Init(sqlite3_recover *p){ + RecoverStateLAF *pLaf = &p->laf; + sqlite3_stmt *pStmt = 0; + + assert( p->laf.pUsed==0 ); + pLaf->nPg = recoverPageCount(p); + pLaf->pUsed = recoverBitmapAlloc(p, pLaf->nPg); + + /* Prepare a statement to iterate through all pages that are part of any tree + ** in the recoverable part of the input database schema to the bitmap. And, + ** if !p->bFreelistCorrupt, add all pages that appear to be part of the + ** freelist. */ + pStmt = recoverPrepare( + p, p->dbOut, + "WITH trunk(pgno) AS (" + " SELECT read_i32(getpage(1), 8) AS x WHERE x>0" + " UNION" + " SELECT read_i32(getpage(trunk.pgno), 0) AS x FROM trunk WHERE x>0" + ")," + "trunkdata(pgno, data) AS (" + " SELECT pgno, getpage(pgno) FROM trunk" + ")," + "freelist(data, n, freepgno) AS (" + " SELECT data, min(16384, read_i32(data, 1)-1), pgno FROM trunkdata" + " UNION ALL" + " SELECT data, n-1, read_i32(data, 2+n) FROM freelist WHERE n>=0" + ")," + "" + "roots(r) AS (" + " SELECT 1 UNION ALL" + " SELECT rootpage FROM recovery.schema WHERE rootpage>0" + ")," + "used(page) AS (" + " SELECT r FROM roots" + " UNION" + " SELECT child FROM sqlite_dbptr('getpage()'), used " + " WHERE pgno=page" + ") " + "SELECT page FROM used" + " UNION ALL " + "SELECT freepgno FROM freelist WHERE NOT ?" + ); + if( pStmt ) sqlite3_bind_int(pStmt, 1, p->bFreelistCorrupt); + pLaf->pUsedPages = pStmt; +} + +/* +** Perform one step (sqlite3_recover_step()) of work for the connection +** passed as the only argument, which is guaranteed to be in +** RECOVER_STATE_LOSTANDFOUND1 state - during which the set of pages not +** already allocated to a recovered schema element is determined. +*/ +static int recoverLostAndFound1Step(sqlite3_recover *p){ + RecoverStateLAF *pLaf = &p->laf; + int rc = p->errCode; + if( rc==SQLITE_OK ){ + rc = sqlite3_step(pLaf->pUsedPages); + if( rc==SQLITE_ROW ){ + i64 iPg = sqlite3_column_int64(pLaf->pUsedPages, 0); + recoverBitmapSet(pLaf->pUsed, iPg); + rc = SQLITE_OK; + }else{ + recoverFinalize(p, pLaf->pUsedPages); + pLaf->pUsedPages = 0; + } + } + return rc; +} + +/* +** Initialize resources required by RECOVER_STATE_LOSTANDFOUND2 +** state - during which the pages identified in RECOVER_STATE_LOSTANDFOUND1 +** are sorted into sets that likely belonged to the same database tree. +*/ +static void recoverLostAndFound2Init(sqlite3_recover *p){ + RecoverStateLAF *pLaf = &p->laf; + + assert( p->laf.pAllAndParent==0 ); + assert( p->laf.pMapInsert==0 ); + assert( p->laf.pMaxField==0 ); + assert( p->laf.nMaxField==0 ); + + pLaf->pMapInsert = recoverPrepare(p, p->dbOut, + "INSERT OR IGNORE INTO recovery.map(pgno, parent) VALUES(?, ?)" + ); + pLaf->pAllAndParent = recoverPreparePrintf(p, p->dbOut, + "WITH RECURSIVE seq(ii) AS (" + " SELECT 1 UNION ALL SELECT ii+1 FROM seq WHERE ii<%lld" + ")" + "SELECT pgno, child FROM sqlite_dbptr('getpage()') " + " UNION ALL " + "SELECT NULL, ii FROM seq", p->laf.nPg + ); + pLaf->pMaxField = recoverPreparePrintf(p, p->dbOut, + "SELECT max(field)+1 FROM sqlite_dbdata('getpage') WHERE pgno = ?" 
+ ); +} + +/* +** Perform one step (sqlite3_recover_step()) of work for the connection +** passed as the only argument, which is guaranteed to be in +** RECOVER_STATE_LOSTANDFOUND2 state - during which the pages identified +** in RECOVER_STATE_LOSTANDFOUND1 are sorted into sets that likely belonged +** to the same database tree. +*/ +static int recoverLostAndFound2Step(sqlite3_recover *p){ + RecoverStateLAF *pLaf = &p->laf; + if( p->errCode==SQLITE_OK ){ + int res = sqlite3_step(pLaf->pAllAndParent); + if( res==SQLITE_ROW ){ + i64 iChild = sqlite3_column_int(pLaf->pAllAndParent, 1); + if( recoverBitmapQuery(pLaf->pUsed, iChild)==0 ){ + sqlite3_bind_int64(pLaf->pMapInsert, 1, iChild); + sqlite3_bind_value(pLaf->pMapInsert, 2, + sqlite3_column_value(pLaf->pAllAndParent, 0) + ); + sqlite3_step(pLaf->pMapInsert); + recoverReset(p, pLaf->pMapInsert); + sqlite3_bind_int64(pLaf->pMaxField, 1, iChild); + if( SQLITE_ROW==sqlite3_step(pLaf->pMaxField) ){ + int nMax = sqlite3_column_int(pLaf->pMaxField, 0); + if( nMax>pLaf->nMaxField ) pLaf->nMaxField = nMax; + } + recoverReset(p, pLaf->pMaxField); + } + }else{ + recoverFinalize(p, pLaf->pAllAndParent); + pLaf->pAllAndParent =0; + return SQLITE_DONE; + } + } + return p->errCode; +} + +/* +** Free all resources allocated as part of sqlite3_recover_step() calls +** in one of the RECOVER_STATE_LOSTANDFOUND[123] states. +*/ +static void recoverLostAndFoundCleanup(sqlite3_recover *p){ + recoverBitmapFree(p->laf.pUsed); + p->laf.pUsed = 0; + sqlite3_finalize(p->laf.pUsedPages); + sqlite3_finalize(p->laf.pAllAndParent); + sqlite3_finalize(p->laf.pMapInsert); + sqlite3_finalize(p->laf.pMaxField); + sqlite3_finalize(p->laf.pFindRoot); + sqlite3_finalize(p->laf.pInsert); + sqlite3_finalize(p->laf.pAllPage); + sqlite3_finalize(p->laf.pPageData); + p->laf.pUsedPages = 0; + p->laf.pAllAndParent = 0; + p->laf.pMapInsert = 0; + p->laf.pMaxField = 0; + p->laf.pFindRoot = 0; + p->laf.pInsert = 0; + p->laf.pAllPage = 0; + p->laf.pPageData = 0; + sqlite3_free(p->laf.apVal); + p->laf.apVal = 0; +} + +/* +** Free all resources allocated as part of sqlite3_recover_step() calls. +*/ +static void recoverFinalCleanup(sqlite3_recover *p){ + RecoverTable *pTab = 0; + RecoverTable *pNext = 0; + + recoverWriteDataCleanup(p); + recoverLostAndFoundCleanup(p); + + for(pTab=p->pTblList; pTab; pTab=pNext){ + pNext = pTab->pNext; + sqlite3_free(pTab); + } + p->pTblList = 0; + sqlite3_finalize(p->pGetPage); + p->pGetPage = 0; + sqlite3_file_control(p->dbIn, p->zDb, SQLITE_FCNTL_RESET_CACHE, 0); + + { +#ifndef NDEBUG + int res = +#endif + sqlite3_close(p->dbOut); + assert( res==SQLITE_OK ); + } + p->dbOut = 0; +} + +/* +** Decode and return an unsigned 16-bit big-endian integer value from +** buffer a[]. +*/ +static u32 recoverGetU16(const u8 *a){ + return (((u32)a[0])<<8) + ((u32)a[1]); +} + +/* +** Decode and return an unsigned 32-bit big-endian integer value from +** buffer a[]. +*/ +static u32 recoverGetU32(const u8 *a){ + return (((u32)a[0])<<24) + (((u32)a[1])<<16) + (((u32)a[2])<<8) + ((u32)a[3]); +} + +/* +** Decode an SQLite varint from buffer a[]. Write the decoded value to (*pVal) +** and return the number of bytes consumed. +*/ +static int recoverGetVarint(const u8 *a, i64 *pVal){ + sqlite3_uint64 u = 0; + int i; + for(i=0; i<8; i++){ + u = (u<<7) + (a[i]&0x7f); + if( (a[i]&0x80)==0 ){ *pVal = (sqlite3_int64)u; return i+1; } + } + u = (u<<8) + (a[i]&0xff); + *pVal = (sqlite3_int64)u; + return 9; +} + +/* +** The second argument points to a buffer n bytes in size. 
If this buffer +** or a prefix thereof appears to contain a well-formed SQLite b-tree page, +** return the page-size in bytes. Otherwise, if the buffer does not +** appear to contain a well-formed b-tree page, return 0. +*/ +static int recoverIsValidPage(u8 *aTmp, const u8 *a, int n){ + u8 *aUsed = aTmp; + int nFrag = 0; + int nActual = 0; + int iFree = 0; + int nCell = 0; /* Number of cells on page */ + int iCellOff = 0; /* Offset of cell array in page */ + int iContent = 0; + int eType = 0; + int ii = 0; + + eType = (int)a[0]; + if( eType!=0x02 && eType!=0x05 && eType!=0x0A && eType!=0x0D ) return 0; + + iFree = (int)recoverGetU16(&a[1]); + nCell = (int)recoverGetU16(&a[3]); + iContent = (int)recoverGetU16(&a[5]); + if( iContent==0 ) iContent = 65536; + nFrag = (int)a[7]; + + if( iContent>n ) return 0; + + memset(aUsed, 0, n); + memset(aUsed, 0xFF, iContent); + + /* Follow the free-list. This is the same format for all b-tree pages. */ + if( iFree && iFree<=iContent ) return 0; + while( iFree ){ + int iNext = 0; + int nByte = 0; + if( iFree>(n-4) ) return 0; + iNext = recoverGetU16(&a[iFree]); + nByte = recoverGetU16(&a[iFree+2]); + if( iFree+nByte>n || nByte<4 ) return 0; + if( iNext && iNextiContent ) return 0; + for(ii=0; iin ){ + return 0; + } + if( eType==0x05 || eType==0x02 ) nByte += 4; + nByte += recoverGetVarint(&a[iOff+nByte], &nPayload); + if( eType==0x0D ){ + i64 dummy = 0; + nByte += recoverGetVarint(&a[iOff+nByte], &dummy); + } + if( eType!=0x05 ){ + int X = (eType==0x0D) ? n-35 : (((n-12)*64/255)-23); + int M = ((n-12)*32/255)-23; + int K = M+((nPayload-M)%(n-4)); + + if( nPayloadn ){ + return 0; + } + for(iByte=iOff; iByte<(iOff+nByte); iByte++){ + if( aUsed[iByte]!=0 ){ + return 0; + } + aUsed[iByte] = 0xFF; + } + } + + nActual = 0; + for(ii=0; iipMethods!=&recover_methods ); + return pFd->pMethods->xClose(pFd); +} + +/* +** Write value v to buffer a[] as a 16-bit big-endian unsigned integer. +*/ +static void recoverPutU16(u8 *a, u32 v){ + a[0] = (v>>8) & 0x00FF; + a[1] = (v>>0) & 0x00FF; +} + +/* +** Write value v to buffer a[] as a 32-bit big-endian unsigned integer. +*/ +static void recoverPutU32(u8 *a, u32 v){ + a[0] = (v>>24) & 0x00FF; + a[1] = (v>>16) & 0x00FF; + a[2] = (v>>8) & 0x00FF; + a[3] = (v>>0) & 0x00FF; +} + +/* +** Detect the page-size of the database opened by file-handle pFd by +** searching the first part of the file for a well-formed SQLite b-tree +** page. If parameter nReserve is non-zero, then as well as searching for +** a b-tree page with zero reserved bytes, this function searches for one +** with nReserve reserved bytes at the end of it. +** +** If successful, set variable p->detected_pgsz to the detected page-size +** in bytes and return SQLITE_OK. Or, if no error occurs but no valid page +** can be found, return SQLITE_OK but leave p->detected_pgsz set to 0. Or, +** if an error occurs (e.g. an IO or OOM error), then an SQLite error code +** is returned. The final value of p->detected_pgsz is undefined in this +** case. 
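+**
+** As an illustrative example (based on the nMin, nMax and nMaxBlk constants
+** used below): for a database file of a few megabytes, only the first four
+** 64KiB blocks are inspected, and candidate page sizes of 512, 1024, ...,
+** 65536 bytes are tried at each aligned offset within them; the largest
+** candidate for which a well-formed b-tree page is found is the value
+** recorded in p->detected_pgsz.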
+*/ +static int recoverVfsDetectPagesize( + sqlite3_recover *p, /* Recover handle */ + sqlite3_file *pFd, /* File-handle open on input database */ + u32 nReserve, /* Possible nReserve value */ + i64 nSz /* Size of database file in bytes */ +){ + int rc = SQLITE_OK; + const int nMin = 512; + const int nMax = 65536; + const int nMaxBlk = 4; + u32 pgsz = 0; + int iBlk = 0; + u8 *aPg = 0; + u8 *aTmp = 0; + int nBlk = 0; + + aPg = (u8*)sqlite3_malloc(2*nMax); + if( aPg==0 ) return SQLITE_NOMEM; + aTmp = &aPg[nMax]; + + nBlk = (nSz+nMax-1)/nMax; + if( nBlk>nMaxBlk ) nBlk = nMaxBlk; + + do { + for(iBlk=0; rc==SQLITE_OK && iBlk=((iBlk+1)*nMax)) ? nMax : (nSz % nMax); + memset(aPg, 0, nMax); + rc = pFd->pMethods->xRead(pFd, aPg, nByte, iBlk*nMax); + if( rc==SQLITE_OK ){ + int pgsz2; + for(pgsz2=(pgsz ? pgsz*2 : nMin); pgsz2<=nMax; pgsz2=pgsz2*2){ + int iOff; + for(iOff=0; iOff(u32)p->detected_pgsz ){ + p->detected_pgsz = pgsz; + p->nReserve = nReserve; + } + if( nReserve==0 ) break; + nReserve = 0; + }while( 1 ); + + p->detected_pgsz = pgsz; + sqlite3_free(aPg); + return rc; +} + +/* +** The xRead() method of the wrapper VFS. This is used to intercept calls +** to read page 1 of the input database. +*/ +static int recoverVfsRead(sqlite3_file *pFd, void *aBuf, int nByte, i64 iOff){ + int rc = SQLITE_OK; + if( pFd->pMethods==&recover_methods ){ + pFd->pMethods = recover_g.pMethods; + rc = pFd->pMethods->xRead(pFd, aBuf, nByte, iOff); + if( nByte==16 ){ + sqlite3_randomness(16, aBuf); + }else + if( rc==SQLITE_OK && iOff==0 && nByte>=108 ){ + /* Ensure that the database has a valid header file. The only fields + ** that really matter to recovery are: + ** + ** + Database page size (16-bits at offset 16) + ** + Size of db in pages (32-bits at offset 28) + ** + Database encoding (32-bits at offset 56) + ** + ** Also preserved are: + ** + ** + first freelist page (32-bits at offset 32) + ** + size of freelist (32-bits at offset 36) + ** + the wal-mode flags (16-bits at offset 18) + ** + ** We also try to preserve the auto-vacuum, incr-value, user-version + ** and application-id fields - all 32 bit quantities at offsets + ** 52, 60, 64 and 68. All other fields are set to known good values. + ** + ** Byte offset 105 should also contain the page-size as a 16-bit + ** integer. 
+ */ + const int aPreserve[] = {32, 36, 52, 60, 64, 68}; + u8 aHdr[108] = { + 0x53, 0x51, 0x4c, 0x69, 0x74, 0x65, 0x20, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x20, 0x33, 0x00, + 0xFF, 0xFF, 0x01, 0x01, 0x00, 0x40, 0x20, 0x20, + 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x10, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x2e, 0x5b, 0x30, + + 0x0D, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00 + }; + u8 *a = (u8*)aBuf; + + u32 pgsz = recoverGetU16(&a[16]); + u32 nReserve = a[20]; + u32 enc = recoverGetU32(&a[56]); + u32 dbsz = 0; + i64 dbFileSize = 0; + int ii; + sqlite3_recover *p = recover_g.p; + + if( pgsz==0x01 ) pgsz = 65536; + rc = pFd->pMethods->xFileSize(pFd, &dbFileSize); + + if( rc==SQLITE_OK && p->detected_pgsz==0 ){ + rc = recoverVfsDetectPagesize(p, pFd, nReserve, dbFileSize); + } + if( p->detected_pgsz ){ + pgsz = p->detected_pgsz; + nReserve = p->nReserve; + } + + if( pgsz ){ + dbsz = dbFileSize / pgsz; + } + if( enc!=SQLITE_UTF8 && enc!=SQLITE_UTF16BE && enc!=SQLITE_UTF16LE ){ + enc = SQLITE_UTF8; + } + + sqlite3_free(p->pPage1Cache); + p->pPage1Cache = 0; + p->pPage1Disk = 0; + + p->pgsz = nByte; + p->pPage1Cache = (u8*)recoverMalloc(p, nByte*2); + if( p->pPage1Cache ){ + p->pPage1Disk = &p->pPage1Cache[nByte]; + memcpy(p->pPage1Disk, aBuf, nByte); + aHdr[18] = a[18]; + aHdr[19] = a[19]; + recoverPutU32(&aHdr[28], dbsz); + recoverPutU32(&aHdr[56], enc); + recoverPutU16(&aHdr[105], pgsz-nReserve); + if( pgsz==65536 ) pgsz = 1; + recoverPutU16(&aHdr[16], pgsz); + aHdr[20] = nReserve; + for(ii=0; ii<(int)(sizeof(aPreserve)/sizeof(aPreserve[0])); ii++){ + memcpy(&aHdr[aPreserve[ii]], &a[aPreserve[ii]], 4); + } + memcpy(aBuf, aHdr, sizeof(aHdr)); + memset(&((u8*)aBuf)[sizeof(aHdr)], 0, nByte-sizeof(aHdr)); + + memcpy(p->pPage1Cache, aBuf, nByte); + }else{ + rc = p->errCode; + } + + } + pFd->pMethods = &recover_methods; + }else{ + rc = pFd->pMethods->xRead(pFd, aBuf, nByte, iOff); + } + return rc; +} + +/* +** Used to make sqlite3_io_methods wrapper methods less verbose. +*/ +#define RECOVER_VFS_WRAPPER(code) \ + int rc = SQLITE_OK; \ + if( pFd->pMethods==&recover_methods ){ \ + pFd->pMethods = recover_g.pMethods; \ + rc = code; \ + pFd->pMethods = &recover_methods; \ + }else{ \ + rc = code; \ + } \ + return rc; + +/* +** Methods of the wrapper VFS. All methods except for xRead() and xClose() +** simply uninstall the sqlite3_io_methods wrapper, invoke the equivalent +** method on the lower level VFS, then reinstall the wrapper before returning. +** Those that return an integer value use the RECOVER_VFS_WRAPPER macro. 
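+**
+** As a sketch, after macro expansion recoverVfsSync() below is equivalent
+** to the following (no behavioural difference is intended):
+**
+**   static int recoverVfsSync(sqlite3_file *pFd, int flags){
+**     int rc = SQLITE_OK;
+**     if( pFd->pMethods==&recover_methods ){
+**       pFd->pMethods = recover_g.pMethods;
+**       rc = pFd->pMethods->xSync(pFd, flags);
+**       pFd->pMethods = &recover_methods;
+**     }else{
+**       rc = pFd->pMethods->xSync(pFd, flags);
+**     }
+**     return rc;
+**   }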
+*/ +static int recoverVfsWrite( + sqlite3_file *pFd, const void *aBuf, int nByte, i64 iOff +){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xWrite(pFd, aBuf, nByte, iOff) + ); +} +static int recoverVfsTruncate(sqlite3_file *pFd, sqlite3_int64 size){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xTruncate(pFd, size) + ); +} +static int recoverVfsSync(sqlite3_file *pFd, int flags){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xSync(pFd, flags) + ); +} +static int recoverVfsFileSize(sqlite3_file *pFd, sqlite3_int64 *pSize){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xFileSize(pFd, pSize) + ); +} +static int recoverVfsLock(sqlite3_file *pFd, int eLock){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xLock(pFd, eLock) + ); +} +static int recoverVfsUnlock(sqlite3_file *pFd, int eLock){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xUnlock(pFd, eLock) + ); +} +static int recoverVfsCheckReservedLock(sqlite3_file *pFd, int *pResOut){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xCheckReservedLock(pFd, pResOut) + ); +} +static int recoverVfsFileControl(sqlite3_file *pFd, int op, void *pArg){ + RECOVER_VFS_WRAPPER ( + (pFd->pMethods ? pFd->pMethods->xFileControl(pFd, op, pArg) : SQLITE_NOTFOUND) + ); +} +static int recoverVfsSectorSize(sqlite3_file *pFd){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xSectorSize(pFd) + ); +} +static int recoverVfsDeviceCharacteristics(sqlite3_file *pFd){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xDeviceCharacteristics(pFd) + ); +} +static int recoverVfsShmMap( + sqlite3_file *pFd, int iPg, int pgsz, int bExtend, void volatile **pp +){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xShmMap(pFd, iPg, pgsz, bExtend, pp) + ); +} +static int recoverVfsShmLock(sqlite3_file *pFd, int offset, int n, int flags){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xShmLock(pFd, offset, n, flags) + ); +} +static void recoverVfsShmBarrier(sqlite3_file *pFd){ + if( pFd->pMethods==&recover_methods ){ + pFd->pMethods = recover_g.pMethods; + pFd->pMethods->xShmBarrier(pFd); + pFd->pMethods = &recover_methods; + }else{ + pFd->pMethods->xShmBarrier(pFd); + } +} +static int recoverVfsShmUnmap(sqlite3_file *pFd, int deleteFlag){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xShmUnmap(pFd, deleteFlag) + ); +} + +static int recoverVfsFetch( + sqlite3_file *pFd, + sqlite3_int64 iOff, + int iAmt, + void **pp +){ + (void)pFd; + (void)iOff; + (void)iAmt; + *pp = 0; + return SQLITE_OK; +} +static int recoverVfsUnfetch(sqlite3_file *pFd, sqlite3_int64 iOff, void *p){ + (void)pFd; + (void)iOff; + (void)p; + return SQLITE_OK; +} + +/* +** Install the VFS wrapper around the file-descriptor open on the input +** database for recover handle p. Mutex RECOVER_MUTEX_ID must be held +** when this function is called. +*/ +static void recoverInstallWrapper(sqlite3_recover *p){ + sqlite3_file *pFd = 0; + assert( recover_g.pMethods==0 ); + recoverAssertMutexHeld(); + sqlite3_file_control(p->dbIn, p->zDb, SQLITE_FCNTL_FILE_POINTER, (void*)&pFd); + assert( pFd==0 || pFd->pMethods!=&recover_methods ); + if( pFd && pFd->pMethods ){ + int iVersion = 1 + (pFd->pMethods->iVersion>1 && pFd->pMethods->xShmMap!=0); + recover_g.pMethods = pFd->pMethods; + recover_g.p = p; + recover_methods.iVersion = iVersion; + pFd->pMethods = &recover_methods; + } +} + +/* +** Uninstall the VFS wrapper that was installed around the file-descriptor open +** on the input database for recover handle p. Mutex RECOVER_MUTEX_ID must be +** held when this function is called. 
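+**
+** Within this file the function is always used as the closing half of a
+** bracketed pair with recoverInstallWrapper(). In outline, the
+** RECOVER_STATE_INIT case of recoverStep() below does:
+**
+**   recoverEnterMutex();
+**   recoverInstallWrapper(p);
+**     ... open the output db and read the input schema through the wrapper ...
+**   recoverUninstallWrapper(p);
+**   recoverLeaveMutex();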
+*/ +static void recoverUninstallWrapper(sqlite3_recover *p){ + sqlite3_file *pFd = 0; + recoverAssertMutexHeld(); + sqlite3_file_control(p->dbIn, p->zDb,SQLITE_FCNTL_FILE_POINTER,(void*)&pFd); + if( pFd && pFd->pMethods ){ + pFd->pMethods = recover_g.pMethods; + recover_g.pMethods = 0; + recover_g.p = 0; + } +} + +/* +** This function does the work of a single sqlite3_recover_step() call. It +** is guaranteed that the handle is not in an error state when this +** function is called. +*/ +static void recoverStep(sqlite3_recover *p){ + assert( p && p->errCode==SQLITE_OK ); + switch( p->eState ){ + case RECOVER_STATE_INIT: + /* This is the very first call to sqlite3_recover_step() on this object. + */ + recoverSqlCallback(p, "BEGIN"); + recoverSqlCallback(p, "PRAGMA writable_schema = on"); + + recoverEnterMutex(); + recoverInstallWrapper(p); + + /* Open the output database. And register required virtual tables and + ** user functions with the new handle. */ + recoverOpenOutput(p); + + /* Open transactions on both the input and output databases. */ + sqlite3_file_control(p->dbIn, p->zDb, SQLITE_FCNTL_RESET_CACHE, 0); + recoverExec(p, p->dbIn, "PRAGMA writable_schema = on"); + recoverExec(p, p->dbIn, "BEGIN"); + if( p->errCode==SQLITE_OK ) p->bCloseTransaction = 1; + recoverExec(p, p->dbIn, "SELECT 1 FROM sqlite_schema"); + recoverTransferSettings(p); + recoverOpenRecovery(p); + recoverCacheSchema(p); + + recoverUninstallWrapper(p); + recoverLeaveMutex(); + + recoverExec(p, p->dbOut, "BEGIN"); + + recoverWriteSchema1(p); + p->eState = RECOVER_STATE_WRITING; + break; + + case RECOVER_STATE_WRITING: { + if( p->w1.pTbls==0 ){ + recoverWriteDataInit(p); + } + if( SQLITE_DONE==recoverWriteDataStep(p) ){ + recoverWriteDataCleanup(p); + if( p->zLostAndFound ){ + p->eState = RECOVER_STATE_LOSTANDFOUND1; + }else{ + p->eState = RECOVER_STATE_SCHEMA2; + } + } + break; + } + + case RECOVER_STATE_LOSTANDFOUND1: { + if( p->laf.pUsed==0 ){ + recoverLostAndFound1Init(p); + } + if( SQLITE_DONE==recoverLostAndFound1Step(p) ){ + p->eState = RECOVER_STATE_LOSTANDFOUND2; + } + break; + } + case RECOVER_STATE_LOSTANDFOUND2: { + if( p->laf.pAllAndParent==0 ){ + recoverLostAndFound2Init(p); + } + if( SQLITE_DONE==recoverLostAndFound2Step(p) ){ + p->eState = RECOVER_STATE_LOSTANDFOUND3; + } + break; + } + + case RECOVER_STATE_LOSTANDFOUND3: { + if( p->laf.pInsert==0 ){ + recoverLostAndFound3Init(p); + } + if( SQLITE_DONE==recoverLostAndFound3Step(p) ){ + p->eState = RECOVER_STATE_SCHEMA2; + } + break; + } + + case RECOVER_STATE_SCHEMA2: { + int rc = SQLITE_OK; + + recoverWriteSchema2(p); + p->eState = RECOVER_STATE_DONE; + + /* If no error has occurred, commit the write transaction on the output + ** database. Regardless of whether or not an error has occurred, make + ** an attempt to end the read transaction on the input database. */ + recoverExec(p, p->dbOut, "COMMIT"); + rc = sqlite3_exec(p->dbIn, "END", 0, 0, 0); + if( p->errCode==SQLITE_OK ) p->errCode = rc; + + recoverSqlCallback(p, "PRAGMA writable_schema = off"); + recoverSqlCallback(p, "COMMIT"); + p->eState = RECOVER_STATE_DONE; + recoverFinalCleanup(p); + break; + }; + + case RECOVER_STATE_DONE: { + /* no-op */ + break; + }; + } +} + + +/* +** This is a worker function that does the heavy lifting for both init +** functions: +** +** sqlite3_recover_init() +** sqlite3_recover_init_sql() +** +** All this function does is allocate space for the recover handle and +** take copies of the input parameters. 
All the real work is done within +** sqlite3_recover_run(). +*/ +sqlite3_recover *recoverInit( + sqlite3* db, + const char *zDb, + const char *zUri, /* Output URI for _recover_init() */ + int (*xSql)(void*, const char*),/* SQL callback for _recover_init_sql() */ + void *pSqlCtx /* Context arg for _recover_init_sql() */ +){ + sqlite3_recover *pRet = 0; + int nDb = 0; + int nUri = 0; + int nByte = 0; + + if( zDb==0 ){ zDb = "main"; } + + nDb = recoverStrlen(zDb); + nUri = recoverStrlen(zUri); + + nByte = sizeof(sqlite3_recover) + nDb+1 + nUri+1; + pRet = (sqlite3_recover*)sqlite3_malloc(nByte); + if( pRet ){ + memset(pRet, 0, nByte); + pRet->dbIn = db; + pRet->zDb = (char*)&pRet[1]; + pRet->zUri = &pRet->zDb[nDb+1]; + memcpy(pRet->zDb, zDb, nDb); + if( nUri>0 && zUri ) memcpy(pRet->zUri, zUri, nUri); + pRet->xSql = xSql; + pRet->pSqlCtx = pSqlCtx; + pRet->bRecoverRowid = RECOVER_ROWID_DEFAULT; + } + + return pRet; +} + +/* +** Initialize a recovery handle that creates a new database containing +** the recovered data. +*/ +sqlite3_recover *sqlite3_recover_init( + sqlite3* db, + const char *zDb, + const char *zUri +){ + return recoverInit(db, zDb, zUri, 0, 0); +} + +/* +** Initialize a recovery handle that returns recovered data in the +** form of SQL statements via a callback. +*/ +sqlite3_recover *sqlite3_recover_init_sql( + sqlite3* db, + const char *zDb, + int (*xSql)(void*, const char*), + void *pSqlCtx +){ + return recoverInit(db, zDb, 0, xSql, pSqlCtx); +} + +/* +** Return the handle error message, if any. +*/ +const char *sqlite3_recover_errmsg(sqlite3_recover *p){ + return (p && p->errCode!=SQLITE_NOMEM) ? p->zErrMsg : "out of memory"; +} + +/* +** Return the handle error code. +*/ +int sqlite3_recover_errcode(sqlite3_recover *p){ + return p ? p->errCode : SQLITE_NOMEM; +} + +/* +** Configure the handle. +*/ +int sqlite3_recover_config(sqlite3_recover *p, int op, void *pArg){ + int rc = SQLITE_OK; + if( p==0 ){ + rc = SQLITE_NOMEM; + }else if( p->eState!=RECOVER_STATE_INIT ){ + rc = SQLITE_MISUSE; + }else{ + switch( op ){ + case 789: + /* This undocumented magic configuration option is used to set the + ** name of the auxiliary database that is ATTACH-ed to the database + ** connection and used to hold state information during the + ** recovery process. This option is for debugging use only and + ** is subject to change or removal at any time. */ + sqlite3_free(p->zStateDb); + p->zStateDb = recoverMPrintf(p, "%s", (char*)pArg); + break; + + case SQLITE_RECOVER_LOST_AND_FOUND: { + const char *zArg = (const char*)pArg; + sqlite3_free(p->zLostAndFound); + if( zArg ){ + p->zLostAndFound = recoverMPrintf(p, "%s", zArg); + }else{ + p->zLostAndFound = 0; + } + break; + } + + case SQLITE_RECOVER_FREELIST_CORRUPT: + p->bFreelistCorrupt = *(int*)pArg; + break; + + case SQLITE_RECOVER_ROWIDS: + p->bRecoverRowid = *(int*)pArg; + break; + + case SQLITE_RECOVER_SLOWINDEXES: + p->bSlowIndexes = *(int*)pArg; + break; + + default: + rc = SQLITE_NOTFOUND; + break; + } + } + + return rc; +} + +/* +** Do a unit of work towards the recovery job. Return SQLITE_OK if +** no error has occurred but database recovery is not finished, SQLITE_DONE +** if database recovery has been successfully completed, or an SQLite +** error code if an error has occurred. 
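+**
+** A caller with no other work to interleave between units of work can drive
+** the handle to completion with a loop equivalent to the body of
+** sqlite3_recover_run() below (here p is the recover handle and rc an int
+** local used for illustration):
+**
+**   while( SQLITE_OK==sqlite3_recover_step(p) );
+**   rc = sqlite3_recover_errcode(p);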
+*/ +int sqlite3_recover_step(sqlite3_recover *p){ + if( p==0 ) return SQLITE_NOMEM; + if( p->errCode==SQLITE_OK ) recoverStep(p); + if( p->eState==RECOVER_STATE_DONE && p->errCode==SQLITE_OK ){ + return SQLITE_DONE; + } + return p->errCode; +} + +/* +** Do the configured recovery operation. Return SQLITE_OK if successful, or +** else an SQLite error code. +*/ +int sqlite3_recover_run(sqlite3_recover *p){ + while( SQLITE_OK==sqlite3_recover_step(p) ); + return sqlite3_recover_errcode(p); +} + + +/* +** Free all resources associated with the recover handle passed as the only +** argument. The results of using a handle with any sqlite3_recover_** +** API function after it has been passed to this function are undefined. +** +** A copy of the value returned by the first call made to sqlite3_recover_run() +** on this handle is returned, or SQLITE_OK if sqlite3_recover_run() has +** not been called on this handle. +*/ +int sqlite3_recover_finish(sqlite3_recover *p){ + int rc; + if( p==0 ){ + rc = SQLITE_NOMEM; + }else{ + recoverFinalCleanup(p); + if( p->bCloseTransaction && sqlite3_get_autocommit(p->dbIn)==0 ){ + rc = sqlite3_exec(p->dbIn, "END", 0, 0, 0); + if( p->errCode==SQLITE_OK ) p->errCode = rc; + } + rc = p->errCode; + sqlite3_free(p->zErrMsg); + sqlite3_free(p->zStateDb); + sqlite3_free(p->zLostAndFound); + sqlite3_free(p->pPage1Cache); + sqlite3_free(p); + } + return rc; +} + +#endif /* ifndef SQLITE_OMIT_VIRTUALTABLE */ +#pragma GCC diagnostic pop diff --git a/database/sqlite/sqlite3recover.h b/database/sqlite/sqlite3recover.h new file mode 100644 index 00000000000000..7a1cd1cd878f26 --- /dev/null +++ b/database/sqlite/sqlite3recover.h @@ -0,0 +1,249 @@ +/* +** 2022-08-27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains the public interface to the "recover" extension - +** an SQLite extension designed to recover data from corrupted database +** files. +*/ + +/* +** OVERVIEW: +** +** To use the API to recover data from a corrupted database, an +** application: +** +** 1) Creates an sqlite3_recover handle by calling either +** sqlite3_recover_init() or sqlite3_recover_init_sql(). +** +** 2) Configures the new handle using one or more calls to +** sqlite3_recover_config(). +** +** 3) Executes the recovery by repeatedly calling sqlite3_recover_step() on +** the handle until it returns something other than SQLITE_OK. If it +** returns SQLITE_DONE, then the recovery operation completed without +** error. If it returns some other non-SQLITE_OK value, then an error +** has occurred. +** +** 4) Retrieves any error code and English language error message using the +** sqlite3_recover_errcode() and sqlite3_recover_errmsg() APIs, +** respectively. +** +** 5) Destroys the sqlite3_recover handle and frees all resources +** using sqlite3_recover_finish(). +** +** The application may abandon the recovery operation at any point +** before it is finished by passing the sqlite3_recover handle to +** sqlite3_recover_finish(). This is not an error, but the final state +** of the output database, or the results of running the partial script +** delivered to the SQL callback, are undefined. 
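+**
+** As an illustrative sketch of steps 1) to 5) above (the wrapper function
+** name recover_database() and the file name "recovered.db" are placeholders,
+** not part of this interface), a minimal caller might look like:
+**
+**   int recover_database(sqlite3 *db){
+**     sqlite3_recover *p = sqlite3_recover_init(db, "main", "recovered.db");
+**     if( p==0 ) return SQLITE_NOMEM;
+**     sqlite3_recover_config(p, SQLITE_RECOVER_LOST_AND_FOUND,
+**         (void*)"lost_and_found");
+**     while( SQLITE_OK==sqlite3_recover_step(p) );
+**     return sqlite3_recover_finish(p);
+**   }
+**
+** Any error message should be read with sqlite3_recover_errmsg() before
+** sqlite3_recover_finish() is called, as the message buffer is released
+** when the handle is destroyed.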
+*/ + +#ifndef _SQLITE_RECOVER_H +#define _SQLITE_RECOVER_H + +#include "sqlite3.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** An instance of the sqlite3_recover object represents a recovery +** operation in progress. +** +** Constructors: +** +** sqlite3_recover_init() +** sqlite3_recover_init_sql() +** +** Destructor: +** +** sqlite3_recover_finish() +** +** Methods: +** +** sqlite3_recover_config() +** sqlite3_recover_errcode() +** sqlite3_recover_errmsg() +** sqlite3_recover_run() +** sqlite3_recover_step() +*/ +typedef struct sqlite3_recover sqlite3_recover; + +/* +** These two APIs attempt to create and return a new sqlite3_recover object. +** In both cases the first two arguments identify the (possibly +** corrupt) database to recover data from. The first argument is an open +** database handle and the second the name of a database attached to that +** handle (i.e. "main", "temp" or the name of an attached database). +** +** If sqlite3_recover_init() is used to create the new sqlite3_recover +** handle, then data is recovered into a new database, identified by +** string parameter zUri. zUri may be an absolute or relative file path, +** or may be an SQLite URI. If the identified database file already exists, +** it is overwritten. +** +** If sqlite3_recover_init_sql() is invoked, then any recovered data will +** be returned to the user as a series of SQL statements. Executing these +** SQL statements results in the same database as would have been created +** had sqlite3_recover_init() been used. For each SQL statement in the +** output, the callback function passed as the third argument (xSql) is +** invoked once. The first parameter is a passed a copy of the fourth argument +** to this function (pCtx) as its first parameter, and a pointer to a +** nul-terminated buffer containing the SQL statement formated as UTF-8 as +** the second. If the xSql callback returns any value other than SQLITE_OK, +** then processing is immediately abandoned and the value returned used as +** the recover handle error code (see below). +** +** If an out-of-memory error occurs, NULL may be returned instead of +** a valid handle. In all other cases, it is the responsibility of the +** application to avoid resource leaks by ensuring that +** sqlite3_recover_finish() is called on all allocated handles. +*/ +sqlite3_recover *sqlite3_recover_init( + sqlite3* db, + const char *zDb, + const char *zUri +); +sqlite3_recover *sqlite3_recover_init_sql( + sqlite3* db, + const char *zDb, + int (*xSql)(void*, const char*), + void *pCtx +); + +/* +** Configure an sqlite3_recover object that has just been created using +** sqlite3_recover_init() or sqlite3_recover_init_sql(). This function +** may only be called before the first call to sqlite3_recover_step() +** or sqlite3_recover_run() on the object. +** +** The second argument passed to this function must be one of the +** SQLITE_RECOVER_* symbols defined below. Valid values for the third argument +** depend on the specific SQLITE_RECOVER_* symbol in use. +** +** SQLITE_OK is returned if the configuration operation was successful, +** or an SQLite error code otherwise. +*/ +int sqlite3_recover_config(sqlite3_recover*, int op, void *pArg); + +/* +** SQLITE_RECOVER_LOST_AND_FOUND: +** The pArg argument points to a string buffer containing the name +** of a "lost-and-found" table in the output database, or NULL. 
If +** the argument is non-NULL and the database contains seemingly +** valid pages that cannot be associated with any table in the +** recovered part of the schema, data is extracted from these +** pages to add to the lost-and-found table. +** +** SQLITE_RECOVER_FREELIST_CORRUPT: +** The pArg value must actually be a pointer to a value of type +** int containing value 0 or 1 cast as a (void*). If this option is set +** (argument is 1) and a lost-and-found table has been configured using +** SQLITE_RECOVER_LOST_AND_FOUND, then is assumed that the freelist is +** corrupt and an attempt is made to recover records from pages that +** appear to be linked into the freelist. Otherwise, pages on the freelist +** are ignored. Setting this option can recover more data from the +** database, but often ends up "recovering" deleted records. The default +** value is 0 (clear). +** +** SQLITE_RECOVER_ROWIDS: +** The pArg value must actually be a pointer to a value of type +** int containing value 0 or 1 cast as a (void*). If this option is set +** (argument is 1), then an attempt is made to recover rowid values +** that are not also INTEGER PRIMARY KEY values. If this option is +** clear, then new rowids are assigned to all recovered rows. The +** default value is 1 (set). +** +** SQLITE_RECOVER_SLOWINDEXES: +** The pArg value must actually be a pointer to a value of type +** int containing value 0 or 1 cast as a (void*). If this option is clear +** (argument is 0), then when creating an output database, the recover +** module creates and populates non-UNIQUE indexes right at the end of the +** recovery operation - after all recoverable data has been inserted +** into the new database. This is faster overall, but means that the +** final call to sqlite3_recover_step() for a recovery operation may +** be need to create a large number of indexes, which may be very slow. +** +** Or, if this option is set (argument is 1), then non-UNIQUE indexes +** are created in the output database before it is populated with +** recovered data. This is slower overall, but avoids the slow call +** to sqlite3_recover_step() at the end of the recovery operation. +** +** The default option value is 0. +*/ +#define SQLITE_RECOVER_LOST_AND_FOUND 1 +#define SQLITE_RECOVER_FREELIST_CORRUPT 2 +#define SQLITE_RECOVER_ROWIDS 3 +#define SQLITE_RECOVER_SLOWINDEXES 4 + +/* +** Perform a unit of work towards the recovery operation. This function +** must normally be called multiple times to complete database recovery. +** +** If no error occurs but the recovery operation is not completed, this +** function returns SQLITE_OK. If recovery has been completed successfully +** then SQLITE_DONE is returned. If an error has occurred, then an SQLite +** error code (e.g. SQLITE_IOERR or SQLITE_NOMEM) is returned. It is not +** considered an error if some or all of the data cannot be recovered +** due to database corruption. +** +** Once sqlite3_recover_step() has returned a value other than SQLITE_OK, +** all further such calls on the same recover handle are no-ops that return +** the same non-SQLITE_OK value. +*/ +int sqlite3_recover_step(sqlite3_recover*); + +/* +** Run the recovery operation to completion. Return SQLITE_OK if successful, +** or an SQLite error code otherwise. 
Calling this function is the same +** as executing: +** +** while( SQLITE_OK==sqlite3_recover_step(p) ); +** return sqlite3_recover_errcode(p); +*/ +int sqlite3_recover_run(sqlite3_recover*); + +/* +** If an error has been encountered during a prior call to +** sqlite3_recover_step(), then this function attempts to return a +** pointer to a buffer containing an English language explanation of +** the error. If no error message is available, or if an out-of memory +** error occurs while attempting to allocate a buffer in which to format +** the error message, NULL is returned. +** +** The returned buffer remains valid until the sqlite3_recover handle is +** destroyed using sqlite3_recover_finish(). +*/ +const char *sqlite3_recover_errmsg(sqlite3_recover*); + +/* +** If this function is called on an sqlite3_recover handle after +** an error occurs, an SQLite error code is returned. Otherwise, SQLITE_OK. +*/ +int sqlite3_recover_errcode(sqlite3_recover*); + +/* +** Clean up a recovery object created by a call to sqlite3_recover_init(). +** The results of using a recovery object with any API after it has been +** passed to this function are undefined. +** +** This function returns the same value as sqlite3_recover_errcode(). +*/ +int sqlite3_recover_finish(sqlite3_recover*); + + +#ifdef __cplusplus +} /* end of the 'extern "C"' block */ +#endif + +#endif /* ifndef _SQLITE_RECOVER_H */ diff --git a/database/sqlite/sqlite_aclk.c b/database/sqlite/sqlite_aclk.c index fedce50ebaa649..ac574879ccde22 100644 --- a/database/sqlite/sqlite_aclk.c +++ b/database/sqlite/sqlite_aclk.c @@ -11,63 +11,47 @@ struct aclk_sync_config_s { uv_timer_t timer_req; time_t cleanup_after; // Start a cleanup after this timestamp uv_async_t async; - /* FIFO command queue */ - uv_mutex_t cmd_mutex; - uv_cond_t cmd_cond; bool initialized; - volatile unsigned queue_size; - struct aclk_database_cmdqueue cmd_queue; + SPINLOCK cmd_queue_lock; + struct aclk_database_cmd *cmd_base; } aclk_sync_config = { 0 }; - void sanity_check(void) { // make sure the compiler will stop on misconfigurations BUILD_BUG_ON(WORKER_UTILIZATION_MAX_JOB_TYPES < ACLK_MAX_ENUMERATIONS_DEFINED); } - -int aclk_database_enq_cmd_noblock(struct aclk_database_cmd *cmd) +static struct aclk_database_cmd aclk_database_deq_cmd(void) { - unsigned queue_size; + struct aclk_database_cmd ret; - /* wait for free space in queue */ - uv_mutex_lock(&aclk_sync_config.cmd_mutex); - if ((queue_size = aclk_sync_config.queue_size) == ACLK_DATABASE_CMD_Q_MAX_SIZE) { - uv_mutex_unlock(&aclk_sync_config.cmd_mutex); - return 1; + spinlock_lock(&aclk_sync_config.cmd_queue_lock); + if(aclk_sync_config.cmd_base) { + struct aclk_database_cmd *t = aclk_sync_config.cmd_base; + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(aclk_sync_config.cmd_base, t, prev, next); + ret = *t; + freez(t); } + else { + ret.opcode = ACLK_DATABASE_NOOP; + ret.completion = NULL; + } + spinlock_unlock(&aclk_sync_config.cmd_queue_lock); - fatal_assert(queue_size < ACLK_DATABASE_CMD_Q_MAX_SIZE); - /* enqueue command */ - aclk_sync_config.cmd_queue.cmd_array[aclk_sync_config.cmd_queue.tail] = *cmd; - aclk_sync_config.cmd_queue.tail = aclk_sync_config.cmd_queue.tail != ACLK_DATABASE_CMD_Q_MAX_SIZE - 1 ? 
- aclk_sync_config.cmd_queue.tail + 1 : 0; - aclk_sync_config.queue_size = queue_size + 1; - uv_mutex_unlock(&aclk_sync_config.cmd_mutex); - return 0; + return ret; } static void aclk_database_enq_cmd(struct aclk_database_cmd *cmd) { - unsigned queue_size; + struct aclk_database_cmd *t = mallocz(sizeof(*t)); + *t = *cmd; + t->prev = t->next = NULL; - /* wait for free space in queue */ - uv_mutex_lock(&aclk_sync_config.cmd_mutex); - while ((queue_size = aclk_sync_config.queue_size) == ACLK_DATABASE_CMD_Q_MAX_SIZE) { - uv_cond_wait(&aclk_sync_config.cmd_cond, &aclk_sync_config.cmd_mutex); - } - fatal_assert(queue_size < ACLK_DATABASE_CMD_Q_MAX_SIZE); - /* enqueue command */ - aclk_sync_config.cmd_queue.cmd_array[aclk_sync_config.cmd_queue.tail] = *cmd; - aclk_sync_config.cmd_queue.tail = aclk_sync_config.cmd_queue.tail != ACLK_DATABASE_CMD_Q_MAX_SIZE - 1 ? - aclk_sync_config.cmd_queue.tail + 1 : 0; - aclk_sync_config.queue_size = queue_size + 1; - uv_mutex_unlock(&aclk_sync_config.cmd_mutex); - - /* wake up event loop */ - int rc = uv_async_send(&aclk_sync_config.async); - if (unlikely(rc)) - netdata_log_debug(D_ACLK_SYNC, "Failed to wake up event loop"); + spinlock_lock(&aclk_sync_config.cmd_queue_lock); + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(aclk_sync_config.cmd_base, t, prev, next); + spinlock_unlock(&aclk_sync_config.cmd_queue_lock); + + (void) uv_async_send(&aclk_sync_config.async); } enum { @@ -86,6 +70,8 @@ enum { IDX_PROGRAM_VERSION, IDX_ENTRIES, IDX_HEALTH_ENABLED, + IDX_LAST_CONNECTED, + IDX_IS_EPHEMERAL, }; static int create_host_callback(void *data, int argc, char **argv, char **column) @@ -94,9 +80,31 @@ static int create_host_callback(void *data, int argc, char **argv, char **column UNUSED(argc); UNUSED(column); + time_t last_connected = + (time_t)(argv[IDX_LAST_CONNECTED] ? str2uint64_t(argv[IDX_LAST_CONNECTED], NULL) : 0); + + if (!last_connected) + last_connected = now_realtime_sec(); + + time_t age = now_realtime_sec() - last_connected; + int is_ephemeral = 0; + + if (argv[IDX_IS_EPHEMERAL]) + is_ephemeral = str2i(argv[IDX_IS_EPHEMERAL]); + char guid[UUID_STR_LEN]; uuid_unparse_lower(*(uuid_t *)argv[IDX_HOST_ID], guid); + if (is_ephemeral && age > rrdhost_free_ephemeral_time_s) { + netdata_log_info( + "Skipping ephemeral hostname \"%s\" with GUID \"%s\", age = %ld seconds (limit %ld seconds)", + (const char *)argv[IDX_HOSTNAME], + guid, + age, + rrdhost_free_ephemeral_time_s); + return 0; + } + struct rrdhost_system_info *system_info = callocz(1, sizeof(struct rrdhost_system_info)); __atomic_sub_fetch(&netdata_buffers_statistics.rrdhost_allocations_size, sizeof(struct rrdhost_system_info), __ATOMIC_RELAXED); @@ -105,32 +113,48 @@ static int create_host_callback(void *data, int argc, char **argv, char **column sql_build_host_system_info((uuid_t *)argv[IDX_HOST_ID], system_info); RRDHOST *host = rrdhost_find_or_create( - (const char *) argv[IDX_HOSTNAME] - , (const char *) argv[IDX_REGISTRY] - , guid - , (const char *) argv[IDX_OS] - , (const char *) argv[IDX_TIMEZONE] - , (const char *) argv[IDX_ABBREV_TIMEZONE] - , (int32_t) (argv[IDX_UTC_OFFSET] ? str2uint32_t(argv[IDX_UTC_OFFSET], NULL) : 0) - , (const char *) argv[IDX_TAGS] - , (const char *) (argv[IDX_PROGRAM_NAME] ? argv[IDX_PROGRAM_NAME] : "unknown") - , (const char *) (argv[IDX_PROGRAM_VERSION] ? argv[IDX_PROGRAM_VERSION] : "unknown") - , argv[IDX_UPDATE_EVERY] ? str2i(argv[IDX_UPDATE_EVERY]) : 1 - , argv[IDX_ENTRIES] ? 
str2i(argv[IDX_ENTRIES]) : 0 - , default_rrd_memory_mode - , 0 // health - , 0 // rrdpush enabled - , NULL //destination - , NULL // api key - , NULL // send charts matching - , false // rrdpush_enable_replication - , 0 // rrdpush_seconds_to_replicate - , 0 // rrdpush_replication_step - , system_info - , 1 - ); - if (likely(host)) + (const char *)argv[IDX_HOSTNAME], + (const char *)argv[IDX_REGISTRY], + guid, + (const char *)argv[IDX_OS], + (const char *)argv[IDX_TIMEZONE], + (const char *)argv[IDX_ABBREV_TIMEZONE], + (int32_t)(argv[IDX_UTC_OFFSET] ? str2uint32_t(argv[IDX_UTC_OFFSET], NULL) : 0), + (const char *)argv[IDX_TAGS], + (const char *)(argv[IDX_PROGRAM_NAME] ? argv[IDX_PROGRAM_NAME] : "unknown"), + (const char *)(argv[IDX_PROGRAM_VERSION] ? argv[IDX_PROGRAM_VERSION] : "unknown"), + argv[IDX_UPDATE_EVERY] ? str2i(argv[IDX_UPDATE_EVERY]) : 1, + argv[IDX_ENTRIES] ? str2i(argv[IDX_ENTRIES]) : 0, + default_rrd_memory_mode, + 0 // health + , + 0 // rrdpush enabled + , + NULL //destination + , + NULL // api key + , + NULL // send charts matching + , + false // rrdpush_enable_replication + , + 0 // rrdpush_seconds_to_replicate + , + 0 // rrdpush_replication_step + , + system_info, + 1); + + if (likely(host)) { + if (is_ephemeral) + rrdhost_option_set(host, RRDHOST_OPTION_EPHEMERAL_HOST); + + if (is_ephemeral) + host->child_disconnected_time = now_realtime_sec(); + host->rrdlabels = sql_load_host_labels((uuid_t *)argv[IDX_HOST_ID]); + host->last_connected = last_connected; + } (*number_of_chidren)++; @@ -138,43 +162,14 @@ static int create_host_callback(void *data, int argc, char **argv, char **column char node_str[UUID_STR_LEN] = ""; if (likely(host->node_id)) uuid_unparse_lower(*host->node_id, node_str); - internal_error(true, "Adding archived host \"%s\" with GUID \"%s\" node id = \"%s\"", rrdhost_hostname(host), host->machine_guid, node_str); + internal_error(true, "Adding archived host \"%s\" with GUID \"%s\" node id = \"%s\" ephemeral=%d", rrdhost_hostname(host), host->machine_guid, node_str, is_ephemeral); #endif return 0; } #ifdef ENABLE_ACLK -static struct aclk_database_cmd aclk_database_deq_cmd(void) -{ - struct aclk_database_cmd ret; - unsigned queue_size; - - uv_mutex_lock(&aclk_sync_config.cmd_mutex); - queue_size = aclk_sync_config.queue_size; - if (queue_size == 0) { - memset(&ret, 0, sizeof(ret)); - ret.opcode = ACLK_DATABASE_NOOP; - ret.completion = NULL; - - } else { - /* dequeue command */ - ret = aclk_sync_config.cmd_queue.cmd_array[aclk_sync_config.cmd_queue.head]; - if (queue_size == 1) { - aclk_sync_config.cmd_queue.head = aclk_sync_config.cmd_queue.tail = 0; - } else { - aclk_sync_config.cmd_queue.head = aclk_sync_config.cmd_queue.head != ACLK_DATABASE_CMD_Q_MAX_SIZE - 1 ? 
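/*
** [Illustrative sketch added by the editor — not part of this patch.]
** The reworked command queue above is a spinlock-protected doubly-linked list:
** producers append a heap-allocated command and wake the event loop with
** uv_async_send(); the consumer pops entries until aclk_database_deq_cmd()
** returns the ACLK_DATABASE_NOOP sentinel. A consumer callback along these
** lines would drain it (the callback name is hypothetical):
*/
static void aclk_async_drain_sketch(uv_async_t *handle __maybe_unused) {
    struct aclk_database_cmd cmd;
    while ((cmd = aclk_database_deq_cmd()).opcode != ACLK_DATABASE_NOOP) {
        /* dispatch on cmd.opcode here ... */
        if (cmd.completion) {
            /* signal whoever queued the command and is waiting on it */
        }
    }
}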
- aclk_sync_config.cmd_queue.head + 1 : 0; - } - aclk_sync_config.queue_size = queue_size - 1; - /* wake up producers */ - uv_cond_signal(&aclk_sync_config.cmd_cond); - } - uv_mutex_unlock(&aclk_sync_config.cmd_mutex); - return ret; -} - -#define SQL_SELECT_HOST_BY_UUID "SELECT host_id FROM host WHERE host_id = @host_id;" +#define SQL_SELECT_HOST_BY_UUID "SELECT host_id FROM host WHERE host_id = @host_id" static int is_host_available(uuid_t *host_id) { sqlite3_stmt *res = NULL; @@ -226,20 +221,14 @@ static void sql_delete_aclk_table_list(char *host_guid) uuid_unparse_lower(host_uuid, host_str); uuid_unparse_lower_fix(&host_uuid, uuid_str); - netdata_log_debug(D_ACLK_SYNC, "Checking if I should delete aclk tables for node %s", host_str); - - if (is_host_available(&host_uuid)) { - netdata_log_debug(D_ACLK_SYNC, "Host %s exists, not deleting aclk sync tables", host_str); + if (is_host_available(&host_uuid)) return; - } - - netdata_log_debug(D_ACLK_SYNC, "Host %s does NOT exist, can delete aclk sync tables", host_str); sqlite3_stmt *res = NULL; BUFFER *sql = buffer_create(ACLK_SYNC_QUERY_SIZE, &netdata_buffers_statistics.buffers_sqlite); buffer_sprintf(sql,"SELECT 'drop '||type||' IF EXISTS '||name||';' FROM sqlite_schema " \ - "WHERE name LIKE 'aclk_%%_%s' AND type IN ('table', 'trigger', 'index');", uuid_str); + "WHERE name LIKE 'aclk_%%_%s' AND type IN ('table', 'trigger', 'index')", uuid_str); rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0); if (rc != SQLITE_OK) { @@ -263,24 +252,67 @@ static void sql_delete_aclk_table_list(char *host_guid) buffer_free(sql); } +// OPCODE: ACLK_DATABASE_NODE_UNREGISTER +static void sql_unregister_node(char *machine_guid) +{ + int rc; + uuid_t host_uuid; + + if (unlikely(!machine_guid)) + return; + + rc = uuid_parse(machine_guid, host_uuid); + if (rc) { + freez(machine_guid); + return; + } + + sqlite3_stmt *res = NULL; + + rc = sqlite3_prepare_v2(db_meta, "UPDATE node_instance SET node_id = NULL WHERE host_id = @host_id", -1, &res, 0); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement to remove the host node id"); + freez(machine_guid); + return; + } + + rc = sqlite3_bind_blob(res, 1, &host_uuid, sizeof(host_uuid), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind host_id parameter to remove host node id"); + goto skip; + } + rc = sqlite3_step_monitored(res); + if (unlikely(rc != SQLITE_DONE)) { + error_report("Failed to execute command to remove host node id"); + } else { + // node: machine guid will be freed after processing + metadata_delete_host_chart_labels(machine_guid); + machine_guid = NULL; + } + +skip: + if (unlikely(sqlite3_finalize(res) != SQLITE_OK)) + error_report("Failed to finalize statement to remove host node id"); + freez(machine_guid); +} + + static int sql_check_aclk_table(void *data __maybe_unused, int argc __maybe_unused, char **argv __maybe_unused, char **column __maybe_unused) { - netdata_log_debug(D_ACLK_SYNC,"Scheduling aclk sync table check for node %s", (char *) argv[0]); struct aclk_database_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = ACLK_DATABASE_DELETE_HOST; cmd.param[0] = strdupz((char *) argv[0]); - aclk_database_enq_cmd_noblock(&cmd); + aclk_database_enq_cmd(&cmd); return 0; } #define SQL_SELECT_ACLK_ACTIVE_LIST "SELECT REPLACE(SUBSTR(name,19),'_','-') FROM sqlite_schema " \ - "WHERE name LIKE 'aclk_chart_latest_%' AND type IN ('table');" + "WHERE name LIKE 'aclk_chart_latest_%' AND type IN ('table')" static void 
sql_check_aclk_table_list(void) { char *err_msg = NULL; - netdata_log_debug(D_ACLK_SYNC,"Cleaning tables for nodes that do not exist"); int rc = sqlite3_exec_monitored(db_meta, SQL_SELECT_ACLK_ACTIVE_LIST, sql_check_aclk_table, NULL, &err_msg); if (rc != SQLITE_OK) { error_report("Query failed when trying to check for obsolete ACLK sync tables, %s", err_msg); @@ -288,24 +320,22 @@ static void sql_check_aclk_table_list(void) } } -#define SQL_ALERT_CLEANUP "DELETE FROM aclk_alert_%s WHERE date_submitted IS NOT NULL AND CAST(date_cloud_ack AS INT) < unixepoch()-%d;" +#define SQL_ALERT_CLEANUP "DELETE FROM aclk_alert_%s WHERE date_submitted IS NOT NULL AND CAST(date_cloud_ack AS INT) < unixepoch()-%d" static int sql_maint_aclk_sync_database(void *data __maybe_unused, int argc __maybe_unused, char **argv, char **column __maybe_unused) { - char sql[512]; - snprintfz(sql,511, SQL_ALERT_CLEANUP, (char *) argv[0], ACLK_DELETE_ACK_ALERTS_INTERNAL); + char sql[ACLK_SYNC_QUERY_SIZE]; + snprintfz(sql,sizeof(sql) - 1, SQL_ALERT_CLEANUP, (char *) argv[0], ACLK_DELETE_ACK_ALERTS_INTERNAL); if (unlikely(db_execute(db_meta, sql))) error_report("Failed to clean stale ACLK alert entries"); return 0; } - -#define SQL_SELECT_ACLK_ALERT_LIST "SELECT SUBSTR(name,12) FROM sqlite_schema WHERE name LIKE 'aclk_alert_%' AND type IN ('table');" +#define SQL_SELECT_ACLK_ALERT_LIST "SELECT SUBSTR(name,12) FROM sqlite_schema WHERE name LIKE 'aclk_alert_%' AND type IN ('table')" static void sql_maint_aclk_sync_database_all(void) { char *err_msg = NULL; - netdata_log_debug(D_ACLK_SYNC,"Cleaning tables for nodes that do not exist"); int rc = sqlite3_exec_monitored(db_meta, SQL_SELECT_ACLK_ALERT_LIST, sql_maint_aclk_sync_database, NULL, &err_msg); if (rc != SQLITE_OK) { error_report("Query failed when trying to check for obsolete ACLK sync tables, %s", err_msg); @@ -315,7 +345,7 @@ static void sql_maint_aclk_sync_database_all(void) static int aclk_config_parameters(void *data __maybe_unused, int argc __maybe_unused, char **argv, char **column __maybe_unused) { - char uuid_str[GUID_LEN + 1]; + char uuid_str[UUID_STR_LEN]; uuid_unparse_lower(*((uuid_t *) argv[0]), uuid_str); RRDHOST *host = rrdhost_find_by_guid(uuid_str); @@ -343,18 +373,15 @@ static void timer_cb(uv_timer_t *handle) struct aclk_database_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - time_t now = now_realtime_sec(); - - if (config->cleanup_after && config->cleanup_after < now) { + if (config->cleanup_after < now_realtime_sec()) { cmd.opcode = ACLK_DATABASE_CLEANUP; - if (!aclk_database_enq_cmd_noblock(&cmd)) - config->cleanup_after += ACLK_DATABASE_CLEANUP_INTERVAL; + aclk_database_enq_cmd(&cmd); + config->cleanup_after += ACLK_DATABASE_CLEANUP_INTERVAL; } if (aclk_connected) { cmd.opcode = ACLK_DATABASE_PUSH_ALERT; - aclk_database_enq_cmd_noblock(&cmd); - + aclk_database_enq_cmd(&cmd); aclk_check_node_info_and_collectors(); } } @@ -425,12 +452,16 @@ static void aclk_synchronization(void *arg __maybe_unused) case ACLK_DATABASE_NODE_STATE:; RRDHOST *host = cmd.param[0]; int live = (host == localhost || host->receiver || !(rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) ? 
1 : 0; - struct aclk_sync_host_config *ahc = host->aclk_sync_host_config; + struct aclk_sync_cfg_t *ahc = host->aclk_config; if (unlikely(!ahc)) sql_create_aclk_table(host, &host->host_uuid, host->node_id); - aclk_host_state_update(host, live); + aclk_host_state_update(host, live, 1); + break; + case ACLK_DATABASE_NODE_UNREGISTER: + sql_unregister_node(cmd.param[0]); + break; -// ALERTS + // ALERTS case ACLK_DATABASE_PUSH_ALERT_CONFIG: aclk_push_alert_config_event(cmd.param[0], cmd.param[1]); break; @@ -444,7 +475,6 @@ static void aclk_synchronization(void *arg __maybe_unused) sql_process_queue_removed_alerts_to_aclk(cmd.param[0]); break; default: - netdata_log_debug(D_ACLK_SYNC, "%s: default.", __func__); break; } if (cmd.completion) @@ -456,8 +486,6 @@ static void aclk_synchronization(void *arg __maybe_unused) uv_close((uv_handle_t *)&config->timer_req, NULL); uv_close((uv_handle_t *)&config->async, NULL); -// uv_close((uv_handle_t *)&config->async_exit, NULL); - uv_cond_destroy(&config->cmd_cond); (void) uv_loop_close(loop); worker_unregister(); @@ -467,11 +495,7 @@ static void aclk_synchronization(void *arg __maybe_unused) static void aclk_synchronization_init(void) { - aclk_sync_config.cmd_queue.head = aclk_sync_config.cmd_queue.tail = 0; - aclk_sync_config.queue_size = 0; - fatal_assert(0 == uv_cond_init(&aclk_sync_config.cmd_cond)); - fatal_assert(0 == uv_mutex_init(&aclk_sync_config.cmd_mutex)); - + memset(&aclk_sync_config, 0, sizeof(aclk_sync_config)); fatal_assert(0 == uv_thread_create(&aclk_sync_config.thread, aclk_synchronization, &aclk_sync_config)); } #endif @@ -481,8 +505,8 @@ static void aclk_synchronization_init(void) void sql_create_aclk_table(RRDHOST *host __maybe_unused, uuid_t *host_uuid __maybe_unused, uuid_t *node_id __maybe_unused) { #ifdef ENABLE_ACLK - char uuid_str[GUID_LEN + 1]; - char host_guid[GUID_LEN + 1]; + char uuid_str[UUID_STR_LEN]; + char host_guid[UUID_STR_LEN]; int rc; uuid_unparse_lower_fix(host_uuid, uuid_str); @@ -490,27 +514,34 @@ void sql_create_aclk_table(RRDHOST *host __maybe_unused, uuid_t *host_uuid __may char sql[ACLK_SYNC_QUERY_SIZE]; - snprintfz(sql, ACLK_SYNC_QUERY_SIZE-1, TABLE_ACLK_ALERT, uuid_str); + snprintfz(sql, sizeof(sql) - 1, TABLE_ACLK_ALERT, uuid_str); rc = db_execute(db_meta, sql); if (unlikely(rc)) error_report("Failed to create ACLK alert table for host %s", host ? rrdhost_hostname(host) : host_guid); else { - snprintfz(sql, ACLK_SYNC_QUERY_SIZE -1, INDEX_ACLK_ALERT, uuid_str, uuid_str); + snprintfz(sql, sizeof(sql) - 1, INDEX_ACLK_ALERT1, uuid_str, uuid_str); + rc = db_execute(db_meta, sql); + if (unlikely(rc)) + error_report( + "Failed to create ACLK alert table index 1 for host %s", host ? string2str(host->hostname) : host_guid); + + snprintfz(sql, sizeof(sql) - 1, INDEX_ACLK_ALERT2, uuid_str, uuid_str); rc = db_execute(db_meta, sql); if (unlikely(rc)) - error_report("Failed to create ACLK alert table index for host %s", host ? string2str(host->hostname) : host_guid); + error_report( + "Failed to create ACLK alert table index 2 for host %s", host ? 
string2str(host->hostname) : host_guid); } - if (likely(host) && unlikely(host->aclk_sync_host_config)) + if (likely(host) && unlikely(host->aclk_config)) return; if (unlikely(!host)) return; - struct aclk_sync_host_config *wc = callocz(1, sizeof(struct aclk_sync_host_config)); + struct aclk_sync_cfg_t *wc = callocz(1, sizeof(struct aclk_sync_cfg_t)); if (node_id && !uuid_is_null(*node_id)) uuid_unparse_lower(*node_id, wc->node_id); - host->aclk_sync_host_config = (void *)wc; + host->aclk_config = wc; if (node_id && !host->node_id) { host->node_id = mallocz(sizeof(*host->node_id)); uuid_copy(*host->node_id, *node_id); @@ -524,12 +555,18 @@ void sql_create_aclk_table(RRDHOST *host __maybe_unused, uuid_t *host_uuid __may #endif } -#define SQL_FETCH_ALL_HOSTS "SELECT host_id, hostname, registry_hostname, update_every, os, " \ - "timezone, tags, hops, memory_mode, abbrev_timezone, utc_offset, program_name, " \ - "program_version, entries, health_enabled FROM host WHERE hops >0;" +#define SQL_FETCH_ALL_HOSTS \ + "SELECT host_id, hostname, registry_hostname, update_every, os, " \ + "timezone, tags, hops, memory_mode, abbrev_timezone, utc_offset, program_name, " \ + "program_version, entries, health_enabled, last_connected, " \ + "(SELECT CASE WHEN hl.label_value = 'true' THEN 1 ELSE 0 END FROM " \ + "host_label hl WHERE hl.host_id = h.host_id AND hl.label_key = '_is_ephemeral') " \ + "FROM host h WHERE hops > 0" + +#define SQL_FETCH_ALL_INSTANCES \ + "SELECT ni.host_id, ni.node_id FROM host h, node_instance ni " \ + "WHERE h.host_id = ni.host_id AND ni.node_id IS NOT NULL" -#define SQL_FETCH_ALL_INSTANCES "SELECT ni.host_id, ni.node_id FROM host h, node_instance ni " \ - "WHERE h.host_id = ni.host_id AND ni.node_id IS NOT NULL; " void sql_aclk_sync_init(void) { char *err_msg = NULL; @@ -624,3 +661,18 @@ void schedule_node_info_update(RRDHOST *host __maybe_unused) aclk_database_enq_cmd(&cmd); #endif } + +#ifdef ENABLE_ACLK +void unregister_node(const char *machine_guid) +{ + if (unlikely(!machine_guid)) + return; + + struct aclk_database_cmd cmd; + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = ACLK_DATABASE_NODE_UNREGISTER; + cmd.param[0] = strdupz(machine_guid); + cmd.completion = NULL; + aclk_database_enq_cmd(&cmd); +} +#endif \ No newline at end of file diff --git a/database/sqlite/sqlite_aclk.h b/database/sqlite/sqlite_aclk.h index 705102d741f600..0db2647bf0689a 100644 --- a/database/sqlite/sqlite_aclk.h +++ b/database/sqlite/sqlite_aclk.h @@ -5,14 +5,13 @@ #include "sqlite3.h" - #ifndef ACLK_MAX_CHART_BATCH #define ACLK_MAX_CHART_BATCH (200) #endif #ifndef ACLK_MAX_CHART_BATCH_COUNT #define ACLK_MAX_CHART_BATCH_COUNT (10) #endif -#define ACLK_MAX_ALERT_UPDATES (5) +#define ACLK_MAX_ALERT_UPDATES "5" #define ACLK_DATABASE_CLEANUP_FIRST (1200) #define ACLK_DATABASE_CLEANUP_INTERVAL (3600) #define ACLK_DELETE_ACK_ALERTS_INTERNAL (86400) @@ -41,11 +40,14 @@ static inline int claimed() return localhost->aclk_state.claimed_id != NULL; } -#define TABLE_ACLK_ALERT "CREATE TABLE IF NOT EXISTS aclk_alert_%s (sequence_id INTEGER PRIMARY KEY, " \ - "alert_unique_id, date_created, date_submitted, date_cloud_ack, filtered_alert_unique_id NOT NULL, " \ - "unique(alert_unique_id));" +#define TABLE_ACLK_ALERT \ + "CREATE TABLE IF NOT EXISTS aclk_alert_%s (sequence_id INTEGER PRIMARY KEY, " \ + "alert_unique_id, date_created, date_submitted, date_cloud_ack, filtered_alert_unique_id NOT NULL, " \ + "UNIQUE(alert_unique_id))" + +#define INDEX_ACLK_ALERT1 "CREATE INDEX IF NOT EXISTS aclk_alert_index1_%s ON 
aclk_alert_%s (filtered_alert_unique_id)" +#define INDEX_ACLK_ALERT2 "CREATE INDEX IF NOT EXISTS aclk_alert_index2_%s ON aclk_alert_%s (date_submitted)" -#define INDEX_ACLK_ALERT "CREATE INDEX IF NOT EXISTS aclk_alert_index_%s ON aclk_alert_%s (alert_unique_id);" enum aclk_database_opcode { ACLK_DATABASE_NOOP = 0, @@ -57,6 +59,7 @@ enum aclk_database_opcode { ACLK_DATABASE_PUSH_ALERT_SNAPSHOT, ACLK_DATABASE_PUSH_ALERT_CHECKPOINT, ACLK_DATABASE_QUEUE_REMOVED_ALERTS, + ACLK_DATABASE_NODE_UNREGISTER, ACLK_DATABASE_TIMER, // leave this last @@ -68,16 +71,10 @@ struct aclk_database_cmd { enum aclk_database_opcode opcode; void *param[2]; struct completion *completion; + struct aclk_database_cmd *prev, *next; }; -#define ACLK_DATABASE_CMD_Q_MAX_SIZE (1024) - -struct aclk_database_cmdqueue { - unsigned head, tail; - struct aclk_database_cmd cmd_array[ACLK_DATABASE_CMD_Q_MAX_SIZE]; -}; - -struct aclk_sync_host_config { +typedef struct aclk_sync_cfg_t { RRDHOST *host; int alert_updates; int alert_checkpoint_req; @@ -89,17 +86,16 @@ struct aclk_sync_host_config { char *alerts_snapshot_uuid; // will contain the snapshot_uuid value if snapshot was requested uint64_t alerts_log_first_sequence_id; uint64_t alerts_log_last_sequence_id; -}; +} aclk_sync_cfg_t; -extern sqlite3 *db_meta; - -int aclk_database_enq_cmd_noblock(struct aclk_database_cmd *cmd); void sql_create_aclk_table(RRDHOST *host, uuid_t *host_uuid, uuid_t *node_id); void sql_aclk_sync_init(void); void aclk_push_alert_config(const char *node_id, const char *config_hash); void aclk_push_node_alert_snapshot(const char *node_id); -void aclk_push_node_health_log(const char *node_id); void aclk_push_node_removed_alerts(const char *node_id); void schedule_node_info_update(RRDHOST *host); +#ifdef ENABLE_ACLK +void unregister_node(const char *machine_guid); +#endif #endif //NETDATA_SQLITE_ACLK_H diff --git a/database/sqlite/sqlite_aclk_alert.c b/database/sqlite/sqlite_aclk_alert.c index 20ca0573ded623..30e487a2ea5014 100644 --- a/database/sqlite/sqlite_aclk_alert.c +++ b/database/sqlite/sqlite_aclk_alert.c @@ -7,43 +7,84 @@ #include "../../aclk/aclk_alarm_api.h" #endif -#define SQL_UPDATE_FILTERED_ALERT "UPDATE aclk_alert_%s SET filtered_alert_unique_id = %u, date_created = unixepoch() where filtered_alert_unique_id = %u" -void update_filtered(ALARM_ENTRY *ae, uint32_t unique_id, char *uuid_str) { - char sql[ACLK_SYNC_QUERY_SIZE]; - snprintfz(sql, ACLK_SYNC_QUERY_SIZE-1, SQL_UPDATE_FILTERED_ALERT, uuid_str, ae->unique_id, unique_id); - sqlite3_exec_monitored(db_meta, sql, 0, 0, NULL); - ae->flags |= HEALTH_ENTRY_FLAG_ACLK_QUEUED; -} +#define SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param) \ + ({ \ + int _param = (param); \ + sqlite3_column_bytes((res), (_param)) ? 
strdupz((char *)sqlite3_column_text((res), (_param))) : NULL; \ + }) -#define SQL_SELECT_VARIABLE_ALERT_BY_UNIQUE_ID "SELECT hld.unique_id FROM health_log hl, alert_hash ah, health_log_detail hld WHERE hld.unique_id = %u " \ - "AND hl.config_hash_id = ah.hash_id AND hld.health_log_id = hl.health_log_id AND host_id = @host_id " \ - "AND ah.warn IS NULL AND ah.crit IS NULL;" -static inline bool is_event_from_alert_variable_config(uint32_t unique_id, uuid_t *host_id) { +#define SQL_UPDATE_FILTERED_ALERT \ + "UPDATE aclk_alert_%s SET filtered_alert_unique_id = @new_alert, date_created = UNIXEPOCH() " \ + "WHERE filtered_alert_unique_id = @old_alert" + +static void update_filtered(ALARM_ENTRY *ae, int64_t unique_id, char *uuid_str) +{ sqlite3_stmt *res = NULL; - int rc = 0; - bool ret = false; char sql[ACLK_SYNC_QUERY_SIZE]; - snprintfz(sql,ACLK_SYNC_QUERY_SIZE-1, SQL_SELECT_VARIABLE_ALERT_BY_UNIQUE_ID, unique_id); + snprintfz(sql, sizeof(sql) - 1, SQL_UPDATE_FILTERED_ALERT, uuid_str); + int rc = sqlite3_prepare_v2(db_meta, sql, -1, &res, 0); + if (rc != SQLITE_OK) { + error_report("Failed to prepare statement when trying to update_filtered"); + return; + } + + rc = sqlite3_bind_int64(res, 1, ae->unique_id); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind ae unique_id for update_filtered"); + goto done; + } + + rc = sqlite3_bind_int64(res, 2, unique_id); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind unique_id for update_filtered"); + goto done; + } + + rc = sqlite3_step_monitored(res); + if (likely(rc == SQLITE_DONE)) + ae->flags |= HEALTH_ENTRY_FLAG_ACLK_QUEUED; + +done: + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize statement when trying to update_filtered, rc = %d", rc); +} + +#define SQL_SELECT_VARIABLE_ALERT_BY_UNIQUE_ID \ + "SELECT hld.unique_id FROM health_log hl, alert_hash ah, health_log_detail hld " \ + "WHERE hld.unique_id = @unique_id AND hl.config_hash_id = ah.hash_id AND hld.health_log_id = hl.health_log_id " \ + "AND hl.host_id = @host_id AND ah.warn IS NULL AND ah.crit IS NULL" + +static inline bool is_event_from_alert_variable_config(int64_t unique_id, uuid_t *host_id) +{ + sqlite3_stmt *res = NULL; - rc = sqlite3_prepare_v2(db_meta, sql, -1, &res, 0); + int rc = sqlite3_prepare_v2(db_meta, SQL_SELECT_VARIABLE_ALERT_BY_UNIQUE_ID, -1, &res, 0); if (rc != SQLITE_OK) { error_report("Failed to prepare statement when trying to check for alert variables."); return false; } - rc = sqlite3_bind_blob(res, 1, host_id, sizeof(*host_id), SQLITE_STATIC); + bool ret = false; + + rc = sqlite3_bind_int64(res, 1, unique_id); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind unique_id for checking alert variable."); + goto done; + } + + rc = sqlite3_bind_blob(res, 2, host_id, sizeof(*host_id), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind host_id for checking alert variable."); - sqlite3_finalize(res); - return false; + goto done; } rc = sqlite3_step_monitored(res); - if (likely(rc == SQLITE_ROW)) { + if (likely(rc == SQLITE_ROW)) ret = true; - } +done: rc = sqlite3_finalize(res); if (unlikely(rc != SQLITE_OK)) error_report("Failed to finalize statement when trying to check for alert variables, rc = %d", rc); @@ -54,137 +95,117 @@ static inline bool is_event_from_alert_variable_config(uint32_t unique_id, uuid_ #define MAX_REMOVED_PERIOD 604800 //a week //decide if some events should be sent or not -#define SQL_SELECT_ALERT_BY_ID "SELECT hld.new_status, 
hl.config_hash_id, hld.unique_id FROM health_log hl, aclk_alert_%s aa, health_log_detail hld " \ - "WHERE hld.unique_id = aa.filtered_alert_unique_id " \ - "AND hld.alarm_id = %u AND hl.host_id = @host_id AND hl.health_log_id = hld.health_log_id " \ - "ORDER BY hld.alarm_event_id DESC LIMIT 1;" -int should_send_to_cloud(RRDHOST *host, ALARM_ENTRY *ae) +#define SQL_SELECT_ALERT_BY_ID \ + "SELECT hld.new_status, hl.config_hash_id, hld.unique_id FROM health_log hl, aclk_alert_%s aa, health_log_detail hld " \ + "WHERE hl.host_id = @host_id AND +hld.unique_id = aa.filtered_alert_unique_id " \ + "AND hld.alarm_id = @alarm_id AND hl.health_log_id = hld.health_log_id " \ + "ORDER BY hld.rowid DESC LIMIT 1" + +static bool should_send_to_cloud(RRDHOST *host, ALARM_ENTRY *ae) { sqlite3_stmt *res = NULL; - char uuid_str[UUID_STR_LEN]; - uuid_unparse_lower_fix(&host->host_uuid, uuid_str); - int send = 1; - if (ae->new_status == RRDCALC_STATUS_REMOVED || ae->new_status == RRDCALC_STATUS_UNINITIALIZED) { + if (ae->new_status == RRDCALC_STATUS_REMOVED || ae->new_status == RRDCALC_STATUS_UNINITIALIZED) return 0; - } - if (unlikely(uuid_is_null(ae->config_hash_id))) + if (unlikely(uuid_is_null(ae->config_hash_id) || !host->aclk_config)) return 0; char sql[ACLK_SYNC_QUERY_SIZE]; - uuid_t config_hash_id; - RRDCALC_STATUS status; - uint32_t unique_id; //get the previous sent event of this alarm_id //base the search on the last filtered event - snprintfz(sql,ACLK_SYNC_QUERY_SIZE-1, SQL_SELECT_ALERT_BY_ID, uuid_str, ae->alarm_id); + snprintfz(sql, sizeof(sql) - 1, SQL_SELECT_ALERT_BY_ID, host->aclk_config->uuid_str); int rc = sqlite3_prepare_v2(db_meta, sql, -1, &res, 0); if (rc != SQLITE_OK) { - error_report("Failed to prepare statement when trying to filter alert events."); - send = 1; - return send; + error_report("Failed to prepare statement when trying should_send_to_cloud."); + return true; } + bool send = false; + rc = sqlite3_bind_blob(res, 1, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind host_id for checking alert variable."); - sqlite3_finalize(res); - return false; - } - - rc = sqlite3_step_monitored(res); - if (likely(rc == SQLITE_ROW)) { - status = (RRDCALC_STATUS) sqlite3_column_int(res, 0); - if (sqlite3_column_type(res, 1) != SQLITE_NULL) - uuid_copy(config_hash_id, *((uuid_t *) sqlite3_column_blob(res, 1))); - unique_id = (uint32_t) sqlite3_column_int64(res, 2); - } else { - send = 1; + error_report("Failed to bind host_id for checking should_send_to_cloud"); goto done; } - if (ae->new_status != (RRDCALC_STATUS)status) { - send = 1; + rc = sqlite3_bind_int(res, 2, (int) ae->alarm_id); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind alarm_id for checking should_send_to_cloud"); goto done; } - if (uuid_memcmp(&ae->config_hash_id, &config_hash_id)) { - send = 1; - goto done; - } + rc = sqlite3_step_monitored(res); + + if (likely(rc == SQLITE_ROW)) { + uuid_t config_hash_id; + RRDCALC_STATUS status = (RRDCALC_STATUS)sqlite3_column_int(res, 0); - //same status, same config - send = 0; - update_filtered(ae, unique_id, uuid_str); + if (sqlite3_column_type(res, 1) != SQLITE_NULL) + uuid_copy(config_hash_id, *((uuid_t *)sqlite3_column_blob(res, 1))); + + int64_t unique_id = sqlite3_column_int64(res, 2); + + if (ae->new_status != (RRDCALC_STATUS)status || uuid_memcmp(&ae->config_hash_id, &config_hash_id)) + send = true; + else + update_filtered(ae, unique_id, host->aclk_config->uuid_str); + } else + send = true; 
done: rc = sqlite3_finalize(res); if (unlikely(rc != SQLITE_OK)) - error_report("Failed to finalize statement when trying to filter alert events, rc = %d", rc); + error_report("Failed to finalize statement when trying should_send_to_cloud, rc = %d", rc); return send; } -#define SQL_QUEUE_ALERT_TO_CLOUD "INSERT INTO aclk_alert_%s (alert_unique_id, date_created, filtered_alert_unique_id) " \ - "VALUES (@alert_unique_id, unixepoch(), @alert_unique_id) ON CONFLICT (alert_unique_id) do nothing;" -int sql_queue_alarm_to_aclk(RRDHOST *host, ALARM_ENTRY *ae, int skip_filter) -{ - if(!service_running(SERVICE_ACLK)) - return 0; - - if (!claimed()) - return 0; +#define SQL_QUEUE_ALERT_TO_CLOUD \ + "INSERT INTO aclk_alert_%s (alert_unique_id, date_created, filtered_alert_unique_id) " \ + "VALUES (@alert_unique_id, UNIXEPOCH(), @alert_unique_id) ON CONFLICT (alert_unique_id) DO NOTHING" - if (ae->flags & HEALTH_ENTRY_FLAG_ACLK_QUEUED) { - return 0; - } +void sql_queue_alarm_to_aclk(RRDHOST *host, ALARM_ENTRY *ae, bool skip_filter) +{ + sqlite3_stmt *res_alert = NULL; + char sql[ACLK_SYNC_QUERY_SIZE]; - CHECK_SQLITE_CONNECTION(db_meta); + if (!service_running(SERVICE_ACLK)) + return; - if (!skip_filter) { - if (!should_send_to_cloud(host, ae)) { - return 0; - } - } + if (!claimed() || ae->flags & HEALTH_ENTRY_FLAG_ACLK_QUEUED) + return; - char uuid_str[UUID_STR_LEN]; - uuid_unparse_lower_fix(&host->host_uuid, uuid_str); + if (false == skip_filter && !should_send_to_cloud(host, ae)) + return; if (is_event_from_alert_variable_config(ae->unique_id, &host->host_uuid)) - return 0; - - sqlite3_stmt *res_alert = NULL; - char sql[ACLK_SYNC_QUERY_SIZE]; + return; - snprintfz(sql, ACLK_SYNC_QUERY_SIZE - 1, SQL_QUEUE_ALERT_TO_CLOUD, uuid_str); + snprintfz(sql, sizeof(sql) - 1, SQL_QUEUE_ALERT_TO_CLOUD, host->aclk_config->uuid_str); int rc = sqlite3_prepare_v2(db_meta, sql, -1, &res_alert, 0); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to prepare statement to store alert event"); - return 1; + return; } - rc = sqlite3_bind_int(res_alert, 1, (int) ae->unique_id); + rc = sqlite3_bind_int64(res_alert, 1, ae->unique_id); if (unlikely(rc != SQLITE_OK)) - goto bind_fail; + goto done; rc = execute_insert(res_alert); - if (unlikely(rc != SQLITE_DONE)) { - error_report("Failed to store alert event %u, rc = %d", ae->unique_id, rc); - goto bind_fail; - } - - ae->flags |= HEALTH_ENTRY_FLAG_ACLK_QUEUED; - rrdhost_flag_set(host, RRDHOST_FLAG_ACLK_STREAM_ALERTS); + if (unlikely(rc == SQLITE_DONE)) { + ae->flags |= HEALTH_ENTRY_FLAG_ACLK_QUEUED; + rrdhost_flag_set(host, RRDHOST_FLAG_ACLK_STREAM_ALERTS); + } else + error_report("Failed to store alert event %"PRId64", rc = %d", ae->unique_id, rc); -bind_fail: +done: if (unlikely(sqlite3_finalize(res_alert) != SQLITE_OK)) error_report("Failed to reset statement in store alert event, rc = %d", rc); - - return 0; } int rrdcalc_status_to_proto_enum(RRDCALC_STATUS status) @@ -238,15 +259,16 @@ static inline char *sqlite3_text_strdupz_empty(sqlite3_stmt *res, int iCol) { } -void aclk_push_alert_event(struct aclk_sync_host_config *wc) +static void aclk_push_alert_event(struct aclk_sync_cfg_t *wc __maybe_unused) { -#ifndef ENABLE_ACLK - UNUSED(wc); -#else +#ifdef ENABLE_ACLK int rc; if (unlikely(!wc->alert_updates)) { - netdata_log_access("ACLK STA [%s (%s)]: Ignoring alert push event, updates have been turned off for this node.", wc->node_id, wc->host ? 
rrdhost_hostname(wc->host) : "N/A"); + nd_log(NDLS_ACCESS, NDLP_NOTICE, + "ACLK STA [%s (%s)]: Ignoring alert push event, updates have been turned off for this node.", + wc->node_id, + wc->host ? rrdhost_hostname(wc->host) : "N/A"); return; } @@ -261,33 +283,30 @@ void aclk_push_alert_event(struct aclk_sync_host_config *wc) BUFFER *sql = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite); - int limit = ACLK_MAX_ALERT_UPDATES; - sqlite3_stmt *res = NULL; - buffer_sprintf(sql, "select aa.sequence_id, hld.unique_id, hld.alarm_id, hl.config_hash_id, hld.updated_by_id, hld.when_key, " \ - " hld.duration, hld.non_clear_duration, hld.flags, hld.exec_run_timestamp, hld.delay_up_to_timestamp, hl.name, " \ - " hl.chart, hl.family, hl.exec, hl.recipient, ha.source, hl.units, hld.info, hld.exec_code, hld.new_status, " \ - " hld.old_status, hld.delay, hld.new_value, hld.old_value, hld.last_repeat, hl.chart_context, hld.transition_id, hld.alarm_event_id, hl.chart_name " \ - " from health_log hl, aclk_alert_%s aa, alert_hash ha, health_log_detail hld " \ - " where hld.unique_id = aa.alert_unique_id and hl.config_hash_id = ha.hash_id and aa.date_submitted is null " \ - " and hl.host_id = @host_id and hl.health_log_id = hld.health_log_id " \ - " order by aa.sequence_id asc limit %d;", wc->uuid_str, limit); + buffer_sprintf( + sql, + "SELECT aa.sequence_id, hld.unique_id, hld.alarm_id, hl.config_hash_id, hld.updated_by_id, hld.when_key, " + " hld.duration, hld.non_clear_duration, hld.flags, hld.exec_run_timestamp, hld.delay_up_to_timestamp, hl.name, " + " hl.chart, hl.exec, hl.recipient, ha.source, hl.units, hld.info, hld.exec_code, hld.new_status, " + " hld.old_status, hld.delay, hld.new_value, hld.old_value, hld.last_repeat, hl.chart_context, hld.transition_id, " + " hld.alarm_event_id, hl.chart_name, hld.summary " + " FROM health_log hl, aclk_alert_%s aa, alert_hash ha, health_log_detail hld " + " WHERE hld.unique_id = aa.alert_unique_id AND hl.config_hash_id = ha.hash_id AND aa.date_submitted IS NULL " + " AND hl.host_id = @host_id AND hl.health_log_id = hld.health_log_id " + " ORDER BY aa.sequence_id ASC LIMIT "ACLK_MAX_ALERT_UPDATES, + wc->uuid_str); rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0); if (rc != SQLITE_OK) { BUFFER *sql_fix = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite); buffer_sprintf(sql_fix, TABLE_ACLK_ALERT, wc->uuid_str); + rc = db_execute(db_meta, buffer_tostring(sql_fix)); if (unlikely(rc)) error_report("Failed to create ACLK alert table for host %s", rrdhost_hostname(wc->host)); - else { - buffer_flush(sql_fix); - buffer_sprintf(sql_fix, INDEX_ACLK_ALERT, wc->uuid_str, wc->uuid_str); - if (unlikely(db_execute(db_meta, buffer_tostring(sql_fix)))) - error_report("Failed to create ACLK alert table for host %s", rrdhost_hostname(wc->host)); - } buffer_free(sql_fix); // Try again @@ -304,10 +323,7 @@ void aclk_push_alert_event(struct aclk_sync_host_config *wc) rc = sqlite3_bind_blob(res, 1, &wc->host->host_uuid, sizeof(wc->host->host_uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind host_id for pushing alert event."); - sqlite3_finalize(res); - buffer_free(sql); - freez(claim_id); - return; + goto done; } uint64_t first_sequence_id = 0; @@ -320,63 +336,52 @@ void aclk_push_alert_event(struct aclk_sync_host_config *wc) alarm_log.node_id = wc->node_id; alarm_log.claim_id = claim_id; - alarm_log.chart = strdupz((char *)sqlite3_column_text(res, 12)); alarm_log.name = strdupz((char 
*)sqlite3_column_text(res, 11)); - alarm_log.family = sqlite3_column_bytes(res, 13) > 0 ? strdupz((char *)sqlite3_column_text(res, 13)) : NULL; - - //alarm_log.batch_id = wc->alerts_batch_id; - //alarm_log.sequence_id = (uint64_t) sqlite3_column_int64(res, 0); alarm_log.when = (time_t) sqlite3_column_int64(res, 5); - alarm_log.config_hash = sqlite3_uuid_unparse_strdupz(res, 3); - alarm_log.utc_offset = wc->host->utc_offset; alarm_log.timezone = strdupz(rrdhost_abbrev_timezone(wc->host)); - alarm_log.exec_path = sqlite3_column_bytes(res, 14) > 0 ? strdupz((char *)sqlite3_column_text(res, 14)) : + alarm_log.exec_path = sqlite3_column_bytes(res, 13) > 0 ? strdupz((char *)sqlite3_column_text(res, 13)) : strdupz((char *)string2str(wc->host->health.health_default_exec)); + alarm_log.conf_source = sqlite3_column_bytes(res, 15) > 0 ? strdupz((char *)sqlite3_column_text(res, 15)) : strdupz(""); - alarm_log.conf_source = sqlite3_column_bytes(res, 16) > 0 ? strdupz((char *)sqlite3_column_text(res, 16)) : strdupz(""); - - char *edit_command = sqlite3_column_bytes(res, 16) > 0 ? - health_edit_command_from_source((char *)sqlite3_column_text(res, 16)) : + char *edit_command = sqlite3_column_bytes(res, 15) > 0 ? + health_edit_command_from_source((char *)sqlite3_column_text(res, 15)) : strdupz("UNKNOWN=0=UNKNOWN"); alarm_log.command = strdupz(edit_command); alarm_log.duration = (time_t) sqlite3_column_int64(res, 6); alarm_log.non_clear_duration = (time_t) sqlite3_column_int64(res, 7); - alarm_log.status = rrdcalc_status_to_proto_enum((RRDCALC_STATUS) sqlite3_column_int(res, 20)); - alarm_log.old_status = rrdcalc_status_to_proto_enum((RRDCALC_STATUS) sqlite3_column_int(res, 21)); - alarm_log.delay = (int) sqlite3_column_int(res, 22); + alarm_log.status = rrdcalc_status_to_proto_enum((RRDCALC_STATUS) sqlite3_column_int(res, 19)); + alarm_log.old_status = rrdcalc_status_to_proto_enum((RRDCALC_STATUS) sqlite3_column_int(res, 20)); + alarm_log.delay = (int) sqlite3_column_int(res, 21); alarm_log.delay_up_to_timestamp = (time_t) sqlite3_column_int64(res, 10); - alarm_log.last_repeat = (time_t) sqlite3_column_int64(res, 25); - + alarm_log.last_repeat = (time_t) sqlite3_column_int64(res, 24); alarm_log.silenced = ((sqlite3_column_int64(res, 8) & HEALTH_ENTRY_FLAG_SILENCED) || - (sqlite3_column_type(res, 15) != SQLITE_NULL && - !strncmp((char *)sqlite3_column_text(res, 15), "silent", 6))) ? + (sqlite3_column_type(res, 14) != SQLITE_NULL && + !strncmp((char *)sqlite3_column_text(res, 14), "silent", 6))) ? 1 : 0; - alarm_log.value_string = - sqlite3_column_type(res, 23) == SQLITE_NULL ? + sqlite3_column_type(res, 22) == SQLITE_NULL ? strdupz((char *)"-") : strdupz((char *)format_value_and_unit( - new_value_string, 100, sqlite3_column_double(res, 23), (char *)sqlite3_column_text(res, 17), -1)); - + new_value_string, 100, sqlite3_column_double(res, 22), (char *)sqlite3_column_text(res, 16), -1)); alarm_log.old_value_string = - sqlite3_column_type(res, 24) == SQLITE_NULL ? + sqlite3_column_type(res, 23) == SQLITE_NULL ? 
strdupz((char *)"-") : strdupz((char *)format_value_and_unit( - old_value_string, 100, sqlite3_column_double(res, 24), (char *)sqlite3_column_text(res, 17), -1)); - - alarm_log.value = (NETDATA_DOUBLE) sqlite3_column_double(res, 23); - alarm_log.old_value = (NETDATA_DOUBLE) sqlite3_column_double(res, 24); + old_value_string, 100, sqlite3_column_double(res, 23), (char *)sqlite3_column_text(res, 16), -1)); + alarm_log.value = (NETDATA_DOUBLE) sqlite3_column_double(res, 22); + alarm_log.old_value = (NETDATA_DOUBLE) sqlite3_column_double(res, 23); alarm_log.updated = (sqlite3_column_int64(res, 8) & HEALTH_ENTRY_FLAG_UPDATED) ? 1 : 0; - alarm_log.rendered_info = sqlite3_text_strdupz_empty(res, 18); - alarm_log.chart_context = sqlite3_text_strdupz_empty(res, 26); - alarm_log.transition_id = sqlite3_uuid_unparse_strdupz(res, 27); - alarm_log.event_id = (time_t) sqlite3_column_int64(res, 28); - alarm_log.chart_name = sqlite3_text_strdupz_empty(res, 29); + alarm_log.rendered_info = sqlite3_text_strdupz_empty(res, 17); + alarm_log.chart_context = sqlite3_text_strdupz_empty(res, 25); + alarm_log.transition_id = sqlite3_uuid_unparse_strdupz(res, 26); + alarm_log.event_id = (time_t) sqlite3_column_int64(res, 27); + alarm_log.chart_name = sqlite3_text_strdupz_empty(res, 28); + alarm_log.summary = sqlite3_text_strdupz_empty(res, 29); aclk_send_alarm_log_entry(&alarm_log); @@ -395,9 +400,13 @@ void aclk_push_alert_event(struct aclk_sync_host_config *wc) if (first_sequence_id) { buffer_flush(sql); - buffer_sprintf(sql, "UPDATE aclk_alert_%s SET date_submitted=unixepoch() " - "WHERE date_submitted IS NULL AND sequence_id BETWEEN %" PRIu64 " AND %" PRIu64 ";", - wc->uuid_str, first_sequence_id, last_sequence_id); + buffer_sprintf( + sql, + "UPDATE aclk_alert_%s SET date_submitted=unixepoch() " + "WHERE +date_submitted IS NULL AND sequence_id BETWEEN %" PRIu64 " AND %" PRIu64, + wc->uuid_str, + first_sequence_id, + last_sequence_id); if (unlikely(db_execute(db_meta, buffer_tostring(sql)))) error_report("Failed to mark ACLK alert entries as submitted for host %s", rrdhost_hostname(wc->host)); @@ -407,7 +416,7 @@ void aclk_push_alert_event(struct aclk_sync_host_config *wc) } else { if (wc->alerts_log_first_sequence_id) - netdata_log_access( + nd_log(NDLS_ACCESS, NDLP_DEBUG, "ACLK RES [%s (%s)]: ALERTS SENT from %" PRIu64 " to %" PRIu64 "", wc->node_id, wc->host ? 
rrdhost_hostname(wc->host) : "N/A", @@ -417,6 +426,7 @@ void aclk_push_alert_event(struct aclk_sync_host_config *wc) wc->alerts_log_last_sequence_id = 0; } +done: rc = sqlite3_finalize(res); if (unlikely(rc != SQLITE_OK)) error_report("Failed to finalize statement to send alert entries from the database, rc = %d", rc); @@ -431,13 +441,13 @@ void aclk_push_alert_events_for_all_hosts(void) RRDHOST *host; dfe_start_reentrant(rrdhost_root_index, host) { - if (rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED) || !rrdhost_flag_check(host, RRDHOST_FLAG_ACLK_STREAM_ALERTS)) + if (rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED) || + !rrdhost_flag_check(host, RRDHOST_FLAG_ACLK_STREAM_ALERTS)) continue; - internal_error(true, "ACLK SYNC: Scanning host %s", rrdhost_hostname(host)); rrdhost_flag_clear(host, RRDHOST_FLAG_ACLK_STREAM_ALERTS); - struct aclk_sync_host_config *wc = host->aclk_sync_host_config; + struct aclk_sync_cfg_t *wc = host->aclk_config; if (likely(wc)) aclk_push_alert_event(wc); } @@ -446,57 +456,54 @@ void aclk_push_alert_events_for_all_hosts(void) void sql_queue_existing_alerts_to_aclk(RRDHOST *host) { - char uuid_str[UUID_STR_LEN]; - uuid_unparse_lower_fix(&host->host_uuid, uuid_str); - BUFFER *sql = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite); sqlite3_stmt *res = NULL; int rc; + struct aclk_sync_cfg_t *wc = host->aclk_config; + + BUFFER *sql = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite); + rw_spinlock_write_lock(&host->health_log.spinlock); - buffer_sprintf(sql, "delete from aclk_alert_%s; ", uuid_str); - if (unlikely(db_execute(db_meta, buffer_tostring(sql)))) { - rw_spinlock_write_unlock(&host->health_log.spinlock); - buffer_free(sql); - return; - } + buffer_sprintf(sql, "DELETE FROM aclk_alert_%s", wc->uuid_str); + if (unlikely(db_execute(db_meta, buffer_tostring(sql)))) + goto skip; buffer_flush(sql); - buffer_sprintf(sql, "insert into aclk_alert_%s (alert_unique_id, date_created, filtered_alert_unique_id) " \ - "select hld.unique_id alert_unique_id, unixepoch(), hld.unique_id alert_unique_id from health_log_detail hld, health_log hl " \ - "where hld.new_status <> 0 and hld.new_status <> -2 and hl.health_log_id = hld.health_log_id and hl.config_hash_id is not null " \ - "and hld.updated_by_id = 0 and hl.host_id = @host_id order by hld.unique_id asc on conflict (alert_unique_id) do nothing;", uuid_str); + + buffer_sprintf( + sql, + "INSERT INTO aclk_alert_%s (alert_unique_id, date_created, filtered_alert_unique_id) " + "SELECT hld.unique_id alert_unique_id, unixepoch(), hld.unique_id alert_unique_id FROM health_log_detail hld, health_log hl " + "WHERE hld.new_status <> 0 AND hld.new_status <> -2 AND hl.health_log_id = hld.health_log_id AND hl.config_hash_id IS NOT NULL " + "AND hld.updated_by_id = 0 AND hl.host_id = @host_id ORDER BY hld.unique_id ASC ON CONFLICT (alert_unique_id) DO NOTHING", + wc->uuid_str); rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0); if (rc != SQLITE_OK) { error_report("Failed to prepare statement when trying to queue existing alerts."); - rw_spinlock_write_unlock(&host->health_log.spinlock); - buffer_free(sql); - return; + goto skip; } rc = sqlite3_bind_blob(res, 1, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind host_id for when trying to queue existing alerts."); - sqlite3_finalize(res); - rw_spinlock_write_unlock(&host->health_log.spinlock); - buffer_free(sql); - return; + goto done; } rc = execute_insert(res); - if 
(unlikely(rc != SQLITE_DONE)) { + if (unlikely(rc != SQLITE_DONE)) error_report("Failed to queue existing alerts, rc = %d", rc); - } - + else + rrdhost_flag_set(host, RRDHOST_FLAG_ACLK_STREAM_ALERTS); +done: rc = sqlite3_finalize(res); if (unlikely(rc != SQLITE_OK)) error_report("Failed to finalize statement to queue existing alerts, rc = %d", rc); +skip: rw_spinlock_write_unlock(&host->health_log.spinlock); - buffer_free(sql); - rrdhost_flag_set(host, RRDHOST_FLAG_ACLK_STREAM_ALERTS); } void aclk_send_alarm_configuration(char *config_hash) @@ -504,55 +511,51 @@ void aclk_send_alarm_configuration(char *config_hash) if (unlikely(!config_hash)) return; - struct aclk_sync_host_config *wc = (struct aclk_sync_host_config *) localhost->aclk_sync_host_config; + struct aclk_sync_cfg_t *wc = localhost->aclk_config; if (unlikely(!wc)) return; - netdata_log_access("ACLK REQ [%s (%s)]: Request to send alert config %s.", wc->node_id, wc->host ? rrdhost_hostname(wc->host) : "N/A", config_hash); + nd_log(NDLS_ACCESS, NDLP_DEBUG, + "ACLK REQ [%s (%s)]: Request to send alert config %s.", + wc->node_id, + wc->host ? rrdhost_hostname(wc->host) : "N/A", + config_hash); aclk_push_alert_config(wc->node_id, config_hash); } -#define SQL_SELECT_ALERT_CONFIG "SELECT alarm, template, on_key, class, type, component, os, hosts, plugin," \ - "module, charts, families, lookup, every, units, green, red, calc, warn, crit, to_key, exec, delay, repeat, info," \ - "options, host_labels, p_db_lookup_dimensions, p_db_lookup_method, p_db_lookup_options, p_db_lookup_after," \ - "p_db_lookup_before, p_update_every, chart_labels FROM alert_hash WHERE hash_id = @hash_id;" -int aclk_push_alert_config_event(char *node_id __maybe_unused, char *config_hash __maybe_unused) -{ - int rc = 0; +#define SQL_SELECT_ALERT_CONFIG \ + "SELECT alarm, template, on_key, class, type, component, os, hosts, plugin," \ + "module, charts, lookup, every, units, green, red, calc, warn, crit, to_key, exec, delay, repeat, info," \ + "options, host_labels, p_db_lookup_dimensions, p_db_lookup_method, p_db_lookup_options, p_db_lookup_after," \ + "p_db_lookup_before, p_update_every, chart_labels, summary FROM alert_hash WHERE hash_id = @hash_id" +void aclk_push_alert_config_event(char *node_id __maybe_unused, char *config_hash __maybe_unused) +{ #ifdef ENABLE_ACLK - - CHECK_SQLITE_CONNECTION(db_meta); + int rc; sqlite3_stmt *res = NULL; + struct aclk_sync_cfg_t *wc; - struct aclk_sync_host_config *wc = NULL; RRDHOST *host = find_host_by_node_id(node_id); - if (unlikely(!host)) { + if (unlikely(!host || !(wc = host->aclk_config))) { freez(config_hash); freez(node_id); - return 1; - } - - wc = (struct aclk_sync_host_config *)host->aclk_sync_host_config; - if (unlikely(!wc)) { - freez(config_hash); - freez(node_id); - return 1; + return; } rc = sqlite3_prepare_v2(db_meta, SQL_SELECT_ALERT_CONFIG, -1, &res, 0); if (rc != SQLITE_OK) { error_report("Failed to prepare statement when trying to fetch an alarm hash configuration"); - return 1; + return; } uuid_t hash_uuid; if (uuid_parse(config_hash, hash_uuid)) - return 1; + return; rc = sqlite3_bind_blob(res, 1, &hash_uuid , sizeof(hash_uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) @@ -564,37 +567,33 @@ int aclk_push_alert_config_event(char *node_id __maybe_unused, char *config_hash if (sqlite3_step_monitored(res) == SQLITE_ROW) { - alarm_config.alarm = sqlite3_column_bytes(res, 0) > 0 ? strdupz((char *)sqlite3_column_text(res, 0)) : NULL; - alarm_config.tmpl = sqlite3_column_bytes(res, 1) > 0 ? 
strdupz((char *)sqlite3_column_text(res, 1)) : NULL; - alarm_config.on_chart = sqlite3_column_bytes(res, 2) > 0 ? strdupz((char *)sqlite3_column_text(res, 2)) : NULL; - alarm_config.classification = sqlite3_column_bytes(res, 3) > 0 ? strdupz((char *)sqlite3_column_text(res, 3)) : NULL; - alarm_config.type = sqlite3_column_bytes(res, 4) > 0 ? strdupz((char *)sqlite3_column_text(res, 4)) : NULL; - alarm_config.component = sqlite3_column_bytes(res, 5) > 0 ? strdupz((char *)sqlite3_column_text(res, 5)) : NULL; - - alarm_config.os = sqlite3_column_bytes(res, 6) > 0 ? strdupz((char *)sqlite3_column_text(res, 6)) : NULL; - alarm_config.hosts = sqlite3_column_bytes(res, 7) > 0 ? strdupz((char *)sqlite3_column_text(res, 7)) : NULL; - alarm_config.plugin = sqlite3_column_bytes(res, 8) > 0 ? strdupz((char *)sqlite3_column_text(res, 8)) : NULL; - alarm_config.module = sqlite3_column_bytes(res, 9) > 0 ? strdupz((char *)sqlite3_column_text(res, 9)) : NULL; - alarm_config.charts = sqlite3_column_bytes(res, 10) > 0 ? strdupz((char *)sqlite3_column_text(res, 10)) : NULL; - alarm_config.families = sqlite3_column_bytes(res, 11) > 0 ? strdupz((char *)sqlite3_column_text(res, 11)) : NULL; - alarm_config.lookup = sqlite3_column_bytes(res, 12) > 0 ? strdupz((char *)sqlite3_column_text(res, 12)) : NULL; - alarm_config.every = sqlite3_column_bytes(res, 13) > 0 ? strdupz((char *)sqlite3_column_text(res, 13)) : NULL; - alarm_config.units = sqlite3_column_bytes(res, 14) > 0 ? strdupz((char *)sqlite3_column_text(res, 14)) : NULL; - - alarm_config.green = sqlite3_column_bytes(res, 15) > 0 ? strdupz((char *)sqlite3_column_text(res, 15)) : NULL; - alarm_config.red = sqlite3_column_bytes(res, 16) > 0 ? strdupz((char *)sqlite3_column_text(res, 16)) : NULL; - - alarm_config.calculation_expr = sqlite3_column_bytes(res, 17) > 0 ? strdupz((char *)sqlite3_column_text(res, 17)) : NULL; - alarm_config.warning_expr = sqlite3_column_bytes(res, 18) > 0 ? strdupz((char *)sqlite3_column_text(res, 18)) : NULL; - alarm_config.critical_expr = sqlite3_column_bytes(res, 19) > 0 ? strdupz((char *)sqlite3_column_text(res, 19)) : NULL; - - alarm_config.recipient = sqlite3_column_bytes(res, 20) > 0 ? strdupz((char *)sqlite3_column_text(res, 20)) : NULL; - alarm_config.exec = sqlite3_column_bytes(res, 21) > 0 ? strdupz((char *)sqlite3_column_text(res, 21)) : NULL; - alarm_config.delay = sqlite3_column_bytes(res, 22) > 0 ? strdupz((char *)sqlite3_column_text(res, 22)) : NULL; - alarm_config.repeat = sqlite3_column_bytes(res, 23) > 0 ? strdupz((char *)sqlite3_column_text(res, 23)) : NULL; - alarm_config.info = sqlite3_column_bytes(res, 24) > 0 ? strdupz((char *)sqlite3_column_text(res, 24)) : NULL; - alarm_config.options = sqlite3_column_bytes(res, 25) > 0 ? strdupz((char *)sqlite3_column_text(res, 25)) : NULL; - alarm_config.host_labels = sqlite3_column_bytes(res, 26) > 0 ? 
strdupz((char *)sqlite3_column_text(res, 26)) : NULL; + int param = 0; + alarm_config.alarm = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.tmpl = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.on_chart = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.classification = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.type = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.component = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.os = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.hosts = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.plugin = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.module = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.charts = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.lookup = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.every = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.units = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.green = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.red = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.calculation_expr = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.warning_expr = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.critical_expr = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.recipient = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.exec = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.delay = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.repeat = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.info = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.options = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); + alarm_config.host_labels = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); // Current param 25 alarm_config.p_db_lookup_dimensions = NULL; alarm_config.p_db_lookup_method = NULL; @@ -602,36 +601,39 @@ int aclk_push_alert_config_event(char *node_id __maybe_unused, char *config_hash alarm_config.p_db_lookup_after = 0; alarm_config.p_db_lookup_before = 0; - if (sqlite3_column_bytes(res, 30) > 0) { + if (sqlite3_column_bytes(res, 29) > 0) { - alarm_config.p_db_lookup_dimensions = sqlite3_column_bytes(res, 27) > 0 ? strdupz((char *)sqlite3_column_text(res, 27)) : NULL; - alarm_config.p_db_lookup_method = sqlite3_column_bytes(res, 28) > 0 ? 
strdupz((char *)sqlite3_column_text(res, 28)) : NULL; + alarm_config.p_db_lookup_dimensions = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); // Current param 26 + alarm_config.p_db_lookup_method = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param++); // Current param 27 + if (param != 28) + netdata_log_error("aclk_push_alert_config_event: Unexpected param number %d", param); BUFFER *tmp_buf = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite); - buffer_data_options2string(tmp_buf, sqlite3_column_int(res, 29)); + buffer_data_options2string(tmp_buf, sqlite3_column_int(res, 28)); alarm_config.p_db_lookup_options = strdupz((char *)buffer_tostring(tmp_buf)); buffer_free(tmp_buf); - alarm_config.p_db_lookup_after = sqlite3_column_int(res, 30); - alarm_config.p_db_lookup_before = sqlite3_column_int(res, 31); + alarm_config.p_db_lookup_after = sqlite3_column_int(res, 29); + alarm_config.p_db_lookup_before = sqlite3_column_int(res, 30); } - alarm_config.p_update_every = sqlite3_column_int(res, 32); + alarm_config.p_update_every = sqlite3_column_int(res, 31); - alarm_config.chart_labels = sqlite3_column_bytes(res, 33) > 0 ? strdupz((char *)sqlite3_column_text(res, 33)) : NULL; + alarm_config.chart_labels = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, 32); + alarm_config.summary = SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, 33); p_alarm_config.cfg_hash = strdupz((char *) config_hash); p_alarm_config.cfg = alarm_config; } if (likely(p_alarm_config.cfg_hash)) { - netdata_log_access("ACLK RES [%s (%s)]: Sent alert config %s.", wc->node_id, wc->host ? rrdhost_hostname(wc->host) : "N/A", config_hash); + nd_log(NDLS_ACCESS, NDLP_DEBUG, "ACLK RES [%s (%s)]: Sent alert config %s.", wc->node_id, wc->host ? rrdhost_hostname(wc->host) : "N/A", config_hash); aclk_send_provide_alarm_cfg(&p_alarm_config); freez(p_alarm_config.cfg_hash); destroy_aclk_alarm_configuration(&alarm_config); } else - netdata_log_access("ACLK STA [%s (%s)]: Alert config for %s not found.", wc->node_id, wc->host ? rrdhost_hostname(wc->host) : "N/A", config_hash); + nd_log(NDLS_ACCESS, NDLP_WARNING, "ACLK STA [%s (%s)]: Alert config for %s not found.", wc->node_id, wc->host ? rrdhost_hostname(wc->host) : "N/A", config_hash); bind_fail: rc = sqlite3_finalize(res); @@ -641,64 +643,61 @@ int aclk_push_alert_config_event(char *node_id __maybe_unused, char *config_hash freez(config_hash); freez(node_id); #endif - return rc; } // Start streaming alerts void aclk_start_alert_streaming(char *node_id, bool resets) { - if (unlikely(!node_id)) - return; - uuid_t node_uuid; - if (uuid_parse(node_id, node_uuid)) - return; - - RRDHOST *host = find_host_by_node_id(node_id); - if (unlikely(!host)) + if (unlikely(!node_id || uuid_parse(node_id, node_uuid))) return; - struct aclk_sync_host_config *wc = host->aclk_sync_host_config; + struct aclk_sync_cfg_t *wc; - if (unlikely(!wc)) + RRDHOST *host = find_host_by_node_id(node_id); + if (unlikely(!host || !(wc = host->aclk_config))) return; if (unlikely(!host->health.health_enabled)) { - netdata_log_access("ACLK STA [%s (N/A)]: Ignoring request to stream alert state changes, health is disabled.", node_id); + nd_log(NDLS_ACCESS, NDLP_NOTICE, "ACLK STA [%s (N/A)]: Ignoring request to stream alert state changes, health is disabled.", node_id); return; } if (resets) { - netdata_log_access("ACLK REQ [%s (%s)]: STREAM ALERTS ENABLED (RESET REQUESTED)", node_id, wc->host ? rrdhost_hostname(wc->host) : "N/A"); + nd_log(NDLS_ACCESS, NDLP_DEBUG, "ACLK REQ [%s (%s)]: STREAM ALERTS ENABLED (RESET REQUESTED)", node_id, wc->host ? 
rrdhost_hostname(wc->host) : "N/A"); sql_queue_existing_alerts_to_aclk(host); } else - netdata_log_access("ACLK REQ [%s (%s)]: STREAM ALERTS ENABLED", node_id, wc->host ? rrdhost_hostname(wc->host) : "N/A"); + nd_log(NDLS_ACCESS, NDLP_DEBUG, "ACLK REQ [%s (%s)]: STREAM ALERTS ENABLED", node_id, wc->host ? rrdhost_hostname(wc->host) : "N/A"); wc->alert_updates = 1; wc->alert_queue_removed = SEND_REMOVED_AFTER_HEALTH_LOOPS; } -#define SQL_QUEUE_REMOVE_ALERTS "INSERT INTO aclk_alert_%s (alert_unique_id, date_created, filtered_alert_unique_id) " \ +#define SQL_QUEUE_REMOVE_ALERTS \ + "INSERT INTO aclk_alert_%s (alert_unique_id, date_created, filtered_alert_unique_id) " \ "SELECT hld.unique_id alert_unique_id, UNIXEPOCH(), hld.unique_id alert_unique_id FROM health_log hl, health_log_detail hld " \ - "WHERE hl.host_id = @host_id AND hl.health_log_id = hld.health_log_id AND hld.new_status = -2 AND hld.updated_by_id = 0 " \ - "AND hld.unique_id NOT IN (SELECT alert_unique_id FROM aclk_alert_%s) " \ - "AND hl.config_hash_id NOT IN (select hash_id from alert_hash where warn is null and crit is null) " \ - "ORDER BY hld.unique_id ASC ON CONFLICT (alert_unique_id) DO NOTHING;" + "WHERE hl.host_id = @host_id AND hl.health_log_id = hld.health_log_id AND hld.new_status = -2 AND hld.updated_by_id = 0 " \ + "AND hld.unique_id NOT IN (SELECT alert_unique_id FROM aclk_alert_%s) " \ + "AND hl.config_hash_id NOT IN (SELECT hash_id FROM alert_hash WHERE warn IS NULL AND crit IS NULL) " \ + "AND hl.name || hl.chart NOT IN (select name || chart FROM health_log WHERE name = hl.name AND " \ + "chart = hl.chart AND alarm_id > hl.alarm_id AND host_id = hl.host_id) " \ + "ORDER BY hld.unique_id ASC ON CONFLICT (alert_unique_id) DO NOTHING" + void sql_process_queue_removed_alerts_to_aclk(char *node_id) { - struct aclk_sync_host_config *wc; + struct aclk_sync_cfg_t *wc; RRDHOST *host = find_host_by_node_id(node_id); freez(node_id); - if (unlikely(!host || !(wc = host->aclk_sync_host_config))) + if (unlikely(!host || !(wc = host->aclk_config))) return; char sql[ACLK_SYNC_QUERY_SIZE * 2]; sqlite3_stmt *res = NULL; - snprintfz(sql, ACLK_SYNC_QUERY_SIZE * 2 - 1, SQL_QUEUE_REMOVE_ALERTS, wc->uuid_str, wc->uuid_str); + snprintfz(sql, sizeof(sql) - 1, SQL_QUEUE_REMOVE_ALERTS, wc->uuid_str, wc->uuid_str); int rc = sqlite3_prepare_v2(db_meta, sql, -1, &res, 0); if (rc != SQLITE_OK) { @@ -709,33 +708,25 @@ void sql_process_queue_removed_alerts_to_aclk(char *node_id) rc = sqlite3_bind_blob(res, 1, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind host_id for when trying to queue remvoed alerts."); - sqlite3_finalize(res); - return; + goto skip; } rc = execute_insert(res); - if (unlikely(rc != SQLITE_DONE)) { - sqlite3_finalize(res); - error_report("Failed to queue removed alerts, rc = %d", rc); - return; + if (likely(rc == SQLITE_DONE)) { + nd_log(NDLS_ACCESS, NDLP_DEBUG, "ACLK STA [%s (%s)]: QUEUED REMOVED ALERTS", wc->node_id, rrdhost_hostname(wc->host)); + rrdhost_flag_set(wc->host, RRDHOST_FLAG_ACLK_STREAM_ALERTS); + wc->alert_queue_removed = 0; } +skip: rc = sqlite3_finalize(res); if (unlikely(rc != SQLITE_OK)) error_report("Failed to finalize statement to queue removed alerts, rc = %d", rc); - - netdata_log_access("ACLK STA [%s (%s)]: QUEUED REMOVED ALERTS", wc->node_id, rrdhost_hostname(wc->host)); - - rrdhost_flag_set(wc->host, RRDHOST_FLAG_ACLK_STREAM_ALERTS); - wc->alert_queue_removed = 0; } void sql_queue_removed_alerts_to_aclk(RRDHOST *host) { - if 
(unlikely(!host->aclk_sync_host_config)) - return; - - if (!claimed() || !host->node_id) + if (unlikely(!host->aclk_config || !claimed() || !host->node_id)) return; char node_id[UUID_STR_LEN]; @@ -747,32 +738,28 @@ void sql_queue_removed_alerts_to_aclk(RRDHOST *host) void aclk_process_send_alarm_snapshot(char *node_id, char *claim_id __maybe_unused, char *snapshot_uuid) { uuid_t node_uuid; + if (unlikely(!node_id || uuid_parse(node_id, node_uuid))) return; + struct aclk_sync_cfg_t *wc; + RRDHOST *host = find_host_by_node_id(node_id); - if (unlikely(!host)) { - netdata_log_access("ACLK STA [%s (N/A)]: ACLK node id does not exist", node_id); + if (unlikely(!host || !(wc = host->aclk_config))) { + nd_log(NDLS_ACCESS, NDLP_WARNING, "ACLK STA [%s (N/A)]: ACLK node id does not exist", node_id); return; } - struct aclk_sync_host_config *wc = (struct aclk_sync_host_config *)host->aclk_sync_host_config; - - if (unlikely(!wc)) { - netdata_log_access("ACLK STA [%s (N/A)]: ACLK node id does not exist", node_id); - return; - } - - netdata_log_access( + nd_log(NDLS_ACCESS, NDLP_DEBUG, "IN [%s (%s)]: Request to send alerts snapshot, snapshot_uuid %s", node_id, wc->host ? rrdhost_hostname(wc->host) : "N/A", snapshot_uuid); + if (wc->alerts_snapshot_uuid && !strcmp(wc->alerts_snapshot_uuid,snapshot_uuid)) return; - __sync_synchronize(); + wc->alerts_snapshot_uuid = strdupz(snapshot_uuid); - __sync_synchronize(); aclk_push_node_alert_snapshot(node_id); } @@ -788,11 +775,8 @@ void health_alarm_entry2proto_nolock(struct alarm_log_entry *alarm_log, ALARM_EN alarm_log->chart = strdupz(ae_chart_id(ae)); alarm_log->name = strdupz(ae_name(ae)); - alarm_log->family = strdupz(ae_family(ae)); - alarm_log->batch_id = 0; - alarm_log->sequence_id = 0; - alarm_log->when = (time_t)ae->when; + alarm_log->when = ae->when; alarm_log->config_hash = strdupz((char *)config_hash_id); @@ -807,7 +791,7 @@ void health_alarm_entry2proto_nolock(struct alarm_log_entry *alarm_log, ALARM_EN alarm_log->non_clear_duration = (time_t)ae->non_clear_duration; alarm_log->status = rrdcalc_status_to_proto_enum((RRDCALC_STATUS)ae->new_status); alarm_log->old_status = rrdcalc_status_to_proto_enum((RRDCALC_STATUS)ae->old_status); - alarm_log->delay = (int)ae->delay; + alarm_log->delay = ae->delay; alarm_log->delay_up_to_timestamp = (time_t)ae->delay_up_to_timestamp; alarm_log->last_repeat = (time_t)ae->last_repeat; @@ -830,23 +814,25 @@ void health_alarm_entry2proto_nolock(struct alarm_log_entry *alarm_log, ALARM_EN alarm_log->transition_id = strdupz((char *)transition_id); alarm_log->event_id = (uint64_t) ae->alarm_event_id; + alarm_log->summary = strdupz(ae_summary(ae)); + freez(edit_command); } #endif #ifdef ENABLE_ACLK -static int have_recent_alarm(RRDHOST *host, uint32_t alarm_id, uint32_t mark) +static bool have_recent_alarm(RRDHOST *host, int64_t alarm_id, int64_t mark) { ALARM_ENTRY *ae = host->health_log.alarms; while (ae) { if (ae->alarm_id == alarm_id && ae->unique_id >mark && (ae->new_status != RRDCALC_STATUS_WARNING && ae->new_status != RRDCALC_STATUS_CRITICAL)) - return 1; + return true; ae = ae->next; } - return 0; + return false; } #endif @@ -857,17 +843,17 @@ void aclk_push_alert_snapshot_event(char *node_id __maybe_unused) RRDHOST *host = find_host_by_node_id(node_id); if (unlikely(!host)) { - netdata_log_access("AC [%s (N/A)]: Node id not found", node_id); + nd_log(NDLS_ACCESS, NDLP_WARNING, "AC [%s (N/A)]: Node id not found", node_id); freez(node_id); return; } freez(node_id); - struct aclk_sync_host_config *wc = 
host->aclk_sync_host_config; + struct aclk_sync_cfg_t *wc = host->aclk_config; // perhaps we don't need this for snapshots if (unlikely(!wc->alert_updates)) { - netdata_log_access( + nd_log(NDLS_ACCESS, NDLP_NOTICE, "ACLK STA [%s (%s)]: Ignoring alert snapshot event, updates have been turned off for this node.", wc->node_id, wc->host ? rrdhost_hostname(wc->host) : "N/A"); @@ -881,11 +867,9 @@ void aclk_push_alert_snapshot_event(char *node_id __maybe_unused) if (unlikely(!claim_id)) return; - netdata_log_access("ACLK REQ [%s (%s)]: Sending alerts snapshot, snapshot_uuid %s", wc->node_id, rrdhost_hostname(wc->host), wc->alerts_snapshot_uuid); + nd_log(NDLS_ACCESS, NDLP_DEBUG, "ACLK REQ [%s (%s)]: Sending alerts snapshot, snapshot_uuid %s", wc->node_id, rrdhost_hostname(wc->host), wc->alerts_snapshot_uuid); uint32_t cnt = 0; - char uuid_str[UUID_STR_LEN]; - uuid_unparse_lower_fix(&host->host_uuid, uuid_str); rw_spinlock_read_lock(&host->health_log.spinlock); @@ -908,7 +892,7 @@ void aclk_push_alert_snapshot_event(char *node_id __maybe_unused) } if (cnt) { - uint32_t chunk = 1, chunks = 0; + uint32_t chunks; chunks = (cnt / ALARM_EVENTS_PER_CHUNK) + (cnt % ALARM_EVENTS_PER_CHUNK != 0); ae = host->health_log.alarms; @@ -919,15 +903,12 @@ void aclk_push_alert_snapshot_event(char *node_id __maybe_unused) alarm_snap.claim_id = claim_id; alarm_snap.snapshot_uuid = wc->alerts_snapshot_uuid; alarm_snap.chunks = chunks; - alarm_snap.chunk = chunk; + alarm_snap.chunk = 1; alarm_snapshot_proto_ptr_t snapshot_proto = NULL; for (; ae; ae = ae->next) { - if (likely(ae->updated_by_id)) - continue; - - if (unlikely(ae->new_status == RRDCALC_STATUS_UNINITIALIZED)) + if (likely(ae->updated_by_id) || unlikely(ae->new_status == RRDCALC_STATUS_UNINITIALIZED)) continue; if (have_recent_alarm(host, ae->alarm_id, ae->unique_id)) @@ -950,19 +931,9 @@ void aclk_push_alert_snapshot_event(char *node_id __maybe_unused) if (cnt == ALARM_EVENTS_PER_CHUNK) { aclk_send_alarm_snapshot(snapshot_proto); - cnt = 0; - - if (chunk < chunks) { - chunk++; - - struct alarm_snapshot alarm_snap; - alarm_snap.node_id = wc->node_id; - alarm_snap.claim_id = claim_id; - alarm_snap.snapshot_uuid = wc->alerts_snapshot_uuid; - alarm_snap.chunks = chunks; - alarm_snap.chunk = chunk; - + if (alarm_snap.chunk < chunks) { + alarm_snap.chunk++; snapshot_proto = generate_alarm_snapshot_proto(&alarm_snap); } } @@ -979,51 +950,70 @@ void aclk_push_alert_snapshot_event(char *node_id __maybe_unused) #endif } -#define SQL_DELETE_ALERT_ENTRIES "DELETE FROM aclk_alert_%s WHERE date_created + %d < UNIXEPOCH();" +#define SQL_DELETE_ALERT_ENTRIES "DELETE FROM aclk_alert_%s WHERE date_created < UNIXEPOCH() - @period" + void sql_aclk_alert_clean_dead_entries(RRDHOST *host) { - char uuid_str[UUID_STR_LEN]; - uuid_unparse_lower_fix(&host->host_uuid, uuid_str); + struct aclk_sync_cfg_t *wc = host->aclk_config; + if (unlikely(!wc)) + return; char sql[ACLK_SYNC_QUERY_SIZE]; - snprintfz(sql, ACLK_SYNC_QUERY_SIZE - 1, SQL_DELETE_ALERT_ENTRIES, uuid_str, MAX_REMOVED_PERIOD); + snprintfz(sql, sizeof(sql) - 1, SQL_DELETE_ALERT_ENTRIES, wc->uuid_str); - char *err_msg = NULL; - int rc = sqlite3_exec_monitored(db_meta, sql, NULL, NULL, &err_msg); + sqlite3_stmt *res = NULL; + int rc = sqlite3_prepare_v2(db_meta, sql, -1, &res, 0); if (rc != SQLITE_OK) { - error_report("Failed when trying to clean stale ACLK alert entries from aclk_alert_%s, error message \"%s\"", uuid_str, err_msg); - sqlite3_free(err_msg); + error_report("Failed to prepare statement for cleaning stale
ACLK alert entries."); + return; } + + rc = sqlite3_bind_int64(res, 1, MAX_REMOVED_PERIOD); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind MAX_REMOVED_PERIOD parameter."); + goto skip; + } + + rc = sqlite3_step_monitored(res); + if (rc != SQLITE_DONE) + error_report("Failed to execute DELETE query for cleaning stale ACLK alert entries."); + +skip: + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize statement for cleaning stale ACLK alert entries."); } #define SQL_GET_MIN_MAX_ALERT_SEQ "SELECT MIN(sequence_id), MAX(sequence_id), " \ "(SELECT MAX(sequence_id) FROM aclk_alert_%s WHERE date_submitted IS NOT NULL) " \ - "FROM aclk_alert_%s WHERE date_submitted IS NULL;" + "FROM aclk_alert_%s WHERE date_submitted IS NULL" int get_proto_alert_status(RRDHOST *host, struct proto_alert_status *proto_alert_status) { - int rc; - struct aclk_sync_host_config *wc = NULL; - wc = (struct aclk_sync_host_config *)host->aclk_sync_host_config; + + struct aclk_sync_cfg_t *wc = host->aclk_config; if (!wc) return 1; proto_alert_status->alert_updates = wc->alert_updates; char sql[ACLK_SYNC_QUERY_SIZE]; - sqlite3_stmt *res = NULL; - snprintfz(sql, ACLK_SYNC_QUERY_SIZE - 1, SQL_GET_MIN_MAX_ALERT_SEQ, wc->uuid_str, wc->uuid_str); + sqlite3_stmt *res = NULL; + snprintfz(sql, sizeof(sql) - 1, SQL_GET_MIN_MAX_ALERT_SEQ, wc->uuid_str, wc->uuid_str); - rc = sqlite3_prepare_v2(db_meta, sql, -1, &res, 0); + int rc = sqlite3_prepare_v2(db_meta, sql, -1, &res, 0); if (rc != SQLITE_OK) { error_report("Failed to prepare statement to get alert log status from the database."); return 1; } while (sqlite3_step_monitored(res) == SQLITE_ROW) { - proto_alert_status->pending_min_sequence_id = sqlite3_column_bytes(res, 0) > 0 ? (uint64_t) sqlite3_column_int64(res, 0) : 0; - proto_alert_status->pending_max_sequence_id = sqlite3_column_bytes(res, 1) > 0 ? (uint64_t) sqlite3_column_int64(res, 1) : 0; - proto_alert_status->last_submitted_sequence_id = sqlite3_column_bytes(res, 2) > 0 ? (uint64_t) sqlite3_column_int64(res, 2) : 0; + proto_alert_status->pending_min_sequence_id = + sqlite3_column_bytes(res, 0) > 0 ? (uint64_t)sqlite3_column_int64(res, 0) : 0; + proto_alert_status->pending_max_sequence_id = + sqlite3_column_bytes(res, 1) > 0 ? (uint64_t)sqlite3_column_int64(res, 1) : 0; + proto_alert_status->last_submitted_sequence_id = + sqlite3_column_bytes(res, 2) > 0 ? 
(uint64_t)sqlite3_column_int64(res, 2) : 0; } rc = sqlite3_finalize(res); @@ -1038,21 +1028,15 @@ void aclk_send_alarm_checkpoint(char *node_id, char *claim_id __maybe_unused) if (unlikely(!node_id)) return; - struct aclk_sync_host_config *wc = NULL; + struct aclk_sync_cfg_t *wc; RRDHOST *host = find_host_by_node_id(node_id); - if (unlikely(!host)) - return; - - wc = (struct aclk_sync_host_config *)host->aclk_sync_host_config; - if (unlikely(!wc)) { - netdata_log_access("ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT REQUEST RECEIVED FOR INVALID NODE", node_id); - return; + if (unlikely(!host || !(wc = host->aclk_config))) + nd_log(NDLS_ACCESS, NDLP_WARNING, "ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT REQUEST RECEIVED FOR INVALID NODE", node_id); + else { + nd_log(NDLS_ACCESS, NDLP_DEBUG, "ACLK REQ [%s (%s)]: ALERTS CHECKPOINT REQUEST RECEIVED", node_id, rrdhost_hostname(host)); + wc->alert_checkpoint_req = SEND_CHECKPOINT_AFTER_HEALTH_LOOPS; } - - netdata_log_access("ACLK REQ [%s (%s)]: ALERTS CHECKPOINT REQUEST RECEIVED", node_id, rrdhost_hostname(host)); - - wc->alert_checkpoint_req = SEND_CHECKPOINT_AFTER_HEALTH_LOOPS; } typedef struct active_alerts { @@ -1061,15 +1045,14 @@ typedef struct active_alerts { RRDCALC_STATUS status; } active_alerts_t; -static inline int compare_active_alerts(const void * a, const void * b) { +static inline int compare_active_alerts(const void *a, const void *b) +{ active_alerts_t *active_alerts_a = (active_alerts_t *)a; active_alerts_t *active_alerts_b = (active_alerts_t *)b; - if( !(strcmp(active_alerts_a->name, active_alerts_b->name)) ) - { - return strcmp(active_alerts_a->chart, active_alerts_b->chart); - } - else + if (!(strcmp(active_alerts_a->name, active_alerts_b->name))) { + return strcmp(active_alerts_a->chart, active_alerts_b->chart); + } else return strcmp(active_alerts_a->name, active_alerts_b->name); } @@ -1077,16 +1060,16 @@ static inline int compare_active_alerts(const void * a, const void * b) { void aclk_push_alarm_checkpoint(RRDHOST *host __maybe_unused) { #ifdef ENABLE_ACLK - struct aclk_sync_host_config *wc = host->aclk_sync_host_config; + struct aclk_sync_cfg_t *wc = host->aclk_config; if (unlikely(!wc)) { - netdata_log_access("ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT REQUEST RECEIVED FOR INVALID NODE", rrdhost_hostname(host)); + nd_log(NDLS_ACCESS, NDLP_WARNING, "ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT REQUEST RECEIVED FOR INVALID NODE", rrdhost_hostname(host)); return; } if (rrdhost_flag_check(host, RRDHOST_FLAG_ACLK_STREAM_ALERTS)) { //postpone checkpoint send - wc->alert_checkpoint_req+=3; - netdata_log_access("ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT POSTPONED", rrdhost_hostname(host)); + wc->alert_checkpoint_req += 3; + nd_log(NDLS_ACCESS, NDLP_NOTICE, "ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT POSTPONED", rrdhost_hostname(host)); return; } @@ -1119,16 +1102,16 @@ void aclk_push_alarm_checkpoint(RRDHOST *host __maybe_unused) BUFFER *alarms_to_hash; if (cnt) { - qsort (active_alerts, cnt, sizeof(active_alerts_t), compare_active_alerts); + qsort(active_alerts, cnt, sizeof(active_alerts_t), compare_active_alerts); alarms_to_hash = buffer_create(len, NULL); - for (uint32_t i=0;inode_id, rrdhost_hostname(host)); - } else { - netdata_log_access("ACLK RES [%s (%s)]: FAILED TO CREATE ALERTS CHECKPOINT HASH", wc->node_id, rrdhost_hostname(host)); - } + nd_log(NDLS_ACCESS, NDLP_DEBUG, "ACLK RES [%s (%s)]: ALERTS CHECKPOINT SENT", wc->node_id, rrdhost_hostname(host)); + } else + nd_log(NDLS_ACCESS, NDLP_ERR, "ACLK RES [%s (%s)]: FAILED TO CREATE ALERTS 
CHECKPOINT HASH", wc->node_id, rrdhost_hostname(host)); + wc->alert_checkpoint_req = 0; buffer_free(alarms_to_hash); #endif diff --git a/database/sqlite/sqlite_aclk_alert.h b/database/sqlite/sqlite_aclk_alert.h index d7252aad6f6c21..cfb3468b996b3c 100644 --- a/database/sqlite/sqlite_aclk_alert.h +++ b/database/sqlite/sqlite_aclk_alert.h @@ -15,10 +15,8 @@ struct proto_alert_status { uint64_t last_submitted_sequence_id; }; -int aclk_add_alert_event(struct aclk_sync_host_config *wc, struct aclk_database_cmd cmd); -void aclk_push_alert_event(struct aclk_sync_host_config *wc); void aclk_send_alarm_configuration (char *config_hash); -int aclk_push_alert_config_event(char *node_id, char *config_hash); +void aclk_push_alert_config_event(char *node_id, char *config_hash); void aclk_start_alert_streaming(char *node_id, bool resets); void sql_queue_removed_alerts_to_aclk(RRDHOST *host); void sql_process_queue_removed_alerts_to_aclk(char *node_id); @@ -28,8 +26,7 @@ void aclk_push_alarm_checkpoint(RRDHOST *host); void aclk_push_alert_snapshot_event(char *node_id); void aclk_process_send_alarm_snapshot(char *node_id, char *claim_id, char *snapshot_uuid); int get_proto_alert_status(RRDHOST *host, struct proto_alert_status *proto_alert_status); -int sql_queue_alarm_to_aclk(RRDHOST *host, ALARM_ENTRY *ae, int skip_filter); +void sql_queue_alarm_to_aclk(RRDHOST *host, ALARM_ENTRY *ae, bool skip_filter); void aclk_push_alert_events_for_all_hosts(void); - #endif //NETDATA_SQLITE_ACLK_ALERT_H diff --git a/database/sqlite/sqlite_aclk_node.c b/database/sqlite/sqlite_aclk_node.c index 82927854ab2342..dcc8c375cb2fc3 100644 --- a/database/sqlite/sqlite_aclk_node.c +++ b/database/sqlite/sqlite_aclk_node.c @@ -7,17 +7,16 @@ #include "../../aclk/aclk_capas.h" #ifdef ENABLE_ACLK + DICTIONARY *collectors_from_charts(RRDHOST *host, DICTIONARY *dict) { RRDSET *st; char name[500]; - rrdset_foreach_read(st, host) { + rrdset_foreach_read(st, host) + { if (rrdset_is_available_for_viewers(st)) { - struct collector_info col = { - .plugin = rrdset_plugin_name(st), - .module = rrdset_module_name(st) - }; - snprintfz(name, 499, "%s:%s", col.plugin, col.module); + struct collector_info col = {.plugin = rrdset_plugin_name(st), .module = rrdset_module_name(st)}; + snprintfz(name, sizeof(name) - 1, "%s:%s", col.plugin, col.module); dictionary_set(dict, name, &col, sizeof(struct collector_info)); } } @@ -26,17 +25,9 @@ DICTIONARY *collectors_from_charts(RRDHOST *host, DICTIONARY *dict) { return dict; } -static void build_node_collectors(char *node_id __maybe_unused) +static void build_node_collectors(RRDHOST *host) { - - RRDHOST *host = find_host_by_node_id(node_id); - - if (unlikely(!host)) - return; - - struct aclk_sync_host_config *wc = (struct aclk_sync_host_config *) host->aclk_sync_host_config; - if (unlikely(!wc)) - return; + struct aclk_sync_cfg_t *wc = host->aclk_config; struct update_node_collectors upd_node_collectors; DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED); @@ -50,45 +41,33 @@ static void build_node_collectors(char *node_id __maybe_unused) dictionary_destroy(dict); freez(upd_node_collectors.claim_id); - netdata_log_access("ACLK RES [%s (%s)]: NODE COLLECTORS SENT", node_id, rrdhost_hostname(host)); - - freez(node_id); + nd_log(NDLS_ACCESS, NDLP_DEBUG, "ACLK RES [%s (%s)]: NODE COLLECTORS SENT", wc->node_id, rrdhost_hostname(host)); } -static void build_node_info(char *node_id __maybe_unused) +static void build_node_info(RRDHOST *host) { struct update_node_info node_info; - RRDHOST *host = 
find_host_by_node_id(node_id); - - if (unlikely((!host))) { - freez(node_id); - return; - } - - struct aclk_sync_host_config *wc = (struct aclk_sync_host_config *) host->aclk_sync_host_config; - - if (unlikely(!wc)) { - freez(node_id); - return; - } + struct aclk_sync_cfg_t *wc = host->aclk_config; rrd_rdlock(); node_info.node_id = wc->node_id; node_info.claim_id = get_agent_claimid(); node_info.machine_guid = host->machine_guid; - node_info.child = (wc->host != localhost); + node_info.child = (host != localhost); node_info.ml_info.ml_capable = ml_capable(); - node_info.ml_info.ml_enabled = ml_enabled(wc->host); + node_info.ml_info.ml_enabled = ml_enabled(host); - node_info.node_instance_capabilities = aclk_get_node_instance_capas(wc->host); + node_info.node_instance_capabilities = aclk_get_node_instance_capas(host); now_realtime_timeval(&node_info.updated_at); char *host_version = NULL; if (host != localhost) { netdata_mutex_lock(&host->receiver_lock); - host_version = strdupz(host->receiver && host->receiver->program_version ? host->receiver->program_version : rrdhost_program_version(host)); + host_version = strdupz( + host->receiver && host->receiver->program_version ? host->receiver->program_version : + rrdhost_program_version(host)); netdata_mutex_unlock(&host->receiver_lock); } @@ -112,10 +91,11 @@ static void build_node_info(char *node_id __maybe_unused) node_info.data.machine_guid = host->machine_guid; struct capability node_caps[] = { - { .name = "ml", .version = host->system_info->ml_capable, .enabled = host->system_info->ml_enabled }, - { .name = "mc", .version = host->system_info->mc_version ? host->system_info->mc_version : 0, .enabled = host->system_info->mc_version ? 1 : 0 }, - { .name = NULL, .version = 0, .enabled = 0 } - }; + {.name = "ml", .version = host->system_info->ml_capable, .enabled = host->system_info->ml_enabled}, + {.name = "mc", + .version = host->system_info->mc_version ? host->system_info->mc_version : 0, + .enabled = host->system_info->mc_version ? 1 : 0}, + {.name = NULL, .version = 0, .enabled = 0}}; node_info.node_capabilities = node_caps; node_info.data.ml_info.ml_capable = host->system_info->ml_capable; @@ -124,7 +104,14 @@ static void build_node_info(char *node_id __maybe_unused) node_info.data.host_labels_ptr = host->rrdlabels; aclk_update_node_info(&node_info); - netdata_log_access("ACLK RES [%s (%s)]: NODE INFO SENT for guid [%s] (%s)", wc->node_id, rrdhost_hostname(wc->host), host->machine_guid, wc->host == localhost ? "parent" : "child"); + nd_log( + NDLS_ACCESS, + NDLP_DEBUG, + "ACLK RES [%s (%s)]: NODE INFO SENT for guid [%s] (%s)", + wc->node_id, + rrdhost_hostname(host), + host->machine_guid, + host == localhost ? 
"parent" : "child"); rrd_unlock(); freez(node_info.claim_id); @@ -132,10 +119,21 @@ static void build_node_info(char *node_id __maybe_unused) freez(host_version); wc->node_collectors_send = now_realtime_sec(); - freez(node_id); - } +static bool host_is_replicating(RRDHOST *host) +{ + bool replicating = false; + RRDSET *st; + rrdset_foreach_reentrant(st, host) { + if (rrdset_is_replicating(st)) { + replicating = true; + break; + } + } + rrdset_foreach_done(st); + return replicating; +} void aclk_check_node_info_and_collectors(void) { @@ -144,35 +142,59 @@ void aclk_check_node_info_and_collectors(void) if (unlikely(!aclk_connected)) return; - size_t pending = 0; - dfe_start_reentrant(rrdhost_root_index, host) { + size_t context_loading = 0; + size_t replicating = 0; + size_t context_pp = 0; - struct aclk_sync_host_config *wc = host->aclk_sync_host_config; + time_t now = now_realtime_sec(); + dfe_start_reentrant(rrdhost_root_index, host) + { + struct aclk_sync_cfg_t *wc = host->aclk_config; if (unlikely(!wc)) continue; if (unlikely(rrdhost_flag_check(host, RRDHOST_FLAG_PENDING_CONTEXT_LOAD))) { internal_error(true, "ACLK SYNC: Context still pending for %s", rrdhost_hostname(host)); - pending++; + context_loading++; continue; } - if (wc->node_info_send_time && wc->node_info_send_time + 30 < now_realtime_sec()) { + if (unlikely(host_is_replicating(host))) { + internal_error(true, "ACLK SYNC: Host %s is still replicating", rrdhost_hostname(host)); + replicating++; + continue; + } + + bool pp_queue_empty = !(host->rrdctx.pp_queue && dictionary_entries(host->rrdctx.pp_queue)); + + if (!pp_queue_empty && (wc->node_info_send_time || wc->node_collectors_send)) + context_pp++; + + if (pp_queue_empty && wc->node_info_send_time && wc->node_info_send_time + 30 < now) { wc->node_info_send_time = 0; - build_node_info(strdupz(wc->node_id)); + build_node_info(host); internal_error(true, "ACLK SYNC: Sending node info for %s", rrdhost_hostname(host)); } - if (wc->node_collectors_send && wc->node_collectors_send + 30 < now_realtime_sec()) { - build_node_collectors(strdupz(wc->node_id)); + if (pp_queue_empty && wc->node_collectors_send && wc->node_collectors_send + 30 < now) { + build_node_collectors(host); internal_error(true, "ACLK SYNC: Sending collectors for %s", rrdhost_hostname(host)); wc->node_collectors_send = 0; } } dfe_done(host); - if(pending) - netdata_log_info("ACLK: %zu nodes are pending for contexts to load, skipped sending node info for them", pending); + if (context_loading || replicating || context_pp) { + nd_log_limit_static_thread_var(erl, 10, 100 * USEC_PER_MS); + nd_log_limit( + &erl, + NDLS_DAEMON, + NDLP_INFO, + "%zu nodes loading contexts, %zu replicating data, %zu pending context post processing", + context_loading, + replicating, + context_pp); + } } #endif diff --git a/database/sqlite/sqlite_context.c b/database/sqlite/sqlite_context.c index f29fe51e3c0869..26ed8a96aab31f 100644 --- a/database/sqlite/sqlite_context.c +++ b/database/sqlite/sqlite_context.c @@ -7,16 +7,16 @@ #define DB_CONTEXT_METADATA_VERSION 1 const char *database_context_config[] = { - "CREATE TABLE IF NOT EXISTS context (host_id BLOB, id TEXT NOT NULL, version INT NOT NULL, title TEXT NOT NULL, " \ + "CREATE TABLE IF NOT EXISTS context (host_id BLOB, id TEXT NOT NULL, version INT NOT NULL, title TEXT NOT NULL, " "chart_type TEXT NOT NULL, unit TEXT NOT NULL, priority INT NOT NULL, first_time_t INT NOT NULL, " "last_time_t INT NOT NULL, deleted INT NOT NULL, " - "family TEXT, PRIMARY KEY (host_id, id));", + "family 
TEXT, PRIMARY KEY (host_id, id))", NULL }; const char *database_context_cleanup[] = { - "VACUUM;", + "VACUUM", NULL }; @@ -31,7 +31,7 @@ int sql_init_context_database(int memory) int rc; if (likely(!memory)) - snprintfz(sqlite_database, FILENAME_MAX, "%s/context-meta.db", netdata_configured_cache_dir); + snprintfz(sqlite_database, sizeof(sqlite_database) - 1, "%s/context-meta.db", netdata_configured_cache_dir); else strcpy(sqlite_database, ":memory:"); @@ -52,51 +52,20 @@ int sql_init_context_database(int memory) if (likely(!memory)) target_version = perform_context_database_migration(db_context_meta, DB_CONTEXT_METADATA_VERSION); - // https://www.sqlite.org/pragma.html#pragma_auto_vacuum - // PRAGMA schema.auto_vacuum = 0 | NONE | 1 | FULL | 2 | INCREMENTAL; - snprintfz(buf, 1024, "PRAGMA auto_vacuum=%s;", config_get(CONFIG_SECTION_SQLITE, "auto vacuum", "INCREMENTAL")); - if(init_database_batch(db_context_meta, DB_CHECK_NONE, 0, list)) return 1; - - // https://www.sqlite.org/pragma.html#pragma_synchronous - // PRAGMA schema.synchronous = 0 | OFF | 1 | NORMAL | 2 | FULL | 3 | EXTRA; - snprintfz(buf, 1024, "PRAGMA synchronous=%s;", config_get(CONFIG_SECTION_SQLITE, "synchronous", "NORMAL")); - if(init_database_batch(db_context_meta, DB_CHECK_NONE, 0, list)) return 1; - - // https://www.sqlite.org/pragma.html#pragma_journal_mode - // PRAGMA schema.journal_mode = DELETE | TRUNCATE | PERSIST | MEMORY | WAL | OFF - snprintfz(buf, 1024, "PRAGMA journal_mode=%s;", config_get(CONFIG_SECTION_SQLITE, "journal mode", "WAL")); - if(init_database_batch(db_context_meta, DB_CHECK_NONE, 0, list)) return 1; - - // https://www.sqlite.org/pragma.html#pragma_temp_store - // PRAGMA temp_store = 0 | DEFAULT | 1 | FILE | 2 | MEMORY; - snprintfz(buf, 1024, "PRAGMA temp_store=%s;", config_get(CONFIG_SECTION_SQLITE, "temp store", "MEMORY")); - if(init_database_batch(db_context_meta, DB_CHECK_NONE, 0, list)) return 1; - - // https://www.sqlite.org/pragma.html#pragma_journal_size_limit - // PRAGMA schema.journal_size_limit = N ; - snprintfz(buf, 1024, "PRAGMA journal_size_limit=%lld;", config_get_number(CONFIG_SECTION_SQLITE, "journal size limit", 16777216)); - if(init_database_batch(db_context_meta, DB_CHECK_NONE, 0, list)) return 1; - - // https://www.sqlite.org/pragma.html#pragma_cache_size - // PRAGMA schema.cache_size = pages; - // PRAGMA schema.cache_size = -kibibytes; - snprintfz(buf, 1024, "PRAGMA cache_size=%lld;", config_get_number(CONFIG_SECTION_SQLITE, "cache size", -2000)); - if(init_database_batch(db_context_meta, DB_CHECK_NONE, 0, list)) return 1; - - snprintfz(buf, 1024, "PRAGMA user_version=%d;", target_version); - if(init_database_batch(db_context_meta, DB_CHECK_NONE, 0, list)) return 1; + if (configure_sqlite_database(db_context_meta, target_version)) + return 1; if (likely(!memory)) - snprintfz(buf, 1024, "ATTACH DATABASE \"%s/netdata-meta.db\" as meta;", netdata_configured_cache_dir); + snprintfz(buf, sizeof(buf) - 1, "ATTACH DATABASE \"%s/netdata-meta.db\" as meta", netdata_configured_cache_dir); else - snprintfz(buf, 1024, "ATTACH DATABASE ':memory:' as meta;"); + snprintfz(buf, sizeof(buf) - 1, "ATTACH DATABASE ':memory:' as meta"); - if(init_database_batch(db_context_meta, DB_CHECK_NONE, 0, list)) return 1; + if(init_database_batch(db_context_meta, list)) return 1; - if (init_database_batch(db_context_meta, DB_CHECK_NONE, 0, &database_context_config[0])) + if (init_database_batch(db_context_meta, &database_context_config[0])) return 1; - if (init_database_batch(db_context_meta, 
DB_CHECK_NONE, 0, &database_context_cleanup[0])) + if (init_database_batch(db_context_meta, &database_context_cleanup[0])) return 1; return 0; @@ -123,7 +92,7 @@ void sql_close_context_database(void) // Fetching data // #define CTX_GET_CHART_LIST "SELECT c.chart_id, c.type||'.'||c.id, c.name, c.context, c.title, c.unit, c.priority, " \ - "c.update_every, c.chart_type, c.family FROM meta.chart c WHERE c.host_id = @host_id and c.chart_id is not null; " + "c.update_every, c.chart_type, c.family FROM chart c WHERE c.host_id = @host_id AND c.chart_id IS NOT NULL" void ctx_get_chart_list(uuid_t *host_uuid, void (*dict_cb)(SQL_CHART_DATA *, void *), void *data) { @@ -136,7 +105,7 @@ void ctx_get_chart_list(uuid_t *host_uuid, void (*dict_cb)(SQL_CHART_DATA *, voi } if (unlikely(!res)) { - rc = prepare_statement(db_context_meta, CTX_GET_CHART_LIST, &res); + rc = prepare_statement(db_meta, CTX_GET_CHART_LIST, &res); if (rc != SQLITE_OK) { error_report("Failed to prepare statement to fetch chart list"); return; @@ -172,14 +141,14 @@ void ctx_get_chart_list(uuid_t *host_uuid, void (*dict_cb)(SQL_CHART_DATA *, voi // Dimension list #define CTX_GET_DIMENSION_LIST "SELECT d.dim_id, d.id, d.name, CASE WHEN INSTR(d.options,\"hidden\") > 0 THEN 1 ELSE 0 END " \ - "FROM meta.dimension d WHERE d.chart_id = @id and d.dim_id is not null ORDER BY d.rowid ASC;" + "FROM dimension d WHERE d.chart_id = @id AND d.dim_id IS NOT NULL ORDER BY d.rowid ASC" void ctx_get_dimension_list(uuid_t *chart_uuid, void (*dict_cb)(SQL_DIMENSION_DATA *, void *), void *data) { int rc; static __thread sqlite3_stmt *res = NULL; if (unlikely(!res)) { - rc = prepare_statement(db_context_meta, CTX_GET_DIMENSION_LIST, &res); + rc = prepare_statement(db_meta, CTX_GET_DIMENSION_LIST, &res); if (rc != SQLITE_OK) { error_report("Failed to prepare statement to fetch chart dimension data"); return; @@ -209,7 +178,8 @@ void ctx_get_dimension_list(uuid_t *chart_uuid, void (*dict_cb)(SQL_DIMENSION_DA } // LABEL LIST -#define CTX_GET_LABEL_LIST "SELECT l.label_key, l.label_value, l.source_type FROM meta.chart_label l WHERE l.chart_id = @id;" +#define CTX_GET_LABEL_LIST "SELECT l.label_key, l.label_value, l.source_type FROM meta.chart_label l WHERE l.chart_id = @id" + void ctx_get_label_list(uuid_t *chart_uuid, void (*dict_cb)(SQL_CLABEL_DATA *, void *), void *data) { int rc; @@ -246,7 +216,8 @@ void ctx_get_label_list(uuid_t *chart_uuid, void (*dict_cb)(SQL_CLABEL_DATA *, v // CONTEXT LIST #define CTX_GET_CONTEXT_LIST "SELECT id, version, title, chart_type, unit, priority, first_time_t, " \ - "last_time_t, deleted, family FROM context c WHERE c.host_id = @host_id;" + "last_time_t, deleted, family FROM context c WHERE c.host_id = @host_id" + void ctx_get_context_list(uuid_t *host_uuid, void (*dict_cb)(VERSIONED_CONTEXT_DATA *, void *), void *data) { @@ -297,9 +268,10 @@ void ctx_get_context_list(uuid_t *host_uuid, void (*dict_cb)(VERSIONED_CONTEXT_D // // Storing Data // -#define CTX_STORE_CONTEXT "INSERT OR REPLACE INTO context " \ - "(host_id, id, version, title, chart_type, unit, priority, first_time_t, last_time_t, deleted, family) " \ - "VALUES (@host_id, @context, @version, @title, @chart_type, @unit, @priority, @first_time_t, @last_time_t, @deleted, @family);" +#define CTX_STORE_CONTEXT \ + "INSERT OR REPLACE INTO context " \ + "(host_id, id, version, title, chart_type, unit, priority, first_time_t, last_time_t, deleted, family) " \ + "VALUES (@host_id, @context, @version, @title, @chart_type, @unit, @priority, @first_t, @last_t, @delete, 
@family)" int ctx_store_context(uuid_t *host_uuid, VERSIONED_CONTEXT_DATA *context_data) { @@ -323,7 +295,7 @@ int ctx_store_context(uuid_t *host_uuid, VERSIONED_CONTEXT_DATA *context_data) rc = bind_text_null(res, 2, context_data->id, 0); if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind context to store details"); + error_report("Failed to bind context to store context details"); goto skip_store; } @@ -335,19 +307,19 @@ int ctx_store_context(uuid_t *host_uuid, VERSIONED_CONTEXT_DATA *context_data) rc = bind_text_null(res, 4, context_data->title, 0); if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind context to store details"); + error_report("Failed to bind context to store context details"); goto skip_store; } rc = bind_text_null(res, 5, context_data->chart_type, 0); if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind context to store details"); + error_report("Failed to bind context to store context details"); goto skip_store; } rc = bind_text_null(res, 6, context_data->units, 0); if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind context to store details"); + error_report("Failed to bind context to store context details"); goto skip_store; } @@ -396,7 +368,7 @@ int ctx_store_context(uuid_t *host_uuid, VERSIONED_CONTEXT_DATA *context_data) // Delete a context -#define CTX_DELETE_CONTEXT "DELETE FROM context WHERE host_id = @host_id AND id = @context;" +#define CTX_DELETE_CONTEXT "DELETE FROM context WHERE host_id = @host_id AND id = @context" int ctx_delete_context(uuid_t *host_uuid, VERSIONED_CONTEXT_DATA *context_data) { int rc, rc_stored = 1; @@ -413,13 +385,13 @@ int ctx_delete_context(uuid_t *host_uuid, VERSIONED_CONTEXT_DATA *context_data) rc = sqlite3_bind_blob(res, 1, host_uuid, sizeof(*host_uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind host_id to delete context data"); + error_report("Failed to bind host_id for context data deletion"); goto skip_delete; } rc = sqlite3_bind_text(res, 2, context_data->id, -1, SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind context id for data deletion"); + error_report("Failed to bind context id for context data deletion"); goto skip_delete; } @@ -427,13 +399,6 @@ int ctx_delete_context(uuid_t *host_uuid, VERSIONED_CONTEXT_DATA *context_data) if (rc_stored != SQLITE_DONE) error_report("Failed to delete context %s, rc = %d", context_data->id, rc_stored); -#ifdef NETDATA_INTERNAL_CHECKS - else { - char host_uuid_str[UUID_STR_LEN]; - uuid_unparse_lower(*host_uuid, host_uuid_str); - netdata_log_info("%s: Deleted context %s under host %s", __FUNCTION__, context_data->id, host_uuid_str); - } -#endif skip_delete: rc = sqlite3_finalize(res); diff --git a/database/sqlite/sqlite_db_migration.c b/database/sqlite/sqlite_db_migration.c index 548b7de07a84d2..00a1e105584e5a 100644 --- a/database/sqlite/sqlite_db_migration.c +++ b/database/sqlite/sqlite_db_migration.c @@ -7,20 +7,53 @@ static int return_int_cb(void *data, int argc, char **argv, char **column) int *status = data; UNUSED(argc); UNUSED(column); - *status = str2uint32_t(argv[0], NULL); + *status = (int) str2uint32_t(argv[0], NULL); return 0; } -int table_exists_in_database(const char *table) +static int get_auto_vaccum(sqlite3 *database) { char *err_msg = NULL; char sql[128]; int exists = 0; - snprintf(sql, 127, "select 1 from sqlite_schema where type = 'table' and name = '%s';", table); + snprintf(sql, sizeof(sql) - 1, "PRAGMA auto_vacuum"); - int rc = 
sqlite3_exec_monitored(db_meta, sql, return_int_cb, (void *) &exists, &err_msg); + int rc = sqlite3_exec_monitored(database, sql, return_int_cb, (void *) &exists, &err_msg); + if (rc != SQLITE_OK) { + netdata_log_info("Error checking database auto vacuum setting; %s", err_msg); + sqlite3_free(err_msg); + } + + return exists; +} + +int db_table_count(sqlite3 *database) +{ + char *err_msg = NULL; + char sql[128]; + + int count = 0; + snprintf(sql, sizeof(sql) - 1, "select count(1) from sqlite_schema where type = 'table'"); + int rc = sqlite3_exec_monitored(database, sql, return_int_cb, (void *) &count, &err_msg); + if (rc != SQLITE_OK) { + netdata_log_info("Error checking database table count; %s", err_msg); + sqlite3_free(err_msg); + } + return count; +} + +int table_exists_in_database(sqlite3 *database, const char *table) +{ + char *err_msg = NULL; + char sql[128]; + + int exists = 0; + + snprintf(sql, sizeof(sql) - 1, "select 1 from sqlite_schema where type = 'table' and name = '%s'", table); + + int rc = sqlite3_exec_monitored(database, sql, return_int_cb, (void *) &exists, &err_msg); if (rc != SQLITE_OK) { netdata_log_info("Error checking table existence; %s", err_msg); sqlite3_free(err_msg); @@ -29,16 +62,16 @@ int table_exists_in_database(const char *table) return exists; } -static int column_exists_in_table(const char *table, const char *column) +static int column_exists_in_table(sqlite3 *database, const char *table, const char *column) { char *err_msg = NULL; char sql[128]; int exists = 0; - snprintf(sql, 127, "SELECT 1 FROM pragma_table_info('%s') where name = '%s';", table, column); + snprintf(sql, sizeof(sql) - 1, "SELECT 1 FROM pragma_table_info('%s') where name = '%s'", table, column); - int rc = sqlite3_exec_monitored(db_meta, sql, return_int_cb, (void *) &exists, &err_msg); + int rc = sqlite3_exec_monitored(database, sql, return_int_cb, (void *) &exists, &err_msg); if (rc != SQLITE_OK) { netdata_log_info("Error checking column existence; %s", err_msg); sqlite3_free(err_msg); @@ -47,77 +80,100 @@ static int column_exists_in_table(const char *table, const char *column) return exists; } +static int get_database_user_version(sqlite3 *database) +{ + int user_version = 0; + + int rc = sqlite3_exec_monitored(database, "PRAGMA user_version", return_int_cb, (void *)&user_version, NULL); + if (rc != SQLITE_OK) + netdata_log_error("Failed to get user version for database"); + + return user_version; +} + const char *database_migrate_v1_v2[] = { - "ALTER TABLE host ADD hops INTEGER NOT NULL DEFAULT 0;", + "ALTER TABLE host ADD hops INTEGER NOT NULL DEFAULT 0", NULL }; const char *database_migrate_v2_v3[] = { - "ALTER TABLE host ADD memory_mode INT NOT NULL DEFAULT 0;", - "ALTER TABLE host ADD abbrev_timezone TEXT NOT NULL DEFAULT '';", - "ALTER TABLE host ADD utc_offset INT NOT NULL DEFAULT 0;", - "ALTER TABLE host ADD program_name TEXT NOT NULL DEFAULT 'unknown';", - "ALTER TABLE host ADD program_version TEXT NOT NULL DEFAULT 'unknown';", - "ALTER TABLE host ADD entries INT NOT NULL DEFAULT 0;", - "ALTER TABLE host ADD health_enabled INT NOT NULL DEFAULT 0;", + "ALTER TABLE host ADD memory_mode INT NOT NULL DEFAULT 0", + "ALTER TABLE host ADD abbrev_timezone TEXT NOT NULL DEFAULT ''", + "ALTER TABLE host ADD utc_offset INT NOT NULL DEFAULT 0", + "ALTER TABLE host ADD program_name TEXT NOT NULL DEFAULT 'unknown'", + "ALTER TABLE host ADD program_version TEXT NOT NULL DEFAULT 'unknown'", + "ALTER TABLE host ADD entries INT NOT NULL DEFAULT 0", + "ALTER TABLE host ADD health_enabled INT NOT 
NULL DEFAULT 0", NULL }; const char *database_migrate_v4_v5[] = { - "DROP TABLE IF EXISTS chart_active;", - "DROP TABLE IF EXISTS dimension_active;", - "DROP TABLE IF EXISTS chart_hash;", - "DROP TABLE IF EXISTS chart_hash_map;", - "DROP VIEW IF EXISTS v_chart_hash;", + "DROP TABLE IF EXISTS chart_active", + "DROP TABLE IF EXISTS dimension_active", + "DROP TABLE IF EXISTS chart_hash", + "DROP TABLE IF EXISTS chart_hash_map", + "DROP VIEW IF EXISTS v_chart_hash", NULL }; const char *database_migrate_v5_v6[] = { - "DROP TRIGGER IF EXISTS tr_dim_del;", - "DROP TABLE IF EXISTS dimension_delete;", + "DROP TRIGGER IF EXISTS tr_dim_del", + "DROP TABLE IF EXISTS dimension_delete", NULL }; const char *database_migrate_v9_v10[] = { - "ALTER TABLE alert_hash ADD chart_labels TEXT;", + "ALTER TABLE alert_hash ADD chart_labels TEXT", NULL }; const char *database_migrate_v10_v11[] = { - "ALTER TABLE health_log ADD chart_name TEXT;", + "ALTER TABLE health_log ADD chart_name TEXT", NULL }; -static int do_migration_v1_v2(sqlite3 *database, const char *name) -{ - UNUSED(name); - netdata_log_info("Running \"%s\" database migration", name); +const char *database_migrate_v11_v12[] = { + "ALTER TABLE health_log_detail ADD summary TEXT", + "ALTER TABLE alert_hash ADD summary TEXT", + NULL +}; + +const char *database_migrate_v12_v13_detail[] = { + "ALTER TABLE health_log_detail ADD summary TEXT", + NULL +}; + +const char *database_migrate_v12_v13_hash[] = { + "ALTER TABLE alert_hash ADD summary TEXT", + NULL +}; + +const char *database_migrate_v13_v14[] = { + "ALTER TABLE host ADD last_connected INT NOT NULL DEFAULT 0", + NULL +}; - if (table_exists_in_database("host") && !column_exists_in_table("host", "hops")) - return init_database_batch(database, DB_CHECK_NONE, 0, &database_migrate_v1_v2[0]); +static int do_migration_v1_v2(sqlite3 *database) +{ + if (table_exists_in_database(database, "host") && !column_exists_in_table(database, "host", "hops")) + return init_database_batch(database, &database_migrate_v1_v2[0]); return 0; } -static int do_migration_v2_v3(sqlite3 *database, const char *name) +static int do_migration_v2_v3(sqlite3 *database) { - UNUSED(name); - netdata_log_info("Running \"%s\" database migration", name); - - if (table_exists_in_database("host") && !column_exists_in_table("host", "memory_mode")) - return init_database_batch(database, DB_CHECK_NONE, 0, &database_migrate_v2_v3[0]); + if (table_exists_in_database(database, "host") && !column_exists_in_table(database, "host", "memory_mode")) + return init_database_batch(database, &database_migrate_v2_v3[0]); return 0; } -static int do_migration_v3_v4(sqlite3 *database, const char *name) +static int do_migration_v3_v4(sqlite3 *database) { - UNUSED(name); - netdata_log_info("Running database migration %s", name); - char sql[256]; int rc; sqlite3_stmt *res = NULL; - snprintfz(sql, 255, "SELECT name FROM sqlite_schema WHERE type ='table' AND name LIKE 'health_log_%%';"); + snprintfz(sql, sizeof(sql) - 1, "SELECT name FROM sqlite_schema WHERE type ='table' AND name LIKE 'health_log_%%'"); rc = sqlite3_prepare_v2(database, sql, -1, &res, 0); if (rc != SQLITE_OK) { error_report("Failed to prepare statement to alter health_log tables"); @@ -126,8 +182,8 @@ static int do_migration_v3_v4(sqlite3 *database, const char *name) while (sqlite3_step_monitored(res) == SQLITE_ROW) { char *table = strdupz((char *) sqlite3_column_text(res, 0)); - if (!column_exists_in_table(table, "chart_context")) { - snprintfz(sql, 255, "ALTER TABLE %s ADD chart_context text", 
table); + if (!column_exists_in_table(database, table, "chart_context")) { + snprintfz(sql, sizeof(sql) - 1, "ALTER TABLE %s ADD chart_context text", table); sqlite3_exec_monitored(database, sql, 0, 0, NULL); } freez(table); @@ -140,32 +196,23 @@ static int do_migration_v3_v4(sqlite3 *database, const char *name) return 0; } -static int do_migration_v4_v5(sqlite3 *database, const char *name) +static int do_migration_v4_v5(sqlite3 *database) { - UNUSED(name); - netdata_log_info("Running \"%s\" database migration", name); - - return init_database_batch(database, DB_CHECK_NONE, 0, &database_migrate_v4_v5[0]); + return init_database_batch(database, &database_migrate_v4_v5[0]); } -static int do_migration_v5_v6(sqlite3 *database, const char *name) +static int do_migration_v5_v6(sqlite3 *database) { - UNUSED(name); - netdata_log_info("Running \"%s\" database migration", name); - - return init_database_batch(database, DB_CHECK_NONE, 0, &database_migrate_v5_v6[0]); + return init_database_batch(database, &database_migrate_v5_v6[0]); } -static int do_migration_v6_v7(sqlite3 *database, const char *name) +static int do_migration_v6_v7(sqlite3 *database) { - UNUSED(name); - netdata_log_info("Running \"%s\" database migration", name); - char sql[256]; int rc; sqlite3_stmt *res = NULL; - snprintfz(sql, 255, "SELECT name FROM sqlite_schema WHERE type ='table' AND name LIKE 'aclk_alert_%%';"); + snprintfz(sql, sizeof(sql) - 1, "SELECT name FROM sqlite_schema WHERE type ='table' AND name LIKE 'aclk_alert_%%'"); rc = sqlite3_prepare_v2(database, sql, -1, &res, 0); if (rc != SQLITE_OK) { error_report("Failed to prepare statement to alter aclk_alert tables"); @@ -174,10 +221,10 @@ static int do_migration_v6_v7(sqlite3 *database, const char *name) while (sqlite3_step_monitored(res) == SQLITE_ROW) { char *table = strdupz((char *) sqlite3_column_text(res, 0)); - if (!column_exists_in_table(table, "filtered_alert_unique_id")) { - snprintfz(sql, 255, "ALTER TABLE %s ADD filtered_alert_unique_id", table); + if (!column_exists_in_table(database, table, "filtered_alert_unique_id")) { + snprintfz(sql, sizeof(sql) - 1, "ALTER TABLE %s ADD filtered_alert_unique_id", table); sqlite3_exec_monitored(database, sql, 0, 0, NULL); - snprintfz(sql, 255, "UPDATE %s SET filtered_alert_unique_id = alert_unique_id", table); + snprintfz(sql, sizeof(sql) - 1, "UPDATE %s SET filtered_alert_unique_id = alert_unique_id", table); sqlite3_exec_monitored(database, sql, 0, 0, NULL); } freez(table); @@ -190,16 +237,13 @@ static int do_migration_v6_v7(sqlite3 *database, const char *name) return 0; } -static int do_migration_v7_v8(sqlite3 *database, const char *name) +static int do_migration_v7_v8(sqlite3 *database) { - UNUSED(name); - netdata_log_info("Running database migration %s", name); - char sql[256]; int rc; sqlite3_stmt *res = NULL; - snprintfz(sql, 255, "SELECT name FROM sqlite_schema WHERE type ='table' AND name LIKE 'health_log_%%';"); + snprintfz(sql, sizeof(sql) - 1, "SELECT name FROM sqlite_schema WHERE type ='table' AND name LIKE 'health_log_%%'"); rc = sqlite3_prepare_v2(database, sql, -1, &res, 0); if (rc != SQLITE_OK) { error_report("Failed to prepare statement to alter health_log tables"); @@ -208,8 +252,8 @@ static int do_migration_v7_v8(sqlite3 *database, const char *name) while (sqlite3_step_monitored(res) == SQLITE_ROW) { char *table = strdupz((char *) sqlite3_column_text(res, 0)); - if (!column_exists_in_table(table, "transition_id")) { - snprintfz(sql, 255, "ALTER TABLE %s ADD transition_id blob", table); + if 
(!column_exists_in_table(database, table, "transition_id")) { + snprintfz(sql, sizeof(sql) - 1, "ALTER TABLE %s ADD transition_id blob", table); sqlite3_exec_monitored(database, sql, 0, 0, NULL); } freez(table); @@ -222,47 +266,45 @@ static int do_migration_v7_v8(sqlite3 *database, const char *name) return 0; } -static int do_migration_v8_v9(sqlite3 *database, const char *name) +static int do_migration_v8_v9(sqlite3 *database) { - netdata_log_info("Running database migration %s", name); - char sql[2048]; int rc; sqlite3_stmt *res = NULL; //create the health_log table and it's index - snprintfz(sql, 2047, "CREATE TABLE IF NOT EXISTS health_log (health_log_id INTEGER PRIMARY KEY, host_id blob, alarm_id int, " \ + snprintfz(sql, sizeof(sql) - 1, "CREATE TABLE IF NOT EXISTS health_log (health_log_id INTEGER PRIMARY KEY, host_id blob, alarm_id int, " \ "config_hash_id blob, name text, chart text, family text, recipient text, units text, exec text, " \ - "chart_context text, last_transition_id blob, UNIQUE (host_id, alarm_id)) ;"); + "chart_context text, last_transition_id blob, UNIQUE (host_id, alarm_id))"); sqlite3_exec_monitored(database, sql, 0, 0, NULL); //TODO indexes - snprintfz(sql, 2047, "CREATE INDEX IF NOT EXISTS health_log_ind_1 ON health_log (host_id);"); + snprintfz(sql, sizeof(sql) - 1, "CREATE INDEX IF NOT EXISTS health_log_ind_1 ON health_log (host_id)"); sqlite3_exec_monitored(database, sql, 0, 0, NULL); - snprintfz(sql, 2047, "CREATE TABLE IF NOT EXISTS health_log_detail (health_log_id int, unique_id int, alarm_id int, alarm_event_id int, " \ + snprintfz(sql, sizeof(sql) - 1, "CREATE TABLE IF NOT EXISTS health_log_detail (health_log_id int, unique_id int, alarm_id int, alarm_event_id int, " \ "updated_by_id int, updates_id int, when_key int, duration int, non_clear_duration int, " \ "flags int, exec_run_timestamp int, delay_up_to_timestamp int, " \ "info text, exec_code int, new_status real, old_status real, delay int, " \ - "new_value double, old_value double, last_repeat int, transition_id blob, global_id int, host_id blob);"); + "new_value double, old_value double, last_repeat int, transition_id blob, global_id int, summary text, host_id blob)"); sqlite3_exec_monitored(database, sql, 0, 0, NULL); - snprintfz(sql, 2047, "CREATE INDEX IF NOT EXISTS health_log_d_ind_1 ON health_log_detail (unique_id);"); + snprintfz(sql, sizeof(sql) - 1, "CREATE INDEX IF NOT EXISTS health_log_d_ind_1 ON health_log_detail (unique_id)"); sqlite3_exec_monitored(database, sql, 0, 0, NULL); - snprintfz(sql, 2047, "CREATE INDEX IF NOT EXISTS health_log_d_ind_2 ON health_log_detail (global_id);"); + snprintfz(sql, sizeof(sql) - 1, "CREATE INDEX IF NOT EXISTS health_log_d_ind_2 ON health_log_detail (global_id)"); sqlite3_exec_monitored(database, sql, 0, 0, NULL); - snprintfz(sql, 2047, "CREATE INDEX IF NOT EXISTS health_log_d_ind_3 ON health_log_detail (transition_id);"); + snprintfz(sql, sizeof(sql) - 1, "CREATE INDEX IF NOT EXISTS health_log_d_ind_3 ON health_log_detail (transition_id)"); sqlite3_exec_monitored(database, sql, 0, 0, NULL); - snprintfz(sql, 2047, "CREATE INDEX IF NOT EXISTS health_log_d_ind_4 ON health_log_detail (health_log_id);"); + snprintfz(sql, sizeof(sql) - 1, "CREATE INDEX IF NOT EXISTS health_log_d_ind_4 ON health_log_detail (health_log_id)"); sqlite3_exec_monitored(database, sql, 0, 0, NULL); - snprintfz(sql, 2047, "ALTER TABLE alert_hash ADD source text;"); + snprintfz(sql, sizeof(sql) - 1, "ALTER TABLE alert_hash ADD source text"); sqlite3_exec_monitored(database, sql, 0, 
0, NULL); - snprintfz(sql, 2047, "CREATE INDEX IF NOT EXISTS alert_hash_index ON alert_hash (hash_id);"); + snprintfz(sql, sizeof(sql) - 1, "CREATE INDEX IF NOT EXISTS alert_hash_index ON alert_hash (hash_id)"); sqlite3_exec_monitored(database, sql, 0, 0, NULL); - snprintfz(sql, 2047, "SELECT name FROM sqlite_schema WHERE type ='table' AND name LIKE 'health_log_%%' AND name <> 'health_log_detail';"); + snprintfz(sql, sizeof(sql) - 1, "SELECT name FROM sqlite_schema WHERE type ='table' AND name LIKE 'health_log_%%' AND name <> 'health_log_detail'"); rc = sqlite3_prepare_v2(database, sql, -1, &res, 0); if (rc != SQLITE_OK) { error_report("Failed to prepare statement to alter health_log tables"); @@ -290,42 +332,118 @@ static int do_migration_v8_v9(sqlite3 *database, const char *name) dfe_done(table); dictionary_destroy(dict_tables); - snprintfz(sql, 2047, "ALTER TABLE health_log_detail DROP COLUMN host_id;"); + snprintfz(sql, sizeof(sql) - 1, "ALTER TABLE health_log_detail DROP COLUMN host_id"); sqlite3_exec_monitored(database, sql, 0, 0, NULL); return 0; } -static int do_migration_v9_v10(sqlite3 *database, const char *name) +static int do_migration_v9_v10(sqlite3 *database) +{ + if (table_exists_in_database(database, "alert_hash") && !column_exists_in_table(database, "alert_hash", "chart_labels")) + return init_database_batch(database, &database_migrate_v9_v10[0]); + return 0; +} + +static int do_migration_v10_v11(sqlite3 *database) { - netdata_log_info("Running \"%s\" database migration", name); + if (table_exists_in_database(database, "health_log") && !column_exists_in_table(database, "health_log", "chart_name")) + return init_database_batch(database, &database_migrate_v10_v11[0]); - if (table_exists_in_database("alert_hash") && !column_exists_in_table("alert_hash", "chart_labels")) - return init_database_batch(database, DB_CHECK_NONE, 0, &database_migrate_v9_v10[0]); return 0; } -static int do_migration_v10_v11(sqlite3 *database, const char *name) +#define MIGR_11_12_UPD_HEALTH_LOG_DETAIL "UPDATE health_log_detail SET summary = (select name from health_log where health_log_id = health_log_detail.health_log_id)" +static int do_migration_v11_v12(sqlite3 *database) { - netdata_log_info("Running \"%s\" database migration", name); + int rc = 0; - if (table_exists_in_database("health_log") && !column_exists_in_table("health_log", "chart_name")) - return init_database_batch(database, DB_CHECK_NONE, 0, &database_migrate_v10_v11[0]); + if (table_exists_in_database(database, "health_log_detail") && !column_exists_in_table(database, "health_log_detail", "summary") && + table_exists_in_database(database, "alert_hash") && !column_exists_in_table(database, "alert_hash", "summary")) + rc = init_database_batch(database, &database_migrate_v11_v12[0]); + + if (!rc) + sqlite3_exec_monitored(database, MIGR_11_12_UPD_HEALTH_LOG_DETAIL, 0, 0, NULL); + + return rc; +} + +static int do_migration_v14_v15(sqlite3 *database) +{ + char sql[256]; + + int rc; + sqlite3_stmt *res = NULL; + snprintfz(sql, sizeof(sql) - 1, "SELECT name FROM sqlite_schema WHERE type = \"index\" AND name LIKE \"aclk_alert_index@_%%\" ESCAPE \"@\""); + rc = sqlite3_prepare_v2(database, sql, -1, &res, 0); + if (rc != SQLITE_OK) { + error_report("Failed to prepare statement to drop unused indices"); + return 1; + } + + BUFFER *wb = buffer_create(128, NULL); + while (sqlite3_step_monitored(res) == SQLITE_ROW) + buffer_sprintf(wb, "DROP INDEX IF EXISTS %s", (char *) sqlite3_column_text(res, 0)); + + rc = sqlite3_finalize(res); + if 
(unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize statement when dropping unused indices, rc = %d", rc); + + (void) db_execute(database, buffer_tostring(wb)); + + buffer_free(wb); + return 0; +} + +static int do_migration_v12_v13(sqlite3 *database) +{ + int rc = 0; + + if (table_exists_in_database(database, "health_log_detail") && !column_exists_in_table(database, "health_log_detail", "summary")) { + rc = init_database_batch(database, &database_migrate_v12_v13_detail[0]); + sqlite3_exec_monitored(database, MIGR_11_12_UPD_HEALTH_LOG_DETAIL, 0, 0, NULL); + } + + if (table_exists_in_database(database, "alert_hash") && !column_exists_in_table(database, "alert_hash", "summary")) + rc = init_database_batch(database, &database_migrate_v12_v13_hash[0]); + + return rc; +} + +static int do_migration_v13_v14(sqlite3 *database) +{ + if (table_exists_in_database(database, "host") && !column_exists_in_table(database, "host", "last_connected")) + return init_database_batch(database, &database_migrate_v13_v14[0]); return 0; } -static int do_migration_noop(sqlite3 *database, const char *name) + +// Actions for ML migration +const char *database_ml_migrate_v1_v2[] = { + "PRAGMA journal_mode=delete", + "PRAGMA journal_mode=WAL", + "PRAGMA auto_vacuum=2", + "VACUUM", + NULL +}; + +static int do_ml_migration_v1_v2(sqlite3 *database) +{ + if (get_auto_vaccum(database) != 2) + return init_database_batch(database, &database_ml_migrate_v1_v2[0]); + return 0; +} + +static int do_migration_noop(sqlite3 *database) { UNUSED(database); - UNUSED(name); - netdata_log_info("Running database migration %s", name); return 0; } typedef struct database_func_migration_list { char *name; - int (*func)(sqlite3 *database, const char *name); + int (*func)(sqlite3 *database); } DATABASE_FUNC_MIGRATION_LIST; @@ -334,7 +452,7 @@ static int migrate_database(sqlite3 *database, int target_version, char *db_name int user_version = 0; char *err_msg = NULL; - int rc = sqlite3_exec_monitored(database, "PRAGMA user_version;", return_int_cb, (void *) &user_version, &err_msg); + int rc = sqlite3_exec_monitored(database, "PRAGMA user_version", return_int_cb, (void *) &user_version, &err_msg); if (rc != SQLITE_OK) { netdata_log_info("Error checking the %s database version; %s", db_name, err_msg); sqlite3_free(err_msg); @@ -347,14 +465,14 @@ static int migrate_database(sqlite3 *database, int target_version, char *db_name netdata_log_info("Database version is %d, current version is %d. 
Running migration for %s ...", user_version, target_version, db_name); for (int i = user_version; i < target_version && migration_list[i].func; i++) { - rc = (migration_list[i].func)(database, migration_list[i].name); + netdata_log_info("Running database \"%s\" migration %s", db_name, migration_list[i].name); + rc = (migration_list[i].func)(database); if (unlikely(rc)) { error_report("Database %s migration from version %d to version %d failed", db_name, i, i + 1); return i; } } return target_version; - } DATABASE_FUNC_MIGRATION_LIST migration_action[] = { @@ -369,6 +487,10 @@ DATABASE_FUNC_MIGRATION_LIST migration_action[] = { {.name = "v8 to v9", .func = do_migration_v8_v9}, {.name = "v9 to v10", .func = do_migration_v9_v10}, {.name = "v10 to v11", .func = do_migration_v10_v11}, + {.name = "v11 to v12", .func = do_migration_v11_v12}, + {.name = "v12 to v13", .func = do_migration_v12_v13}, + {.name = "v13 to v14", .func = do_migration_v13_v14}, + {.name = "v14 to v15", .func = do_migration_v14_v15}, // the terminator of this array {.name = NULL, .func = NULL} }; @@ -379,13 +501,34 @@ DATABASE_FUNC_MIGRATION_LIST context_migration_action[] = { {.name = NULL, .func = NULL} }; +DATABASE_FUNC_MIGRATION_LIST ml_migration_action[] = { + {.name = "v0 to v1", .func = do_migration_noop}, + {.name = "v1 to v2", .func = do_ml_migration_v1_v2}, + // the terminator of this array + {.name = NULL, .func = NULL} +}; int perform_database_migration(sqlite3 *database, int target_version) { + int user_version = get_database_user_version(database); + + if (!user_version && !db_table_count(database)) + return target_version; + return migrate_database(database, target_version, "metadata", migration_action); } int perform_context_database_migration(sqlite3 *database, int target_version) { + int user_version = get_database_user_version(database); + + if (!user_version && !table_exists_in_database(database, "context")) + return target_version; + return migrate_database(database, target_version, "context", context_migration_action); } + +int perform_ml_database_migration(sqlite3 *database, int target_version) +{ + return migrate_database(database, target_version, "ml", ml_migration_action); +} diff --git a/database/sqlite/sqlite_db_migration.h b/database/sqlite/sqlite_db_migration.h index edaac526980967..e3c1be84f1f1a2 100644 --- a/database/sqlite/sqlite_db_migration.h +++ b/database/sqlite/sqlite_db_migration.h @@ -8,6 +8,7 @@ int perform_database_migration(sqlite3 *database, int target_version); int perform_context_database_migration(sqlite3 *database, int target_version); -int table_exists_in_database(const char *table); +int table_exists_in_database(sqlite3 *database, const char *table); +int perform_ml_database_migration(sqlite3 *database, int target_version); #endif //NETDATA_SQLITE_DB_MIGRATION_H diff --git a/database/sqlite/sqlite_functions.c b/database/sqlite/sqlite_functions.c index d976a3c6e6f66d..f08c6d35010397 100644 --- a/database/sqlite/sqlite_functions.c +++ b/database/sqlite/sqlite_functions.c @@ -1,9 +1,10 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "sqlite_functions.h" +#include "sqlite3recover.h" #include "sqlite_db_migration.h" -#define DB_METADATA_VERSION 11 +#define DB_METADATA_VERSION 15 const char *database_config[] = { "CREATE TABLE IF NOT EXISTS host(host_id BLOB PRIMARY KEY, hostname TEXT NOT NULL, " @@ -13,67 +14,76 @@ const char *database_config[] = { "memory_mode INT DEFAULT 0, abbrev_timezone TEXT DEFAULT '', utc_offset INT NOT NULL DEFAULT 0," "program_name TEXT NOT NULL 
DEFAULT 'unknown', program_version TEXT NOT NULL DEFAULT 'unknown', " "entries INT NOT NULL DEFAULT 0," - "health_enabled INT NOT NULL DEFAULT 0);", + "health_enabled INT NOT NULL DEFAULT 0, last_connected INT NOT NULL DEFAULT 0)", "CREATE TABLE IF NOT EXISTS chart(chart_id blob PRIMARY KEY, host_id blob, type text, id text, name text, " "family text, context text, title text, unit text, plugin text, module text, priority int, update_every int, " - "chart_type int, memory_mode int, history_entries);", + "chart_type int, memory_mode int, history_entries)", + "CREATE TABLE IF NOT EXISTS dimension(dim_id blob PRIMARY KEY, chart_id blob, id text, name text, " - "multiplier int, divisor int , algorithm int, options text);", + "multiplier int, divisor int , algorithm int, options text)", + + "CREATE TABLE IF NOT EXISTS metadata_migration(filename text, file_size, date_created int)", + + "CREATE INDEX IF NOT EXISTS ind_d2 on dimension (chart_id)", + + "CREATE INDEX IF NOT EXISTS ind_c3 on chart (host_id)", - "CREATE TABLE IF NOT EXISTS metadata_migration(filename text, file_size, date_created int);", - "CREATE INDEX IF NOT EXISTS ind_d2 on dimension (chart_id);", - "CREATE INDEX IF NOT EXISTS ind_c3 on chart (host_id);", "CREATE TABLE IF NOT EXISTS chart_label(chart_id blob, source_type int, label_key text, " - "label_value text, date_created int, PRIMARY KEY (chart_id, label_key));", - "CREATE TABLE IF NOT EXISTS node_instance (host_id blob PRIMARY KEY, claim_id, node_id, date_created);", + "label_value text, date_created int, PRIMARY KEY (chart_id, label_key))", + + "CREATE TABLE IF NOT EXISTS node_instance (host_id blob PRIMARY KEY, claim_id, node_id, date_created)", + "CREATE TABLE IF NOT EXISTS alert_hash(hash_id blob PRIMARY KEY, date_updated int, alarm text, template text, " "on_key text, class text, component text, type text, os text, hosts text, lookup text, " "every text, units text, calc text, families text, plugin text, module text, charts text, green text, " "red text, warn text, crit text, exec text, to_key text, info text, delay text, options text, " "repeat text, host_labels text, p_db_lookup_dimensions text, p_db_lookup_method text, p_db_lookup_options int, " - "p_db_lookup_after int, p_db_lookup_before int, p_update_every int, source text, chart_labels text);", + "p_db_lookup_after int, p_db_lookup_before int, p_update_every int, source text, chart_labels text, summary text)", "CREATE TABLE IF NOT EXISTS host_info(host_id blob, system_key text NOT NULL, system_value text NOT NULL, " - "date_created INT, PRIMARY KEY(host_id, system_key));", + "date_created INT, PRIMARY KEY(host_id, system_key))", "CREATE TABLE IF NOT EXISTS host_label(host_id blob, source_type int, label_key text NOT NULL, " - "label_value text NOT NULL, date_created INT, PRIMARY KEY (host_id, label_key));", + "label_value text NOT NULL, date_created INT, PRIMARY KEY (host_id, label_key))", "CREATE TRIGGER IF NOT EXISTS ins_host AFTER INSERT ON host BEGIN INSERT INTO node_instance (host_id, date_created)" - " SELECT new.host_id, unixepoch() WHERE new.host_id NOT IN (SELECT host_id FROM node_instance); END;", + " SELECT new.host_id, unixepoch() WHERE new.host_id NOT IN (SELECT host_id FROM node_instance); END", "CREATE TABLE IF NOT EXISTS health_log (health_log_id INTEGER PRIMARY KEY, host_id blob, alarm_id int, " "config_hash_id blob, name text, chart text, family text, recipient text, units text, exec text, " - "chart_context text, last_transition_id blob, chart_name text, UNIQUE (host_id, alarm_id)) ;", + 
"chart_context text, last_transition_id blob, chart_name text, UNIQUE (host_id, alarm_id))", - "CREATE INDEX IF NOT EXISTS health_log_ind_1 ON health_log (host_id);", + "CREATE INDEX IF NOT EXISTS health_log_ind_1 ON health_log (host_id)", "CREATE TABLE IF NOT EXISTS health_log_detail (health_log_id int, unique_id int, alarm_id int, alarm_event_id int, " "updated_by_id int, updates_id int, when_key int, duration int, non_clear_duration int, " "flags int, exec_run_timestamp int, delay_up_to_timestamp int, " "info text, exec_code int, new_status real, old_status real, delay int, " - "new_value double, old_value double, last_repeat int, transition_id blob, global_id int);", + "new_value double, old_value double, last_repeat int, transition_id blob, global_id int, summary text)", - "CREATE INDEX IF NOT EXISTS health_log_d_ind_1 ON health_log_detail (unique_id);", - "CREATE INDEX IF NOT EXISTS health_log_d_ind_2 ON health_log_detail (global_id);", - "CREATE INDEX IF NOT EXISTS health_log_d_ind_3 ON health_log_detail (transition_id);", - "CREATE INDEX IF NOT EXISTS health_log_d_ind_4 ON health_log_detail (health_log_id);", + "CREATE INDEX IF NOT EXISTS health_log_d_ind_2 ON health_log_detail (global_id)", + "CREATE INDEX IF NOT EXISTS health_log_d_ind_3 ON health_log_detail (transition_id)", + "CREATE INDEX IF NOT EXISTS health_log_d_ind_5 ON health_log_detail (health_log_id, unique_id DESC)", + "CREATE INDEX IF NOT EXISTS health_log_d_ind_6 on health_log_detail (health_log_id, when_key)", + "CREATE INDEX IF NOT EXISTS health_log_d_ind_7 on health_log_detail (alarm_id)", + "CREATE INDEX IF NOT EXISTS health_log_d_ind_8 on health_log_detail (new_status, updated_by_id)", NULL }; const char *database_cleanup[] = { - "DELETE FROM chart WHERE chart_id NOT IN (SELECT chart_id FROM dimension);", - "DELETE FROM host WHERE host_id NOT IN (SELECT host_id FROM chart);", - "DELETE FROM node_instance WHERE host_id NOT IN (SELECT host_id FROM host);", - "DELETE FROM host_info WHERE host_id NOT IN (SELECT host_id FROM host);", - "DELETE FROM host_label WHERE host_id NOT IN (SELECT host_id FROM host);", - "DROP TRIGGER IF EXISTS tr_dim_del;", - "DROP INDEX IF EXISTS ind_d1;", - "DROP INDEX IF EXISTS ind_c1;", - "DROP INDEX IF EXISTS ind_c2;", - "DROP INDEX IF EXISTS alert_hash_index;", + "DELETE FROM host WHERE host_id NOT IN (SELECT host_id FROM chart)", + "DELETE FROM node_instance WHERE host_id NOT IN (SELECT host_id FROM host)", + "DELETE FROM host_info WHERE host_id NOT IN (SELECT host_id FROM host)", + "DELETE FROM host_label WHERE host_id NOT IN (SELECT host_id FROM host)", + "DROP TRIGGER IF EXISTS tr_dim_del", + "DROP INDEX IF EXISTS ind_d1", + "DROP INDEX IF EXISTS ind_c1", + "DROP INDEX IF EXISTS ind_c2", + "DROP INDEX IF EXISTS alert_hash_index", + "DROP INDEX IF EXISTS health_log_d_ind_4", + "DROP INDEX IF EXISTS health_log_d_ind_1", NULL }; @@ -120,23 +130,126 @@ SQLITE_API int sqlite3_step_monitored(sqlite3_stmt *stmt) { return rc; } -int execute_insert(sqlite3_stmt *res) +static bool mark_database_to_recover(sqlite3_stmt *res, sqlite3 *database) { - int rc; - int cnt = 0; - while ((rc = sqlite3_step_monitored(res)) != SQLITE_DONE && ++cnt < SQL_MAX_RETRY && likely(!netdata_exit)) { - if (likely(rc == SQLITE_BUSY || rc == SQLITE_LOCKED)) { - usleep(SQLITE_INSERT_DELAY * USEC_PER_MS); - error_report("Failed to insert/update, rc = %d -- attempt %d", rc, cnt); + + if (!res && !database) + return false; + + if (!database) + database = sqlite3_db_handle(res); + + if (db_meta == database) { + char 
recover_file[FILENAME_MAX + 1]; + snprintfz(recover_file, FILENAME_MAX, "%s/.netdata-meta.db.recover", netdata_configured_cache_dir); + int fd = open(recover_file, O_WRONLY | O_CREAT | O_TRUNC, 444); + if (fd >= 0) { + close(fd); + return true; } - else { - error_report("SQLite error %d", rc); - break; + } + return false; +} + +static void recover_database(const char *sqlite_database, const char *new_sqlite_database) +{ + sqlite3 *database; + int rc = sqlite3_open(sqlite_database, &database); + if (rc != SQLITE_OK) + return; + + netdata_log_info("Recover %s", sqlite_database); + netdata_log_info(" to %s", new_sqlite_database); + + // This will remove the -shm and -wal files when we close the database + (void) db_execute(database, "select count(*) from sqlite_master limit 0"); + + sqlite3_recover *recover = sqlite3_recover_init(database, "main", new_sqlite_database); + if (recover) { + + rc = sqlite3_recover_run(recover); + + if (rc == SQLITE_OK) + netdata_log_info("Recover complete"); + else + netdata_log_info("Recover encountered an error but the database may be usable"); + + rc = sqlite3_recover_finish(recover); + + (void) sqlite3_close(database); + + if (rc == SQLITE_OK) { + rc = rename(new_sqlite_database, sqlite_database); + if (rc == 0) { + netdata_log_info("Renamed %s", new_sqlite_database); + netdata_log_info(" to %s", sqlite_database); + } } } + else + (void) sqlite3_close(database); +} + +int execute_insert(sqlite3_stmt *res) +{ + int rc; + rc = sqlite3_step_monitored(res); + if (rc == SQLITE_CORRUPT) { + (void)mark_database_to_recover(res, NULL); + error_report("SQLite error %d", rc); + } return rc; } +int configure_sqlite_database(sqlite3 *database, int target_version) +{ + char buf[1024 + 1] = ""; + const char *list[2] = { buf, NULL }; + + // https://www.sqlite.org/pragma.html#pragma_auto_vacuum + // PRAGMA schema.auto_vacuum = 0 | NONE | 1 | FULL | 2 | INCREMENTAL; + snprintfz(buf, sizeof(buf) - 1, "PRAGMA auto_vacuum=%s", config_get(CONFIG_SECTION_SQLITE, "auto vacuum", "INCREMENTAL")); + if (init_database_batch(database, list)) + return 1; + + // https://www.sqlite.org/pragma.html#pragma_synchronous + // PRAGMA schema.synchronous = 0 | OFF | 1 | NORMAL | 2 | FULL | 3 | EXTRA; + snprintfz(buf, sizeof(buf) - 1, "PRAGMA synchronous=%s", config_get(CONFIG_SECTION_SQLITE, "synchronous", "NORMAL")); + if (init_database_batch(database, list)) + return 1; + + // https://www.sqlite.org/pragma.html#pragma_journal_mode + // PRAGMA schema.journal_mode = DELETE | TRUNCATE | PERSIST | MEMORY | WAL | OFF + snprintfz(buf, sizeof(buf) - 1, "PRAGMA journal_mode=%s", config_get(CONFIG_SECTION_SQLITE, "journal mode", "WAL")); + if (init_database_batch(database, list)) + return 1; + + // https://www.sqlite.org/pragma.html#pragma_temp_store + // PRAGMA temp_store = 0 | DEFAULT | 1 | FILE | 2 | MEMORY; + snprintfz(buf, sizeof(buf) - 1, "PRAGMA temp_store=%s", config_get(CONFIG_SECTION_SQLITE, "temp store", "MEMORY")); + if (init_database_batch(database, list)) + return 1; + + // https://www.sqlite.org/pragma.html#pragma_journal_size_limit + // PRAGMA schema.journal_size_limit = N ; + snprintfz(buf, sizeof(buf) - 1, "PRAGMA journal_size_limit=%lld", config_get_number(CONFIG_SECTION_SQLITE, "journal size limit", 16777216)); + if (init_database_batch(database, list)) + return 1; + + // https://www.sqlite.org/pragma.html#pragma_cache_size + // PRAGMA schema.cache_size = pages; + // PRAGMA schema.cache_size = -kibibytes; + snprintfz(buf, sizeof(buf) - 1, "PRAGMA cache_size=%lld", 
config_get_number(CONFIG_SECTION_SQLITE, "cache size", -2000)); + if (init_database_batch(database, list)) + return 1; + + snprintfz(buf, sizeof(buf) - 1, "PRAGMA user_version=%d", target_version); + if (init_database_batch(database, list)) + return 1; + + return 0; +} + #define MAX_OPEN_STATEMENTS (512) static void add_stmt_to_list(sqlite3_stmt *res) @@ -202,134 +315,21 @@ int prepare_statement(sqlite3 *database, const char *query, sqlite3_stmt **state return rc; } -static int check_table_integrity_cb(void *data, int argc, char **argv, char **column) -{ - int *status = data; - UNUSED(argc); - UNUSED(column); - netdata_log_info("---> %s", argv[0]); - *status = (strcmp(argv[0], "ok") != 0); - return 0; -} - - -static int check_table_integrity(char *table) -{ - int status = 0; - char *err_msg = NULL; - char wstr[255]; - - if (table) { - netdata_log_info("Checking table %s", table); - snprintfz(wstr, 254, "PRAGMA integrity_check(%s);", table); - } - else { - netdata_log_info("Checking entire database"); - strcpy(wstr,"PRAGMA integrity_check;"); - } - - int rc = sqlite3_exec_monitored(db_meta, wstr, check_table_integrity_cb, (void *) &status, &err_msg); - if (rc != SQLITE_OK) { - error_report("SQLite error during database integrity check for %s, rc = %d (%s)", - table ? table : "the entire database", rc, err_msg); - sqlite3_free(err_msg); - } - - return status; -} - -const char *rebuild_chart_commands[] = { - "BEGIN TRANSACTION; ", - "DROP INDEX IF EXISTS ind_c1;" , - "DROP TABLE IF EXISTS chart_backup; " , - "CREATE TABLE chart_backup AS SELECT * FROM chart; " , - "DROP TABLE chart; ", - "CREATE TABLE IF NOT EXISTS chart(chart_id blob PRIMARY KEY, host_id blob, type text, id text, " - "name text, family text, context text, title text, unit text, plugin text, " - "module text, priority int, update_every int, chart_type int, memory_mode int, history_entries); ", - "INSERT INTO chart SELECT DISTINCT * FROM chart_backup; ", - "DROP TABLE chart_backup; " , - "CREATE INDEX IF NOT EXISTS ind_c1 on chart (host_id, id, type, name);", - "COMMIT TRANSACTION;", - NULL -}; - -static void rebuild_chart() -{ - int rc; - char *err_msg = NULL; - netdata_log_info("Rebuilding chart table"); - for (int i = 0; rebuild_chart_commands[i]; i++) { - netdata_log_info("Executing %s", rebuild_chart_commands[i]); - rc = sqlite3_exec_monitored(db_meta, rebuild_chart_commands[i], 0, 0, &err_msg); - if (rc != SQLITE_OK) { - error_report("SQLite error during database setup, rc = %d (%s)", rc, err_msg); - error_report("SQLite failed statement %s", rebuild_chart_commands[i]); - sqlite3_free(err_msg); - } - } -} - -const char *rebuild_dimension_commands[] = { - "BEGIN TRANSACTION; ", - "DROP INDEX IF EXISTS ind_d1;" , - "DROP TABLE IF EXISTS dimension_backup; " , - "CREATE TABLE dimension_backup AS SELECT * FROM dimension; " , - "DROP TABLE dimension; " , - "CREATE TABLE IF NOT EXISTS dimension(dim_id blob PRIMARY KEY, chart_id blob, id text, name text, " - "multiplier int, divisor int , algorithm int, options text);" , - "INSERT INTO dimension SELECT distinct * FROM dimension_backup; " , - "DROP TABLE dimension_backup; " , - "CREATE INDEX IF NOT EXISTS ind_d1 on dimension (chart_id, id, name);", - "COMMIT TRANSACTION;", - NULL -}; - -void rebuild_dimension() -{ - int rc; - char *err_msg = NULL; - - netdata_log_info("Rebuilding dimension table"); - for (int i = 0; rebuild_dimension_commands[i]; i++) { - netdata_log_info("Executing %s", rebuild_dimension_commands[i]); - rc = sqlite3_exec_monitored(db_meta, 
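
Aside: configure_sqlite_database(), introduced above, gathers the per-connection tuning that sql_init_database() used to inline: auto_vacuum, synchronous, journal_mode, temp_store, journal_size_limit and cache_size, followed by the PRAGMA user_version stamp. config_get()/config_get_number() and init_database_batch() are netdata helpers; a stripped-down equivalent against the bare SQLite C API, with the same defaults hard-coded, would look roughly like this (sketch only, not the project code):

    #include <sqlite3.h>
    #include <stdio.h>

    /* Sketch only: apply the same per-connection pragmas with fixed defaults
     * instead of netdata's config_get()/init_database_batch(); returns 0 on success. */
    static int sketch_configure(sqlite3 *db, int target_version)
    {
        const char *pragmas[] = {
            "PRAGMA auto_vacuum=INCREMENTAL",
            "PRAGMA synchronous=NORMAL",
            "PRAGMA journal_mode=WAL",
            "PRAGMA temp_store=MEMORY",
            "PRAGMA journal_size_limit=16777216",
            "PRAGMA cache_size=-2000",          /* negative value = size in KiB */
            NULL
        };

        for (int i = 0; pragmas[i]; i++)
            if (sqlite3_exec(db, pragmas[i], NULL, NULL, NULL) != SQLITE_OK)
                return 1;

        char sql[64];
        snprintf(sql, sizeof(sql), "PRAGMA user_version=%d", target_version);
        return sqlite3_exec(db, sql, NULL, NULL, NULL) != SQLITE_OK;
    }

Since the header now exports configure_sqlite_database(), other SQLite databases the agent opens can apply identical settings instead of duplicating the pragma block.
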
rebuild_dimension_commands[i], 0, 0, &err_msg); - if (rc != SQLITE_OK) { - error_report("SQLite error during database setup, rc = %d (%s)", rc, err_msg); - error_report("SQLite failed statement %s", rebuild_dimension_commands[i]); - sqlite3_free(err_msg); - } - } -} - -static int attempt_database_fix() -{ - netdata_log_info("Closing database and attempting to fix it"); - int rc = sqlite3_close(db_meta); - if (rc != SQLITE_OK) - error_report("Failed to close database, rc = %d", rc); - netdata_log_info("Attempting to fix database"); - db_meta = NULL; - return sql_init_database(DB_CHECK_FIX_DB | DB_CHECK_CONT, 0); -} - -int init_database_batch(sqlite3 *database, int rebuild, int init_type, const char *batch[]) +int init_database_batch(sqlite3 *database, const char *batch[]) { int rc; char *err_msg = NULL; for (int i = 0; batch[i]; i++) { - netdata_log_debug(D_METADATALOG, "Executing %s", batch[i]); rc = sqlite3_exec_monitored(database, batch[i], 0, 0, &err_msg); if (rc != SQLITE_OK) { - error_report("SQLite error during database %s, rc = %d (%s)", init_type ? "cleanup" : "setup", rc, err_msg); + error_report("SQLite error during database initialization, rc = %d (%s)", rc, err_msg); error_report("SQLite failed statement %s", batch[i]); + analytics_set_data_str(&analytics_data.netdata_fail_reason, sqlite3_errstr(sqlite3_extended_errcode(database))); sqlite3_free(err_msg); if (SQLITE_CORRUPT == rc) { - if (!rebuild) - return attempt_database_fix(); - rc = check_table_integrity(NULL); - if (rc) - error_report("Databse integrity errors reported"); + if (mark_database_to_recover(NULL, database)) + error_report("Database is corrupted will attempt to fix"); + return SQLITE_CORRUPT; } return 1; } @@ -389,64 +389,47 @@ int sql_init_database(db_check_action_type_t rebuild, int memory) char sqlite_database[FILENAME_MAX + 1]; int rc; - if (likely(!memory)) + if (likely(!memory)) { + snprintfz(sqlite_database, sizeof(sqlite_database) - 1, "%s/.netdata-meta.db.recover", netdata_configured_cache_dir); + rc = unlink(sqlite_database); snprintfz(sqlite_database, FILENAME_MAX, "%s/netdata-meta.db", netdata_configured_cache_dir); + + if (rc == 0 || (rebuild & DB_CHECK_RECOVER)) { + char new_sqlite_database[FILENAME_MAX + 1]; + snprintfz(new_sqlite_database, sizeof(new_sqlite_database) - 1, "%s/netdata-meta-recover.db", netdata_configured_cache_dir); + recover_database(sqlite_database, new_sqlite_database); + if (rebuild & DB_CHECK_RECOVER) + return 0; + } + } else strcpy(sqlite_database, ":memory:"); rc = sqlite3_open(sqlite_database, &db_meta); if (rc != SQLITE_OK) { error_report("Failed to initialize database at %s, due to \"%s\"", sqlite_database, sqlite3_errstr(rc)); + analytics_set_data_str(&analytics_data.netdata_fail_reason, sqlite3_errstr(sqlite3_extended_errcode(db_meta))); sqlite3_close(db_meta); db_meta = NULL; return 1; } - if (rebuild & (DB_CHECK_INTEGRITY | DB_CHECK_FIX_DB)) { - int errors_detected = 0; - if (!(rebuild & DB_CHECK_CONT)) - netdata_log_info("Running database check on %s", sqlite_database); - - if (check_table_integrity("chart")) { - errors_detected++; - if (rebuild & DB_CHECK_FIX_DB) - rebuild_chart(); - else - error_report("Errors reported -- run with -W sqlite-fix"); - } - - if (check_table_integrity("dimension")) { - errors_detected++; - if (rebuild & DB_CHECK_FIX_DB) - rebuild_dimension(); - else - error_report("Errors reported -- run with -W sqlite-fix"); - } - - if (!errors_detected) { - if (check_table_integrity(NULL)) - error_report("Errors reported"); - } - } - if (rebuild & 
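
Aside: the corruption handling above works in two stages. When a statement returns SQLITE_CORRUPT, mark_database_to_recover() only drops a ".netdata-meta.db.recover" marker file next to the database. The actual repair happens on the next startup: sql_init_database() unlinks the marker, and if the unlink succeeded (or DB_CHECK_RECOVER was requested explicitly) it calls recover_database(), which uses SQLite's recovery extension to rebuild the file into netdata-meta-recover.db and then renames the recovered copy over the original. A minimal sketch of that extension's lifecycle, using the same sqlite3recover.h API as the hunk with simplified error handling:

    #include <sqlite3.h>
    #include "sqlite3recover.h"   /* same recovery extension header the hunk includes */

    /* Sketch only: rebuild src_path into dst_path with the recover extension;
     * the caller renames dst_path over src_path only if this returns 0. */
    static int sketch_recover(const char *src_path, const char *dst_path)
    {
        sqlite3 *db = NULL;
        int rc = SQLITE_ERROR;

        if (sqlite3_open(src_path, &db) != SQLITE_OK)
            return 1;

        sqlite3_recover *recover = sqlite3_recover_init(db, "main", dst_path);
        if (recover) {
            (void) sqlite3_recover_run(recover);   /* copy out whatever is readable  */
            rc = sqlite3_recover_finish(recover);  /* final status, frees the handle */
        }

        sqlite3_close(db);
        return rc != SQLITE_OK;
    }

Only after a successful finish is the recovered file renamed over the corrupted one, which leaves the original untouched if recovery fails part-way.
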
DB_CHECK_RECLAIM_SPACE) { - if (!(rebuild & DB_CHECK_CONT)) - netdata_log_info("Reclaiming space of %s", sqlite_database); - rc = sqlite3_exec_monitored(db_meta, "VACUUM;", 0, 0, &err_msg); + netdata_log_info("Reclaiming space of %s", sqlite_database); + rc = sqlite3_exec_monitored(db_meta, "VACUUM", 0, 0, &err_msg); if (rc != SQLITE_OK) { error_report("Failed to execute VACUUM rc = %d (%s)", rc, err_msg); sqlite3_free(err_msg); } - } - - if (rebuild && !(rebuild & DB_CHECK_CONT)) + else { + (void) db_execute(db_meta, "select count(*) from sqlite_master limit 0"); + (void) sqlite3_close(db_meta); + } return 1; + } netdata_log_info("SQLite database %s initialization", sqlite_database); - char buf[1024 + 1] = ""; - const char *list[2] = { buf, NULL }; - rc = sqlite3_create_function(db_meta, "u2h", 1, SQLITE_ANY | SQLITE_DETERMINISTIC, 0, sqlite_uuid_parse, 0, 0); if (unlikely(rc != SQLITE_OK)) error_report("Failed to register internal u2h function"); @@ -464,44 +447,13 @@ int sql_init_database(db_check_action_type_t rebuild, int memory) if (likely(!memory)) target_version = perform_database_migration(db_meta, DB_METADATA_VERSION); - // https://www.sqlite.org/pragma.html#pragma_auto_vacuum - // PRAGMA schema.auto_vacuum = 0 | NONE | 1 | FULL | 2 | INCREMENTAL; - snprintfz(buf, 1024, "PRAGMA auto_vacuum=%s;", config_get(CONFIG_SECTION_SQLITE, "auto vacuum", "INCREMENTAL")); - if(init_database_batch(db_meta, rebuild, 0, list)) return 1; - - // https://www.sqlite.org/pragma.html#pragma_synchronous - // PRAGMA schema.synchronous = 0 | OFF | 1 | NORMAL | 2 | FULL | 3 | EXTRA; - snprintfz(buf, 1024, "PRAGMA synchronous=%s;", config_get(CONFIG_SECTION_SQLITE, "synchronous", "NORMAL")); - if(init_database_batch(db_meta, rebuild, 0, list)) return 1; - - // https://www.sqlite.org/pragma.html#pragma_journal_mode - // PRAGMA schema.journal_mode = DELETE | TRUNCATE | PERSIST | MEMORY | WAL | OFF - snprintfz(buf, 1024, "PRAGMA journal_mode=%s;", config_get(CONFIG_SECTION_SQLITE, "journal mode", "WAL")); - if(init_database_batch(db_meta, rebuild, 0, list)) return 1; - - // https://www.sqlite.org/pragma.html#pragma_temp_store - // PRAGMA temp_store = 0 | DEFAULT | 1 | FILE | 2 | MEMORY; - snprintfz(buf, 1024, "PRAGMA temp_store=%s;", config_get(CONFIG_SECTION_SQLITE, "temp store", "MEMORY")); - if(init_database_batch(db_meta, rebuild, 0, list)) return 1; - - // https://www.sqlite.org/pragma.html#pragma_journal_size_limit - // PRAGMA schema.journal_size_limit = N ; - snprintfz(buf, 1024, "PRAGMA journal_size_limit=%lld;", config_get_number(CONFIG_SECTION_SQLITE, "journal size limit", 16777216)); - if(init_database_batch(db_meta, rebuild, 0, list)) return 1; - - // https://www.sqlite.org/pragma.html#pragma_cache_size - // PRAGMA schema.cache_size = pages; - // PRAGMA schema.cache_size = -kibibytes; - snprintfz(buf, 1024, "PRAGMA cache_size=%lld;", config_get_number(CONFIG_SECTION_SQLITE, "cache size", -2000)); - if(init_database_batch(db_meta, rebuild, 0, list)) return 1; - - snprintfz(buf, 1024, "PRAGMA user_version=%d;", target_version); - if(init_database_batch(db_meta, rebuild, 0, list)) return 1; + if (configure_sqlite_database(db_meta, target_version)) + return 1; - if (init_database_batch(db_meta, rebuild, 0, &database_config[0])) + if (init_database_batch(db_meta, &database_config[0])) return 1; - if (init_database_batch(db_meta, rebuild, 0, &database_cleanup[0])) + if (init_database_batch(db_meta, &database_cleanup[0])) return 1; netdata_log_info("SQLite database initialization completed"); @@ -525,6 
+477,9 @@ void sql_close_database(void) add_stmt_to_list(NULL); + (void) db_execute(db_meta, "PRAGMA analysis_limit=1000"); + (void) db_execute(db_meta, "PRAGMA optimize"); + rc = sqlite3_close_v2(db_meta); if (unlikely(rc != SQLITE_OK)) error_report("Error %d while closing the SQLite database, %s", rc, sqlite3_errstr(rc)); @@ -543,7 +498,7 @@ int exec_statement_with_uuid(const char *sql, uuid_t *uuid) rc = sqlite3_bind_blob(res, 1, uuid, sizeof(*uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind host parameter to %s, rc = %d", sql, rc); + error_report("Failed to bind UUID parameter to %s, rc = %d", sql, rc); goto skip; } @@ -565,22 +520,25 @@ int db_execute(sqlite3 *db, const char *cmd) { int rc; int cnt = 0; + while (cnt < SQL_MAX_RETRY) { char *err_msg; rc = sqlite3_exec_monitored(db, cmd, 0, 0, &err_msg); - if (rc != SQLITE_OK) { - error_report("Failed to execute '%s', rc = %d (%s) -- attempt %d", cmd, rc, err_msg, cnt); - sqlite3_free(err_msg); - if (likely(rc == SQLITE_BUSY || rc == SQLITE_LOCKED)) { - usleep(SQLITE_INSERT_DELAY * USEC_PER_MS); - } - else - break; - } - else + if (likely(rc == SQLITE_OK)) break; ++cnt; + error_report("Failed to execute '%s', rc = %d (%s) -- attempt %d", cmd, rc, err_msg, cnt); + sqlite3_free(err_msg); + + if (likely(rc == SQLITE_BUSY || rc == SQLITE_LOCKED)) { + usleep(SQLITE_INSERT_DELAY * USEC_PER_MS); + continue; + } + + if (rc == SQLITE_CORRUPT) + mark_database_to_recover(NULL, db); + break; } return (rc != SQLITE_OK); } @@ -596,7 +554,7 @@ static inline void set_host_node_id(RRDHOST *host, uuid_t *node_id) return; } - struct aclk_sync_host_config *wc = host->aclk_sync_host_config; + struct aclk_sync_cfg_t *wc = host->aclk_config; if (unlikely(!host->node_id)) { uuid_t *t = mallocz(sizeof(*host->node_id)); @@ -613,7 +571,7 @@ static inline void set_host_node_id(RRDHOST *host, uuid_t *node_id) uuid_unparse_lower(*node_id, wc->node_id); } -#define SQL_UPDATE_NODE_ID "update node_instance set node_id = @node_id where host_id = @host_id;" +#define SQL_UPDATE_NODE_ID "UPDATE node_instance SET node_id = @node_id WHERE host_id = @host_id" int update_node_id(uuid_t *host_id, uuid_t *node_id) { @@ -665,45 +623,7 @@ int update_node_id(uuid_t *host_id, uuid_t *node_id) return rc - 1; } -#define SQL_SELECT_HOST_BY_NODE_ID "select host_id from node_instance where node_id = @node_id;" - -int get_host_id(uuid_t *node_id, uuid_t *host_id) -{ - static __thread sqlite3_stmt *res = NULL; - int rc; - - if (unlikely(!db_meta)) { - if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) - error_report("Database has not been initialized"); - return 1; - } - - if (unlikely(!res)) { - rc = prepare_statement(db_meta, SQL_SELECT_HOST_BY_NODE_ID, &res); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to prepare statement to select node instance information for a node"); - return 1; - } - } - - rc = sqlite3_bind_blob(res, 1, node_id, sizeof(*node_id), SQLITE_STATIC); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind host_id parameter to select node instance information"); - goto failed; - } - - rc = sqlite3_step_monitored(res); - if (likely(rc == SQLITE_ROW && host_id)) - uuid_copy(*host_id, *((uuid_t *) sqlite3_column_blob(res, 0))); - -failed: - if (unlikely(sqlite3_reset(res) != SQLITE_OK)) - error_report("Failed to reset the prepared statement when selecting node instance information"); - - return (rc == SQLITE_ROW) ? 
0 : -1; -} - -#define SQL_SELECT_NODE_ID "SELECT node_id FROM node_instance WHERE host_id = @host_id AND node_id IS NOT NULL;" +#define SQL_SELECT_NODE_ID "SELECT node_id FROM node_instance WHERE host_id = @host_id AND node_id IS NOT NULL" int get_node_id(uuid_t *host_id, uuid_t *node_id) { @@ -739,8 +659,9 @@ int get_node_id(uuid_t *host_id, uuid_t *node_id) return (rc == SQLITE_ROW) ? 0 : -1; } -#define SQL_INVALIDATE_NODE_INSTANCES "UPDATE node_instance SET node_id = NULL WHERE EXISTS " \ - "(SELECT host_id FROM node_instance WHERE host_id = @host_id AND (@claim_id IS NULL OR claim_id <> @claim_id));" +#define SQL_INVALIDATE_NODE_INSTANCES \ + "UPDATE node_instance SET node_id = NULL WHERE EXISTS " \ + "(SELECT host_id FROM node_instance WHERE host_id = @host_id AND (@claim_id IS NULL OR claim_id <> @claim_id))" void invalidate_node_instances(uuid_t *host_id, uuid_t *claim_id) { @@ -784,8 +705,9 @@ void invalidate_node_instances(uuid_t *host_id, uuid_t *claim_id) error_report("Failed to finalize the prepared statement when invalidating node instance information"); } -#define SQL_GET_NODE_INSTANCE_LIST "SELECT ni.node_id, ni.host_id, h.hostname " \ - "FROM node_instance ni, host h WHERE ni.host_id = h.host_id AND h.hops >=0;" +#define SQL_GET_NODE_INSTANCE_LIST \ + "SELECT ni.node_id, ni.host_id, h.hostname " \ + "FROM node_instance ni, host h WHERE ni.host_id = h.host_id AND h.hops >=0" struct node_instance_list *get_node_list(void) { @@ -826,6 +748,8 @@ struct node_instance_list *get_node_list(void) uuid_t *host_id = (uuid_t *)sqlite3_column_blob(res, 1); uuid_unparse_lower(*host_id, host_guid); RRDHOST *host = rrdhost_find_by_guid(host_guid); + if (!host) + continue; if (rrdhost_flag_check(host, RRDHOST_FLAG_PENDING_CONTEXT_LOAD)) { netdata_log_info("ACLK: 'host:%s' skipping get node list because context is initializing", rrdhost_hostname(host)); continue; @@ -852,11 +776,11 @@ struct node_instance_list *get_node_list(void) return node_list; }; -#define SQL_GET_HOST_NODE_ID "select node_id from node_instance where host_id = @host_id;" +#define SQL_GET_HOST_NODE_ID "SELECT node_id FROM node_instance WHERE host_id = @host_id" void sql_load_node_id(RRDHOST *host) { - static __thread sqlite3_stmt *res = NULL; + sqlite3_stmt *res = NULL; int rc; if (unlikely(!db_meta)) { @@ -865,13 +789,11 @@ void sql_load_node_id(RRDHOST *host) return; } - if (unlikely(!res)) { - rc = prepare_statement(db_meta, SQL_GET_HOST_NODE_ID, &res); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to prepare statement to fetch node id"); - return; - }; - } + rc = sqlite3_prepare_v2(db_meta, SQL_GET_HOST_NODE_ID, -1, &res, 0); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement to fetch node id"); + return; + }; rc = sqlite3_bind_blob(res, 1, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { @@ -888,12 +810,12 @@ void sql_load_node_id(RRDHOST *host) } failed: - if (unlikely(sqlite3_reset(res) != SQLITE_OK)) - error_report("Failed to reset the prepared statement when loading node instance information"); + if (unlikely(sqlite3_finalize(res) != SQLITE_OK)) + error_report("Failed to finalize the prepared statement when loading node instance information"); }; -#define SELECT_HOST_INFO "SELECT system_key, system_value FROM host_info WHERE host_id = @host_id;" +#define SELECT_HOST_INFO "SELECT system_key, system_value FROM host_info WHERE host_id = @host_id" void sql_build_host_system_info(uuid_t *host_id, struct rrdhost_system_info *system_info) 
{ @@ -924,13 +846,13 @@ void sql_build_host_system_info(uuid_t *host_id, struct rrdhost_system_info *sys } #define SELECT_HOST_LABELS "SELECT label_key, label_value, source_type FROM host_label WHERE host_id = @host_id " \ - "AND label_key IS NOT NULL AND label_value IS NOT NULL;" + "AND label_key IS NOT NULL AND label_value IS NOT NULL" -DICTIONARY *sql_load_host_labels(uuid_t *host_id) +RRDLABELS *sql_load_host_labels(uuid_t *host_id) { int rc; - DICTIONARY *labels = NULL; + RRDLABELS *labels = NULL; sqlite3_stmt *res = NULL; rc = sqlite3_prepare_v2(db_meta, SELECT_HOST_LABELS, -1, &res, 0); @@ -948,11 +870,7 @@ DICTIONARY *sql_load_host_labels(uuid_t *host_id) labels = rrdlabels_create(); while (sqlite3_step_monitored(res) == SQLITE_ROW) { - rrdlabels_add( - labels, - (const char *)sqlite3_column_text(res, 0), - (const char *)sqlite3_column_text(res, 1), - sqlite3_column_int(res, 2)); + rrdlabels_add(labels, (const char *)sqlite3_column_text(res, 0), (const char *)sqlite3_column_text(res, 1), sqlite3_column_int(res, 2)); } skip: @@ -984,7 +902,7 @@ int sql_metadata_cache_stats(int op) return count; } -#define SQL_DROP_TABLE "DROP table %s;" +#define SQL_DROP_TABLE "DROP table %s" void sql_drop_table(const char *table) { @@ -992,7 +910,7 @@ void sql_drop_table(const char *table) return; char wstr[255]; - snprintfz(wstr, 254, SQL_DROP_TABLE, table); + snprintfz(wstr, sizeof(wstr) - 1, SQL_DROP_TABLE, table); int rc = sqlite3_exec_monitored(db_meta, wstr, 0, 0, NULL); if (rc != SQLITE_OK) { diff --git a/database/sqlite/sqlite_functions.h b/database/sqlite/sqlite_functions.h index 407ed1eff7daf5..9cd1f7ad473f27 100644 --- a/database/sqlite/sqlite_functions.h +++ b/database/sqlite/sqlite_functions.h @@ -6,6 +6,8 @@ #include "daemon/common.h" #include "sqlite3.h" +void analytics_set_data_str(char **name, const char *value); + // return a node list struct node_instance_list { uuid_t node_id; @@ -17,11 +19,10 @@ struct node_instance_list { }; typedef enum db_check_action_type { - DB_CHECK_NONE = 0x0000, - DB_CHECK_INTEGRITY = 0x0001, - DB_CHECK_FIX_DB = 0x0002, - DB_CHECK_RECLAIM_SPACE = 0x0004, - DB_CHECK_CONT = 0x00008 + DB_CHECK_NONE = (1 << 0), + DB_CHECK_RECLAIM_SPACE = (1 << 1), + DB_CHECK_CONT = (1 << 2), + DB_CHECK_RECOVER = (1 << 3), } db_check_action_type_t; #define SQL_MAX_RETRY (100) @@ -46,9 +47,10 @@ SQLITE_API int sqlite3_exec_monitored( ); // Initialization and shutdown -int init_database_batch(sqlite3 *database, int rebuild, int init_type, const char *batch[]); +int init_database_batch(sqlite3 *database, const char *batch[]); int sql_init_database(db_check_action_type_t rebuild, int memory); void sql_close_database(void); +int configure_sqlite_database(sqlite3 *database, int target_version); // Helpers int bind_text_null(sqlite3_stmt *res, int position, const char *text, bool can_be_null); @@ -60,14 +62,12 @@ void initialize_thread_key_pool(void); // Look up functions int get_node_id(uuid_t *host_id, uuid_t *node_id); -int get_host_id(uuid_t *node_id, uuid_t *host_id); struct node_instance_list *get_node_list(void); void sql_load_node_id(RRDHOST *host); -char *get_hostname_by_node_id(char *node_id); // Help build archived hosts in memory when agent starts void sql_build_host_system_info(uuid_t *host_id, struct rrdhost_system_info *system_info); -DICTIONARY *sql_load_host_labels(uuid_t *host_id); +RRDLABELS *sql_load_host_labels(uuid_t *host_id); // TODO: move to metadata int update_node_id(uuid_t *host_id, uuid_t *node_id); diff --git a/database/sqlite/sqlite_health.c 
b/database/sqlite/sqlite_health.c index 9c103f09839143..27f37596bb11d2 100644 --- a/database/sqlite/sqlite_health.c +++ b/database/sqlite/sqlite_health.c @@ -5,13 +5,26 @@ #include "sqlite_db_migration.h" #define MAX_HEALTH_SQL_SIZE 2048 -#define sqlite3_bind_string_or_null(res,key,param) ((key) ? sqlite3_bind_text(res, param, string2str(key), -1, SQLITE_STATIC) : sqlite3_bind_null(res, param)) +#define SQLITE3_BIND_STRING_OR_NULL(res, key, param) \ + ((key) ? sqlite3_bind_text(res, param, string2str(key), -1, SQLITE_STATIC) : sqlite3_bind_null(res, param)) + +#define SQLITE3_COLUMN_STRINGDUP_OR_NULL(res, param) \ + ({ \ + int _param = (param); \ + sqlite3_column_type((res), (_param)) != SQLITE_NULL ? \ + string_strdupz((char *)sqlite3_column_text((res), (_param))) : \ + NULL; \ + }) /* Health related SQL queries Updates an entry in the table */ -#define SQL_UPDATE_HEALTH_LOG "UPDATE health_log_detail set updated_by_id = ?, flags = ?, exec_run_timestamp = ?, exec_code = ? where unique_id = ? AND alarm_id = ? and transition_id = ?;" -void sql_health_alarm_log_update(RRDHOST *host, ALARM_ENTRY *ae) { +#define SQL_UPDATE_HEALTH_LOG \ + "UPDATE health_log_detail SET updated_by_id = @updated_by, flags = @flags, exec_run_timestamp = @exec_time, " \ + "exec_code = @exec_code WHERE unique_id = @unique_id AND alarm_id = @alarm_id AND transition_id = @transaction" + +static void sql_health_alarm_log_update(RRDHOST *host, ALARM_ENTRY *ae) +{ sqlite3_stmt *res = NULL; int rc; @@ -82,17 +95,23 @@ void sql_health_alarm_log_update(RRDHOST *host, ALARM_ENTRY *ae) { /* Health related SQL queries Inserts an entry in the table */ -#define SQL_INSERT_HEALTH_LOG "INSERT INTO health_log (host_id, alarm_id, " \ - "config_hash_id, name, chart, family, exec, recipient, units, chart_context, last_transition_id, chart_name) " \ - "VALUES (?,?,?,?,?,?,?,?,?,?,?,?) 
" \ - "ON CONFLICT (host_id, alarm_id) DO UPDATE SET last_transition_id = excluded.last_transition_id, " \ - "chart_name = excluded.chart_name RETURNING health_log_id; " - -#define SQL_INSERT_HEALTH_LOG_DETAIL "INSERT INTO health_log_detail (health_log_id, unique_id, alarm_id, alarm_event_id, " \ - "updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, delay_up_to_timestamp, " \ - "info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, transition_id, global_id) " \ - "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,@global_id); " -void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae) { + +#define SQL_INSERT_HEALTH_LOG \ + "INSERT INTO health_log (host_id, alarm_id, " \ + "config_hash_id, name, chart, exec, recipient, units, chart_context, last_transition_id, chart_name) " \ + "VALUES (@host_id,@alarm_id, @config_hash_id,@name,@chart,@exec,@recipient,@units,@chart_context," \ + "@last_transition_id,@chart_name) ON CONFLICT (host_id, alarm_id) DO UPDATE " \ + "SET last_transition_id = excluded.last_transition_id, chart_name = excluded.chart_name RETURNING health_log_id" + +#define SQL_INSERT_HEALTH_LOG_DETAIL \ + "INSERT INTO health_log_detail (health_log_id, unique_id, alarm_id, alarm_event_id, " \ + "updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, delay_up_to_timestamp, " \ + "info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, transition_id, global_id, summary) " \ + "VALUES (@health_log_id,@unique_id,@alarm_id,@alarm_event_id,@updated_by_id,@updates_id,@when_key,@duration," \ + "@non_clear_duration,@flags,@exec_run_timestamp,@delay_up_to_timestamp, @info,@exec_code,@new_status,@old_status," \ + "@delay,@new_value,@old_value,@last_repeat,@transition_id,@global_id,@summary)" + +static void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae) { sqlite3_stmt *res = NULL; int rc; uint64_t health_log_id = 0; @@ -127,55 +146,49 @@ void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae) { goto failed; } - rc = sqlite3_bind_string_or_null(res, ae->name, 4); + rc = SQLITE3_BIND_STRING_OR_NULL(res, ae->name, 4); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind name parameter for SQL_INSERT_HEALTH_LOG"); goto failed; } - rc = sqlite3_bind_string_or_null(res, ae->chart, 5); + rc = SQLITE3_BIND_STRING_OR_NULL(res, ae->chart, 5); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind chart parameter for SQL_INSERT_HEALTH_LOG"); goto failed; } - rc = sqlite3_bind_string_or_null(res, ae->family, 6); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind family parameter for SQL_INSERT_HEALTH_LOG"); - goto failed; - } - - rc = sqlite3_bind_string_or_null(res, ae->exec, 7); + rc = SQLITE3_BIND_STRING_OR_NULL(res, ae->exec, 6); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind exec parameter for SQL_INSERT_HEALTH_LOG"); goto failed; } - rc = sqlite3_bind_string_or_null(res, ae->recipient, 8); + rc = SQLITE3_BIND_STRING_OR_NULL(res, ae->recipient, 7); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind recipient parameter for SQL_INSERT_HEALTH_LOG"); goto failed; } - rc = sqlite3_bind_string_or_null(res, ae->units, 9); + rc = SQLITE3_BIND_STRING_OR_NULL(res, ae->units, 8); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind host_id parameter to store node instance information"); goto failed; } - rc = sqlite3_bind_string_or_null(res, ae->chart_context, 10); + rc = 
SQLITE3_BIND_STRING_OR_NULL(res, ae->chart_context, 9); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind chart_context parameter for SQL_INSERT_HEALTH_LOG"); goto failed; } - rc = sqlite3_bind_blob(res, 11, &ae->transition_id, sizeof(ae->transition_id), SQLITE_STATIC); + rc = sqlite3_bind_blob(res, 10, &ae->transition_id, sizeof(ae->transition_id), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind transition_id parameter for SQL_INSERT_HEALTH_LOG"); goto failed; } - rc = sqlite3_bind_string_or_null(res, ae->chart_name, 12); + rc = SQLITE3_BIND_STRING_OR_NULL(res, ae->chart_name, 11); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind chart_name parameter for SQL_INSERT_HEALTH_LOG"); goto failed; @@ -271,7 +284,7 @@ void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae) { goto failed; } - rc = sqlite3_bind_string_or_null(res, ae->info, 13); + rc = SQLITE3_BIND_STRING_OR_NULL(res, ae->info, 13); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind info parameter for SQL_INSERT_HEALTH_LOG_DETAIL"); goto failed; @@ -331,6 +344,12 @@ void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae) { goto failed; } + rc = SQLITE3_BIND_STRING_OR_NULL(res, ae->summary, 23); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind summary parameter for SQL_INSERT_HEALTH_LOG_DETAIL"); + goto failed; + } + rc = execute_insert(res); if (unlikely(rc != SQLITE_DONE)) { error_report("HEALTH [%s]: Failed to execute SQL_INSERT_HEALTH_LOG_DETAIL, rc = %d", rrdhost_hostname(host), rc); @@ -353,7 +372,7 @@ void sql_health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae) sql_health_alarm_log_insert(host, ae); #ifdef ENABLE_ACLK if (netdata_cloud_enabled) { - sql_queue_alarm_to_aclk(host, ae, 0); + sql_queue_alarm_to_aclk(host, ae, false); } #endif } @@ -362,46 +381,70 @@ void sql_health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae) /* Health related SQL queries Get a count of rows from health log table */ -#define SQL_COUNT_HEALTH_LOG_DETAIL "SELECT count(1) FROM health_log_detail hld, health_log hl where hl.host_id = @host_id and hl.health_log_id = hld.health_log_id;" -void sql_health_alarm_log_count(RRDHOST *host) { +#define SQL_COUNT_HEALTH_LOG_DETAIL "SELECT count(1) FROM health_log_detail hld, health_log hl " \ + "where hl.host_id = @host_id and hl.health_log_id = hld.health_log_id" + +static int sql_health_alarm_log_count(RRDHOST *host) { sqlite3_stmt *res = NULL; int rc; if (unlikely(!db_meta)) { if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) error_report("Database has not been initialized"); - return; + return -1; } + int entries_in_db = -1; + rc = sqlite3_prepare_v2(db_meta, SQL_COUNT_HEALTH_LOG_DETAIL, -1, &res, 0); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to prepare statement to count health log entries from db"); - return; + goto done; } rc = sqlite3_bind_blob(res, 1, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind host_id for SQL_COUNT_HEALTH_LOG."); - sqlite3_finalize(res); - return; + goto done; } rc = sqlite3_step_monitored(res); if (likely(rc == SQLITE_ROW)) - host->health.health_log_entries_written = (size_t) sqlite3_column_int64(res, 0); + entries_in_db = (int) sqlite3_column_int64(res, 0); +done: rc = sqlite3_finalize(res); if (unlikely(rc != SQLITE_OK)) error_report("Failed to finalize the prepared statement to count health log entries from db"); - netdata_log_info("HEALTH [%s]: Table health_log_detail contains %lu 
entries.", rrdhost_hostname(host), (unsigned long int) host->health.health_log_entries_written); + return entries_in_db; } -/* Health related SQL queries - Cleans up the health_log_detail table on a non-claimed host -*/ -#define SQL_CLEANUP_HEALTH_LOG_DETAIL_NOT_CLAIMED "DELETE FROM health_log_detail WHERE health_log_id IN (SELECT health_log_id FROM health_log WHERE host_id = ?1) AND when_key + ?2 < unixepoch() AND updated_by_id <> 0 AND transition_id NOT IN (SELECT last_transition_id FROM health_log hl WHERE hl.host_id = ?3);" -void sql_health_alarm_log_cleanup_not_claimed(RRDHOST *host) { +/* + * + * Health related SQL queries + * Cleans up the health_log_detail table on a non-claimed or claimed host + * + */ + +#define SQL_CLEANUP_HEALTH_LOG_DETAIL_NOT_CLAIMED \ + "DELETE FROM health_log_detail WHERE health_log_id IN " \ + "(SELECT health_log_id FROM health_log WHERE host_id = @host_id) AND when_key < UNIXEPOCH() - @history " \ + "AND updated_by_id <> 0 AND transition_id NOT IN " \ + "(SELECT last_transition_id FROM health_log hl WHERE hl.host_id = @host_id)" + +#define SQL_CLEANUP_HEALTH_LOG_DETAIL_CLAIMED(guid) \ + "DELETE from health_log_detail WHERE unique_id NOT IN " \ + "(SELECT filtered_alert_unique_id FROM aclk_alert_%s) " \ + "AND unique_id IN (SELECT hld.unique_id FROM health_log hl, health_log_detail hld WHERE " \ + "hl.host_id = @host_id AND hl.health_log_id = hld.health_log_id) " \ + "AND health_log_id IN (SELECT health_log_id FROM health_log WHERE host_id = @host_id) " \ + "AND when_key < unixepoch() - @history " \ + "AND updated_by_id <> 0 AND transition_id NOT IN " \ + "(SELECT last_transition_id FROM health_log hl WHERE hl.host_id = @host_id)", \ + guid + +void sql_health_alarm_log_cleanup(RRDHOST *host, bool claimed) { sqlite3_stmt *res = NULL; int rc; char command[MAX_HEALTH_SQL_SIZE + 1]; @@ -414,77 +457,18 @@ void sql_health_alarm_log_cleanup_not_claimed(RRDHOST *host) { char uuid_str[UUID_STR_LEN]; uuid_unparse_lower_fix(&host->host_uuid, uuid_str); + snprintfz(command, sizeof(command) - 1, "aclk_alert_%s", uuid_str); - rc = sqlite3_prepare_v2(db_meta, SQL_CLEANUP_HEALTH_LOG_DETAIL_NOT_CLAIMED, -1, &res, 0); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to prepare statement to cleanup health log detail table (un-claimed)"); - return; - } - - rc = sqlite3_bind_blob(res, 1, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind host_id for SQL_CLEANUP_HEALTH_LOG_NOT_CLAIMED."); - sqlite3_finalize(res); - return; - } - - rc = sqlite3_bind_int64(res, 2, (sqlite3_int64)host->health_log.health_log_history); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind health log history for SQL_CLEANUP_HEALTH_LOG_NOT_CLAIMED."); - sqlite3_finalize(res); - return; - } - - rc = sqlite3_bind_blob(res, 3, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind host_id for SQL_CLEANUP_HEALTH_LOG_NOT_CLAIMED."); - sqlite3_finalize(res); - return; - } - - rc = sqlite3_step_monitored(res); - if (unlikely(rc != SQLITE_DONE)) - error_report("Failed to cleanup health log detail table, rc = %d", rc); - - rc = sqlite3_finalize(res); - if (unlikely(rc != SQLITE_OK)) - error_report("Failed to finalize the prepared statement to cleanup health log detail table (un-claimed)"); + bool aclk_table_exists = table_exists_in_database(db_meta, command); - sql_health_alarm_log_count(host); + char *sql = 
SQL_CLEANUP_HEALTH_LOG_DETAIL_NOT_CLAIMED; - snprintfz(command, MAX_HEALTH_SQL_SIZE, "aclk_alert_%s", uuid_str); - if (unlikely(table_exists_in_database(command))) { - sql_aclk_alert_clean_dead_entries(host); + if (claimed && aclk_table_exists) { + snprintfz(command, sizeof(command) - 1, SQL_CLEANUP_HEALTH_LOG_DETAIL_CLAIMED(uuid_str)); + sql = command; } -} - -/* Health related SQL queries - Cleans up the health_log_detail table on a claimed host -*/ -#define SQL_CLEANUP_HEALTH_LOG_DETAIL_CLAIMED(guid) "DELETE from health_log_detail WHERE unique_id NOT IN (SELECT filtered_alert_unique_id FROM aclk_alert_%s) AND unique_id IN (SELECT hld.unique_id FROM health_log hl, health_log_detail hld WHERE hl.host_id = ?1 AND hl.health_log_id = hld.health_log_id) AND health_log_id IN (SELECT health_log_id FROM health_log WHERE host_id = ?2) AND when_key + ?3 < unixepoch() AND updated_by_id <> 0 AND transition_id NOT IN (SELECT last_transition_id FROM health_log hl WHERE hl.host_id = ?4);", guid -void sql_health_alarm_log_cleanup_claimed(RRDHOST *host) { - sqlite3_stmt *res = NULL; - int rc; - char command[MAX_HEALTH_SQL_SIZE + 1]; - if (unlikely(!db_meta)) { - if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) - error_report("Database has not been initialized"); - return; - } - - char uuid_str[UUID_STR_LEN]; - uuid_unparse_lower_fix(&host->host_uuid, uuid_str); - snprintfz(command, MAX_HEALTH_SQL_SIZE, "aclk_alert_%s", uuid_str); - - if (!table_exists_in_database(command)) { - sql_health_alarm_log_cleanup_not_claimed(host); - return; - } - - snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_CLEANUP_HEALTH_LOG_DETAIL_CLAIMED(uuid_str)); - - rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0); + rc = sqlite3_prepare_v2(db_meta, sql, -1, &res, 0); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to prepare statement to cleanup health log detail table (claimed)"); return; @@ -492,60 +476,53 @@ void sql_health_alarm_log_cleanup_claimed(RRDHOST *host) { rc = sqlite3_bind_blob(res, 1, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind first host_id for SQL_CLEANUP_HEALTH_LOG_CLAIMED."); - sqlite3_finalize(res); - return; - } - - rc = sqlite3_bind_blob(res, 2, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind second host_id for SQL_CLEANUP_HEALTH_LOG_CLAIMED."); - sqlite3_finalize(res); - return; - } - - rc = sqlite3_bind_int64(res, 3, (sqlite3_int64)host->health_log.health_log_history); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind health log history for SQL_CLEANUP_HEALTH_LOG_CLAIMED."); - sqlite3_finalize(res); - return; + error_report("Failed to bind first host_id for sql_health_alarm_log_cleanup."); + goto done; } - rc = sqlite3_bind_blob(res, 4, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); + rc = sqlite3_bind_int64(res, 2, (sqlite3_int64)host->health_log.health_log_history); if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind second host_id for SQL_CLEANUP_HEALTH_LOG_CLAIMED."); - sqlite3_finalize(res); - return; + error_report("Failed to bind health log history for sql_health_alarm_log_cleanup."); + goto done; } rc = sqlite3_step_monitored(res); if (unlikely(rc != SQLITE_DONE)) error_report("Failed to cleanup health log detail table, rc = %d", rc); + int rows = sql_health_alarm_log_count(host); + if (rows >= 0) + host->health.health_log_entries_written = rows; + + if (aclk_table_exists) + 
sql_aclk_alert_clean_dead_entries(host); + +done: rc = sqlite3_finalize(res); if (unlikely(rc != SQLITE_OK)) error_report("Failed to finalize the prepared statement to cleanup health log detail table (claimed)"); - - sql_health_alarm_log_count(host); - - sql_aclk_alert_clean_dead_entries(host); - } -/* Health related SQL queries - Cleans up the health_log table. -*/ -void sql_health_alarm_log_cleanup(RRDHOST *host) { - if (!claimed()) { - sql_health_alarm_log_cleanup_not_claimed(host); - } else - sql_health_alarm_log_cleanup_claimed(host); -} - -#define SQL_INJECT_REMOVED "insert into health_log_detail (health_log_id, unique_id, alarm_id, alarm_event_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, delay_up_to_timestamp, info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, transition_id, global_id) select health_log_id, ?1, ?2, ?3, 0, ?4, unixepoch(), 0, 0, flags, exec_run_timestamp, unixepoch(), info, exec_code, -2, new_status, delay, NULL, new_value, 0, ?5, now_usec(0) from health_log_detail where unique_id = ?6 and transition_id = ?7;" -#define SQL_INJECT_REMOVED_UPDATE_DETAIL "update health_log_detail set flags = flags | ?1, updated_by_id = ?2 where unique_id = ?3 and transition_id = ?4;" -#define SQL_INJECT_REMOVED_UPDATE_LOG "update health_log set last_transition_id = ?1 where alarm_id = ?2 and last_transition_id = ?3 and host_id = ?4;" -void sql_inject_removed_status(RRDHOST *host, uint32_t alarm_id, uint32_t alarm_event_id, uint32_t unique_id, uint32_t max_unique_id, uuid_t *prev_transition_id) +#define SQL_INJECT_REMOVED \ + "INSERT INTO health_log_detail (health_log_id, unique_id, alarm_id, alarm_event_id, updated_by_id, updates_id, when_key, " \ + "duration, non_clear_duration, flags, exec_run_timestamp, delay_up_to_timestamp, info, exec_code, new_status, old_status, " \ + "delay, new_value, old_value, last_repeat, transition_id, global_id, summary) " \ + "SELECT health_log_id, ?1, ?2, ?3, 0, ?4, UNIXEPOCH(), 0, 0, flags, exec_run_timestamp, UNIXEPOCH(), info, exec_code, -2, " \ + "new_status, delay, NULL, new_value, 0, ?5, NOW_USEC(0), summary FROM health_log_detail WHERE unique_id = ?6 AND transition_id = ?7" + +#define SQL_INJECT_REMOVED_UPDATE_DETAIL \ + "UPDATE health_log_detail SET flags = flags | ?1, updated_by_id = ?2 WHERE unique_id = ?3 AND transition_id = ?4" + +#define SQL_INJECT_REMOVED_UPDATE_LOG \ + "UPDATE health_log SET last_transition_id = ?1 WHERE alarm_id = ?2 AND last_transition_id = ?3 AND host_id = ?4" + +void sql_inject_removed_status( + RRDHOST *host, + uint32_t alarm_id, + uint32_t alarm_event_id, + uint32_t unique_id, + uint32_t max_unique_id, + uuid_t *prev_transition_id) { int rc; @@ -682,10 +659,8 @@ void sql_inject_removed_status(RRDHOST *host, uint32_t alarm_id, uint32_t alarm_ } rc = execute_insert(res); - if (unlikely(rc != SQLITE_DONE)) { + if (unlikely(rc != SQLITE_DONE)) error_report("HEALTH [N/A]: Failed to execute SQL_INJECT_REMOVED_UPDATE_DETAIL, rc = %d", rc); - goto failed; - } failed: if (unlikely(sqlite3_finalize(res) != SQLITE_OK)) @@ -727,7 +702,10 @@ uint32_t sql_get_max_unique_id (RRDHOST *host) return max_unique_id; } -#define SQL_SELECT_LAST_STATUSES "SELECT hld.new_status, hld.unique_id, hld.alarm_id, hld.alarm_event_id, hld.transition_id from health_log hl, health_log_detail hld where hl.host_id = @host_id and hl.last_transition_id = hld.transition_id;" +#define SQL_SELECT_LAST_STATUSES \ + "SELECT hld.new_status, hld.unique_id, hld.alarm_id, 
hld.alarm_event_id, hld.transition_id FROM health_log hl, " \ + "health_log_detail hld WHERE hl.host_id = @host_id AND hl.last_transition_id = hld.transition_id" + void sql_check_removed_alerts_state(RRDHOST *host) { int rc; @@ -752,34 +730,39 @@ void sql_check_removed_alerts_state(RRDHOST *host) uint32_t alarm_id, alarm_event_id, unique_id; RRDCALC_STATUS status; - status = (RRDCALC_STATUS) sqlite3_column_int(res, 0); - unique_id = (uint32_t) sqlite3_column_int64(res, 1); - alarm_id = (uint32_t) sqlite3_column_int64(res, 2); - alarm_event_id = (uint32_t) sqlite3_column_int64(res, 3); - uuid_copy(transition_id, *((uuid_t *) sqlite3_column_blob(res, 4))); + status = (RRDCALC_STATUS)sqlite3_column_int(res, 0); + unique_id = (uint32_t)sqlite3_column_int64(res, 1); + alarm_id = (uint32_t)sqlite3_column_int64(res, 2); + alarm_event_id = (uint32_t)sqlite3_column_int64(res, 3); + uuid_copy(transition_id, *((uuid_t *)sqlite3_column_blob(res, 4))); + if (unlikely(status != RRDCALC_STATUS_REMOVED)) { - if (unlikely(!max_unique_id)) - max_unique_id = sql_get_max_unique_id (host); - sql_inject_removed_status (host, alarm_id, alarm_event_id, unique_id, ++max_unique_id, &transition_id); + if (unlikely(!max_unique_id)) + max_unique_id = sql_get_max_unique_id(host); + + sql_inject_removed_status(host, alarm_id, alarm_event_id, unique_id, ++max_unique_id, &transition_id); } } - rc = sqlite3_finalize(res); - if (unlikely(rc != SQLITE_OK)) - error_report("Failed to finalize the statement"); + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize the statement"); } /* Health related SQL queries Load from the health log table */ -#define SQL_LOAD_HEALTH_LOG "SELECT hld.unique_id, hld.alarm_id, hld.alarm_event_id, hl.config_hash_id, hld.updated_by_id, " \ - "hld.updates_id, hld.when_key, hld.duration, hld.non_clear_duration, hld.flags, hld.exec_run_timestamp, " \ - "hld.delay_up_to_timestamp, hl.name, hl.chart, hl.family, hl.exec, hl.recipient, ah.source, hl.units, " \ - "hld.info, hld.exec_code, hld.new_status, hld.old_status, hld.delay, hld.new_value, hld.old_value, " \ - "hld.last_repeat, ah.class, ah.component, ah.type, hl.chart_context, hld.transition_id, hld.global_id, hl.chart_name " \ - "FROM health_log hl, alert_hash ah, health_log_detail hld " \ - "WHERE hl.config_hash_id = ah.hash_id and hl.host_id = @host_id and hl.last_transition_id = hld.transition_id;" -void sql_health_alarm_log_load(RRDHOST *host) { +#define SQL_LOAD_HEALTH_LOG \ + "SELECT hld.unique_id, hld.alarm_id, hld.alarm_event_id, hl.config_hash_id, hld.updated_by_id, " \ + "hld.updates_id, hld.when_key, hld.duration, hld.non_clear_duration, hld.flags, hld.exec_run_timestamp, " \ + "hld.delay_up_to_timestamp, hl.name, hl.chart, hl.exec, hl.recipient, ah.source, hl.units, " \ + "hld.info, hld.exec_code, hld.new_status, hld.old_status, hld.delay, hld.new_value, hld.old_value, " \ + "hld.last_repeat, ah.class, ah.component, ah.type, hl.chart_context, hld.transition_id, hld.global_id, " \ + "hl.chart_name, hld.summary FROM health_log hl, alert_hash ah, health_log_detail hld " \ + "WHERE hl.config_hash_id = ah.hash_id and hl.host_id = @host_id and hl.last_transition_id = hld.transition_id" + +void sql_health_alarm_log_load(RRDHOST *host) +{ sqlite3_stmt *res = NULL; int ret; ssize_t errored = 0, loaded = 0; @@ -835,7 +818,7 @@ void sql_health_alarm_log_load(RRDHOST *host) { continue; } - //need name, chart and family + //need name and chart if (sqlite3_column_type(res, 12) == SQLITE_NULL) { 
error_report("HEALTH [%s]: Got null name field. Ignoring it.", rrdhost_hostname(host)); errored++; @@ -848,14 +831,8 @@ void sql_health_alarm_log_load(RRDHOST *host) { continue; } - if (sqlite3_column_type(res, 14) == SQLITE_NULL) { - error_report("HEALTH [%s]: Got null family field. Ignoring it.", rrdhost_hostname(host)); - errored++; - continue; - } - // Check if we got last_repeat field - time_t last_repeat = (time_t)sqlite3_column_int64(res, 26); + time_t last_repeat = (time_t)sqlite3_column_int64(res, 25); rc = dictionary_get(all_rrdcalcs, (char *) sqlite3_column_text(res, 13)); if(unlikely(rc)) { @@ -892,73 +869,36 @@ void sql_health_alarm_log_load(RRDHOST *host) { ae->name = string_strdupz((char *) sqlite3_column_text(res, 12)); ae->chart = string_strdupz((char *) sqlite3_column_text(res, 13)); - ae->family = string_strdupz((char *) sqlite3_column_text(res, 14)); - if (sqlite3_column_type(res, 15) != SQLITE_NULL) - ae->exec = string_strdupz((char *) sqlite3_column_text(res, 15)); - else - ae->exec = NULL; - - if (sqlite3_column_type(res, 16) != SQLITE_NULL) - ae->recipient = string_strdupz((char *) sqlite3_column_text(res, 16)); - else - ae->recipient = NULL; - - if (sqlite3_column_type(res, 17) != SQLITE_NULL) - ae->source = string_strdupz((char *) sqlite3_column_text(res, 17)); - else - ae->source = NULL; + ae->exec = SQLITE3_COLUMN_STRINGDUP_OR_NULL(res, 14); + ae->recipient = SQLITE3_COLUMN_STRINGDUP_OR_NULL(res, 15); + ae->source = SQLITE3_COLUMN_STRINGDUP_OR_NULL(res, 16); + ae->units = SQLITE3_COLUMN_STRINGDUP_OR_NULL(res, 17); + ae->info = SQLITE3_COLUMN_STRINGDUP_OR_NULL(res, 18); - if (sqlite3_column_type(res, 18) != SQLITE_NULL) - ae->units = string_strdupz((char *) sqlite3_column_text(res, 18)); - else - ae->units = NULL; - - if (sqlite3_column_type(res, 19) != SQLITE_NULL) - ae->info = string_strdupz((char *) sqlite3_column_text(res, 19)); - else - ae->info = NULL; + ae->exec_code = (int) sqlite3_column_int(res, 19); + ae->new_status = (RRDCALC_STATUS) sqlite3_column_int(res, 20); + ae->old_status = (RRDCALC_STATUS)sqlite3_column_int(res, 21); + ae->delay = (int) sqlite3_column_int(res, 22); - ae->exec_code = (int) sqlite3_column_int(res, 20); - ae->new_status = (RRDCALC_STATUS) sqlite3_column_int(res, 21); - ae->old_status = (RRDCALC_STATUS)sqlite3_column_int(res, 22); - ae->delay = (int) sqlite3_column_int(res, 23); - - ae->new_value = (NETDATA_DOUBLE) sqlite3_column_double(res, 24); - ae->old_value = (NETDATA_DOUBLE) sqlite3_column_double(res, 25); + ae->new_value = (NETDATA_DOUBLE) sqlite3_column_double(res, 23); + ae->old_value = (NETDATA_DOUBLE) sqlite3_column_double(res, 24); ae->last_repeat = last_repeat; - if (sqlite3_column_type(res, 27) != SQLITE_NULL) - ae->classification = string_strdupz((char *) sqlite3_column_text(res, 27)); - else - ae->classification = NULL; - - if (sqlite3_column_type(res, 28) != SQLITE_NULL) - ae->component = string_strdupz((char *) sqlite3_column_text(res, 28)); - else - ae->component = NULL; - - if (sqlite3_column_type(res, 29) != SQLITE_NULL) - ae->type = string_strdupz((char *) sqlite3_column_text(res, 29)); - else - ae->type = NULL; + ae->classification = SQLITE3_COLUMN_STRINGDUP_OR_NULL(res, 26); + ae->component = SQLITE3_COLUMN_STRINGDUP_OR_NULL(res, 27); + ae->type = SQLITE3_COLUMN_STRINGDUP_OR_NULL(res, 28); + ae->chart_context = SQLITE3_COLUMN_STRINGDUP_OR_NULL(res, 29); if (sqlite3_column_type(res, 30) != SQLITE_NULL) - ae->chart_context = string_strdupz((char *) sqlite3_column_text(res, 30)); - else - ae->chart_context = 
NULL; + uuid_copy(ae->transition_id, *((uuid_t *)sqlite3_column_blob(res, 30))); if (sqlite3_column_type(res, 31) != SQLITE_NULL) - uuid_copy(ae->transition_id, *((uuid_t *)sqlite3_column_blob(res, 31))); + ae->global_id = sqlite3_column_int64(res, 31); - if (sqlite3_column_type(res, 32) != SQLITE_NULL) - ae->global_id = sqlite3_column_int64(res, 32); - - if (sqlite3_column_type(res, 33) != SQLITE_NULL) - ae->chart_name = string_strdupz((char *) sqlite3_column_text(res, 33)); - else - ae->chart_name = NULL; + ae->chart_name = SQLITE3_COLUMN_STRINGDUP_OR_NULL(res, 32); + ae->summary = SQLITE3_COLUMN_STRINGDUP_OR_NULL(res, 33); char value_string[100 + 1]; string_freez(ae->old_value_string); @@ -990,24 +930,33 @@ void sql_health_alarm_log_load(RRDHOST *host) { if (unlikely(!host->health_log.next_alarm_id || host->health_log.next_alarm_id <= host->health_max_alarm_id)) host->health_log.next_alarm_id = host->health_max_alarm_id + 1; - netdata_log_health("[%s]: Table health_log, loaded %zd alarm entries, errors in %zd entries.", rrdhost_hostname(host), loaded, errored); + nd_log(NDLS_DAEMON, errored ? NDLP_WARNING : NDLP_DEBUG, + "[%s]: Table health_log, loaded %zd alarm entries, errors in %zd entries.", + rrdhost_hostname(host), loaded, errored); ret = sqlite3_finalize(res); if (unlikely(ret != SQLITE_OK)) error_report("Failed to finalize the health log read statement"); - sql_health_alarm_log_count(host); + int rows = sql_health_alarm_log_count(host); + + if (rows >= 0) + host->health.health_log_entries_written = rows; } /* * Store an alert config hash in the database */ -#define SQL_STORE_ALERT_CONFIG_HASH "insert or replace into alert_hash (hash_id, date_updated, alarm, template, " \ - "on_key, class, component, type, os, hosts, lookup, every, units, calc, families, plugin, module, " \ - "charts, green, red, warn, crit, exec, to_key, info, delay, options, repeat, host_labels, " \ - "p_db_lookup_dimensions, p_db_lookup_method, p_db_lookup_options, p_db_lookup_after, " \ - "p_db_lookup_before, p_update_every, source, chart_labels) values (?1,unixepoch(),?2,?3,?4,?5,?6,?7,?8,?9,?10,?11,?12," \ - "?13,?14,?15,?16,?17,?18,?19,?20,?21,?22,?23,?24,?25,?26,?27,?28,?29,?30,?31,?32,?33,?34,?35,?36);" +#define SQL_STORE_ALERT_CONFIG_HASH \ + "insert or replace into alert_hash (hash_id, date_updated, alarm, template, " \ + "on_key, class, component, type, os, hosts, lookup, every, units, calc, plugin, module, " \ + "charts, green, red, warn, crit, exec, to_key, info, delay, options, repeat, host_labels, " \ + "p_db_lookup_dimensions, p_db_lookup_method, p_db_lookup_options, p_db_lookup_after, " \ + "p_db_lookup_before, p_update_every, source, chart_labels, summary) values (@hash_id,UNIXEPOCH(),@alarm,@template," \ + "@on_key,@class,@component,@type,@os,@hosts,@lookup,@every,@units,@calc,@plugin,@module," \ + "@charts,@green,@red,@warn,@crit,@exec,@to_key,@info,@delay,@options,@repeat,@host_labels," \ + "@p_db_lookup_dimensions,@p_db_lookup_method,@p_db_lookup_options,@p_db_lookup_after," \ + "@p_db_lookup_before,@p_update_every,@source,@chart_labels,@summary)" int sql_store_alert_config_hash(uuid_t *hash_id, struct alert_config *cfg) { @@ -1033,120 +982,116 @@ int sql_store_alert_config_hash(uuid_t *hash_id, struct alert_config *cfg) if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->alarm, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->alarm, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, 
cfg->template_key, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->template_key, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->on, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->on, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->classification, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->classification, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->component, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->component, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->type, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->type, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->os, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->os, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->host, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->host, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->lookup, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->lookup, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->every, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->every, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->units, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->units, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->calc, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->calc, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->families, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->plugin, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->plugin, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->module, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->module, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->charts, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->charts, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->green, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->green, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->red, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->red, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->warn, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->warn, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->crit, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->crit, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->exec, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->exec, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->to, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->to, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->info, ++param); if (unlikely(rc != SQLITE_OK)) 
goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->info, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->delay, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->delay, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->options, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->options, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->repeat, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->repeat, ++param); - if (unlikely(rc != SQLITE_OK)) - goto bind_fail; - - rc = sqlite3_bind_string_or_null(res, cfg->host_labels, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->host_labels, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; if (cfg->p_db_lookup_after) { - rc = sqlite3_bind_string_or_null(res, cfg->p_db_lookup_dimensions, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->p_db_lookup_dimensions, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->p_db_lookup_method, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->p_db_lookup_method, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; @@ -1187,11 +1132,15 @@ int sql_store_alert_config_hash(uuid_t *hash_id, struct alert_config *cfg) if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->source, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->source, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; - rc = sqlite3_bind_string_or_null(res, cfg->chart_labels, ++param); + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->chart_labels, ++param); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + rc = SQLITE3_BIND_STRING_OR_NULL(res, cfg->summary, ++param); if (unlikely(rc != SQLITE_OK)) goto bind_fail; @@ -1238,7 +1187,6 @@ int alert_hash_and_store_config( DIGEST_ALERT_CONFIG_VAL(cfg->os); DIGEST_ALERT_CONFIG_VAL(cfg->host); DIGEST_ALERT_CONFIG_VAL(cfg->on); - DIGEST_ALERT_CONFIG_VAL(cfg->families); DIGEST_ALERT_CONFIG_VAL(cfg->plugin); DIGEST_ALERT_CONFIG_VAL(cfg->module); DIGEST_ALERT_CONFIG_VAL(cfg->charts); @@ -1261,6 +1209,7 @@ int alert_hash_and_store_config( DIGEST_ALERT_CONFIG_VAL(cfg->repeat); DIGEST_ALERT_CONFIG_VAL(cfg->host_labels); DIGEST_ALERT_CONFIG_VAL(cfg->chart_labels); + DIGEST_ALERT_CONFIG_VAL(cfg->summary); EVP_DigestFinal_ex(evpctx, hash_value, &hash_len); EVP_MD_CTX_destroy(evpctx); @@ -1282,16 +1231,17 @@ int alert_hash_and_store_config( return 1; } -#define SQL_SELECT_HEALTH_LAST_EXECUTED_EVENT "SELECT hld.new_status FROM health_log hl, health_log_detail hld WHERE hl.alarm_id = %u AND hld.unique_id != %u AND hld.flags & %u AND hl.host_id = @host_id and hl.health_log_id = hld.health_log_id ORDER BY hld.unique_id DESC LIMIT 1;" +#define SQL_SELECT_HEALTH_LAST_EXECUTED_EVENT \ + "SELECT hld.new_status FROM health_log hl, health_log_detail hld " \ + "WHERE hl.host_id = @host_id AND hl.alarm_id = @alarm_id AND hld.unique_id != @unique_id AND hld.flags & @flags " \ + "AND hl.health_log_id = hld.health_log_id ORDER BY hld.unique_id DESC LIMIT 1" + int sql_health_get_last_executed_event(RRDHOST *host, ALARM_ENTRY *ae, RRDCALC_STATUS *last_executed_status) { int rc = 0, ret = -1; - char command[MAX_HEALTH_SQL_SIZE + 1]; sqlite3_stmt *res = NULL; - snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_SELECT_HEALTH_LAST_EXECUTED_EVENT, ae->alarm_id, ae->unique_id, (uint32_t) HEALTH_ENTRY_FLAG_EXEC_RUN); - - rc = 
sqlite3_prepare_v2(db_meta, command, -1, &res, 0);
+    rc = sqlite3_prepare_v2(db_meta, SQL_SELECT_HEALTH_LAST_EXECUTED_EVENT, -1, &res, 0);
     if (rc != SQLITE_OK) {
         error_report("Failed to prepare statement when trying to get last executed status");
         return ret;
@@ -1300,8 +1250,25 @@ int sql_health_get_last_executed_event(RRDHOST *host, ALARM_ENTRY *ae, RRDCALC_S
     rc = sqlite3_bind_blob(res, 1, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC);
     if (unlikely(rc != SQLITE_OK)) {
         error_report("Failed to bind host_id parameter for SQL_SELECT_HEALTH_LAST_EXECUTED_EVENT.");
-        sqlite3_finalize(res);
-        return ret;
+        goto done;
+    }
+
+    rc = sqlite3_bind_int(res, 2, (int) ae->alarm_id);
+    if (unlikely(rc != SQLITE_OK)) {
+        error_report("Failed to bind alarm_id parameter for SQL_SELECT_HEALTH_LAST_EXECUTED_EVENT.");
+        goto done;
+    }
+
+    rc = sqlite3_bind_int(res, 3, (int) ae->unique_id);
+    if (unlikely(rc != SQLITE_OK)) {
+        error_report("Failed to bind unique_id parameter for SQL_SELECT_HEALTH_LAST_EXECUTED_EVENT.");
+        goto done;
+    }
+
+    rc = sqlite3_bind_int(res, 4, (uint32_t) HEALTH_ENTRY_FLAG_EXEC_RUN);
+    if (unlikely(rc != SQLITE_OK)) {
+        error_report("Failed to bind flags parameter for SQL_SELECT_HEALTH_LAST_EXECUTED_EVENT.");
+        goto done;
     }

     ret = 0;
@@ -1310,6 +1277,7 @@ int sql_health_get_last_executed_event(RRDHOST *host, ALARM_ENTRY *ae, RRDCALC_S
         ret = 1;
     }

+done:
     rc = sqlite3_finalize(res);
     if (unlikely(rc != SQLITE_OK))
         error_report("Failed to finalize the statement.");
@@ -1317,191 +1285,169 @@ int sql_health_get_last_executed_event(RRDHOST *host, ALARM_ENTRY *ae, RRDCALC_S
     return ret;
 }

-#define SQL_SELECT_HEALTH_LOG "SELECT hld.unique_id, hld.alarm_id, hld.alarm_event_id, hl.config_hash_id, hld.updated_by_id, hld.updates_id, hld.when_key, hld.duration, hld.non_clear_duration, hld.flags, hld.exec_run_timestamp, hld.delay_up_to_timestamp, hl.name, hl.chart, hl.family, hl.exec, hl.recipient, ah.source, hl.units, hld.info, hld.exec_code, hld.new_status, hld.old_status, hld.delay, hld.new_value, hld.old_value, hld.last_repeat, ah.class, ah.component, ah.type, hl.chart_context, hld.transition_id FROM health_log hl, alert_hash ah, health_log_detail hld WHERE hl.config_hash_id = ah.hash_id and hl.health_log_id = hld.health_log_id and hl.host_id = @host_id "
-void sql_health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after, char *chart) {
+#define SQL_SELECT_HEALTH_LOG \
+    "SELECT hld.unique_id, hld.alarm_id, hld.alarm_event_id, hl.config_hash_id, hld.updated_by_id, hld.updates_id, " \
+    "hld.when_key, hld.duration, hld.non_clear_duration, hld.flags, hld.exec_run_timestamp, " \
+    "hld.delay_up_to_timestamp, hl.name, hl.chart, hl.exec, hl.recipient, ah.source, " \
+    "hl.units, hld.info, hld.exec_code, hld.new_status, hld.old_status, hld.delay, hld.new_value, hld.old_value, " \
+    "hld.last_repeat, ah.class, ah.component, ah.type, hl.chart_context, hld.transition_id, hld.summary " \
+    "FROM health_log hl, alert_hash ah, health_log_detail hld WHERE hl.config_hash_id = ah.hash_id and " \
+    "hl.health_log_id = hld.health_log_id and hl.host_id = @host_id AND hld.unique_id > @after "

-    buffer_strcat(wb, "[");
+void sql_health_alarm_log2json(RRDHOST *host, BUFFER *wb, time_t after, const char *chart)
+{
+    unsigned int max = host->health_log.max;

-    unsigned int max = host->health_log.max;
-    unsigned int count = 0;
+    static __thread sqlite3_stmt *stmt_no_chart = NULL;
+    static __thread sqlite3_stmt *stmt_with_chart = NULL;

-    sqlite3_stmt *res = NULL;
-    int rc;
+    sqlite3_stmt **active_stmt;
+ sqlite3_stmt *stmt_query; - BUFFER *command = buffer_create(MAX_HEALTH_SQL_SIZE, NULL); - buffer_sprintf(command, SQL_SELECT_HEALTH_LOG); + int rc; - if (chart) { - char chart_sql[MAX_HEALTH_SQL_SIZE + 1]; - snprintfz(chart_sql, MAX_HEALTH_SQL_SIZE, "AND hl.chart = '%s' ", chart); - buffer_strcat(command, chart_sql); - } + active_stmt = chart ? &stmt_with_chart : &stmt_no_chart; - if (after) { - char after_sql[MAX_HEALTH_SQL_SIZE + 1]; - snprintfz(after_sql, MAX_HEALTH_SQL_SIZE, "AND hld.unique_id > %u ", after); - buffer_strcat(command, after_sql); - } + if (!*active_stmt) { - { - char limit_sql[MAX_HEALTH_SQL_SIZE + 1]; - snprintfz(limit_sql, MAX_HEALTH_SQL_SIZE, "ORDER BY hld.unique_id DESC LIMIT %u ", max); - buffer_strcat(command, limit_sql); - } + BUFFER *command = buffer_create(MAX_HEALTH_SQL_SIZE, NULL); + buffer_sprintf(command, SQL_SELECT_HEALTH_LOG); - rc = sqlite3_prepare_v2(db_meta, buffer_tostring(command), -1, &res, 0); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to prepare statement SQL_SELECT_HEALTH_LOG"); - buffer_free(command); - return; - } + if (chart) + buffer_strcat(command, " AND hl.chart = @chart "); - rc = sqlite3_bind_blob(res, 1, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind host_id for SQL_SELECT_HEALTH_LOG."); - sqlite3_finalize(res); - buffer_free(command); - return; - } + buffer_strcat(command, " ORDER BY hld.unique_id DESC LIMIT @limit"); - while (sqlite3_step(res) == SQLITE_ROW) { + rc = prepare_statement(db_meta, buffer_tostring(command), active_stmt); + buffer_free(command); - char old_value_string[100 + 1]; - char new_value_string[100 + 1]; + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement SQL_SELECT_HEALTH_LOG"); + return; + } + } - char config_hash_id[UUID_STR_LEN]; - uuid_unparse_lower(*((uuid_t *) sqlite3_column_blob(res, 3)), config_hash_id); + stmt_query = *active_stmt; - char transition_id[UUID_STR_LEN] = {0}; - if (sqlite3_column_type(res, 31) != SQLITE_NULL) - uuid_unparse_lower(*((uuid_t *) sqlite3_column_blob(res, 31)), transition_id); - - char *edit_command = sqlite3_column_bytes(res, 17) > 0 ? 
health_edit_command_from_source((char *)sqlite3_column_text(res, 17)) : strdupz("UNKNOWN=0=UNKNOWN"); - - if (count) - buffer_sprintf(wb, ","); - - count++; - - buffer_sprintf( - wb, - "\n\t{\n" - "\t\t\"hostname\": \"%s\",\n" - "\t\t\"utc_offset\": %d,\n" - "\t\t\"timezone\": \"%s\",\n" - "\t\t\"unique_id\": %u,\n" - "\t\t\"alarm_id\": %u,\n" - "\t\t\"alarm_event_id\": %u,\n" - "\t\t\"config_hash_id\": \"%s\",\n" - "\t\t\"transition_id\": \"%s\",\n" - "\t\t\"name\": \"%s\",\n" - "\t\t\"chart\": \"%s\",\n" - "\t\t\"context\": \"%s\",\n" - "\t\t\"family\": \"%s\",\n" - "\t\t\"class\": \"%s\",\n" - "\t\t\"component\": \"%s\",\n" - "\t\t\"type\": \"%s\",\n" - "\t\t\"processed\": %s,\n" - "\t\t\"updated\": %s,\n" - "\t\t\"exec_run\": %lu,\n" - "\t\t\"exec_failed\": %s,\n" - "\t\t\"exec\": \"%s\",\n" - "\t\t\"recipient\": \"%s\",\n" - "\t\t\"exec_code\": %d,\n" - "\t\t\"source\": \"%s\",\n" - "\t\t\"command\": \"%s\",\n" - "\t\t\"units\": \"%s\",\n" - "\t\t\"when\": %lu,\n" - "\t\t\"duration\": %lu,\n" - "\t\t\"non_clear_duration\": %lu,\n" - "\t\t\"status\": \"%s\",\n" - "\t\t\"old_status\": \"%s\",\n" - "\t\t\"delay\": %d,\n" - "\t\t\"delay_up_to_timestamp\": %lu,\n" - "\t\t\"updated_by_id\": %u,\n" - "\t\t\"updates_id\": %u,\n" - "\t\t\"value_string\": \"%s\",\n" - "\t\t\"old_value_string\": \"%s\",\n" - "\t\t\"last_repeat\": \"%lu\",\n" - "\t\t\"silenced\": \"%s\",\n", - rrdhost_hostname(host), - host->utc_offset, - rrdhost_abbrev_timezone(host), - (unsigned int) sqlite3_column_int64(res, 0), - (unsigned int) sqlite3_column_int64(res, 1), - (unsigned int) sqlite3_column_int64(res, 2), - config_hash_id, - transition_id, - sqlite3_column_text(res, 12), - sqlite3_column_text(res, 13), - sqlite3_column_text(res, 30), - sqlite3_column_text(res, 14), - sqlite3_column_text(res, 27) ? (const char *) sqlite3_column_text(res, 27) : (char *) "Unknown", - sqlite3_column_text(res, 28) ? (const char *) sqlite3_column_text(res, 28) : (char *) "Unknown", - sqlite3_column_text(res, 29) ? (const char *) sqlite3_column_text(res, 29) : (char *) "Unknown", - (sqlite3_column_int64(res, 9) & HEALTH_ENTRY_FLAG_PROCESSED)?"true":"false", - (sqlite3_column_int64(res, 9) & HEALTH_ENTRY_FLAG_UPDATED)?"true":"false", - (long unsigned int)sqlite3_column_int64(res, 10), - (sqlite3_column_int64(res, 9) & HEALTH_ENTRY_FLAG_EXEC_FAILED)?"true":"false", - sqlite3_column_text(res, 15) ? (const char *) sqlite3_column_text(res, 15) : string2str(host->health.health_default_exec), - sqlite3_column_text(res, 16) ? (const char *) sqlite3_column_text(res, 16) : string2str(host->health.health_default_recipient), - sqlite3_column_int(res, 20), - sqlite3_column_text(res, 17) ? (const char *) sqlite3_column_text(res, 17) : (char *) "Unknown", - edit_command, - sqlite3_column_text(res, 18), - (long unsigned int)sqlite3_column_int64(res, 6), - (long unsigned int)sqlite3_column_int64(res, 7), - (long unsigned int)sqlite3_column_int64(res, 8), - rrdcalc_status2string(sqlite3_column_int(res, 21)), - rrdcalc_status2string(sqlite3_column_int(res, 22)), - sqlite3_column_int(res, 23), - (long unsigned int)sqlite3_column_int64(res, 11), - (unsigned int)sqlite3_column_int64(res, 4), - (unsigned int)sqlite3_column_int64(res, 5), - sqlite3_column_type(res, 24) == SQLITE_NULL ? "-" : format_value_and_unit(new_value_string, 100, sqlite3_column_double(res, 24), (char *) sqlite3_column_text(res, 18), -1), - sqlite3_column_type(res, 25) == SQLITE_NULL ? 
"-" : format_value_and_unit(old_value_string, 100, sqlite3_column_double(res, 25), (char *) sqlite3_column_text(res, 18), -1), - (long unsigned int)sqlite3_column_int64(res, 26), - (sqlite3_column_int64(res, 9) & HEALTH_ENTRY_FLAG_SILENCED)?"true":"false"); - - health_string2json(wb, "\t\t", "info", (char *) sqlite3_column_text(res, 19), ",\n"); - - if(unlikely(sqlite3_column_int64(res, 9) & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION)) { - buffer_strcat(wb, "\t\t\"no_clear_notification\": true,\n"); - } + int param = 0; + rc = sqlite3_bind_blob(stmt_query, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind host_id for SQL_SELECT_HEALTH_LOG."); + goto finish; + } - buffer_strcat(wb, "\t\t\"value\":"); - if (sqlite3_column_type(res, 24) == SQLITE_NULL) - buffer_strcat(wb, "null"); - else - buffer_print_netdata_double(wb, sqlite3_column_double(res, 24)); - buffer_strcat(wb, ",\n"); + rc = sqlite3_bind_int64(stmt_query, ++param, after); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind after for SQL_SELECT_HEALTH_LOG."); + goto finish; + } - buffer_strcat(wb, "\t\t\"old_value\":"); - if (sqlite3_column_type(res, 25) == SQLITE_NULL) - buffer_strcat(wb, "null"); + if (chart) { + rc = sqlite3_bind_text(stmt_query, ++param, chart, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind after for SQL_SELECT_HEALTH_LOG."); + goto finish; + } + } + + rc = sqlite3_bind_int64(stmt_query, ++param, max); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind max lines for SQL_SELECT_HEALTH_LOG."); + goto finish; + } + + buffer_json_initialize(wb, "\"", "\"", 0, false, BUFFER_JSON_OPTIONS_DEFAULT); + buffer_json_member_add_array(wb, NULL); + + while (sqlite3_step(stmt_query) == SQLITE_ROW) { + char old_value_string[100 + 1]; + char new_value_string[100 + 1]; + + char config_hash_id[UUID_STR_LEN]; + uuid_unparse_lower(*((uuid_t *)sqlite3_column_blob(stmt_query, 3)), config_hash_id); + + char transition_id[UUID_STR_LEN] = {0}; + if (sqlite3_column_type(stmt_query, 30) != SQLITE_NULL) + uuid_unparse_lower(*((uuid_t *)sqlite3_column_blob(stmt_query, 30)), transition_id); + + char *edit_command = sqlite3_column_bytes(stmt_query, 16) > 0 ? 
+ health_edit_command_from_source((char *)sqlite3_column_text(stmt_query, 16)) : + strdupz("UNKNOWN=0=UNKNOWN"); + + buffer_json_add_array_item_object(wb); // this node + + buffer_json_member_add_string_or_empty(wb, "hostname", rrdhost_hostname(host)); + buffer_json_member_add_int64(wb, "utc_offset", (int64_t)host->utc_offset); + buffer_json_member_add_string_or_empty(wb, "timezone", rrdhost_abbrev_timezone(host)); + buffer_json_member_add_int64(wb, "unique_id", (int64_t) sqlite3_column_int64(stmt_query, 0)); + buffer_json_member_add_int64(wb, "alarm_id", (int64_t) sqlite3_column_int64(stmt_query, 1)); + buffer_json_member_add_int64(wb, "alarm_event_id", (int64_t) sqlite3_column_int64(stmt_query, 2)); + buffer_json_member_add_string_or_empty(wb, "config_hash_id", config_hash_id); + buffer_json_member_add_string_or_empty(wb, "transition_id", transition_id); + buffer_json_member_add_string_or_empty(wb, "name", (const char *) sqlite3_column_text(stmt_query, 12)); + buffer_json_member_add_string_or_empty(wb, "chart", (const char *) sqlite3_column_text(stmt_query, 13)); + buffer_json_member_add_string_or_empty(wb, "context", (const char *) sqlite3_column_text(stmt_query, 29)); + buffer_json_member_add_string_or_empty(wb, "class", sqlite3_column_text(stmt_query, 26) ? (const char *) sqlite3_column_text(stmt_query, 26) : (char *) "Unknown"); + buffer_json_member_add_string_or_empty(wb, "component", sqlite3_column_text(stmt_query, 27) ? (const char *) sqlite3_column_text(stmt_query, 27) : (char *) "Unknown"); + buffer_json_member_add_string_or_empty(wb, "type", sqlite3_column_text(stmt_query, 28) ? (const char *) sqlite3_column_text(stmt_query, 28) : (char *) "Unknown"); + buffer_json_member_add_boolean(wb, "processed", (sqlite3_column_int64(stmt_query, 9) & HEALTH_ENTRY_FLAG_PROCESSED)); + buffer_json_member_add_boolean(wb, "updated", (sqlite3_column_int64(stmt_query, 9) & HEALTH_ENTRY_FLAG_UPDATED)); + buffer_json_member_add_int64(wb, "exec_run", (int64_t)sqlite3_column_int64(stmt_query, 10)); + buffer_json_member_add_boolean(wb, "exec_failed", (sqlite3_column_int64(stmt_query, 9) & HEALTH_ENTRY_FLAG_EXEC_FAILED)); + buffer_json_member_add_string_or_empty(wb, "exec", sqlite3_column_text(stmt_query, 14) ? (const char *) sqlite3_column_text(stmt_query, 14) : string2str(host->health.health_default_exec)); + buffer_json_member_add_string_or_empty(wb, "recipient", sqlite3_column_text(stmt_query, 15) ? (const char *) sqlite3_column_text(stmt_query, 15) : string2str(host->health.health_default_recipient)); + buffer_json_member_add_int64(wb, "exec_code", sqlite3_column_int(stmt_query, 19)); + buffer_json_member_add_string_or_empty(wb, "source", sqlite3_column_text(stmt_query, 16) ? 
(const char *) sqlite3_column_text(stmt_query, 16) : (char *) "Unknown"); + buffer_json_member_add_string_or_empty(wb, "command", edit_command); + buffer_json_member_add_string_or_empty(wb, "units", (const char *) sqlite3_column_text(stmt_query, 17)); + buffer_json_member_add_int64(wb, "when", (int64_t)sqlite3_column_int64(stmt_query, 6)); + buffer_json_member_add_int64(wb, "duration", (int64_t)sqlite3_column_int64(stmt_query, 7)); + buffer_json_member_add_int64(wb, "non_clear_duration", (int64_t)sqlite3_column_int64(stmt_query, 8)); + buffer_json_member_add_string_or_empty(wb, "status", rrdcalc_status2string(sqlite3_column_int(stmt_query, 20))); + buffer_json_member_add_string_or_empty(wb, "old_status", rrdcalc_status2string(sqlite3_column_int(stmt_query, 21))); + buffer_json_member_add_int64(wb, "delay", sqlite3_column_int(stmt_query, 22)); + buffer_json_member_add_int64(wb, "delay_up_to_timestamp",(int64_t)sqlite3_column_int64(stmt_query, 11)); + buffer_json_member_add_int64(wb, "updated_by_id", (unsigned int)sqlite3_column_int64(stmt_query, 4)); + buffer_json_member_add_int64(wb, "updates_id", (unsigned int)sqlite3_column_int64(stmt_query, 5)); + buffer_json_member_add_string_or_empty(wb, "value_string", sqlite3_column_type(stmt_query, 23) == SQLITE_NULL ? "-" : + format_value_and_unit(new_value_string, 100, sqlite3_column_double(stmt_query, 23), (char *) sqlite3_column_text(stmt_query, 17), -1)); + buffer_json_member_add_string_or_empty(wb, "old_value_string", sqlite3_column_type(stmt_query, 24) == SQLITE_NULL ? "-" : + format_value_and_unit(old_value_string, 100, sqlite3_column_double(stmt_query, 24), (char *) sqlite3_column_text(stmt_query, 17), -1)); + buffer_json_member_add_int64(wb, "last_repeat", (int64_t)sqlite3_column_int64(stmt_query, 25)); + buffer_json_member_add_boolean(wb, "silenced", (sqlite3_column_int64(stmt_query, 9) & HEALTH_ENTRY_FLAG_SILENCED)); + buffer_json_member_add_string_or_empty(wb, "summary", (const char *) sqlite3_column_text(stmt_query, 31)); + buffer_json_member_add_string_or_empty(wb, "info", (const char *) sqlite3_column_text(stmt_query, 18)); + buffer_json_member_add_boolean(wb, "no_clear_notification",(sqlite3_column_int64(stmt_query, 9) & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION)); + + if (sqlite3_column_type(stmt_query, 23) == SQLITE_NULL) + buffer_json_member_add_string(wb, "value", NULL); else - buffer_print_netdata_double(wb, sqlite3_column_double(res, 25)); - buffer_strcat(wb, "\n"); + buffer_json_member_add_double(wb, "value", sqlite3_column_double(stmt_query, 23)); - buffer_strcat(wb, "\t}"); + if (sqlite3_column_type(stmt_query, 24) == SQLITE_NULL) + buffer_json_member_add_string(wb, "old_value", NULL); + else + buffer_json_member_add_double(wb, "old_value", sqlite3_column_double(stmt_query, 23)); freez(edit_command); - } - buffer_strcat(wb, "\n]"); + buffer_json_object_close(wb); + } - rc = sqlite3_finalize(res); - if (unlikely(rc != SQLITE_OK)) - error_report("Failed to finalize statement for SQL_SELECT_HEALTH_LOG"); + buffer_json_array_close(wb); + buffer_json_finalize(wb); - buffer_free(command); +finish: + rc = sqlite3_reset(stmt_query); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to reset statement for SQL_SELECT_HEALTH_LOG"); } -#define SQL_COPY_HEALTH_LOG(table) "INSERT OR IGNORE INTO health_log (host_id, alarm_id, config_hash_id, name, chart, family, exec, recipient, units, chart_context) SELECT ?1, alarm_id, config_hash_id, name, chart, family, exec, recipient, units, chart_context from %s;", table -#define 
SQL_COPY_HEALTH_LOG_DETAIL(table) "INSERT INTO health_log_detail (unique_id, alarm_id, alarm_event_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, delay_up_to_timestamp, info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, transition_id, global_id, host_id) SELECT unique_id, alarm_id, alarm_event_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, delay_up_to_timestamp, info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, transition_id, now_usec(1), ?1 from %s;", table -#define SQL_UPDATE_HEALTH_LOG_DETAIL_TRANSITION_ID "update health_log_detail set transition_id = uuid_random() where transition_id is null;" -#define SQL_UPDATE_HEALTH_LOG_DETAIL_HEALTH_LOG_ID "update health_log_detail set health_log_id = (select health_log_id from health_log where host_id = ?1 and alarm_id = health_log_detail.alarm_id) where health_log_id is null and host_id = ?2;" -#define SQL_UPDATE_HEALTH_LOG_LAST_TRANSITION_ID "update health_log set last_transition_id = (select transition_id from health_log_detail where health_log_id = health_log.health_log_id and alarm_id = health_log.alarm_id group by (alarm_id) having max(alarm_event_id)) where host_id = ?1;" +#define SQL_COPY_HEALTH_LOG(table) "INSERT OR IGNORE INTO health_log (host_id, alarm_id, config_hash_id, name, chart, family, exec, recipient, units, chart_context) SELECT ?1, alarm_id, config_hash_id, name, chart, family, exec, recipient, units, chart_context from %s", table +#define SQL_COPY_HEALTH_LOG_DETAIL(table) "INSERT INTO health_log_detail (unique_id, alarm_id, alarm_event_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, delay_up_to_timestamp, info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, transition_id, global_id, host_id) SELECT unique_id, alarm_id, alarm_event_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, delay_up_to_timestamp, info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, transition_id, now_usec(1), ?1 from %s", table +#define SQL_UPDATE_HEALTH_LOG_DETAIL_TRANSITION_ID "update health_log_detail set transition_id = uuid_random() where transition_id is null" +#define SQL_UPDATE_HEALTH_LOG_DETAIL_HEALTH_LOG_ID "update health_log_detail set health_log_id = (select health_log_id from health_log where host_id = ?1 and alarm_id = health_log_detail.alarm_id) where health_log_id is null and host_id = ?2" +#define SQL_UPDATE_HEALTH_LOG_LAST_TRANSITION_ID "update health_log set last_transition_id = (select transition_id from health_log_detail where health_log_id = health_log.health_log_id and alarm_id = health_log.alarm_id group by (alarm_id) having max(alarm_event_id)) where host_id = ?1" int health_migrate_old_health_log_table(char *table) { if (!table) return 0; @@ -1523,7 +1469,7 @@ int health_migrate_old_health_log_table(char *table) { int rc; char command[MAX_HEALTH_SQL_SIZE + 1]; sqlite3_stmt *res = NULL; - snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_COPY_HEALTH_LOG(table)); + snprintfz(command, sizeof(command) - 1, SQL_COPY_HEALTH_LOG(table)); rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to prepare statement to copy health log, rc = %d", rc); @@ -1550,7 +1496,7 @@ int health_migrate_old_health_log_table(char *table) { } //detail - snprintfz(command, 
MAX_HEALTH_SQL_SIZE, SQL_COPY_HEALTH_LOG_DETAIL(table));
+    snprintfz(command, sizeof(command) - 1, SQL_COPY_HEALTH_LOG_DETAIL(table));
     rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0);
     if (unlikely(rc != SQLITE_OK)) {
         error_report("Failed to prepare statement to copy health log detail, rc = %d", rc);
@@ -1647,8 +1593,50 @@ int health_migrate_old_health_log_table(char *table) {
     return 1;
 }

-#define SQL_GET_ALARM_ID "select alarm_id, health_log_id from health_log where host_id = @host_id and chart = @chart and name = @name and config_hash_id = @config_hash_id"
-#define SQL_GET_EVENT_ID "select max(alarm_event_id) + 1 from health_log_detail where health_log_id = @health_log_id and alarm_id = @alarm_id"
+#define SQL_GET_EVENT_ID \
+    "SELECT MAX(alarm_event_id)+1 FROM health_log_detail WHERE health_log_id = @health_log_id AND alarm_id = @alarm_id"
+
+static uint32_t get_next_alarm_event_id(uint64_t health_log_id, uint32_t alarm_id)
+{
+    int rc;
+    sqlite3_stmt *res = NULL;
+    uint32_t next_event_id = 0;
+
+    rc = sqlite3_prepare_v2(db_meta, SQL_GET_EVENT_ID, -1, &res, 0);
+    if (rc != SQLITE_OK) {
+        error_report("Failed to prepare statement when trying to get an event id");
+        return alarm_id;
+    }
+
+    rc = sqlite3_bind_int64(res, 1, (sqlite3_int64) health_log_id);
+    if (unlikely(rc != SQLITE_OK)) {
+        error_report("Failed to bind health_log_id parameter for SQL_GET_EVENT_ID.");
+        sqlite3_finalize(res);
+        return alarm_id;
+    }
+
+    rc = sqlite3_bind_int64(res, 2, (sqlite3_int64) alarm_id);
+    if (unlikely(rc != SQLITE_OK)) {
+        error_report("Failed to bind alarm_id parameter for SQL_GET_EVENT_ID.");
+        sqlite3_finalize(res);
+        return alarm_id;
+    }
+
+    while (sqlite3_step_monitored(res) == SQLITE_ROW) {
+        next_event_id = (uint32_t) sqlite3_column_int64(res, 0);
+    }
+
+    rc = sqlite3_finalize(res);
+    if (unlikely(rc != SQLITE_OK))
+        error_report("Failed to finalize the statement while getting the next alarm event id.");
+
+    return next_event_id;
+}
+
+#define SQL_GET_ALARM_ID \
+    "SELECT alarm_id, health_log_id FROM health_log WHERE host_id = @host_id AND chart = @chart " \
+    "AND name = @name AND config_hash_id = @config_hash_id"
+
 uint32_t sql_get_alarm_id(RRDHOST *host, STRING *chart, STRING *name, uint32_t *next_event_id, uuid_t *config_hash_id)
 {
     int rc = 0;
@@ -1669,14 +1657,14 @@ uint32_t sql_get_alarm_id(RRDHOST *host, STRING *chart, STRING *name, uint32_t *
         return alarm_id;
     }

-    rc = sqlite3_bind_string_or_null(res, chart, 2);
+    rc = SQLITE3_BIND_STRING_OR_NULL(res, chart, 2);
     if (unlikely(rc != SQLITE_OK)) {
         error_report("Failed to bind char parameter for SQL_GET_ALARM_ID.");
         sqlite3_finalize(res);
         return alarm_id;
     }

-    rc = sqlite3_bind_string_or_null(res, name, 3);
+    rc = SQLITE3_BIND_STRING_OR_NULL(res, name, 3);
     if (unlikely(rc != SQLITE_OK)) {
         error_report("Failed to bind name parameter for SQL_GET_ALARM_ID.");
         sqlite3_finalize(res);
         return alarm_id;
@@ -1699,40 +1687,16 @@ uint32_t sql_get_alarm_id(RRDHOST *host, STRING *chart, STRING *name, uint32_t *
     if (unlikely(rc != SQLITE_OK))
         error_report("Failed to finalize the statement while getting an alarm id.");

-    if (alarm_id) {
-        rc = sqlite3_prepare_v2(db_meta, SQL_GET_EVENT_ID, -1, &res, 0);
-        if (rc != SQLITE_OK) {
-            error_report("Failed to prepare statement when trying to get an event id");
-            return alarm_id;
-        }
-
-        rc = sqlite3_bind_int64(res, 1, (sqlite3_int64) health_log_id);
-        if (unlikely(rc != SQLITE_OK)) {
-            error_report("Failed to bind host_id parameter for SQL_GET_EVENT_ID.");
-            sqlite3_finalize(res);
-            return alarm_id;
-        }
-
-        rc = sqlite3_bind_int64(res, 2,
(sqlite3_int64) alarm_id); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind char parameter for SQL_GET_EVENT_ID."); - sqlite3_finalize(res); - return alarm_id; - } - - while (sqlite3_step_monitored(res) == SQLITE_ROW) { - *next_event_id = (uint32_t) sqlite3_column_int64(res, 0); - } - - rc = sqlite3_finalize(res); - if (unlikely(rc != SQLITE_OK)) - error_report("Failed to finalize the statement while getting an alarm id."); - } + if (alarm_id) + *next_event_id = get_next_alarm_event_id(health_log_id, alarm_id); return alarm_id; } -#define SQL_UPDATE_ALARM_ID_WITH_CONFIG_HASH "update health_log set config_hash_id = @config_hash_id where host_id = @host_id and alarm_id = @alarm_id and health_log_id = @health_log_id" +#define SQL_UPDATE_ALARM_ID_WITH_CONFIG_HASH \ + "UPDATE health_log SET config_hash_id = @config_hash_id WHERE host_id = @host_id AND alarm_id = @alarm_id " \ + "AND health_log_id = @health_log_id" + void sql_update_alarm_with_config_hash(RRDHOST *host, uint32_t alarm_id, uint64_t health_log_id, uuid_t *config_hash_id) { int rc = 0; @@ -1747,42 +1711,42 @@ void sql_update_alarm_with_config_hash(RRDHOST *host, uint32_t alarm_id, uint64_ rc = sqlite3_bind_blob(res, 1, config_hash_id, sizeof(*config_hash_id), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind config_hash_id parameter for SQL_UPDATE_ALARM_ID_WITH_CONFIG_HASH."); - sqlite3_finalize(res); - return; + goto done; } rc = sqlite3_bind_blob(res, 2, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind host_id parameter for SQL_UPDATE_ALARM_ID_WITH_CONFIG_HASH."); - sqlite3_finalize(res); - return; + goto done; } rc = sqlite3_bind_int64(res, 3, (sqlite3_int64) alarm_id); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind alarm_id parameter for SQL_GET_ALARM_ID."); - sqlite3_finalize(res); - return; + goto done; } rc = sqlite3_bind_int64(res, 4, (sqlite3_int64) health_log_id); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind alarm_id parameter for SQL_GET_ALARM_ID."); - sqlite3_finalize(res); - return; + goto done; } rc = execute_insert(res); - if (unlikely(rc != SQLITE_DONE)) { + if (unlikely(rc != SQLITE_DONE)) error_report("Failed to execute SQL_UPDATE_ALARM_ID_WITH_CONFIG_HASH, rc = %d", rc); - rc = sqlite3_finalize(res); - if (unlikely(rc != SQLITE_OK)) - error_report("Failed to reset statement to update health log detail table with config hash ids, rc = %d", rc); - return; - } + +done: + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to reset statement to update health log detail table with config hash ids, rc = %d", rc); + } -#define SQL_GET_ALARM_ID_CHECK_ZERO_HASH "select alarm_id, health_log_id from health_log where host_id = @host_id and chart = @chart and name = @name and (config_hash_id is null or config_hash_id = zeroblob(16))" +#define SQL_GET_ALARM_ID_CHECK_ZERO_HASH \ + "SELECT alarm_id, health_log_id FROM health_log WHERE host_id = @host_id AND chart = @chart " \ + "AND name = @name AND (config_hash_id IS NULL OR config_hash_id = ZEROBLOB(16))" + uint32_t sql_get_alarm_id_check_zero_hash(RRDHOST *host, STRING *chart, STRING *name, uint32_t *next_event_id, uuid_t *config_hash_id) { int rc = 0; @@ -1803,14 +1767,14 @@ uint32_t sql_get_alarm_id_check_zero_hash(RRDHOST *host, STRING *chart, STRING * return alarm_id; } - rc = sqlite3_bind_string_or_null(res, chart, 2); + rc = SQLITE3_BIND_STRING_OR_NULL(res, chart, 2); if (unlikely(rc != 
SQLITE_OK)) { error_report("Failed to bind char parameter for SQL_GET_ALARM_ID_CHECK_ZERO_HASH."); sqlite3_finalize(res); return alarm_id; } - rc = sqlite3_bind_string_or_null(res, name, 3); + rc = SQLITE3_BIND_STRING_OR_NULL(res, name, 3); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind name parameter for SQL_GET_ALARM_ID_CHECK_ZERO_HASH."); sqlite3_finalize(res); @@ -1828,44 +1792,21 @@ uint32_t sql_get_alarm_id_check_zero_hash(RRDHOST *host, STRING *chart, STRING * if (alarm_id) { sql_update_alarm_with_config_hash(host, alarm_id, health_log_id, config_hash_id); - - rc = sqlite3_prepare_v2(db_meta, SQL_GET_EVENT_ID, -1, &res, 0); - if (rc != SQLITE_OK) { - error_report("Failed to prepare statement when trying to get an event id"); - return alarm_id; - } - - rc = sqlite3_bind_int64(res, 1, (sqlite3_int64) health_log_id); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind host_id parameter for SQL_GET_EVENT_ID."); - sqlite3_finalize(res); - return alarm_id; - } - - rc = sqlite3_bind_int64(res, 2, (sqlite3_int64) alarm_id); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind char parameter for SQL_GET_EVENT_ID."); - sqlite3_finalize(res); - return alarm_id; - } - - while (sqlite3_step_monitored(res) == SQLITE_ROW) { - *next_event_id = (uint32_t) sqlite3_column_int64(res, 0); - } - - rc = sqlite3_finalize(res); - if (unlikely(rc != SQLITE_OK)) - error_report("Failed to finalize the statement while getting an alarm id."); + *next_event_id = get_next_alarm_event_id(health_log_id, alarm_id); } return alarm_id; } -#define SQL_GET_ALARM_ID_FROM_TRANSITION_ID "SELECT hld.alarm_id, hl.host_id, hl.chart_context FROM " \ - "health_log_detail hld, health_log hl WHERE hld.transition_id = @transition_id " \ - "and hld.health_log_id = hl.health_log_id" +#define SQL_GET_ALARM_ID_FROM_TRANSITION_ID \ + "SELECT hld.alarm_id, hl.host_id, hl.chart_context FROM health_log_detail hld, health_log hl " \ + "WHERE hld.transition_id = @transition_id " \ + "AND hld.health_log_id = hl.health_log_id" -bool sql_find_alert_transition(const char *transition, void (*cb)(const char *machine_guid, const char *context, time_t alert_id, void *data), void *data) +bool sql_find_alert_transition( + const char *transition, + void (*cb)(const char *machine_guid, const char *context, time_t alert_id, void *data), + void *data) { static __thread sqlite3_stmt *res = NULL; @@ -1889,7 +1830,7 @@ bool sql_find_alert_transition(const char *transition, void (*cb)(const char *ma rc = sqlite3_bind_blob(res, 1, &transition_uuid, sizeof(transition_uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind transition"); - goto fail; + goto done; } while (sqlite3_step_monitored(res) == SQLITE_ROW) { @@ -1898,7 +1839,7 @@ bool sql_find_alert_transition(const char *transition, void (*cb)(const char *ma cb(machine_guid, (const char *) sqlite3_column_text(res, 2), sqlite3_column_int(res, 0), data); } -fail: +done: rc = sqlite3_reset(res); if (unlikely(rc != SQLITE_OK)) error_report("Failed to reset the statement when trying to find transition"); @@ -1910,20 +1851,24 @@ bool sql_find_alert_transition(const char *transition, void (*cb)(const char *ma #define SQL_POPULATE_TEMP_ALERT_TRANSITION_TABLE "INSERT INTO v_%p (host_id) VALUES (@host_id)" -#define SQL_SEARCH_ALERT_TRANSITION_SELECT "SELECT " \ - "h.host_id, h.alarm_id, h.config_hash_id, h.name, h.chart, h.chart_name, h.family, h.recipient, h.units, h.exec, " \ - "h.chart_context, d.when_key, d.duration, d.non_clear_duration, 
d.flags, d.delay_up_to_timestamp, " \ - "d.info, d.exec_code, d.new_status, d.old_status, d.delay, d.new_value, d.old_value, d.last_repeat, " \ - "d.transition_id, d.global_id, ah.class, ah.type, ah.component, d.exec_run_timestamp" +#define SQL_SEARCH_ALERT_TRANSITION_SELECT \ + "SELECT h.host_id, h.alarm_id, h.config_hash_id, h.name, h.chart, h.chart_name, h.family, h.recipient, h.units, h.exec, " \ + "h.chart_context, d.when_key, d.duration, d.non_clear_duration, d.flags, d.delay_up_to_timestamp, " \ + "d.info, d.exec_code, d.new_status, d.old_status, d.delay, d.new_value, d.old_value, d.last_repeat, " \ + "d.transition_id, d.global_id, ah.class, ah.type, ah.component, d.exec_run_timestamp, d.summary" -#define SQL_SEARCH_ALERT_TRANSITION_COMMON_WHERE \ - "h.config_hash_id = ah.hash_id AND h.health_log_id = d.health_log_id" +#define SQL_SEARCH_ALERT_TRANSITION_COMMON_WHERE "h.config_hash_id = ah.hash_id AND h.health_log_id = d.health_log_id" -#define SQL_SEARCH_ALERT_TRANSITION SQL_SEARCH_ALERT_TRANSITION_SELECT " FROM health_log h, health_log_detail d, v_%p t, alert_hash ah " \ - " WHERE h.host_id = t.host_id AND " SQL_SEARCH_ALERT_TRANSITION_COMMON_WHERE " AND ( d.new_status > 2 OR d.old_status > 2 ) AND d.global_id BETWEEN @after AND @before " +#define SQL_SEARCH_ALERT_TRANSITION \ + SQL_SEARCH_ALERT_TRANSITION_SELECT \ + " FROM health_log h, health_log_detail d, v_%p t, alert_hash ah " \ + " WHERE h.host_id = t.host_id AND " SQL_SEARCH_ALERT_TRANSITION_COMMON_WHERE \ + " AND ( d.new_status > 2 OR d.old_status > 2 ) AND d.global_id BETWEEN @after AND @before " -#define SQL_SEARCH_ALERT_TRANSITION_DIRECT SQL_SEARCH_ALERT_TRANSITION_SELECT " FROM health_log h, health_log_detail d, alert_hash ah " \ - " WHERE " SQL_SEARCH_ALERT_TRANSITION_COMMON_WHERE " AND transition_id = @transition " +#define SQL_SEARCH_ALERT_TRANSITION_DIRECT \ + SQL_SEARCH_ALERT_TRANSITION_SELECT " FROM health_log h, health_log_detail d, alert_hash ah " \ + " WHERE " SQL_SEARCH_ALERT_TRANSITION_COMMON_WHERE \ + " AND transition_id = @transition " void sql_alert_transitions( DICTIONARY *nodes, @@ -1956,23 +1901,23 @@ void sql_alert_transitions( rc = sqlite3_bind_blob(res, 1, &transition_uuid, sizeof(transition_uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind transition_id parameter"); - goto fail; + goto done; } goto run_query; } - snprintfz(sql, 511, SQL_BUILD_ALERT_TRANSITION, nodes); + snprintfz(sql, sizeof(sql) - 1, SQL_BUILD_ALERT_TRANSITION, nodes); rc = db_execute(db_meta, sql); if (rc) return; - snprintfz(sql, 511, SQL_POPULATE_TEMP_ALERT_TRANSITION_TABLE, nodes); + snprintfz(sql, sizeof(sql) - 1, SQL_POPULATE_TEMP_ALERT_TRANSITION_TABLE, nodes); // Prepare statement to add things rc = sqlite3_prepare_v2(db_meta, sql, -1, &res, 0); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to prepare statement to INSERT into v_%p", nodes); - goto fail_only_drop; + goto done_only_drop; } void *t; @@ -2015,27 +1960,27 @@ void sql_alert_transitions( rc = sqlite3_prepare_v2(db_meta, buffer_tostring(command), -1, &res, 0); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to prepare statement sql_alert_transitions"); - goto fail_only_drop; + goto done_only_drop; } int param = 1; rc = sqlite3_bind_int64(res, param++, (sqlite3_int64)(after * USEC_PER_SEC)); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind after parameter"); - goto fail; + goto done; } rc = sqlite3_bind_int64(res, param++, (sqlite3_int64)(before * USEC_PER_SEC)); if (unlikely(rc != SQLITE_OK)) { 
error_report("Failed to bind before parameter"); - goto fail; + goto done; } if (context) { rc = sqlite3_bind_text(res, param++, context, -1, SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind context parameter"); - goto fail; + goto done; } } @@ -2043,7 +1988,7 @@ void sql_alert_transitions( rc = sqlite3_bind_text(res, param++, alert_name, -1, SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind alert_name parameter"); - goto fail; + goto done; } } @@ -2082,18 +2027,19 @@ run_query:; atd.type = (const char *) sqlite3_column_text(res, 27); atd.component = (const char *) sqlite3_column_text(res, 28); atd.exec_run_timestamp = sqlite3_column_int64(res, 29); + atd.summary = (const char *) sqlite3_column_text(res, 30); cb(&atd, data); } -fail: +done: rc = sqlite3_finalize(res); if (unlikely(rc != SQLITE_OK)) error_report("Failed to finalize statement for sql_alert_transitions"); -fail_only_drop: +done_only_drop: if (likely(!transition)) { - (void)snprintfz(sql, 511, "DROP TABLE IF EXISTS v_%p", nodes); + (void)snprintfz(sql, sizeof(sql) - 1, "DROP TABLE IF EXISTS v_%p", nodes); (void)db_execute(db_meta, sql); buffer_free(command); } @@ -2103,10 +2049,11 @@ run_query:; #define SQL_POPULATE_TEMP_CONFIG_TARGET_TABLE "INSERT INTO c_%p (hash_id) VALUES (@hash_id)" -#define SQL_SEARCH_CONFIG_LIST "SELECT ah.hash_id, alarm, template, on_key, class, component, type, os, hosts, lookup, every, " \ - " units, calc, families, plugin, module, charts, green, red, warn, crit, " \ - " exec, to_key, info, delay, options, repeat, host_labels, p_db_lookup_dimensions, p_db_lookup_method, " \ - " p_db_lookup_options, p_db_lookup_after, p_db_lookup_before, p_update_every, source, chart_labels " \ +#define SQL_SEARCH_CONFIG_LIST \ + "SELECT ah.hash_id, alarm, template, on_key, class, component, type, os, hosts, lookup, every, " \ + " units, calc, families, plugin, module, charts, green, red, warn, crit, " \ + " exec, to_key, info, delay, options, repeat, host_labels, p_db_lookup_dimensions, p_db_lookup_method, " \ + " p_db_lookup_options, p_db_lookup_after, p_db_lookup_before, p_update_every, source, chart_labels, summary " \ " FROM alert_hash ah, c_%p t where ah.hash_id = t.hash_id" int sql_get_alert_configuration( @@ -2124,12 +2071,12 @@ int sql_get_alert_configuration( if (unlikely(!configs)) return added; - snprintfz(sql, 511, SQL_BUILD_CONFIG_TARGET_LIST, configs); + snprintfz(sql, sizeof(sql) - 1, SQL_BUILD_CONFIG_TARGET_LIST, configs); rc = db_execute(db_meta, sql); if (rc) return added; - snprintfz(sql, 511, SQL_POPULATE_TEMP_CONFIG_TARGET_TABLE, configs); + snprintfz(sql, sizeof(sql) - 1, SQL_POPULATE_TEMP_CONFIG_TARGET_TABLE, configs); // Prepare statement to add things rc = sqlite3_prepare_v2(db_meta, sql, -1, &res, 0); @@ -2215,6 +2162,7 @@ int sql_get_alert_configuration( acd.value.update_every = (int32_t) sqlite3_column_int(res, param++); acd.source = (const char *) sqlite3_column_text(res, param++); acd.selectors.chart_labels = (const char *) sqlite3_column_text(res, param++); + acd.summary = (const char *) sqlite3_column_text(res, param++); cb(&acd, data); added++; @@ -2225,74 +2173,8 @@ int sql_get_alert_configuration( error_report("Failed to finalize statement for sql_get_alert_configuration"); fail_only_drop: - (void)snprintfz(sql, 511, "DROP TABLE IF EXISTS c_%p", configs); + (void)snprintfz(sql, sizeof(sql) - 1, "DROP TABLE IF EXISTS c_%p", configs); (void)db_execute(db_meta, sql); buffer_free(command); return added; } - -#define 
SQL_FETCH_CHART_NAME "SELECT chart_name FROM health_log where host_id = @host_id LIMIT 1;" -bool is_chart_name_populated(uuid_t *host_uuid) -{ - sqlite3_stmt *res = NULL; - int rc; - - bool status = true; - - rc = sqlite3_prepare_v2(db_meta, SQL_FETCH_CHART_NAME, -1, &res, 0); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to prepare statement to check health_log chart_name"); - return true; - } - - rc = sqlite3_bind_blob(res, 1, host_uuid, sizeof(*host_uuid), SQLITE_STATIC); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind host_id for health_log chart_name check"); - goto fail; - } - - rc = sqlite3_step_monitored(res); - if (likely(rc == SQLITE_ROW)) - status = sqlite3_column_type(res, 0) != SQLITE_NULL; -fail: - - rc = sqlite3_finalize(res); - if (unlikely(rc != SQLITE_OK)) - error_report("Failed to finalize the prepared statement for health_log chart_name check"); - - return status; -} - -#define SQL_POPULATE_CHART_NAME " UPDATE health_log SET chart_name = upd.chart_name FROM " \ - "(SELECT c.type || '.' || IFNULL(c.name, c.id) AS chart_name, hl.host_id, hl.health_log_id FROM " \ - "chart c, health_log hl WHERE (c.type || '.' || c.id) = hl.chart AND c.host_id = hl.host_id " \ - "AND hl.host_id = @host_id) AS upd WHERE health_log.host_id = upd.host_id " \ - "AND health_log.health_log_id = upd.health_log_id" - -void chart_name_populate(uuid_t *host_uuid) -{ - sqlite3_stmt *res = NULL; - int rc; - - rc = sqlite3_prepare_v2(db_meta, SQL_POPULATE_CHART_NAME, -1, &res, 0); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to prepare statement to update health_log chart_name"); - return; - } - - rc = sqlite3_bind_blob(res, 1, host_uuid, sizeof(*host_uuid), SQLITE_STATIC); - if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind host_id for health_log chart_name update"); - goto fail; - } - - rc = execute_insert(res); - if (unlikely(rc != SQLITE_DONE)) - error_report("Failed to update chart name in health_log, rc = %d", rc); - -fail: - - rc = sqlite3_finalize(res); - if (unlikely(rc != SQLITE_OK)) - error_report("Failed to finalize the prepared statement for health_log chart_name update"); -} diff --git a/database/sqlite/sqlite_health.h b/database/sqlite/sqlite_health.h index 3aebb94b7cc584..5549b75258c76a 100644 --- a/database/sqlite/sqlite_health.h +++ b/database/sqlite/sqlite_health.h @@ -7,16 +7,13 @@ struct sql_alert_transition_data; struct sql_alert_config_data; -extern sqlite3 *db_meta; void sql_health_alarm_log_load(RRDHOST *host); -void sql_health_alarm_log_update(RRDHOST *host, ALARM_ENTRY *ae); -void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae); void sql_health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae); -void sql_health_alarm_log_cleanup(RRDHOST *host); +void sql_health_alarm_log_cleanup(RRDHOST *host, bool claimed); int alert_hash_and_store_config(uuid_t hash_id, struct alert_config *cfg, int store_hash); void sql_aclk_alert_clean_dead_entries(RRDHOST *host); int sql_health_get_last_executed_event(RRDHOST *host, ALARM_ENTRY *ae, RRDCALC_STATUS *last_executed_status); -void sql_health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after, char *chart); +void sql_health_alarm_log2json(RRDHOST *host, BUFFER *wb, time_t after, const char *chart); int health_migrate_old_health_log_table(char *table); uint32_t sql_get_alarm_id(RRDHOST *host, STRING *chart, STRING *name, uint32_t *next_event_id, uuid_t *config_hash_id); uint32_t sql_get_alarm_id_check_zero_hash(RRDHOST *host, STRING *chart, STRING *name, uint32_t 
*next_event_id, uuid_t *config_hash_id); @@ -38,6 +35,4 @@ int sql_get_alert_configuration( bool debug __maybe_unused); bool sql_find_alert_transition(const char *transition, void (*cb)(const char *machine_guid, const char *context, time_t alert_id, void *data), void *data); -bool is_chart_name_populated(uuid_t *host_uuid); -void chart_name_populate(uuid_t *host_uuid); #endif //NETDATA_SQLITE_HEALTH_H diff --git a/database/sqlite/sqlite_metadata.c b/database/sqlite/sqlite_metadata.c index 697772bf514c8a..76bfe391ea989a 100644 --- a/database/sqlite/sqlite_metadata.c +++ b/database/sqlite/sqlite_metadata.c @@ -4,59 +4,75 @@ // SQL statements -#define SQL_STORE_CLAIM_ID "INSERT INTO node_instance " \ - "(host_id, claim_id, date_created) VALUES (@host_id, @claim_id, unixepoch()) " \ - "ON CONFLICT(host_id) DO UPDATE SET claim_id = excluded.claim_id;" +#define SQL_STORE_CLAIM_ID \ + "INSERT INTO node_instance " \ + "(host_id, claim_id, date_created) VALUES (@host_id, @claim_id, UNIXEPOCH()) " \ + "ON CONFLICT(host_id) DO UPDATE SET claim_id = excluded.claim_id" -#define SQL_DELETE_HOST_LABELS "DELETE FROM host_label WHERE host_id = @uuid;" +#define SQL_DELETE_HOST_LABELS "DELETE FROM host_label WHERE host_id = @uuid" #define STORE_HOST_LABEL \ - "INSERT OR REPLACE INTO host_label (host_id, source_type, label_key, label_value, date_created) VALUES " + "INSERT INTO host_label (host_id, source_type, label_key, label_value, date_created) VALUES " #define STORE_CHART_LABEL \ - "INSERT OR REPLACE INTO chart_label (chart_id, source_type, label_key, label_value, date_created) VALUES " + "INSERT INTO chart_label (chart_id, source_type, label_key, label_value, date_created) VALUES " #define STORE_HOST_OR_CHART_LABEL_VALUE "(u2h('%s'), %d,'%s','%s', unixepoch())" -#define DELETE_DIMENSION_UUID "DELETE FROM dimension WHERE dim_id = @uuid;" - -#define SQL_STORE_HOST_INFO "INSERT OR REPLACE INTO host " \ - "(host_id, hostname, registry_hostname, update_every, os, timezone," \ - "tags, hops, memory_mode, abbrev_timezone, utc_offset, program_name, program_version," \ - "entries, health_enabled) " \ - "values (@host_id, @hostname, @registry_hostname, @update_every, @os, @timezone, @tags, @hops, @memory_mode, " \ - "@abbrev_timezone, @utc_offset, @program_name, @program_version, " \ - "@entries, @health_enabled);" - -#define SQL_STORE_CHART "insert or replace into chart (chart_id, host_id, type, id, " \ - "name, family, context, title, unit, plugin, module, priority, update_every , chart_type , memory_mode , " \ - "history_entries) values (?1,?2,?3,?4,?5,?6,?7,?8,?9,?10,?11,?12,?13,?14,?15,?16);" - -#define SQL_STORE_DIMENSION "INSERT OR REPLACE INTO dimension (dim_id, chart_id, id, name, multiplier, divisor , algorithm, options) " \ - "VALUES (@dim_id, @chart_id, @id, @name, @multiplier, @divisor, @algorithm, @options);" +#define DELETE_DIMENSION_UUID "DELETE FROM dimension WHERE dim_id = @uuid" + +#define SQL_STORE_HOST_INFO \ + "INSERT OR REPLACE INTO host (host_id, hostname, registry_hostname, update_every, os, timezone, tags, hops, " \ + "memory_mode, abbrev_timezone, utc_offset, program_name, program_version, entries, health_enabled, last_connected) " \ + "VALUES (@host_id, @hostname, @registry_hostname, @update_every, @os, @timezone, @tags, @hops, " \ + "@memory_mode, @abbrev_tz, @utc_offset, @prog_name, @prog_version, @entries, @health_enabled, @last_connected)" + +#define SQL_STORE_CHART \ + "INSERT INTO chart (chart_id, host_id, type, id, name, family, context, title, unit, plugin, module, priority, " 
\ + "update_every, chart_type, memory_mode, history_entries) " \ + "values (@chart_id, @host_id, @type, @id, @name, @family, @context, @title, @unit, @plugin, @module, @priority, " \ + "@update_every, @chart_type, @memory_mode, @history_entries) " \ + "ON CONFLICT(chart_id) DO UPDATE SET type=excluded.type, id=excluded.id, name=excluded.name, " \ + "family=excluded.family, context=excluded.context, title=excluded.title, unit=excluded.unit, " \ + "plugin=excluded.plugin, module=excluded.module, priority=excluded.priority, update_every=excluded.update_every, " \ + "chart_type=excluded.chart_type, memory_mode = excluded.memory_mode, history_entries = excluded.history_entries" + +#define SQL_STORE_DIMENSION \ + "INSERT INTO dimension (dim_id, chart_id, id, name, multiplier, divisor , algorithm, options) " \ + "VALUES (@dim_id, @chart_id, @id, @name, @multiplier, @divisor, @algorithm, @options) " \ + "ON CONFLICT(dim_id) DO UPDATE SET id=excluded.id, name=excluded.name, multiplier=excluded.multiplier, " \ + "divisor=excluded.divisor, algorithm=excluded.algorithm, options=excluded.options" #define SELECT_DIMENSION_LIST "SELECT dim_id, rowid FROM dimension WHERE rowid > @row_id" +#define SELECT_CHART_LIST "SELECT chart_id, rowid FROM chart WHERE rowid > @row_id" +#define SELECT_CHART_LABEL_LIST "SELECT chart_id, rowid FROM chart_label WHERE rowid > @row_id" -#define SQL_STORE_HOST_SYSTEM_INFO_VALUES "INSERT OR REPLACE INTO host_info (host_id, system_key, system_value, date_created) VALUES " \ - "(@uuid, @name, @value, unixepoch())" +#define SQL_STORE_HOST_SYSTEM_INFO_VALUES \ + "INSERT OR REPLACE INTO host_info (host_id, system_key, system_value, date_created) VALUES " \ + "(@uuid, @name, @value, UNIXEPOCH())" #define MIGRATE_LOCALHOST_TO_NEW_MACHINE_GUID \ - "UPDATE chart SET host_id = @host_id WHERE host_id in (SELECT host_id FROM host where host_id <> @host_id and hops = 0);" -#define DELETE_NON_EXISTING_LOCALHOST "DELETE FROM host WHERE hops = 0 AND host_id <> @host_id;" -#define DELETE_MISSING_NODE_INSTANCES "DELETE FROM node_instance WHERE host_id NOT IN (SELECT host_id FROM host);" + "UPDATE chart SET host_id = @host_id WHERE host_id in (SELECT host_id FROM host where host_id <> @host_id and hops = 0)" +#define DELETE_NON_EXISTING_LOCALHOST "DELETE FROM host WHERE hops = 0 AND host_id <> @host_id" +#define DELETE_MISSING_NODE_INSTANCES "DELETE FROM node_instance WHERE host_id NOT IN (SELECT host_id FROM host)" -#define METADATA_CMD_Q_MAX_SIZE (1024) // Max queue size; callers will block until there is room #define METADATA_MAINTENANCE_FIRST_CHECK (1800) // Maintenance first run after agent startup in seconds -#define METADATA_MAINTENANCE_RETRY (60) // Retry run if already running or last run did actual work -#define METADATA_MAINTENANCE_INTERVAL (3600) // Repeat maintenance after latest successful +#define METADATA_MAINTENANCE_REPEAT (60) // Repeat if last run for dimensions, charts, labels needs more work +#define METADATA_HEALTH_LOG_INTERVAL (3600) // Repeat maintenance for health +#define METADATA_DIM_CHECK_INTERVAL (3600) // Repeat maintenance for dimensions +#define METADATA_CHART_CHECK_INTERVAL (3600) // Repeat maintenance for charts +#define METADATA_LABEL_CHECK_INTERVAL (3600) // Repeat maintenance for labels +#define METADATA_RUNTIME_THRESHOLD (5) // Run time threshold for cleanup task #define METADATA_HOST_CHECK_FIRST_CHECK (5) // First check for pending metadata #define METADATA_HOST_CHECK_INTERVAL (30) // Repeat check for pending metadata #define METADATA_HOST_CHECK_IMMEDIATE 
(5) // Repeat immediate run because we have more metadata to write - #define MAX_METADATA_CLEANUP (500) // Maximum metadata write operations (e.g deletes before retrying) #define METADATA_MAX_BATCH_SIZE (512) // Maximum commands to execute before running the event loop +#define DATABASE_FREE_PAGES_THRESHOLD_PC (5) // Percentage of free pages to trigger vacuum +#define DATABASE_FREE_PAGES_VACUUM_PC (10) // Percentage of free pages to vacuum + enum metadata_opcode { METADATA_DATABASE_NOOP = 0, METADATA_DATABASE_TIMER, @@ -65,10 +81,10 @@ enum metadata_opcode { METADATA_ADD_HOST_INFO, METADATA_SCAN_HOSTS, METADATA_LOAD_HOST_CONTEXT, + METADATA_DELETE_HOST_CHART_LABELS, METADATA_MAINTENANCE, METADATA_SYNC_SHUTDOWN, METADATA_UNITTEST, - METADATA_ML_LOAD_MODELS, // leave this last // we need it to check for worker utilization METADATA_MAX_ENUMERATIONS_DEFINED @@ -79,42 +95,34 @@ struct metadata_cmd { enum metadata_opcode opcode; struct completion *completion; const void *param[MAX_PARAM_LIST]; -}; - -struct metadata_database_cmdqueue { - unsigned head, tail; - struct metadata_cmd cmd_array[METADATA_CMD_Q_MAX_SIZE]; + struct metadata_cmd *prev, *next; }; typedef enum { - METADATA_FLAG_CLEANUP = (1 << 0), // Cleanup is running - METADATA_FLAG_SCANNING_HOSTS = (1 << 1), // Scanning of hosts in worker thread - METADATA_FLAG_SHUTDOWN = (1 << 2), // Shutting down + METADATA_FLAG_PROCESSING = (1 << 0), // store or cleanup + METADATA_FLAG_SHUTDOWN = (1 << 1), // Shutting down } METADATA_FLAG; -#define METADATA_WORKER_BUSY (METADATA_FLAG_CLEANUP | METADATA_FLAG_SCANNING_HOSTS) - struct metadata_wc { uv_thread_t thread; uv_loop_t *loop; uv_async_t async; uv_timer_t timer_req; - time_t check_metadata_after; - time_t check_hosts_after; - volatile unsigned queue_size; + time_t metadata_check_after; METADATA_FLAG flags; - uint64_t row_id; - struct completion init_complete; + struct completion start_stop_complete; + struct completion *scan_complete; /* FIFO command queue */ - uv_mutex_t cmd_mutex; - uv_cond_t cmd_cond; - struct metadata_database_cmdqueue cmd_queue; + SPINLOCK cmd_queue_lock; + struct metadata_cmd *cmd_base; }; #define metadata_flag_check(target_flags, flag) (__atomic_load_n(&((target_flags)->flags), __ATOMIC_SEQ_CST) & (flag)) #define metadata_flag_set(target_flags, flag) __atomic_or_fetch(&((target_flags)->flags), (flag), __ATOMIC_SEQ_CST) #define metadata_flag_clear(target_flags, flag) __atomic_and_fetch(&((target_flags)->flags), ~(flag), __ATOMIC_SEQ_CST) +struct metadata_wc metasync_worker = {.loop = NULL}; + // // For unittest // @@ -134,13 +142,40 @@ struct query_build { char uuid_str[UUID_STR_LEN]; }; +#define SQL_DELETE_CHART_LABELS_BY_HOST \ + "DELETE FROM chart_label WHERE chart_id in (SELECT chart_id FROM chart WHERE host_id = @host_id)" + +static void delete_host_chart_labels(uuid_t *host_uuid) +{ + sqlite3_stmt *res = NULL; + + int rc = sqlite3_prepare_v2(db_meta, SQL_DELETE_CHART_LABELS_BY_HOST, -1, &res, 0); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement to delete chart labels by host"); + return; + } + + rc = sqlite3_bind_blob(res, 1, host_uuid, sizeof(*host_uuid), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind host_id parameter to host chart labels"); + goto failed; + } + rc = sqlite3_step_monitored(res); + if (unlikely(rc != SQLITE_DONE)) + error_report("Failed to execute command to remove host chart labels"); + +failed: + if (unlikely(sqlite3_finalize(res) != SQLITE_OK)) + error_report("Failed to finalize 
statement to remove host chart labels"); +} + static int host_label_store_to_sql_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) { struct query_build *lb = data; if (unlikely(!lb->count)) buffer_sprintf(lb->sql, STORE_HOST_LABEL); else buffer_strcat(lb->sql, ", "); - buffer_sprintf(lb->sql, STORE_HOST_OR_CHART_LABEL_VALUE, lb->uuid_str, (int)ls & ~(RRDLABEL_FLAG_INTERNAL), name, value); + buffer_sprintf(lb->sql, STORE_HOST_OR_CHART_LABEL_VALUE, lb->uuid_str, (int) (ls & ~(RRDLABEL_FLAG_INTERNAL)), name, value); lb->count++; return 1; } @@ -151,13 +186,13 @@ static int chart_label_store_to_sql_callback(const char *name, const char *value buffer_sprintf(lb->sql, STORE_CHART_LABEL); else buffer_strcat(lb->sql, ", "); - buffer_sprintf(lb->sql, STORE_HOST_OR_CHART_LABEL_VALUE, lb->uuid_str, ls, name, value); + buffer_sprintf(lb->sql, STORE_HOST_OR_CHART_LABEL_VALUE, lb->uuid_str, (int) (ls & ~(RRDLABEL_FLAG_INTERNAL)), name, value); lb->count++; return 1; } -#define SQL_DELETE_CHART_LABEL "DELETE FROM chart_label WHERE chart_id = @chart_id;" -#define SQL_DELETE_CHART_LABEL_HISTORY "DELETE FROM chart_label WHERE date_created < %ld AND chart_id = @chart_id;" +#define SQL_DELETE_CHART_LABEL "DELETE FROM chart_label WHERE chart_id = @chart_id" +#define SQL_DELETE_CHART_LABEL_HISTORY "DELETE FROM chart_label WHERE date_created < %ld AND chart_id = @chart_id" static void clean_old_chart_labels(RRDSET *st) { @@ -165,9 +200,9 @@ static void clean_old_chart_labels(RRDSET *st) time_t first_time_s = rrdset_first_entry_s(st); if (unlikely(!first_time_s)) - snprintfz(sql, 511,SQL_DELETE_CHART_LABEL); + snprintfz(sql, sizeof(sql) - 1, SQL_DELETE_CHART_LABEL); else - snprintfz(sql, 511,SQL_DELETE_CHART_LABEL_HISTORY, first_time_s); + snprintfz(sql, sizeof(sql) - 1, SQL_DELETE_CHART_LABEL_HISTORY, first_time_s); int rc = exec_statement_with_uuid(sql, &st->chart_uuid); if (unlikely(rc)) @@ -177,7 +212,7 @@ static void clean_old_chart_labels(RRDSET *st) static int check_and_update_chart_labels(RRDSET *st, BUFFER *work_buffer, size_t *query_counter) { size_t old_version = st->rrdlabels_last_saved_version; - size_t new_version = dictionary_version(st->rrdlabels); + size_t new_version = rrdlabels_version(st->rrdlabels); if (new_version == old_version) return 0; @@ -185,6 +220,7 @@ static int check_and_update_chart_labels(RRDSET *st, BUFFER *work_buffer, size_t struct query_build tmp = {.sql = work_buffer, .count = 0}; uuid_unparse_lower(st->chart_uuid, tmp.uuid_str); rrdlabels_walkthrough_read(st->rrdlabels, chart_label_store_to_sql_callback, &tmp); + buffer_strcat(work_buffer, " ON CONFLICT (chart_id, label_key) DO UPDATE SET source_type = excluded.source_type, label_value=excluded.label_value, date_created=UNIXEPOCH()"); int rc = db_execute(db_meta, buffer_tostring(work_buffer)); if (likely(!rc)) { st->rrdlabels_last_saved_version = new_version; @@ -252,7 +288,7 @@ static int store_claim_id(uuid_t *host_id, uuid_t *claim_id) return rc != SQLITE_DONE; } -static void delete_dimension_uuid(uuid_t *dimension_uuid) +static void delete_dimension_uuid(uuid_t *dimension_uuid, sqlite3_stmt **action_res __maybe_unused, bool flag __maybe_unused) { static __thread sqlite3_stmt *res = NULL; int rc; @@ -265,7 +301,7 @@ static void delete_dimension_uuid(uuid_t *dimension_uuid) } } - rc = sqlite3_bind_blob(res, 1, dimension_uuid, sizeof(*dimension_uuid), SQLITE_STATIC); + rc = sqlite3_bind_blob(res, 1, dimension_uuid, sizeof(*dimension_uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) goto 
skip_execution; @@ -286,13 +322,6 @@ static int store_host_metadata(RRDHOST *host) static __thread sqlite3_stmt *res = NULL; int rc, param = 0; - if (unlikely(!db_meta)) { - if (default_rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) - return 0; - error_report("Database has not been initialized"); - return 1; - } - if (unlikely((!res))) { rc = prepare_statement(db_meta, SQL_STORE_HOST_INFO, &res); if (unlikely(rc != SQLITE_OK)) { @@ -361,6 +390,10 @@ static int store_host_metadata(RRDHOST *host) if (unlikely(rc != SQLITE_OK)) goto bind_fail; + rc = sqlite3_bind_int64(res, ++param, (sqlite3_int64) host->last_connected); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + int store_rc = sqlite3_step_monitored(res); if (unlikely(store_rc != SQLITE_DONE)) error_report("Failed to store host %s, rc = %d", rrdhost_hostname(host), rc); @@ -474,13 +507,6 @@ static int store_chart_metadata(RRDSET *st) static __thread sqlite3_stmt *res = NULL; int rc, param = 0, store_rc = 0; - if (unlikely(!db_meta)) { - if (default_rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) - return 0; - error_report("Database has not been initialized"); - return 1; - } - if (unlikely(!res)) { rc = prepare_statement(db_meta, SQL_STORE_CHART, &res); if (unlikely(rc != SQLITE_OK)) { @@ -583,13 +609,6 @@ static int store_dimension_metadata(RRDDIM *rd) static __thread sqlite3_stmt *res = NULL; int rc, param = 0; - if (unlikely(!db_meta)) { - if (default_rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) - return 0; - error_report("Database has not been initialized"); - return 1; - } - if (unlikely(!res)) { rc = prepare_statement(db_meta, SQL_STORE_DIMENSION, &res); if (unlikely(rc != SQLITE_OK)) { @@ -650,7 +669,7 @@ static int store_dimension_metadata(RRDDIM *rd) return 1; } -static bool dimension_can_be_deleted(uuid_t *dim_uuid __maybe_unused) +static bool dimension_can_be_deleted(uuid_t *dim_uuid __maybe_unused, sqlite3_stmt **res __maybe_unused, bool flag __maybe_unused) { #ifdef ENABLE_DBENGINE if(dbengine_enabled) { @@ -675,8 +694,173 @@ static bool dimension_can_be_deleted(uuid_t *dim_uuid __maybe_unused) #endif } +int get_pragma_value(sqlite3 *database, const char *sql) +{ + sqlite3_stmt *res = NULL; + int rc = sqlite3_prepare_v2(database, sql, -1, &res, 0); + if (unlikely(rc != SQLITE_OK)) + return -1; + + int result = -1; + rc = sqlite3_step_monitored(res); + if (likely(rc == SQLITE_ROW)) + result = sqlite3_column_int(res, 0); + + rc = sqlite3_finalize(res); + (void) rc; + + return result; +} + + +int get_free_page_count(sqlite3 *database) +{ + return get_pragma_value(database, "PRAGMA freelist_count"); +} + +int get_database_page_count(sqlite3 *database) +{ + return get_pragma_value(database, "PRAGMA page_count"); +} + +static bool run_cleanup_loop( + sqlite3_stmt *res, + struct metadata_wc *wc, + bool (*check_cb)(uuid_t *, sqlite3_stmt **, bool), + void (*action_cb)(uuid_t *, sqlite3_stmt **, bool), + uint32_t *total_checked, + uint32_t *total_deleted, + uint64_t *row_id, + sqlite3_stmt **check_stmt, + sqlite3_stmt **action_stmt, + bool check_flag, + bool action_flag) +{ + if (unlikely(metadata_flag_check(wc, METADATA_FLAG_SHUTDOWN))) + return true; + + int rc = sqlite3_bind_int64(res, 1, (sqlite3_int64) *row_id); + if (unlikely(rc != SQLITE_OK)) + return true; + + time_t start_running = now_monotonic_sec(); + bool time_expired = false; + while (!time_expired && sqlite3_step_monitored(res) == SQLITE_ROW && + (*total_deleted < MAX_METADATA_CLEANUP && *total_checked < MAX_METADATA_CLEANUP)) { + if (unlikely(metadata_flag_check(wc, 
METADATA_FLAG_SHUTDOWN))) + break; + + *row_id = sqlite3_column_int64(res, 1); + rc = check_cb((uuid_t *)sqlite3_column_blob(res, 0), check_stmt, check_flag); + + if (rc == true) { + action_cb((uuid_t *)sqlite3_column_blob(res, 0), action_stmt, action_flag); + (*total_deleted)++; + } + + (*total_checked)++; + time_expired = ((now_monotonic_sec() - start_running) > METADATA_RUNTIME_THRESHOLD); + } + return time_expired || (*total_checked == MAX_METADATA_CLEANUP) || (*total_deleted == MAX_METADATA_CLEANUP); +} + + +#define SQL_CHECK_CHART_EXISTENCE_IN_DIMENSION "SELECT count(1) FROM dimension WHERE chart_id = @chart_id" +#define SQL_CHECK_CHART_EXISTENCE_IN_CHART "SELECT count(1) FROM chart WHERE chart_id = @chart_id" + +static bool chart_can_be_deleted(uuid_t *chart_uuid, sqlite3_stmt **check_res, bool check_in_dimension) +{ + int rc, result = 1; + sqlite3_stmt *res = check_res ? *check_res : NULL; + + if (!res) { + if (check_in_dimension) + rc = sqlite3_prepare_v2(db_meta, SQL_CHECK_CHART_EXISTENCE_IN_DIMENSION, -1, &res, 0); + else + rc = sqlite3_prepare_v2(db_meta, SQL_CHECK_CHART_EXISTENCE_IN_CHART, -1, &res, 0); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement to check for chart existence, rc = %d", rc); + return 0; + } + if (check_res) + *check_res = res; + } + + rc = sqlite3_bind_blob(res, 1, chart_uuid, sizeof(*chart_uuid), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind chart uuid parameter, rc = %d", rc); + goto skip; + } + + rc = sqlite3_step_monitored(res); + if (likely(rc == SQLITE_ROW)) + result = sqlite3_column_int(res, 0); + +skip: + if (check_res) + rc = sqlite3_reset(res); + else + rc = sqlite3_finalize(res); + + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to %s statement that checks chart uuid existence rc = %d", check_res ? "reset" : "finalize", rc); + return result == 0; +} + +#define SQL_DELETE_CHART_BY_UUID "DELETE FROM chart WHERE chart_id = @chart_id" +#define SQL_DELETE_CHART_LABEL_BY_UUID "DELETE FROM chart_label WHERE chart_id = @chart_id" + +static void delete_chart_uuid(uuid_t *chart_uuid, sqlite3_stmt **action_res, bool label_only) +{ + int rc; + sqlite3_stmt *res = action_res ? *action_res : NULL; + + if (!res) { + if (label_only) + rc = sqlite3_prepare_v2(db_meta, SQL_DELETE_CHART_LABEL_BY_UUID, -1, &res, 0); + else + rc = sqlite3_prepare_v2(db_meta, SQL_DELETE_CHART_BY_UUID, -1, &res, 0); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement to check for chart existence, rc = %d", rc); + return; + } + if (action_res) + *action_res = res; + } + + rc = sqlite3_bind_blob(res, 1, chart_uuid, sizeof(*chart_uuid), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind chart uuid parameter, rc = %d", rc); + goto skip; + } + + rc = sqlite3_step_monitored(res); + if (unlikely(rc != SQLITE_DONE)) + error_report("Failed to delete a chart uuid from the %s table, rc = %d", label_only ? "labels" : "chart", rc); + +skip: + if (action_res) + rc = sqlite3_reset(res); + else + rc = sqlite3_finalize(res); + + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to %s statement that deletes a chart uuid rc = %d", action_res ? 
"reset" : "finalize", rc); +} + static void check_dimension_metadata(struct metadata_wc *wc) { + static time_t next_execution_t = 0; + static uint64_t last_row_id = 0; + + time_t now = now_realtime_sec(); + + if (!next_execution_t) + next_execution_t = now + METADATA_MAINTENANCE_FIRST_CHECK; + + if (next_execution_t && next_execution_t > now) + return; + int rc; sqlite3_stmt *res = NULL; @@ -686,161 +870,267 @@ static void check_dimension_metadata(struct metadata_wc *wc) return; } - rc = sqlite3_bind_int64(res, 1, (sqlite3_int64) wc->row_id); + uint32_t total_checked = 0; + uint32_t total_deleted = 0; + + internal_error(true, "METADATA: Checking dimensions starting after row %"PRIu64, last_row_id); + + bool more_to_do = run_cleanup_loop( + res, + wc, + dimension_can_be_deleted, + delete_dimension_uuid, + &total_checked, + &total_deleted, + &last_row_id, + NULL, + NULL, + false, + false); + + now = now_realtime_sec(); + if (more_to_do) + next_execution_t = now + METADATA_MAINTENANCE_REPEAT; + else { + last_row_id = 0; + next_execution_t = now + METADATA_DIM_CHECK_INTERVAL; + } + + internal_error(true, + "METADATA: Dimensions checked %u, deleted %u. Checks will %s in %lld seconds", + total_checked, + total_deleted, + last_row_id ? "resume" : "restart", + (long long)(next_execution_t - now)); + + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize the prepared statement to check dimensions"); +} + +static void check_chart_metadata(struct metadata_wc *wc) +{ + static time_t next_execution_t = 0; + static uint64_t last_row_id = 0; + + time_t now = now_realtime_sec(); + + if (!next_execution_t) + next_execution_t = now + METADATA_MAINTENANCE_FIRST_CHECK; + + if (next_execution_t && next_execution_t > now) + return; + + sqlite3_stmt *res = NULL; + + int rc = sqlite3_prepare_v2(db_meta, SELECT_CHART_LIST, -1, &res, 0); if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to row parameter"); - goto skip_run; + error_report("Failed to prepare statement to fetch charts"); + return; } uint32_t total_checked = 0; - uint32_t total_deleted= 0; - uint64_t last_row_id = wc->row_id; + uint32_t total_deleted = 0; + + internal_error(true, "METADATA: Checking charts starting after row %"PRIu64, last_row_id); + + sqlite3_stmt *check_res = NULL; + sqlite3_stmt *action_res = NULL; + bool more_to_do = run_cleanup_loop( + res, + wc, + chart_can_be_deleted, + delete_chart_uuid, + &total_checked, + &total_deleted, + &last_row_id, + &check_res, + &action_res, + true, + false); + + if (check_res) + sqlite3_finalize(check_res); + + if (action_res) + sqlite3_finalize(action_res); + + now = now_realtime_sec(); + if (more_to_do) + next_execution_t = now + METADATA_MAINTENANCE_REPEAT; + else { + last_row_id = 0; + next_execution_t = now + METADATA_CHART_CHECK_INTERVAL; + } - netdata_log_info("METADATA: Checking dimensions starting after row %"PRIu64, wc->row_id); + internal_error(true, + "METADATA: Charts checked %u, deleted %u. Checks will %s in %lld seconds", + total_checked, + total_deleted, + last_row_id ? 
"resume" : "restart", + (long long)(next_execution_t - now)); - while (sqlite3_step_monitored(res) == SQLITE_ROW && total_deleted < MAX_METADATA_CLEANUP) { - if (unlikely(metadata_flag_check(wc, METADATA_FLAG_SHUTDOWN))) - break; + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize the prepared statement when reading charts"); +} + +static void check_label_metadata(struct metadata_wc *wc) +{ + static time_t next_execution_t = 0; + static uint64_t last_row_id = 0; - last_row_id = sqlite3_column_int64(res, 1); - rc = dimension_can_be_deleted((uuid_t *)sqlite3_column_blob(res, 0)); - if (rc == true) { - delete_dimension_uuid((uuid_t *)sqlite3_column_blob(res, 0)); - total_deleted++; - } - total_checked++; - } - wc->row_id = last_row_id; time_t now = now_realtime_sec(); - if (total_deleted > 0) { - wc->check_metadata_after = now + METADATA_MAINTENANCE_RETRY; - } else - wc->row_id = 0; - netdata_log_info("METADATA: Checked %u, deleted %u -- will resume after row %"PRIu64" in %lld seconds", total_checked, total_deleted, wc->row_id, - (long long)(wc->check_metadata_after - now)); - -skip_run: + + if (!next_execution_t) + next_execution_t = now + METADATA_MAINTENANCE_FIRST_CHECK; + + if (next_execution_t && next_execution_t > now) + return; + + int rc; + sqlite3_stmt *res = NULL; + + rc = sqlite3_prepare_v2(db_meta, SELECT_CHART_LABEL_LIST, -1, &res, 0); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement to fetch charts"); + return; + } + + uint32_t total_checked = 0; + uint32_t total_deleted = 0; + + internal_error(true,"METADATA: Checking charts labels starting after row %"PRIu64, last_row_id); + + sqlite3_stmt *check_res = NULL; + sqlite3_stmt *action_res = NULL; + + bool more_to_do = run_cleanup_loop( + res, + wc, + chart_can_be_deleted, + delete_chart_uuid, + &total_checked, + &total_deleted, + &last_row_id, + &check_res, + &action_res, + false, + true); + + if (check_res) + sqlite3_finalize(check_res); + + if (action_res) + sqlite3_finalize(action_res); + + now = now_realtime_sec(); + if (more_to_do) + next_execution_t = now + METADATA_MAINTENANCE_REPEAT; + else { + last_row_id = 0; + next_execution_t = now + METADATA_LABEL_CHECK_INTERVAL; + } + + internal_error(true, + "METADATA: Chart labels checked %u, deleted %u. Checks will %s in %lld seconds", + total_checked, + total_deleted, + last_row_id ? 
"resume" : "restart", + (long long)(next_execution_t - now)); + rc = sqlite3_finalize(res); if (unlikely(rc != SQLITE_OK)) - error_report("Failed to finalize the prepared statement when reading dimensions"); + error_report("Failed to finalize the prepared statement when checking charts"); } -static void cleanup_health_log(void) + +static void cleanup_health_log(struct metadata_wc *wc) { + static time_t next_execution_t = 0; + + time_t now = now_realtime_sec(); + + if (!next_execution_t) + next_execution_t = now + METADATA_MAINTENANCE_FIRST_CHECK; + + if (next_execution_t && next_execution_t > now) + return; + + next_execution_t = now + METADATA_HEALTH_LOG_INTERVAL; + RRDHOST *host; - dfe_start_reentrant(rrdhost_root_index, host) { + + bool is_claimed = claimed(); + dfe_start_reentrant(rrdhost_root_index, host){ if (rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED)) continue; - sql_health_alarm_log_cleanup(host); + sql_health_alarm_log_cleanup(host, is_claimed); + if (unlikely(metadata_flag_check(wc, METADATA_FLAG_SHUTDOWN))) + break; } dfe_done(host); + + if (unlikely(metadata_flag_check(wc, METADATA_FLAG_SHUTDOWN))) + return; + + (void) db_execute(db_meta,"DELETE FROM health_log WHERE host_id NOT IN (SELECT host_id FROM host)"); + (void) db_execute(db_meta,"DELETE FROM health_log_detail WHERE health_log_id NOT IN (SELECT health_log_id FROM health_log)"); } // // EVENT LOOP STARTS HERE // -static void metadata_init_cmd_queue(struct metadata_wc *wc) +static void metadata_free_cmd_queue(struct metadata_wc *wc) { - wc->cmd_queue.head = wc->cmd_queue.tail = 0; - wc->queue_size = 0; - fatal_assert(0 == uv_cond_init(&wc->cmd_cond)); - fatal_assert(0 == uv_mutex_init(&wc->cmd_mutex)); + spinlock_lock(&wc->cmd_queue_lock); + while(wc->cmd_base) { + struct metadata_cmd *t = wc->cmd_base; + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(wc->cmd_base, t, prev, next); + freez(t); + } + spinlock_unlock(&wc->cmd_queue_lock); } -int metadata_enq_cmd_noblock(struct metadata_wc *wc, struct metadata_cmd *cmd) +static void metadata_enq_cmd(struct metadata_wc *wc, struct metadata_cmd *cmd) { - unsigned queue_size; - - /* wait for free space in queue */ - uv_mutex_lock(&wc->cmd_mutex); - if (cmd->opcode == METADATA_SYNC_SHUTDOWN) { metadata_flag_set(wc, METADATA_FLAG_SHUTDOWN); - uv_mutex_unlock(&wc->cmd_mutex); - return 0; + goto wakeup_event_loop; } - if (unlikely((queue_size = wc->queue_size) == METADATA_CMD_Q_MAX_SIZE || - metadata_flag_check(wc, METADATA_FLAG_SHUTDOWN))) { - uv_mutex_unlock(&wc->cmd_mutex); - return 1; - } + if (unlikely(metadata_flag_check(wc, METADATA_FLAG_SHUTDOWN))) + goto wakeup_event_loop; - fatal_assert(queue_size < METADATA_CMD_Q_MAX_SIZE); - /* enqueue command */ - wc->cmd_queue.cmd_array[wc->cmd_queue.tail] = *cmd; - wc->cmd_queue.tail = wc->cmd_queue.tail != METADATA_CMD_Q_MAX_SIZE - 1 ? 
- wc->cmd_queue.tail + 1 : 0; - wc->queue_size = queue_size + 1; - uv_mutex_unlock(&wc->cmd_mutex); - return 0; -} + struct metadata_cmd *t = mallocz(sizeof(*t)); + *t = *cmd; + t->prev = t->next = NULL; -static void metadata_enq_cmd(struct metadata_wc *wc, struct metadata_cmd *cmd) -{ - unsigned queue_size; - - /* wait for free space in queue */ - uv_mutex_lock(&wc->cmd_mutex); - if (unlikely(metadata_flag_check(wc, METADATA_FLAG_SHUTDOWN))) { - uv_mutex_unlock(&wc->cmd_mutex); - (void) uv_async_send(&wc->async); - return; - } + spinlock_lock(&wc->cmd_queue_lock); + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(wc->cmd_base, t, prev, next); + spinlock_unlock(&wc->cmd_queue_lock); - if (cmd->opcode == METADATA_SYNC_SHUTDOWN) { - metadata_flag_set(wc, METADATA_FLAG_SHUTDOWN); - uv_mutex_unlock(&wc->cmd_mutex); - (void) uv_async_send(&wc->async); - return; - } - - while ((queue_size = wc->queue_size) == METADATA_CMD_Q_MAX_SIZE) { - if (unlikely(metadata_flag_check(wc, METADATA_FLAG_SHUTDOWN))) { - uv_mutex_unlock(&wc->cmd_mutex); - return; - } - uv_cond_wait(&wc->cmd_cond, &wc->cmd_mutex); - } - fatal_assert(queue_size < METADATA_CMD_Q_MAX_SIZE); - /* enqueue command */ - wc->cmd_queue.cmd_array[wc->cmd_queue.tail] = *cmd; - wc->cmd_queue.tail = wc->cmd_queue.tail != METADATA_CMD_Q_MAX_SIZE - 1 ? - wc->cmd_queue.tail + 1 : 0; - wc->queue_size = queue_size + 1; - uv_mutex_unlock(&wc->cmd_mutex); - - /* wake up event loop */ +wakeup_event_loop: (void) uv_async_send(&wc->async); } static struct metadata_cmd metadata_deq_cmd(struct metadata_wc *wc) { struct metadata_cmd ret; - unsigned queue_size; - uv_mutex_lock(&wc->cmd_mutex); - queue_size = wc->queue_size; - if (queue_size == 0) { - memset(&ret, 0, sizeof(ret)); + spinlock_lock(&wc->cmd_queue_lock); + if(wc->cmd_base) { + struct metadata_cmd *t = wc->cmd_base; + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(wc->cmd_base, t, prev, next); + ret = *t; + freez(t); + } + else { ret.opcode = METADATA_DATABASE_NOOP; ret.completion = NULL; - } else { - /* dequeue command */ - ret = wc->cmd_queue.cmd_array[wc->cmd_queue.head]; - - if (queue_size == 1) { - wc->cmd_queue.head = wc->cmd_queue.tail = 0; - } else { - wc->cmd_queue.head = wc->cmd_queue.head != METADATA_CMD_Q_MAX_SIZE - 1 ? 
- wc->cmd_queue.head + 1 : 0; - } - wc->queue_size = queue_size - 1; - /* wake up producers */ - uv_cond_signal(&wc->cmd_cond); } - uv_mutex_unlock(&wc->cmd_mutex); + spinlock_unlock(&wc->cmd_queue_lock); return ret; } @@ -863,45 +1153,56 @@ static void timer_cb(uv_timer_t* handle) struct metadata_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - time_t now = now_realtime_sec(); - - if (wc->check_metadata_after && wc->check_metadata_after < now) { - cmd.opcode = METADATA_MAINTENANCE; - if (!metadata_enq_cmd_noblock(wc, &cmd)) - wc->check_metadata_after = now + METADATA_MAINTENANCE_INTERVAL; - } - - if (wc->check_hosts_after && wc->check_hosts_after < now) { + if (wc->metadata_check_after < now_realtime_sec()) { cmd.opcode = METADATA_SCAN_HOSTS; - if (!metadata_enq_cmd_noblock(wc, &cmd)) - wc->check_hosts_after = now + METADATA_HOST_CHECK_INTERVAL; + metadata_enq_cmd(wc, &cmd); } } -static void after_metadata_cleanup(uv_work_t *req, int status) +void vacuum_database(sqlite3 *database, const char *db_alias, int threshold, int vacuum_pc) { - UNUSED(status); + int free_pages = get_free_page_count(database); + int total_pages = get_database_page_count(database); + + if (!threshold) + threshold = DATABASE_FREE_PAGES_THRESHOLD_PC; + + if (!vacuum_pc) + vacuum_pc = DATABASE_FREE_PAGES_VACUUM_PC; - struct metadata_wc *wc = req->data; - metadata_flag_clear(wc, METADATA_FLAG_CLEANUP); + if (free_pages > (total_pages * threshold / 100)) { + + int do_free_pages = (int) (free_pages * vacuum_pc / 100); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "%s: Freeing %d database pages", db_alias, do_free_pages); + + char sql[128]; + snprintfz(sql, sizeof(sql) - 1, "PRAGMA incremental_vacuum(%d)", do_free_pages); + (void) db_execute(database, sql); + } } -static void start_metadata_cleanup(uv_work_t *req) +void run_metadata_cleanup(struct metadata_wc *wc) { - register_libuv_worker_jobs(); + if (unlikely(metadata_flag_check(wc, METADATA_FLAG_SHUTDOWN))) + return; - worker_is_busy(UV_EVENT_METADATA_CLEANUP); - struct metadata_wc *wc = req->data; check_dimension_metadata(wc); - cleanup_health_log(); + check_chart_metadata(wc); + check_label_metadata(wc); + cleanup_health_log(wc); + + if (unlikely(metadata_flag_check(wc, METADATA_FLAG_SHUTDOWN))) + return; + + vacuum_database(db_meta, "METADATA", DATABASE_FREE_PAGES_THRESHOLD_PC, DATABASE_FREE_PAGES_VACUUM_PC); + (void) sqlite3_wal_checkpoint(db_meta, NULL); - worker_is_idle(); } struct scan_metadata_payload { uv_work_t request; struct metadata_wc *wc; - struct completion *completion; + void *data; BUFFER *work_buffer; uint32_t max_count; }; @@ -979,7 +1280,7 @@ static void start_all_host_load_context(uv_work_t *req __maybe_unused) register_libuv_worker_jobs(); struct scan_metadata_payload *data = req->data; - UNUSED(data); + struct metadata_wc *wc = data->wc; worker_is_busy(UV_EVENT_HOST_CONTEXT_LOAD); usec_t started_ut = now_monotonic_usec(); (void)started_ut; @@ -987,6 +1288,9 @@ static void start_all_host_load_context(uv_work_t *req __maybe_unused) RRDHOST *host; size_t max_threads = MIN(get_netdata_cpus() / 2, 6); + if (max_threads < 1) + max_threads = 1; + nd_log(NDLS_DAEMON, NDLP_DEBUG, "METADATA: Using %zu threads for context loading", max_threads); struct host_context_load_thread *hclt = callocz(max_threads, sizeof(*hclt)); size_t thread_index; @@ -998,25 +1302,28 @@ static void start_all_host_load_context(uv_work_t *req __maybe_unused) rrdhost_flag_set(host, RRDHOST_FLAG_CONTEXT_LOAD_IN_PROGRESS); internal_error(true, "METADATA: 'host:%s' loading context", 
rrdhost_hostname(host)); - cleanup_finished_threads(hclt, max_threads, false); - bool found_slot = find_available_thread_slot(hclt, max_threads, &thread_index); + bool found_slot = false; + do { + if (metadata_flag_check(wc, METADATA_FLAG_SHUTDOWN)) + break; - if (unlikely(!found_slot)) { - struct host_context_load_thread hclt_sync = {.host = host}; - restore_host_context(&hclt_sync); - } - else { - __atomic_store_n(&hclt[thread_index].busy, true, __ATOMIC_RELAXED); - hclt[thread_index].host = host; - assert(0 == uv_thread_create(&hclt[thread_index].thread, restore_host_context, &hclt[thread_index])); - } + cleanup_finished_threads(hclt, max_threads, false); + found_slot = find_available_thread_slot(hclt, max_threads, &thread_index); + } while (!found_slot); + + if (metadata_flag_check(wc, METADATA_FLAG_SHUTDOWN)) + break; + + __atomic_store_n(&hclt[thread_index].busy, true, __ATOMIC_RELAXED); + hclt[thread_index].host = host; + assert(0 == uv_thread_create(&hclt[thread_index].thread, restore_host_context, &hclt[thread_index])); } dfe_done(host); cleanup_finished_threads(hclt, max_threads, true); freez(hclt); usec_t ended_ut = now_monotonic_usec(); (void)ended_ut; - internal_error(true, "METADATA: 'host:ALL' contexts loaded in %0.2f ms", (double)(ended_ut - started_ut) / USEC_PER_MS); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "METADATA: host contexts loaded in %0.2f ms", (double)(ended_ut - started_ut) / USEC_PER_MS); worker_is_idle(); } @@ -1027,10 +1334,10 @@ static void after_metadata_hosts(uv_work_t *req, int status __maybe_unused) struct scan_metadata_payload *data = req->data; struct metadata_wc *wc = data->wc; - metadata_flag_clear(wc, METADATA_FLAG_SCANNING_HOSTS); + metadata_flag_clear(wc, METADATA_FLAG_PROCESSING); internal_error(true, "METADATA: scanning hosts complete"); - if (unlikely(data->completion)) { - completion_mark_complete(data->completion); + if (unlikely(wc->scan_complete)) { + completion_mark_complete(wc->scan_complete); internal_error(true, "METADATA: Sending completion done"); } freez(data); @@ -1043,8 +1350,12 @@ static bool metadata_scan_host(RRDHOST *host, uint32_t max_count, bool use_trans bool more_to_do = false; uint32_t scan_count = 1; + sqlite3_stmt *ml_load_stmt = NULL; + + bool load_ml_models = max_count; + if (use_transaction) - (void)db_execute(db_meta, "BEGIN TRANSACTION;"); + (void)db_execute(db_meta, "BEGIN TRANSACTION"); rrdset_foreach_reentrant(st, host) { if (scan_count == max_count) { @@ -1087,13 +1398,26 @@ static bool metadata_scan_host(RRDHOST *host, uint32_t max_count, bool use_trans rrdhost_hostname(host), rrdset_name(st), rrddim_name(rd)); } + + if(rrddim_flag_check(rd, RRDDIM_FLAG_ML_MODEL_LOAD)) { + rrddim_flag_clear(rd, RRDDIM_FLAG_ML_MODEL_LOAD); + if (likely(load_ml_models)) + (void) ml_dimension_load_models(rd, &ml_load_stmt); + } + + worker_is_idle(); } rrddim_foreach_done(rd); } rrdset_foreach_done(st); if (use_transaction) - (void)db_execute(db_meta, "COMMIT TRANSACTION;"); + (void)db_execute(db_meta, "COMMIT TRANSACTION"); + + if (ml_load_stmt) { + sqlite3_finalize(ml_load_stmt); + ml_load_stmt = NULL; + } return more_to_do; } @@ -1119,6 +1443,11 @@ static void store_host_and_system_info(RRDHOST *host, size_t *query_counter) } } +struct host_chart_label_cleanup { + Pvoid_t JudyL; + Word_t count; +}; + // Worker thread to scan hosts for pending metadata to store static void start_metadata_hosts(uv_work_t *req __maybe_unused) { @@ -1135,11 +1464,33 @@ static void start_metadata_hosts(uv_work_t *req __maybe_unused) internal_error(true, 
"METADATA: checking all hosts..."); usec_t started_ut = now_monotonic_usec(); (void)started_ut; + struct host_chart_label_cleanup *cl_cleanup_data = data->data; + + if (cl_cleanup_data) { + Word_t Index = 0; + bool first = true; + Pvoid_t *PValue; + while ((PValue = JudyLFirstThenNext(cl_cleanup_data->JudyL, &Index, &first))) { + char *machine_guid = *PValue; + + host = rrdhost_find_by_guid(machine_guid); + if (likely(!host)) { + uuid_t host_uuid; + if (!uuid_parse(machine_guid, host_uuid)) + delete_host_chart_labels(&host_uuid); + } + + freez(machine_guid); + } + JudyLFreeArray(&cl_cleanup_data->JudyL, PJE0); + freez(cl_cleanup_data); + } + bool run_again = false; worker_is_busy(UV_EVENT_METADATA_STORE); if (!data->max_count) - transaction_started = !db_execute(db_meta, "BEGIN TRANSACTION;"); + transaction_started = !db_execute(db_meta, "BEGIN TRANSACTION"); dfe_start_reentrant(rrdhost_root_index, host) { if (rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED) || !rrdhost_flag_check(host, RRDHOST_FLAG_METADATA_UPDATE)) @@ -1160,6 +1511,7 @@ static void start_metadata_hosts(uv_work_t *req __maybe_unused) struct query_build tmp = {.sql = work_buffer, .count = 0}; uuid_unparse_lower(host->host_uuid, tmp.uuid_str); rrdlabels_walkthrough_read(host->rrdlabels, host_label_store_to_sql_callback, &tmp); + buffer_strcat(work_buffer, " ON CONFLICT (host_id, label_key) DO UPDATE SET source_type = excluded.source_type, label_value=excluded.label_value, date_created=UNIXEPOCH()"); rc = db_execute(db_meta, buffer_tostring(work_buffer)); if (unlikely(rc)) { @@ -1208,16 +1560,18 @@ static void start_metadata_hosts(uv_work_t *req __maybe_unused) dfe_done(host); if (!data->max_count && transaction_started) - transaction_started = db_execute(db_meta, "COMMIT TRANSACTION;"); + transaction_started = db_execute(db_meta, "COMMIT TRANSACTION"); usec_t all_ended_ut = now_monotonic_usec(); (void)all_ended_ut; internal_error(true, "METADATA: checking all hosts completed in %0.2f ms", (double)(all_ended_ut - all_started_ut) / USEC_PER_MS); if (unlikely(run_again)) - wc->check_hosts_after = now_realtime_sec() + METADATA_HOST_CHECK_IMMEDIATE; - else - wc->check_hosts_after = now_realtime_sec() + METADATA_HOST_CHECK_INTERVAL; + wc->metadata_check_after = now_realtime_sec() + METADATA_HOST_CHECK_IMMEDIATE; + else { + wc->metadata_check_after = now_realtime_sec() + METADATA_HOST_CHECK_INTERVAL; + run_metadata_cleanup(wc); + } worker_is_idle(); } @@ -1230,17 +1584,14 @@ static void metadata_event_loop(void *arg) worker_register_job_name(METADATA_STORE_CLAIM_ID, "add claim id"); worker_register_job_name(METADATA_ADD_HOST_INFO, "add host info"); worker_register_job_name(METADATA_MAINTENANCE, "maintenance"); - worker_register_job_name(METADATA_ML_LOAD_MODELS, "ml load models"); int ret; uv_loop_t *loop; unsigned cmd_batch_size; struct metadata_wc *wc = arg; enum metadata_opcode opcode; - uv_work_t metadata_cleanup_worker; uv_thread_set_name_np(wc->thread, "METASYNC"); -// service_register(SERVICE_THREAD_TYPE_EVENT_LOOP, NULL, NULL, NULL, true); loop = wc->loop = mallocz(sizeof(uv_loop_t)); ret = uv_loop_init(loop); if (ret) { @@ -1264,23 +1615,21 @@ static void metadata_event_loop(void *arg) wc->timer_req.data = wc; fatal_assert(0 == uv_timer_start(&wc->timer_req, timer_cb, TIMER_INITIAL_PERIOD_MS, TIMER_REPEAT_PERIOD_MS)); - netdata_log_info("Starting metadata sync thread with %d entries command queue", METADATA_CMD_Q_MAX_SIZE); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "Starting metadata sync thread"); struct metadata_cmd cmd; 
memset(&cmd, 0, sizeof(cmd)); - metadata_flag_clear(wc, METADATA_FLAG_CLEANUP); - metadata_flag_clear(wc, METADATA_FLAG_SCANNING_HOSTS); + metadata_flag_clear(wc, METADATA_FLAG_PROCESSING); - wc->check_metadata_after = now_realtime_sec() + METADATA_MAINTENANCE_FIRST_CHECK; - wc->check_hosts_after = now_realtime_sec() + METADATA_HOST_CHECK_FIRST_CHECK; + wc->metadata_check_after = now_realtime_sec() + METADATA_HOST_CHECK_FIRST_CHECK; int shutdown = 0; - wc->row_id = 0; - completion_mark_complete(&wc->init_complete); + completion_mark_complete(&wc->start_stop_complete); BUFFER *work_buffer = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite); struct scan_metadata_payload *data; + struct host_chart_label_cleanup *cl_cleanup_data = NULL; - while (shutdown == 0 || (wc->flags & METADATA_WORKER_BUSY)) { + while (shutdown == 0 || (wc->flags & METADATA_FLAG_PROCESSING)) { uuid_t *uuid; RRDHOST *host = NULL; @@ -1310,16 +1659,10 @@ static void metadata_event_loop(void *arg) case METADATA_DATABASE_NOOP: case METADATA_DATABASE_TIMER: break; - - case METADATA_ML_LOAD_MODELS: { - RRDDIM *rd = (RRDDIM *) cmd.param[0]; - ml_dimension_load_models(rd); - break; - } case METADATA_DEL_DIMENSION: uuid = (uuid_t *) cmd.param[0]; - if (likely(dimension_can_be_deleted(uuid))) - delete_dimension_uuid(uuid); + if (likely(dimension_can_be_deleted(uuid, NULL, false))) + delete_dimension_uuid(uuid, NULL, false); freez(uuid); break; case METADATA_STORE_CLAIM_ID: @@ -1332,7 +1675,7 @@ static void metadata_event_loop(void *arg) store_host_and_system_info(host, NULL); break; case METADATA_SCAN_HOSTS: - if (unlikely(metadata_flag_check(wc, METADATA_FLAG_SCANNING_HOSTS))) + if (unlikely(metadata_flag_check(wc, METADATA_FLAG_PROCESSING))) break; if (unittest_running) @@ -1341,8 +1684,9 @@ static void metadata_event_loop(void *arg) data = mallocz(sizeof(*data)); data->request.data = data; data->wc = wc; - data->completion = cmd.completion; // Completion by the worker + data->data = cl_cleanup_data; data->work_buffer = work_buffer; + cl_cleanup_data = NULL; if (unlikely(cmd.completion)) { data->max_count = 0; // 0 will process all pending updates @@ -1351,15 +1695,16 @@ static void metadata_event_loop(void *arg) else data->max_count = 5000; - metadata_flag_set(wc, METADATA_FLAG_SCANNING_HOSTS); + metadata_flag_set(wc, METADATA_FLAG_PROCESSING); if (unlikely( uv_queue_work(loop,&data->request, start_metadata_hosts, after_metadata_hosts))) { // Failed to launch worker -- let the event loop handle completion - cmd.completion = data->completion; + cmd.completion = wc->scan_complete; + cl_cleanup_data = data->data; freez(data); - metadata_flag_clear(wc, METADATA_FLAG_SCANNING_HOSTS); + metadata_flag_clear(wc, METADATA_FLAG_PROCESSING); } break; case METADATA_LOAD_HOST_CONTEXT:; @@ -1375,16 +1720,14 @@ static void metadata_event_loop(void *arg) freez(data); } break; - case METADATA_MAINTENANCE: - if (unlikely(metadata_flag_check(wc, METADATA_FLAG_CLEANUP))) - break; + case METADATA_DELETE_HOST_CHART_LABELS:; + if (!cl_cleanup_data) + cl_cleanup_data = callocz(1,sizeof(*cl_cleanup_data)); + + Pvoid_t *PValue = JudyLIns(&cl_cleanup_data->JudyL, (Word_t) ++cl_cleanup_data->count, PJE0); + if (PValue) + *PValue = (void *) cmd.param[0]; - metadata_cleanup_worker.data = wc; - metadata_flag_set(wc, METADATA_FLAG_CLEANUP); - if (unlikely( - uv_queue_work(loop, &metadata_cleanup_worker, start_metadata_cleanup, after_metadata_cleanup))) { - metadata_flag_clear(wc, METADATA_FLAG_CLEANUP); - } break; case METADATA_UNITTEST:; struct 
thread_unittest *tu = (struct thread_unittest *) cmd.param[0]; @@ -1404,7 +1747,6 @@ static void metadata_event_loop(void *arg) uv_close((uv_handle_t *)&wc->timer_req, NULL); uv_close((uv_handle_t *)&wc->async, NULL); - uv_cond_destroy(&wc->cmd_cond); int rc; do { rc = uv_loop_close(loop); @@ -1414,8 +1756,13 @@ static void metadata_event_loop(void *arg) freez(loop); worker_unregister(); - netdata_log_info("METADATA: Shutting down event loop"); - completion_mark_complete(&wc->init_complete); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "Shutting down event loop"); + completion_mark_complete(&wc->start_stop_complete); + if (wc->scan_complete) { + completion_destroy(wc->scan_complete); + freez(wc->scan_complete); + } + metadata_free_cmd_queue(wc); return; error_after_timer_init: @@ -1427,23 +1774,21 @@ static void metadata_event_loop(void *arg) worker_unregister(); } -struct metadata_wc metasync_worker = {.loop = NULL}; - void metadata_sync_shutdown(void) { - completion_init(&metasync_worker.init_complete); + completion_init(&metasync_worker.start_stop_complete); struct metadata_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - netdata_log_info("METADATA: Sending a shutdown command"); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "METADATA: Sending a shutdown command"); cmd.opcode = METADATA_SYNC_SHUTDOWN; metadata_enq_cmd(&metasync_worker, &cmd); /* wait for metadata thread to shut down */ - netdata_log_info("METADATA: Waiting for shutdown ACK"); - completion_wait_for(&metasync_worker.init_complete); - completion_destroy(&metasync_worker.init_complete); - netdata_log_info("METADATA: Shutdown complete"); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "METADATA: Waiting for shutdown ACK"); + completion_wait_for(&metasync_worker.start_stop_complete); + completion_destroy(&metasync_worker.start_stop_complete); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "METADATA: Shutdown complete"); } void metadata_sync_shutdown_prepare(void) @@ -1454,25 +1799,26 @@ void metadata_sync_shutdown_prepare(void) struct metadata_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - struct completion compl; - completion_init(&compl); + struct metadata_wc *wc = &metasync_worker; + + struct completion *compl = mallocz(sizeof(*compl)); + completion_init(compl); + __atomic_store_n(&wc->scan_complete, compl, __ATOMIC_RELAXED); - netdata_log_info("METADATA: Sending a scan host command"); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "METADATA: Sending a scan host command"); uint32_t max_wait_iterations = 2000; - while (unlikely(metadata_flag_check(&metasync_worker, METADATA_FLAG_SCANNING_HOSTS)) && max_wait_iterations--) { + while (unlikely(metadata_flag_check(&metasync_worker, METADATA_FLAG_PROCESSING)) && max_wait_iterations--) { if (max_wait_iterations == 1999) - netdata_log_info("METADATA: Current worker is running; waiting to finish"); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "METADATA: Current worker is running; waiting to finish"); sleep_usec(1000); } cmd.opcode = METADATA_SCAN_HOSTS; - cmd.completion = &compl; metadata_enq_cmd(&metasync_worker, &cmd); - netdata_log_info("METADATA: Waiting for host scan completion"); - completion_wait_for(&compl); - completion_destroy(&compl); - netdata_log_info("METADATA: Host scan complete; can continue with shutdown"); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "METADATA: Waiting for host scan completion"); + completion_wait_for(wc->scan_complete); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "METADATA: Host scan complete; can continue with shutdown"); } // ------------------------------------------------------------- @@ -1483,15 +1829,14 @@ void metadata_sync_init(void) struct 
metadata_wc *wc = &metasync_worker; memset(wc, 0, sizeof(*wc)); - metadata_init_cmd_queue(wc); - completion_init(&wc->init_complete); + completion_init(&wc->start_stop_complete); fatal_assert(0 == uv_thread_create(&(wc->thread), metadata_event_loop, wc)); - completion_wait_for(&wc->init_complete); - completion_destroy(&wc->init_complete); + completion_wait_for(&wc->start_stop_complete); + completion_destroy(&wc->start_stop_complete); - netdata_log_info("SQLite metadata sync initialization complete"); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "SQLite metadata sync initialization complete"); } @@ -1542,9 +1887,7 @@ void metaqueue_host_update_info(RRDHOST *host) void metaqueue_ml_load_models(RRDDIM *rd) { - if (unlikely(!metasync_worker.loop)) - return; - queue_metadata_cmd(METADATA_ML_LOAD_MODELS, rd, NULL); + rrddim_flag_set(rd, RRDDIM_FLAG_ML_MODEL_LOAD); } void metadata_queue_load_host_context(RRDHOST *host) @@ -1552,8 +1895,22 @@ void metadata_queue_load_host_context(RRDHOST *host) if (unlikely(!metasync_worker.loop)) return; queue_metadata_cmd(METADATA_LOAD_HOST_CONTEXT, host, NULL); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "Queued command to load host contexts"); +} + +void metadata_delete_host_chart_labels(char *machine_guid) +{ + if (unlikely(!metasync_worker.loop)) { + freez(machine_guid); + return; + } + + // Node machine guid is already strdup-ed + queue_metadata_cmd(METADATA_DELETE_HOST_CHART_LABELS, machine_guid, NULL); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "Queued command delete chart labels for host %s", machine_guid); } + // // unitests // @@ -1601,7 +1958,7 @@ static void *metadata_unittest_threads(void) tu.join = 0; for (int i = 0; i < threads_to_create; i++) { char buf[100 + 1]; - snprintf(buf, 100, "META[%d]", i); + snprintf(buf, sizeof(buf) - 1, "META[%d]", i); netdata_thread_create( &threads[i], buf, @@ -1631,7 +1988,6 @@ int metadata_unittest(void) // Queue items for a specific period of time metadata_unittest_threads(); - fprintf(stderr, "Items still in queue %u\n", metasync_worker.queue_size); metadata_sync_shutdown(); return 0; diff --git a/database/sqlite/sqlite_metadata.h b/database/sqlite/sqlite_metadata.h index 6b0676ee7df90a..6860cfedf825ab 100644 --- a/database/sqlite/sqlite_metadata.h +++ b/database/sqlite/sqlite_metadata.h @@ -17,6 +17,8 @@ void metaqueue_host_update_info(RRDHOST *host); void metaqueue_ml_load_models(RRDDIM *rd); void migrate_localhost(uuid_t *host_uuid); void metadata_queue_load_host_context(RRDHOST *host); +void metadata_delete_host_chart_labels(char *machine_guid); +void vacuum_database(sqlite3 *database, const char *db_alias, int threshold, int vacuum_pc); // UNIT TEST int metadata_unittest(void); diff --git a/docs/category-overview-pages/accessing-netdata-dashboards.md b/docs/category-overview-pages/accessing-netdata-dashboards.md index 024d0bd7b43923..97df8b8352c1a8 100644 --- a/docs/category-overview-pages/accessing-netdata-dashboards.md +++ b/docs/category-overview-pages/accessing-netdata-dashboards.md @@ -7,7 +7,7 @@ A user accessing the Netdata dashboard **from the Cloud** will always be present A user accessing the Netdata dashboard **from the Agent** will, by default, be presented with the latest Netdata dashboard version (the same as Netdata Cloud) except in the following scenarios: * Agent doesn't have Internet access, and is unable to get the latest Netdata dashboards, as a result it falls back to the Netdata dashboard version that was shipped with the agent. -* Users have defined, e.g. 
through URL bookmark, that they wants to see the previous version of the dashboard (accessible `http://NODE:19999/v1`, replacing `NODE` with the IP address or hostname of your Agent). +* Users have defined, e.g. through URL bookmark, that they want to see the previous version of the dashboard (accessible `http://NODE:19999/v1`, replacing `NODE` with the IP address or hostname of your Agent). ## Main sections @@ -16,12 +16,23 @@ The Netdata dashboard consists of the following main sections: * [Infrastructure Overview](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md) * [Nodes view](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/nodes.md) * [Custom dashboards](https://learn.netdata.cloud/docs/visualizations/custom-dashboards) -* [Alerts](https://github.com/netdata/netdata/blob/master/docs/monitor/view-active-alarms.md) +* [Alerts](https://github.com/netdata/netdata/blob/master/docs/monitor/view-active-alerts.md) * [Anomaly Advisor](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/anomaly-advisor.md) * [Functions](https://github.com/netdata/netdata/blob/master/docs/cloud/netdata-functions.md) * [Events feed](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/events-feed.md) -> ⚠️ Some sections of the dashboard, when accessed through the agent, may require the user to be signed-in to Netdata Cloud or having the Agent claimed to Netdata Cloud for their full functionality. Examples include saving visualization settings on charts or custom dashboards, claiming the node to Netdata Cloud, or executing functions on an Agent. +> ⚠️ Some sections of the dashboard, when accessed through the agent, may require the user to be signed in to Netdata Cloud or having the Agent claimed to Netdata Cloud for their full functionality. Examples include saving visualization settings on charts or custom dashboards, claiming the node to Netdata Cloud, or executing functions on an Agent. + +## How to access the dashboards? + +### Netdata Cloud + +You can access the dashboard at https://app.netdata.cloud/ and [sign-in](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/sign-in.md) with an account or [sign-up](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/sign-in.md#dont-have-a-netdata-cloud-account-yet) if you don't have an account yet. + +### Netdata Agent + +Netdata starts a web server for its dashboard at port `19999`. Open up your web browser of choice and +navigate to `http://NODE:19999`, replacing `NODE` with the IP address or hostname of your Agent. If installed on localhost, you can access it through `http://localhost:19999`. Documentation for previous Agent dashboard can still be found [here](https://github.com/netdata/netdata/blob/master/web/gui/README.md). \ No newline at end of file diff --git a/docs/category-overview-pages/deployment-strategies.md b/docs/category-overview-pages/deployment-strategies.md index f8a68b46ff2ba4..69daaf9fd13916 100644 --- a/docs/category-overview-pages/deployment-strategies.md +++ b/docs/category-overview-pages/deployment-strategies.md @@ -265,4 +265,4 @@ We also suggest that you: 3. [Use host labels](https://github.com/netdata/netdata/blob/master/docs/guides/using-host-labels.md) - To organize systems, metrics, and alarms. + To organize systems, metrics, and alerts. 
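Note on the label and dimension SQL changes earlier in this diff: the statements move from INSERT OR REPLACE to explicit upserts (INSERT ... ON CONFLICT ... DO UPDATE SET ...), so a conflicting row is updated in place instead of being deleted and re-inserted. The toy program below is a minimal sketch of that pattern against an in-memory database; the simplified host_label schema and values are illustrative assumptions rather than the agent's real table, and UNIXEPOCH() needs SQLite 3.38 or newer, which the new SQL in this diff already relies on.

```
/* Sketch of the INSERT ... ON CONFLICT DO UPDATE ("upsert") pattern this
 * diff adopts in place of INSERT OR REPLACE. The schema is a simplified
 * stand-in for host_label, not the agent's actual table. */
#include <sqlite3.h>
#include <stdio.h>

int main(void)
{
    sqlite3 *db;
    sqlite3_stmt *res = NULL;

    if (sqlite3_open(":memory:", &db) != SQLITE_OK)
        return 1;

    sqlite3_exec(db,
        "CREATE TABLE host_label(host_id TEXT, label_key TEXT, label_value TEXT,"
        " date_created INT, UNIQUE(host_id, label_key))",
        NULL, NULL, NULL);

    const char *sql =
        "INSERT INTO host_label (host_id, label_key, label_value, date_created) "
        "VALUES (@host_id, @key, @value, UNIXEPOCH()) "               /* UNIXEPOCH(): SQLite >= 3.38 */
        "ON CONFLICT(host_id, label_key) DO UPDATE SET "
        "label_value = excluded.label_value, date_created = UNIXEPOCH()";

    if (sqlite3_prepare_v2(db, sql, -1, &res, NULL) != SQLITE_OK)
        return 1;

    /* Run the same statement twice: first an insert, then an in-place update */
    for (int i = 0; i < 2; i++) {
        sqlite3_bind_text(res, 1, "host-1", -1, SQLITE_STATIC);
        sqlite3_bind_text(res, 2, "_os", -1, SQLITE_STATIC);
        sqlite3_bind_text(res, 3, i ? "linux-updated" : "linux", -1, SQLITE_STATIC);
        if (sqlite3_step(res) != SQLITE_DONE)
            fprintf(stderr, "upsert failed: %s\n", sqlite3_errmsg(db));
        sqlite3_reset(res);
    }

    sqlite3_finalize(res);
    sqlite3_close(db);
    return 0;
}
```

Keeping the conflicting row in place preserves its rowid and any columns not named in the SET clause, which is presumably why the host and chart label writers in sqlite_metadata.c now append the ON CONFLICT clause after the generated VALUES list instead of relying on OR REPLACE.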
diff --git a/docs/category-overview-pages/logs.md b/docs/category-overview-pages/logs.md new file mode 100644 index 00000000000000..fbaf856314f8b0 --- /dev/null +++ b/docs/category-overview-pages/logs.md @@ -0,0 +1,3 @@ +# Logs + +This section talks about ways Netdata collects and visualizes logs, while also providing useful guides on log centralization setups that can be used with Netdata. diff --git a/docs/cloud/alerts-notifications/add-discord-notification.md b/docs/cloud/alerts-notifications/add-discord-notification.md deleted file mode 100644 index 3edf5002b409bf..00000000000000 --- a/docs/cloud/alerts-notifications/add-discord-notification.md +++ /dev/null @@ -1,44 +0,0 @@ -# Add Discord notification configuration - -From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Discord. - -## Prerequisites - -To enable Discord notifications you need: - -- A Netdata Cloud account -- Access to the space as an **administrator** -- Have a Discord server able to receive webhook integrations. For more details check [how to configure this on Discord](#settings-on-discord) - -## Steps - -1. Click on the **Space settings** cog (located above your profile icon) -1. Click on the **Notification** tab -1. Click on the **+ Add configuration** button (near the top-right corner of your screen) -1. On the **Discord** card click on **+ Add** -1. A modal will be presented to you to enter the required details to enable the configuration: - 1. **Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - 1. **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Discord: - - Define the type channel you want to send notifications to: **Text channel** or **Forum channel** - - Webhook URL - URL provided on Discord for the channel you want to receive your notifications. For more details check [how to configure this on Discord](#settings-on-discord) - - Thread name - if the Discord channel is a **Forum channel** you will need to provide the thread name as well - -## Settings on Discord - -## Enable webhook integrations on Discord server - -To enable the webhook integrations on Discord you need: -1. Go to *Integrations** under your **Server Settings - - ![image](https://user-images.githubusercontent.com/82235632/214091719-89372894-d67f-4ec5-98d0-57c7d4256ebf.png) - -1. **Create Webhook** or **View Webhooks** if you already have some defined -1. When you create a new webhook you specify: Name and Channel -1. Once you have this configured you will need the Webhook URL to add your notification configuration on Netdata UI - - ![image](https://user-images.githubusercontent.com/82235632/214092713-d16389e3-080f-4e1c-b150-c0fccbf4570e.png) - -For more details please read this article from Discord: [Intro to Webhooks](https://support.discord.com/hc/en-us/articles/228383668). 
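On the command-queue rewrite in sqlite_metadata.c above: the fixed METADATA_CMD_Q_MAX_SIZE ring buffer guarded by uv_mutex/uv_cond is replaced by an unbounded doubly linked list protected by a spinlock, so producers no longer block when the queue fills. The sketch below mirrors that enqueue/dequeue shape in portable C under stated assumptions: pthread_mutex_t stands in for Netdata's SPINLOCK, the list handling is open-coded in place of the DOUBLE_LINKED_LIST_* macros, and the command struct is trimmed to the minimum, so treat the names and details as illustrative rather than the project's actual helpers.

```
/* Portable sketch of an unbounded FIFO command queue: a doubly linked
 * list guarded by a lock, dequeueing a NOOP command when empty. */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

enum metadata_opcode { METADATA_DATABASE_NOOP = 0, METADATA_SCAN_HOSTS };

struct metadata_cmd {
    enum metadata_opcode opcode;
    struct metadata_cmd *prev, *next;
};

struct cmd_queue {
    pthread_mutex_t lock;
    struct metadata_cmd *head, *tail;
};

static void cmd_enqueue(struct cmd_queue *q, const struct metadata_cmd *cmd)
{
    struct metadata_cmd *t = malloc(sizeof(*t));
    *t = *cmd;
    t->prev = t->next = NULL;

    pthread_mutex_lock(&q->lock);
    if (q->tail) {                    /* append at the tail to keep FIFO order */
        t->prev = q->tail;
        q->tail->next = t;
        q->tail = t;
    } else {
        q->head = q->tail = t;
    }
    pthread_mutex_unlock(&q->lock);
}

static struct metadata_cmd cmd_dequeue(struct cmd_queue *q)
{
    struct metadata_cmd ret;
    memset(&ret, 0, sizeof(ret));
    ret.opcode = METADATA_DATABASE_NOOP;  /* returned when the queue is empty */

    pthread_mutex_lock(&q->lock);
    struct metadata_cmd *t = q->head;
    if (t) {
        q->head = t->next;
        if (q->head)
            q->head->prev = NULL;
        else
            q->tail = NULL;
        ret = *t;
        ret.prev = ret.next = NULL;
        free(t);
    }
    pthread_mutex_unlock(&q->lock);
    return ret;
}
```

As in the diff, dequeueing from an empty queue yields a METADATA_DATABASE_NOOP command rather than blocking, which lets the libuv event loop drain the queue opportunistically on each async wakeup.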
diff --git a/docs/cloud/alerts-notifications/add-mattermost-notification-configuration.md b/docs/cloud/alerts-notifications/add-mattermost-notification-configuration.md deleted file mode 100644 index 79bc9861925692..00000000000000 --- a/docs/cloud/alerts-notifications/add-mattermost-notification-configuration.md +++ /dev/null @@ -1,51 +0,0 @@ -# Add Mattermost notification configuration - -From the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Mattermost. - -## Prerequisites - -To add Mattermost notification configurations you need: - -- A Netdata Cloud account -- Access to the space as an **administrator** -- Space needs to be on **Business** plan or higher -- Have a Mattermost app on your workspace to receive the webhooks, for more details check [how to configure this on Mattermost](#settings-on-mattermost) - -## Steps - -1. Click on the **Space settings** cog (located above your profile icon) -1. Click on the **Notification** tab -1. Click on the **+ Add configuration** button (near the top-right corner of your screen) -1. On the **Mattermost** card click on **+ Add** -1. A modal will be presented to you to enter the required details to enable the configuration: - 1. **Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - 1. **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Mattermost: - - Webhook URL - URL provided on Mattermost for the channel you want to receive your notifications. For more details check [how to configure this on Mattermost](#settings-on-mattermost) - -## Settings on Mattermost - -To enable the webhook integrations on Mattermost you need: -1. In Mattermost, go to Product menu > Integrations > Incoming Webhook. - -![image](https://user-images.githubusercontent.com/26550862/243394526-6d45f6c2-c3cc-4d5f-a9cb-85d8170fc8ac.png) - - - If you don’t have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below - -![image](https://user-images.githubusercontent.com/26550862/243394734-f911ccf7-bb18-41b2-ab52-31195861dd1b.png) - -2. Select Add Incoming Webhook and add a name and description for the webhook. The description can be up to 500 characters - -3. Select the channel to receive webhook payloads, then select Add to create the webhook - -![image](https://user-images.githubusercontent.com/26550862/243394626-363b7cbc-3550-47ef-b2f3-ce929919145f.png) - -4. You will end up with a webhook endpoint that looks like so: -``` -https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx -``` - - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your Mattermost instance. - -For more details please check Mattermost's article [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/). 
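On vacuum_database() and the PRAGMA helpers added above: the cleanup path now compares freelist_count against page_count and, once free pages cross a percentage threshold, releases a slice of them with PRAGMA incremental_vacuum. Below is a minimal standalone sketch of that check, assuming a database created with auto_vacuum=INCREMENTAL (otherwise incremental_vacuum is a no-op); the file name and the 5%/10% figures are illustrative and simply mirror the DATABASE_FREE_PAGES_* defaults in the diff.

```
/* Standalone sketch of the free-page check + incremental vacuum pattern.
 * Assumes the database was created with auto_vacuum=INCREMENTAL. */
#include <sqlite3.h>
#include <stdio.h>

static int pragma_int(sqlite3 *db, const char *sql)
{
    sqlite3_stmt *res = NULL;
    int value = -1;

    if (sqlite3_prepare_v2(db, sql, -1, &res, NULL) != SQLITE_OK)
        return -1;

    if (sqlite3_step(res) == SQLITE_ROW)
        value = sqlite3_column_int(res, 0);

    sqlite3_finalize(res);
    return value;
}

static void maybe_incremental_vacuum(sqlite3 *db, int threshold_pc, int vacuum_pc)
{
    int free_pages  = pragma_int(db, "PRAGMA freelist_count");
    int total_pages = pragma_int(db, "PRAGMA page_count");

    if (free_pages < 0 || total_pages <= 0)
        return;

    /* Only vacuum when free pages exceed threshold_pc percent of the file */
    if (free_pages > total_pages * threshold_pc / 100) {
        int n = free_pages * vacuum_pc / 100;   /* release only a slice */
        if (n < 1)
            n = 1;

        char sql[64];
        snprintf(sql, sizeof(sql), "PRAGMA incremental_vacuum(%d)", n);
        sqlite3_exec(db, sql, NULL, NULL, NULL);
    }
}

int main(void)
{
    sqlite3 *db;
    if (sqlite3_open("metadata-test.db", &db) != SQLITE_OK)   /* illustrative file name */
        return 1;
    maybe_incremental_vacuum(db, 5, 10);
    sqlite3_close(db);
    return 0;
}
```

Vacuuming incrementally, and only past a threshold, keeps each pass bounded, in the same spirit as the METADATA_RUNTIME_THRESHOLD time-boxing applied to the dimension, chart and label cleanup loops elsewhere in this diff.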
diff --git a/docs/cloud/alerts-notifications/add-opsgenie-notification-configuration.md b/docs/cloud/alerts-notifications/add-opsgenie-notification-configuration.md deleted file mode 100644 index 0a80311efebb19..00000000000000 --- a/docs/cloud/alerts-notifications/add-opsgenie-notification-configuration.md +++ /dev/null @@ -1,37 +0,0 @@ -# Add Opsgenie notification configuration - -From the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Opsgenie. - -## Prerequisites - -To add Opsgenie notification configurations you need: - -- A Netdata Cloud account -- Access to the space as an **administrator** -- Space on **Business** plan or higher -- Have a permission to add new integrations in Opsgenie. - -## Steps - -1. Click on the **Space settings** cog (located above your profile icon) -1. Click on the **Notification** tab -1. Click on the **+ Add configuration** button (near the top-right corner of your screen) -1. On the **Opsgenie** card click on **+ Add** -1. A modal will be presented to you to enter the required details to enable the configuration: - 1. **Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - 1. **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Opsgenie: - - API Key - a key provided on Opsgenie for the channel you want to receive your notifications. For more details check [how to configure this on Opsgenie](#settings-on-opsgenie) - -## Settings on Opsgenie - -To enable the Netdata integration on Opsgenie you need: -1. Go to integrations tab of your team, click **Add integration**. - - ![image](https://user-images.githubusercontent.com/93676586/230361479-cb73919c-452d-47ec-8066-ed99be5f05e2.png) - -1. Pick **API** from available integrations. Copy your API Key and press **Save Integration**. - -1. Paste copied API key into the corresponding field in **Integration configuration** section of Opsgenie modal window in Netdata. diff --git a/docs/cloud/alerts-notifications/add-pagerduty-notification-configuration.md b/docs/cloud/alerts-notifications/add-pagerduty-notification-configuration.md deleted file mode 100644 index eec4f94c18139d..00000000000000 --- a/docs/cloud/alerts-notifications/add-pagerduty-notification-configuration.md +++ /dev/null @@ -1,43 +0,0 @@ -# Add PagerDuty notification configuration - -From the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on PagerDuty. - -## Prerequisites - -To add PagerDuty notification configurations you need: - -- A Cloud account -- Access to the space as and **administrator** -- Space needs to be on **Business** plan or higher -- Have a PagerDuty service to receive events, for more details check [how to configure this on PagerDuty](#settings-on-pagerduty) - -## Steps - -1. Click on the **Space settings** cog (located above your profile icon) -1. Click on the **Notification** tab -1. 
Click on the **+ Add configuration** button (near the top-right corner of your screen) -1. On the **PagerDuty** card click on **+ Add** -1. A modal will be presented to you to enter the required details to enable the configuration: - 1. **Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - 1. **Integration configuration** are the specific notification integration required settings, which vary by notification method. For PagerDuty: - - Integration Key - is a 32 character key provided by PagerDuty to receive events on your service. For more details check [how to configure this on PagerDuty](#settings-on-pagerduty) - -## Settings on PagerDuty - -## Enable webhook integrations on PagerDuty - -To enable the webhook integrations on PagerDuty you need: -1. Create a service to receive events from your services directory page: - - ![image](https://user-images.githubusercontent.com/2930882/214254148-03714f31-7943-4444-9b63-7b83c9daa025.png) - -1. At step 3, select `Events API V2` Integration:or **View Webhooks** if you already have some defined - - ![image](https://user-images.githubusercontent.com/2930882/214254466-423cf493-037d-47bd-b9e6-fc894897f333.png) - -1. Once the service is created you will be redirected to its configuration page, where you can copy the **integration key**, that you will need need to add to your notification configuration on Netdata UI: - - ![image](https://user-images.githubusercontent.com/2930882/214255916-0d2e53d5-87cc-408a-9f5b-0308a3262d5c.png) diff --git a/docs/cloud/alerts-notifications/add-rocketchat-notification-configuration.md b/docs/cloud/alerts-notifications/add-rocketchat-notification-configuration.md deleted file mode 100644 index 6a81ad1d727fbc..00000000000000 --- a/docs/cloud/alerts-notifications/add-rocketchat-notification-configuration.md +++ /dev/null @@ -1,49 +0,0 @@ -# Add RocketChat notification configuration - -From the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on RocketChat. - -## Prerequisites - -To add RocketChat notification configurations you need: - -- A Netdata Cloud account -- Access to the space as an **administrator** -- Space needs to be on **Business** plan or higher -- Have a RocketChat app on your workspace to receive the webhooks, for more details check [how to configure this on RocketChat](#settings-on-rocketchat) - -## Steps - -1. Click on the **Space settings** cog (located above your profile icon) -1. Click on the **Notification** tab -1. Click on the **+ Add configuration** button (near the top-right corner of your screen) -1. On the **RocketChat** card click on **+ Add** -1. A modal will be presented to you to enter the required details to enable the configuration: - 1. 
**Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - 1. **Integration configuration** are the specific notification integration required settings, which vary by notification method. For RocketChat: - - Webhook URL - URL provided on RocketChat for the channel you want to receive your notifications. For more details check [how to configure this on RocketChat](#settings-on-rocketchat) - -## Settings on RocketChat - -To enable the webhook integrations on RocketChat you need: -1. In RocketChat, Navigate to Administration > Workspace > Integrations. - -2. Click +New at the top right corner. - -![image](https://user-images.githubusercontent.com/26550862/246235250-84f45b35-03f3-4ab4-96ac-6ab4d1c662b5.png) - -3. For more details about each parameter, check [create-a-new-incoming-webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook). - -4. After configuring integration, click Save. - -![image](https://user-images.githubusercontent.com/26550862/246235321-25ed9a7b-92ac-4956-9d76-e83ffc51b5c1.png) - -4. You will end up with a webhook endpoint that looks like so: -``` -https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -``` - - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your RocketChat instance. - -For more details please check RocketChat's article [Incoming webhooks for RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/). diff --git a/docs/cloud/alerts-notifications/add-slack-notification-configuration.md b/docs/cloud/alerts-notifications/add-slack-notification-configuration.md deleted file mode 100644 index ed845b4d3f905f..00000000000000 --- a/docs/cloud/alerts-notifications/add-slack-notification-configuration.md +++ /dev/null @@ -1,47 +0,0 @@ -# Add Slack notification configuration - -From the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Slack. - -## Prerequisites - -To add slack notification configurations you need: - -- A Netdata Cloud account -- Access to the space as an **administrator** -- Space needs to be on **Business** plan or higher -- Have a Slack app on your workspace to receive the webhooks, for more details check [how to configure this on Slack](#settings-on-slack) - -## Steps - -1. Click on the **Space settings** cog (located above your profile icon) -1. Click on the **Notification** tab -1. Click on the **+ Add configuration** button (near the top-right corner of your screen) -1. On the **Slack** card click on **+ Add** -1. A modal will be presented to you to enter the required details to enable the configuration: - 1. 
**Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - 1. **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Slack: - - Webhook URL - URL provided on Slack for the channel you want to receive your notifications. For more details check [how to configure this on Slack](#settings-on-slack) - -## Settings on Slack - -To enable the webhook integrations on Slack you need: -1. Create an app to receive webhook integrations. Check [Create an app](https://api.slack.com/apps?new_app=1) from Slack documentation for further details -1. Install the app on your workspace -1. Configure Webhook URLs for your workspace - - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks** - - ![image](https://user-images.githubusercontent.com/2930882/214251948-486229bb-195b-499b-92e4-4be59a567a19.png) - - - At the bottom of **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace** - - After pressing that specify the channel where you want your notifications to be delivered - - ![image](https://user-images.githubusercontent.com/82235632/214103532-95f9928d-d4d6-4172-9c24-a4ddd330e96d.png) - - - Once completed copy the Webhook URL that you will need to add to your notification configuration on Netdata UI - - ![image](https://user-images.githubusercontent.com/82235632/214104412-13aaeced-1b40-4894-85f6-9db0eb35c584.png) - -For more details please check Slacks's article [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack). diff --git a/docs/cloud/alerts-notifications/add-webhook-notification-configuration.md b/docs/cloud/alerts-notifications/add-webhook-notification-configuration.md deleted file mode 100644 index 012b0478f15532..00000000000000 --- a/docs/cloud/alerts-notifications/add-webhook-notification-configuration.md +++ /dev/null @@ -1,234 +0,0 @@ -# Add webhook notification configuration - -From the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on a webhook using a predefined schema. - -## Prerequisites - -To add webhook notification configurations you need: - -- A Netdata Cloud account -- Access to the space as an **administrator** -- Space needs to be on **Pro** plan or higher -- Have an app that allows you to receive webhooks following a predefined schema, for more details check [how to create the webhook service](#webhook-service) - -## Steps - -1. Click on the **Space settings** cog (located above your profile icon) -1. Click on the **Notification** tab -1. Click on the **+ Add configuration** button (near the top-right corner of your screen) -1. On the **webhook** card click on **+ Add** -1. A modal will be presented to you to enter the required details to enable the configuration: - 1. 
**Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - 1. **Integration configuration** are the specific notification integration required settings, which vary by notification method. For webhook: - - Webhook URL - webhook URL is the url of the service that Netdata will send notifications to. In order to keep the communication secured, we only accept HTTPS urls. Check [how to create the webhook service](#webhook-service). - - Extra headers - these are optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL. For more details check [Extra headers](#extra-headers) - - Authentication Mechanism - Netdata webhook integration supports 3 different authentication mechanisms. For more details check [Authentication mechanisms](#authentication-mechanisms): - - Mutual TLS (recommended) - default authentication mechanism used if no other method is selected. - - Basic - the client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**. These will settings will be required inputs. - - Bearer - the client sends a request with an Authorization header that includes a **bearer token**. This setting will be a required input. - -## Webhook service - -A webhook integration allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL. In this document, we'll go over the steps to set up a generic webhook integration, including adding headers, and implementing different types of authorization mechanisms. - -### Netdata webhook integration - -A webhook integration is a way for one service to notify another service about events that occur within it. This is done by sending an HTTP POST request to a specified URL (known as the "webhook URL") when an event occurs. - -Netdata webhook integration service will send alert notifications to the destination service as soon as they are detected. - -The notification content sent to the destination service will be a JSON object having these properties: - -| field | type | description | -| :-- | :-- | :-- | -| message | string | A summary message of the alert. | -| alarm | string | The alarm the notification is about. | -| info | string | Additional info related with the alert. | -| chart | string | The chart associated with the alert. | -| context | string | The chart context. | -| space | string | The space where the node that raised the alert is assigned. | -| rooms | object[object(string,string)] | Object with list of rooms names and urls where the node belongs to. | -| family | string | Context family. | -| class | string | Classification of the alert, e.g. "Error". | -| severity | string | Alert severity, can be one of "warning", "critical" or "clear". | -| date | string | Date of the alert in ISO8601 format. | -| duration | string | Duration the alert has been raised. | -| additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. 
| -| additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. | -| alarm_url | string | Netdata Cloud URL for this alarm. | - -### Extra headers - -When setting up a webhook integration, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL. - -By default, the following headers will be sent in the HTTP request - -| **Header** | **Value** | -|:-------------------------------:|-----------------------------| -| Content-Type | application/json | - -### Authentication mechanisms - -Netdata webhook integration supports 3 different authentication mechanisms: - -#### Mutual TLS authentication (recommended) - -In mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients. - -This is the default authentication mechanism used if no other method is selected. - -To take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate. - -The steps to perform this validation are as follows: - -- Store Netdata CA certificate on a file in your disk. The content of this file should be: - -
- Netdata CA certificate - -``` ------BEGIN CERTIFICATE----- -MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN -BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL -EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx -MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK -Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0 -ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh -IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++ -ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs -QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL -qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8 -fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he -s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc -Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72 -jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+ -4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY -Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw -PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU -R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC -AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e -Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY -1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ -VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io -rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP -qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH -7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts -ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4 -X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH -FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR -Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y -nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3 -5zrbwvQf ------END CERTIFICATE----- -``` -
- -- Enable client certificate validation on the web server that is doing the TLS termination. Below we show you how to perform this configuration in `NGINX` and `Apache` - - **NGINX** - -```bash -server { - listen 443 ssl default_server; - - # ... existing SSL configuration for server authentication ... - ssl_verify_client on; - ssl_client_certificate /path/to/Netdata_CA.pem; - - location / { - if ($ssl_client_s_dn !~ "CN=app.netdata.cloud") { - return 403; - } - # ... existing location configuration ... - } -} -``` - -**Apache** - -```bash -Listen 443 - - # ... existing SSL configuration for server authentication ... - SSLVerifyClient require - SSLCACertificateFile "/path/to/Netdata_CA.pem" - - - Require expr "%{SSL_CLIENT_S_DN_CN} == 'app.netdata.cloud'" - # ... existing directory configuration ... - -``` - -#### Basic authentication - -In basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service. - -#### Bearer token authentication - -In bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service. - -##### Challenge secret - -To validate that you has ownership of the web application that will receive the webhook events, we are using a challenge response check mechanism. - -This mechanism works as follows: - -- The challenge secret parameter that you provide is a shared secret between you and Netdata only. -- On your request for creating a new Webhook integration, we will make a GET request to the url of the webhook, adding a query parameter `crc_token`, consisting of a random string. -- You will receive this request on your application and it must construct an encrypted response, consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. The response will be in the format: - -```json -{ - "response_token": "sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4=" -} -``` - -- We will compare your application's response with the hash that we will generate using the challenge secret, and if they are the same, the integration creation will succeed. - -We will do this validation everytime you update your integration configuration. - -- Response requirements: - - A base64 encoded HMAC SHA-256 hash created from the crc_token and the shared secret. - - Valid response_token and JSON format. - - Latency less than 5 seconds. - - 200 HTTP response code. 
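While building the endpoint, it can help to compute the expected token offline and compare it with what your handler returns. The snippet below is only an illustrative sketch with made-up `crc_token` and secret values; it mirrors the HMAC SHA-256 construction described above and the Flask handler shown next:

```python
import base64
import hashlib
import hmac

# Made-up example values -- use the crc_token Netdata sent you and your own shared secret.
crc_token = "example-crc-token"
challenge_secret = "YOUR_CHALLENGE_SECRET"

# HMAC SHA-256 over the crc_token, keyed with the shared secret, then base64-encoded.
digest = hmac.new(challenge_secret.encode(), msg=crc_token.encode(), digestmod=hashlib.sha256).digest()
expected = "sha256=" + base64.b64encode(digest).decode("ascii")

# This is the string your endpoint must return as "response_token" for this crc_token.
print(expected)
```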
- -**Example response token generation in Python:** - -Here you can see how to define a handler for a Flask application in python 3: - -```python -import base64 -import hashlib -import hmac -import json - -key ='YOUR_CHALLENGE_SECRET' - -@app.route('/webhooks/netdata') -def webhook_challenge(): - token = request.args.get('crc_token').encode('ascii') - - # creates HMAC SHA-256 hash from incomming token and your consumer secret - sha256_hash_digest = hmac.new(key.encode(), - msg=token, - digestmod=hashlib.sha256).digest() - - # construct response data with base64 encoded hash - response = { - 'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii') - } - - # returns properly formatted json response - return json.dumps(response) -``` - -#### Related topics - -- [Alerts Configuration](https://github.com/netdata/netdata/blob/master/health/README.md) -- [Alert Notifications](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.md) -- [Manage notification methods](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/manage-notification-methods.md) diff --git a/docs/cloud/alerts-notifications/notifications.md b/docs/cloud/alerts-notifications/notifications.md index ad115d43f40f93..cde30a2b4fa5e0 100644 --- a/docs/cloud/alerts-notifications/notifications.md +++ b/docs/cloud/alerts-notifications/notifications.md @@ -8,7 +8,7 @@ you or your team. Having this information centralized helps you: * Have a clear view of the health across your infrastructure, seeing all alerts in one place. -* Easily [setup your alert notification process](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/manage-notification-methods.md): +* Easily [set up your alert notification process](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/manage-notification-methods.md): methods to use and where to use them, filtering rules, etc. * Quickly troubleshoot using [Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md) or [Anomaly Advisor](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/anomaly-advisor.md) @@ -104,8 +104,8 @@ if the node should be silenced for the entire space or just for specific rooms ( ### Scope definition for Alerts * **Alert name:** silencing a specific alert name silences all alert state transitions for that specific alert. -* **Alert context:** silencing a specific alert context will silence all alert state transitions for alerts targeting that chart context, for more details check [alert configuration docs](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alarm-line-on). -* **Alert role:** silencing a specific alert role will silence all the alert state transitions for alerts that are configured to be specific role recipients, for more details check [alert configuration docs](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alarm-line-to). +* **Alert context:** silencing a specific alert context will silence all alert state transitions for alerts targeting that chart context, for more details check [alert configuration docs](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alert-line-on). 
+* **Alert role:** silencing a specific alert role will silence all the alert state transitions for alerts that are configured to be specific role recipients, for more details check [alert configuration docs](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alert-line-to). Beside the above two main entities there are another two important settings that you can define on a silencing rule: * Who does the rule affect? **All user** in the space or **Myself** @@ -124,24 +124,24 @@ the local Agent dashboard at `http://NODE:19999`. ## Anatomy of an alert notification -Email alarm notifications show the following information: +Email alert notifications show the following information: - The Space's name - The node's name -- Alarm status: critical, warning, cleared -- Previous alarm status -- Time at which the alarm triggered -- Chart context that triggered the alarm -- Name and information about the triggered alarm -- Alarm value +- Alert status: critical, warning, cleared +- Previous alert status +- Time at which the alert triggered +- Chart context that triggered the alert +- Name and information about the triggered alert +- Alert value - Total number of warning and critical alerts on that node -- Threshold for triggering the given alarm state +- Threshold for triggering the given alert state - Calculation or database lookups that Netdata uses to compute the value -- Source of the alarm, including which file you can edit to configure this alarm on an individual node +- Source of the alert, including which file you can edit to configure this alert on an individual node Email notifications also feature a **Go to Node** button, which takes you directly to the offending chart for that node within Cloud's embedded dashboards. Here's an example email notification for the `ram_available` chart, which is in a critical state: -![Screenshot of an alarm notification email from Netdata Cloud](https://user-images.githubusercontent.com/1153921/87461878-e933c480-c5c3-11ea-870b-affdb0801854.png) +![Screenshot of an alert notification email from Netdata Cloud](https://user-images.githubusercontent.com/1153921/87461878-e933c480-c5c3-11ea-870b-affdb0801854.png) diff --git a/docs/cloud/cheatsheet.md b/docs/cloud/cheatsheet.md index 35a6a2c991ff06..a3d2f0285efe7e 100644 --- a/docs/cloud/cheatsheet.md +++ b/docs/cloud/cheatsheet.md @@ -99,13 +99,13 @@ modules: sudo ./edit-config go.d/mysql.conf ``` -### Alarms & notifications +### Alerts & notifications - After any change, reload the Netdata health configuration: @@ -115,23 +115,23 @@ netdatacli reload-health killall -USR2 netdata ``` -#### Configure a specific alarm +#### Configure a specific alert ```bash -sudo ./edit-config health.d/example-alarm.conf +sudo ./edit-config health.d/example-alert.conf ``` -#### Silence a specific alarm +#### Silence a specific alert ```bash -sudo ./edit-config health.d/example-alarm.conf +sudo ./edit-config health.d/example-alert.conf ``` ``` to: silent ``` - diff --git a/docs/netdata-cloud-onprem/getting-started-light-poc.md b/docs/netdata-cloud-onprem/getting-started-light-poc.md new file mode 100644 index 00000000000000..dfe0a0c87b5306 --- /dev/null +++ b/docs/netdata-cloud-onprem/getting-started-light-poc.md @@ -0,0 +1,51 @@ +# Getting started with Netdata Cloud On-Prem Light PoC +Due to the high demand, we designed a very light and easy-to-install version of netdata for clients who do not have Kubernetes cluster installed. 
Please keep in mind that this is (for now) only designed to be used as a PoC, with no built-in resiliency on failures of any kind.
+
+Requirements:
+ - Ubuntu 22.04 (a clean installation will work best).
+ - 10 CPU cores and 24 GiB of memory.
+ - Shell access as a user with sudo privileges.
+ - A TLS certificate for the Netdata Cloud On-Prem PoC. A single endpoint is required, and the certificate must be trusted by all entities connecting to the On-Prem installation by any means.
+ - AWS ID and Key - contact the Netdata Product Team at info@netdata.cloud
+ - License Key - contact the Netdata Product Team at info@netdata.cloud
+
+To install the whole environment, log in to the designated host and run:
+```shell
+curl https://netdata-cloud-netdata-static-content.s3.amazonaws.com/provision.sh -o provision.sh
+chmod +x provision.sh
+sudo ./provision.sh --install
+```
+
+What does the script do during installation?
+1. It prompts the user to provide:
+   - The ID and KEY for accessing AWS (to pull Helm charts and container images)
+   - The License Key
+   - The URL under which the Netdata Cloud On-Prem PoC is going to run (without a protocol prefix such as `https://`)
+   - The path to the certificate file (PEM format)
+   - The path to the private key file (PEM format)
+2. Once all of the information has been provided, the installation starts. The script will install:
+   - Helm
+   - Kubectl
+   - AWS CLI
+   - A K3s cluster (single node)
+3. When all the required software is installed, the script provisions the K3s cluster with the gathered data.
+
+After the cluster is provisioned, Netdata is ready to be used.
+
+##### How to log in?
+Because this is a PoC that requires zero configuration, logging in by email is the only method that works. Every email that Netdata Cloud On-Prem sends will appear in Mailcatcher, which acts as the SMTP server and provides a simple GUI for reading the mails. Steps:
+1. Open the Netdata Cloud On-Prem PoC in a web browser at the URL you specified.
+2. Provide your email and use the button to confirm.
+3. Mailcatcher catches all the emails, so go to `/mailcatcher`, find yours, and click the link.
+4. You are now logged in to Netdata. Add your first nodes!
+
+##### How to remove Netdata Cloud On-Prem PoC?
+To uninstall the whole PoC, use the same script that installed it, with the `--uninstall` switch.
+ +```shell +cd + + diff --git a/web/gui/index.html b/web/gui/index.html index d63f049638531d..5ffbdb7505336e 100644 --- a/web/gui/index.html +++ b/web/gui/index.html @@ -24,13 +24,15 @@ apiUrl: "https://app.netdata.cloud", cloudUrl: "https://app.netdata.cloud", demoSlug: "netdata-demo", - demoFavourites: {"postgresql":["postgres"],"redis":["redis"],"dns-query":["dns_query"],"http-endpoints":["httpcheck"],"nginx":["web_log","nginx"],"apache":["apache"],"host-reachability":["ping"],"cassandra":["cassandra"],"coredns":["coredns"],"logind":["logind"],"iis":["iis"],"active-directory":["ad"],"windows":["windows","ad","iis","mssql","exchange","netframework"],"docker":["cgroup","docker"]}, + demoFavourites: {"postgresql":["postgres"],"redis":["redis"],"dns-query":["dns_query"],"http-endpoints":["httpcheck"],"nginx":["web_log","nginx"],"apache":["apache"],"host-reachability":["ping"],"cassandra":["cassandra"],"coredns":["coredns"],"logind":["logind"],"iis":["iis"],"active-directory":["ad"],"windows":["windows","ad","iis","mssql","exchange","netframework"],"docker":["cgroup","docker"],"ups":["upsd"]}, webpackPublicPath: "https://app.netdata.cloud", agentApiUrl: searchParams.get("agent") || getBasename(), - version: "6.29.0", + posthogToken: "phc_hnhlqe6D2Q4IcQNrFItaqdXJAxQ8RcHkPAFAp74pubv", + version: "6.66.1", tracking: false, cookieDomain: ".netdata.cloud", - onprem: false + onprem: false, + nodeEnv: "production" } function loadStyle(url, { media, insertAfter: aref, insertBefore: bref, rel, type } = {}) { @@ -66,7 +68,7 @@ } loadStyle(window.envSettings.agentApiUrl + "/v2/static/splash.css") - loadStyle(window.envSettings.agentApiUrl + "/v2/favicon.ico", {rel: "icon", type: "image/x-icon"})

Welcome to Netdata

Loading latest Netdata UI...

We couldn't load the latest Netdata UI. You can try again
Or you can load the old single node dashboard or a local copy of Netdata UI

Welcome to Netdata

Loading latest Netdata UI...

We couldn't load the latest Netdata UI. You can try again
Or you can load the old single node dashboard or a local copy of Netdata UI
\ No newline at end of file + }).catch(() => {}) \ No newline at end of file diff --git a/web/gui/old/index.html b/web/gui/old/index.html index 87499c617283e6..4b5707a2d06690 100644 --- a/web/gui/old/index.html +++ b/web/gui/old/index.html @@ -28,7 +28,7 @@ - + @@ -94,7 +94,7 @@
  •  
  • - +
  •  
  •  
  • @@ -1156,7 +1156,7 @@
    - For progress reports and key netdata updates: follow netdata on twitter. + For progress reports and key netdata updates: follow netdata on twitter.
    You can also follow netdata on facebook, or watch netdata on github. diff --git a/web/gui/registry-access.html b/web/gui/registry-access.html index ffa008a9863093..75a403034868c4 100644 --- a/web/gui/registry-access.html +++ b/web/gui/registry-access.html @@ -14,7 +14,7 @@ var nodes = helloData.nodes || [] var doAccess = function(mg, nm, token) { - fetch(helloData.registry + '/api/v1/registry?action=access&machine=' + (mg || helloData.machine_guid) + '&name=' + encodeURIComponent(nm || helloData.hostname) + '&url=' + encodeURIComponent(originUrl) + '&agent=' + (helloData.agent ? helloData.agent.machine_guid : ""), { cache: "no-cache", credentials: "include", headers: { Authorization: 'Bearer ' + (token || localStorage.getItem("registry-netdata-token")) } }) + fetch(helloData.registry + '/api/v1/registry?action=access&machine=' + (mg || helloData.machine_guid) + '&name=' + encodeURIComponent(nm || helloData.hostname) + '&url=' + encodeURIComponent(originUrl) + '&agent=' + (helloData.agent ? helloData.agent.machine_guid : ""), { cache: "no-cache", credentials: "include", headers: { ["X-Netdata-Auth"]: 'Bearer ' + (token || localStorage.getItem("registry-netdata-token")) } }) .then(function(response) { return response.json() }) .then(function(data) { var token = data.person_guid @@ -39,7 +39,7 @@ }) } doAccess(helloData.machine_guid, helloData.hostname) - }) + }).catch(() => {}) } try { @@ -62,7 +62,7 @@ } window.addEventListener('message', function(event) { - var isNetdataMessage = Array.isArray(event.data) ? event.data.length === 3 ? event.data[0] === "netdata-registry" : isValidHttpUrl(event.data[0]) : true + var isNetdataMessage = Array.isArray(event.data) ? event.data.length === 3 ? event.data[0] === "netdata-registry" : isValidHttpUrl(event.data[0]) : false if (!isNetdataMessage) return diff --git a/web/gui/registry-alert-redirect.html b/web/gui/registry-alert-redirect.html index 0ef68dce08d87d..66b4be5111f23a 100644 --- a/web/gui/registry-alert-redirect.html +++ b/web/gui/registry-alert-redirect.html @@ -1,4 +1,4 @@ -Netdata Registry

    Netdata Alert Notifications

    Trying to find a Netdata Agent for this alert...

    Netdata Alert Notifications

    Trying to find a Netdata Agent for this alert...
    \ No newline at end of file + }) \ No newline at end of file diff --git a/web/gui/registry-hello.html b/web/gui/registry-hello.html index e95f48c47cd32b..7fba5662c5390b 100644 --- a/web/gui/registry-hello.html +++ b/web/gui/registry-hello.html @@ -14,7 +14,7 @@ var nodes = data.nodes || [] var doAccess = function(mg, nm, token) { - fetch(data.registry + '/api/v1/registry?action=access&machine=' + (mg || data.machine_guid) + '&name=' + encodeURIComponent(nm || data.hostname) + '&url=' + encodeURIComponent(originUrl) + '&agent=' + (data.agent ? data.agent.machine_guid : ""), { cache: "no-cache", credentials: "include", headers: { Authorization: 'Bearer ' + (token || localStorage.getItem("registry-netdata-token")) } }) + fetch(data.registry + '/api/v1/registry?action=access&machine=' + (mg || data.machine_guid) + '&name=' + encodeURIComponent(nm || data.hostname) + '&url=' + encodeURIComponent(originUrl) + '&agent=' + (data.agent ? data.agent.machine_guid : ""), { cache: "no-cache", credentials: "include", headers: { ["X-Netdata-Auth"]: 'Bearer ' + (token || localStorage.getItem("registry-netdata-token")) } }) .then(function(response) { return response.json() }) .then(function(data) { var token = data.person_guid @@ -39,7 +39,7 @@ }) } doAccess(data.machine_guid, data.hostname) - }) + }).catch(() => {}) } try { diff --git a/web/gui/v2/.well-known/assetlinks.json b/web/gui/v2/.well-known/assetlinks.json new file mode 100644 index 00000000000000..5e69347367cdde --- /dev/null +++ b/web/gui/v2/.well-known/assetlinks.json @@ -0,0 +1,12 @@ +[ + { + "relation": ["delegate_permission/common.handle_all_urls"], + "target": { + "namespace": "android_app", + "package_name": "cloud.netdata.android", + "sha256_cert_fingerprints": [ + "1F:B8:9A:45:AD:83:76:DD:7E:A5:9A:07:82:4A:2F:99:3E:0D:EB:64:FA:50:76:59:65:3F:CC:38:7F:32:28:AA" + ] + } + } +] diff --git a/web/gui/v2/1178.b54a742702a74832f653.chunk.js b/web/gui/v2/1178.b54a742702a74832f653.chunk.js new file mode 100644 index 00000000000000..2acba9946cbf45 --- /dev/null +++ b/web/gui/v2/1178.b54a742702a74832f653.chunk.js @@ -0,0 +1 @@ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="5efb066b-85eb-4f9e-b200-c0c2ba7edf9b",e._sentryDebugIdIdentifier="sentry-dbid-5efb066b-85eb-4f9e-b200-c0c2ba7edf9b")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[1178],{1178:function(e,t,n){n.r(t),n.d(t,{MagicLinkSent:function(){return m}});n(74916),n(64765);var i=n(67294),a=n(71893),c=n(89250),o=n(93416),r=n(15794),l=n(27266),d=n(87562),f=n(1043),s=n(39979),u=n(63346),b=(0,a.default)(o.Text).attrs({role:"button"}).withConfig({displayName:"magicLinkSent__ButtonText",componentId:"sc-ua6kmo-0"})(["cursor:pointer;"]),g=(0,s.Z)(b),m=function(){var e=(0,c.TH)(),t=e.search,n=e.state,a=((void 0===n?{}:n)||{}).email,r=window.location.hash,s=(0,f.iM)(),b=(0,i.useCallback)((function(){var e=encodeURIComponent((0,d.$)("/sign-in".concat(t),r)),n=encodeURIComponent((0,d.$)("/sign-up/verify".concat(t),r));s({email:a,redirectURI:e,registerURI:n,resend:!0})}),[a]);return 
i.createElement(u.ZP,{feature:"MagicLinkSent",email:a},i.createElement(l.Z,{"data-testid":"magicLinkSent"},i.createElement(o.H1,{textAlign:"center"},"Check your email!"),i.createElement(o.Flex,{column:!0,padding:[0,0,8,0],gap:8,justifyContent:"between",alignItems:"center"},i.createElement(o.Flex,{column:!0,gap:1,alignItems:"center"},i.createElement(o.TextBig,{textAlign:"center"},"We have sent an email to ",i.createElement(o.TextBig,{strong:!0},!!a&&a),"."),i.createElement(o.TextBig,{textAlign:"center"},"Please find this email (check your spam folder too) and click the button there to continue.")),i.createElement(o.Text,{textAlign:"center"},"Didn't receive it?"," ",i.createElement(g,{onClick:b,color:"primary","data-ga":"magicLikSent::click-resent::check-email-view"},"Click here to resend it.")))))};t.default=(0,r.k)(m,"light")}}]); \ No newline at end of file diff --git a/web/gui/v2/1193.efd539c053944de2599b.chunk.js b/web/gui/v2/1193.efd539c053944de2599b.chunk.js new file mode 100644 index 00000000000000..a00fa2f6437511 --- /dev/null +++ b/web/gui/v2/1193.efd539c053944de2599b.chunk.js @@ -0,0 +1 @@ +!function(){try{var t="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},e=(new Error).stack;e&&(t._sentryDebugIds=t._sentryDebugIds||{},t._sentryDebugIds[e]="313f3176-b442-46a0-978c-073bd01b24db",t._sentryDebugIdIdentifier="sentry-dbid-313f3176-b442-46a0-978c-073bd01b24db")}catch(t){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[1193],{54857:function(t,e,n){Object.defineProperty(e,"__esModule",{value:!0}),Object.defineProperty(e,"DraggableCore",{enumerable:!0,get:function(){return d.default}}),e.default=void 0;var o=function(t,e){if(!e&&t&&t.__esModule)return t;if(null===t||"object"!==typeof t&&"function"!==typeof t)return{default:t};var n=p(e);if(n&&n.has(t))return n.get(t);var o={},r=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in t)if("default"!==a&&Object.prototype.hasOwnProperty.call(t,a)){var i=r?Object.getOwnPropertyDescriptor(t,a):null;i&&(i.get||i.set)?Object.defineProperty(o,a,i):o[a]=t[a]}o.default=t,n&&n.set(t,o);return o}(n(67294)),r=f(n(45697)),a=f(n(73935)),i=f(n(86010)),s=n(81825),l=n(2849),u=n(9280),d=f(n(80783)),c=f(n(55904));function f(t){return t&&t.__esModule?t:{default:t}}function p(t){if("function"!==typeof WeakMap)return null;var e=new WeakMap,n=new WeakMap;return(p=function(t){return t?n:e})(t)}function g(){return g=Object.assign?Object.assign.bind():function(t){for(var e=1;e{(0,c.default)("Draggable: onDragStart: %j",e);if(!1===this.props.onStart(t,(0,l.createDraggableData)(this,e)))return!1;this.setState({dragging:!0,dragged:!0})})),h(this,"onDrag",((t,e)=>{if(!this.state.dragging)return!1;(0,c.default)("Draggable: onDrag: %j",e);const 
n=(0,l.createDraggableData)(this,e),o={x:n.x,y:n.y,slackX:0,slackY:0};if(this.props.bounds){const{x:t,y:e}=o;o.x+=this.state.slackX,o.y+=this.state.slackY;const[r,a]=(0,l.getBoundPosition)(this,o.x,o.y);o.x=r,o.y=a,o.slackX=this.state.slackX+(t-o.x),o.slackY=this.state.slackY+(e-o.y),n.x=o.x,n.y=o.y,n.deltaX=o.x-this.state.x,n.deltaY=o.y-this.state.y}if(!1===this.props.onDrag(t,n))return!1;this.setState(o)})),h(this,"onDragStop",((t,e)=>{if(!this.state.dragging)return!1;if(!1===this.props.onStop(t,(0,l.createDraggableData)(this,e)))return!1;(0,c.default)("Draggable: onDragStop: %j",e);const n={dragging:!1,slackX:0,slackY:0};if(Boolean(this.props.position)){const{x:t,y:e}=this.props.position;n.x=t,n.y=e}this.setState(n)})),this.state={dragging:!1,dragged:!1,x:t.position?t.position.x:t.defaultPosition.x,y:t.position?t.position.y:t.defaultPosition.y,prevPropsPosition:{...t.position},slackX:0,slackY:0,isElementSVG:!1},!t.position||t.onDrag||t.onStop||console.warn("A `position` was applied to this , without drag handlers. This will make this component effectively undraggable. Please attach `onDrag` or `onStop` handlers so you can adjust the `position` of this element.")}componentDidMount(){"undefined"!==typeof window.SVGElement&&this.findDOMNode()instanceof window.SVGElement&&this.setState({isElementSVG:!0})}componentWillUnmount(){this.setState({dragging:!1})}findDOMNode(){var t,e;return null!==(t=null===(e=this.props)||void 0===e||null===(e=e.nodeRef)||void 0===e?void 0:e.current)&&void 0!==t?t:a.default.findDOMNode(this)}render(){const{axis:t,bounds:e,children:n,defaultPosition:r,defaultClassName:a,defaultClassNameDragging:u,defaultClassNameDragged:c,position:f,positionOffset:p,scale:h,...m}=this.props;let y={},b=null;const v=!Boolean(f)||this.state.dragging,D=f||r,S={x:(0,l.canDragX)(this)&&v?this.state.x:D.x,y:(0,l.canDragY)(this)&&v?this.state.y:D.y};this.state.isElementSVG?b=(0,s.createSVGTransform)(S,p):y=(0,s.createCSSTransform)(S,p);const w=(0,i.default)(n.props.className||"",a,{[u]:this.state.dragging,[c]:this.state.dragged});return o.createElement(d.default,g({},m,{onStart:this.onDragStart,onDrag:this.onDrag,onStop:this.onDragStop}),o.cloneElement(o.Children.only(n),{className:w,style:{...n.props.style,...y},transform:b}))}}e.default=m,h(m,"displayName","Draggable"),h(m,"propTypes",{...d.default.propTypes,axis:r.default.oneOf(["both","x","y","none"]),bounds:r.default.oneOfType([r.default.shape({left:r.default.number,right:r.default.number,top:r.default.number,bottom:r.default.number}),r.default.string,r.default.oneOf([!1])]),defaultClassName:r.default.string,defaultClassNameDragging:r.default.string,defaultClassNameDragged:r.default.string,defaultPosition:r.default.shape({x:r.default.number,y:r.default.number}),positionOffset:r.default.shape({x:r.default.oneOfType([r.default.number,r.default.string]),y:r.default.oneOfType([r.default.number,r.default.string])}),position:r.default.shape({x:r.default.number,y:r.default.number}),className:u.dontSetMe,style:u.dontSetMe,transform:u.dontSetMe}),h(m,"defaultProps",{...d.default.defaultProps,axis:"both",bounds:!1,defaultClassName:"react-draggable",defaultClassNameDragging:"react-draggable-dragging",defaultClassNameDragged:"react-draggable-dragged",defaultPosition:{x:0,y:0},scale:1})},80783:function(t,e,n){Object.defineProperty(e,"__esModule",{value:!0}),e.default=void 0;var o=function(t,e){if(!e&&t&&t.__esModule)return t;if(null===t||"object"!==typeof t&&"function"!==typeof t)return{default:t};var n=c(e);if(n&&n.has(t))return n.get(t);var 
o={},r=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in t)if("default"!==a&&Object.prototype.hasOwnProperty.call(t,a)){var i=r?Object.getOwnPropertyDescriptor(t,a):null;i&&(i.get||i.set)?Object.defineProperty(o,a,i):o[a]=t[a]}o.default=t,n&&n.set(t,o);return o}(n(67294)),r=d(n(45697)),a=d(n(73935)),i=n(81825),s=n(2849),l=n(9280),u=d(n(55904));function d(t){return t&&t.__esModule?t:{default:t}}function c(t){if("function"!==typeof WeakMap)return null;var e=new WeakMap,n=new WeakMap;return(c=function(t){return t?n:e})(t)}function f(t,e,n){return(e=function(t){var e=function(t,e){if("object"!==typeof t||null===t)return t;var n=t[Symbol.toPrimitive];if(void 0!==n){var o=n.call(t,e||"default");if("object"!==typeof o)return o;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===e?String:Number)(t)}(t,"string");return"symbol"===typeof e?e:String(e)}(e))in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t}const p={start:"touchstart",move:"touchmove",stop:"touchend"},g={start:"mousedown",move:"mousemove",stop:"mouseup"};let h=g;class m extends o.Component{constructor(){super(...arguments),f(this,"dragging",!1),f(this,"lastX",NaN),f(this,"lastY",NaN),f(this,"touchIdentifier",null),f(this,"mounted",!1),f(this,"handleDragStart",(t=>{if(this.props.onMouseDown(t),!this.props.allowAnyClick&&"number"===typeof t.button&&0!==t.button)return!1;const e=this.findDOMNode();if(!e||!e.ownerDocument||!e.ownerDocument.body)throw new Error(" not mounted on DragStart!");const{ownerDocument:n}=e;if(this.props.disabled||!(t.target instanceof n.defaultView.Node)||this.props.handle&&!(0,i.matchesSelectorAndParentsTo)(t.target,this.props.handle,e)||this.props.cancel&&(0,i.matchesSelectorAndParentsTo)(t.target,this.props.cancel,e))return;"touchstart"===t.type&&t.preventDefault();const o=(0,i.getTouchIdentifier)(t);this.touchIdentifier=o;const r=(0,s.getControlPosition)(t,o,this);if(null==r)return;const{x:a,y:l}=r,d=(0,s.createCoreData)(this,a,l);(0,u.default)("DraggableCore: handleDragStart: %j",d),(0,u.default)("calling",this.props.onStart);!1!==this.props.onStart(t,d)&&!1!==this.mounted&&(this.props.enableUserSelectHack&&(0,i.addUserSelectStyles)(n),this.dragging=!0,this.lastX=a,this.lastY=l,(0,i.addEvent)(n,h.move,this.handleDrag),(0,i.addEvent)(n,h.stop,this.handleDragStop))})),f(this,"handleDrag",(t=>{const e=(0,s.getControlPosition)(t,this.touchIdentifier,this);if(null==e)return;let{x:n,y:o}=e;if(Array.isArray(this.props.grid)){let t=n-this.lastX,e=o-this.lastY;if([t,e]=(0,s.snapToGrid)(this.props.grid,t,e),!t&&!e)return;n=this.lastX+t,o=this.lastY+e}const r=(0,s.createCoreData)(this,n,o);(0,u.default)("DraggableCore: handleDrag: %j",r);if(!1!==this.props.onDrag(t,r)&&!1!==this.mounted)this.lastX=n,this.lastY=o;else try{this.handleDragStop(new MouseEvent("mouseup"))}catch(a){const t=document.createEvent("MouseEvents");t.initMouseEvent("mouseup",!0,!0,window,0,0,0,0,0,!1,!1,!1,!1,0,null),this.handleDragStop(t)}})),f(this,"handleDragStop",(t=>{if(!this.dragging)return;const e=(0,s.getControlPosition)(t,this.touchIdentifier,this);if(null==e)return;let{x:n,y:o}=e;if(Array.isArray(this.props.grid)){let t=n-this.lastX||0,e=o-this.lastY||0;[t,e]=(0,s.snapToGrid)(this.props.grid,t,e),n=this.lastX+t,o=this.lastY+e}const r=(0,s.createCoreData)(this,n,o);if(!1===this.props.onStop(t,r)||!1===this.mounted)return!1;const 
a=this.findDOMNode();a&&this.props.enableUserSelectHack&&(0,i.removeUserSelectStyles)(a.ownerDocument),(0,u.default)("DraggableCore: handleDragStop: %j",r),this.dragging=!1,this.lastX=NaN,this.lastY=NaN,a&&((0,u.default)("DraggableCore: Removing handlers"),(0,i.removeEvent)(a.ownerDocument,h.move,this.handleDrag),(0,i.removeEvent)(a.ownerDocument,h.stop,this.handleDragStop))})),f(this,"onMouseDown",(t=>(h=g,this.handleDragStart(t)))),f(this,"onMouseUp",(t=>(h=g,this.handleDragStop(t)))),f(this,"onTouchStart",(t=>(h=p,this.handleDragStart(t)))),f(this,"onTouchEnd",(t=>(h=p,this.handleDragStop(t))))}componentDidMount(){this.mounted=!0;const t=this.findDOMNode();t&&(0,i.addEvent)(t,p.start,this.onTouchStart,{passive:!1})}componentWillUnmount(){this.mounted=!1;const t=this.findDOMNode();if(t){const{ownerDocument:e}=t;(0,i.removeEvent)(e,g.move,this.handleDrag),(0,i.removeEvent)(e,p.move,this.handleDrag),(0,i.removeEvent)(e,g.stop,this.handleDragStop),(0,i.removeEvent)(e,p.stop,this.handleDragStop),(0,i.removeEvent)(t,p.start,this.onTouchStart,{passive:!1}),this.props.enableUserSelectHack&&(0,i.removeUserSelectStyles)(e)}}findDOMNode(){var t,e;return null!==(t=this.props)&&void 0!==t&&t.nodeRef?null===(e=this.props)||void 0===e||null===(e=e.nodeRef)||void 0===e?void 0:e.current:a.default.findDOMNode(this)}render(){return o.cloneElement(o.Children.only(this.props.children),{onMouseDown:this.onMouseDown,onMouseUp:this.onMouseUp,onTouchEnd:this.onTouchEnd})}}e.default=m,f(m,"displayName","DraggableCore"),f(m,"propTypes",{allowAnyClick:r.default.bool,children:r.default.node.isRequired,disabled:r.default.bool,enableUserSelectHack:r.default.bool,offsetParent:function(t,e){if(t[e]&&1!==t[e].nodeType)throw new Error("Draggable's offsetParent must be a DOM Node.")},grid:r.default.arrayOf(r.default.number),handle:r.default.string,cancel:r.default.string,nodeRef:r.default.object,onStart:r.default.func,onDrag:r.default.func,onStop:r.default.func,onMouseDown:r.default.func,scale:r.default.number,className:l.dontSetMe,style:l.dontSetMe,transform:l.dontSetMe}),f(m,"defaultProps",{allowAnyClick:!1,disabled:!1,enableUserSelectHack:!0,onStart:function(){},onDrag:function(){},onStop:function(){},onMouseDown:function(){},scale:1})},61193:function(t,e,n){const{default:o,DraggableCore:r}=n(54857);t.exports=o,t.exports.default=o,t.exports.DraggableCore=r},81825:function(t,e,n){Object.defineProperty(e,"__esModule",{value:!0}),e.addClassName=u,e.addEvent=function(t,e,n,o){if(!t)return;const r={capture:!0,...o};t.addEventListener?t.addEventListener(e,n,r):t.attachEvent?t.attachEvent("on"+e,n):t["on"+e]=n},e.addUserSelectStyles=function(t){if(!t)return;let e=t.getElementById("react-draggable-style-el");e||(e=t.createElement("style"),e.type="text/css",e.id="react-draggable-style-el",e.innerHTML=".react-draggable-transparent-selection *::-moz-selection {all: inherit;}\n",e.innerHTML+=".react-draggable-transparent-selection *::selection {all: inherit;}\n",t.getElementsByTagName("head")[0].appendChild(e));t.body&&u(t.body,"react-draggable-transparent-selection")},e.createCSSTransform=function(t,e){const n=l(t,e,"px");return{[(0,r.browserPrefixToKey)("transform",r.default)]:n}},e.createSVGTransform=function(t,e){return l(t,e,"")},e.getTouch=function(t,e){return t.targetTouches&&(0,o.findInArray)(t.targetTouches,(t=>e===t.identifier))||t.changedTouches&&(0,o.findInArray)(t.changedTouches,(t=>e===t.identifier))},e.getTouchIdentifier=function(t){if(t.targetTouches&&t.targetTouches[0])return 
[Minified webpack bundle chunks from the pre-built Netdata dashboard assets under web/gui/: react-draggable DOM and browser-prefix helpers, chart popover dimension rows and dimension grid, chart toolbox filter dropdowns (aggregation method, context scope, group-by, dimensions, instances, labels, nodes), time-aggregation method and percentile/trimmed options, reset and totals controls, and the gauge, group-box (heatmap) and popover renderers. The generated, minified JavaScript is not reproduced; the extracted text was also damaged (content following "<" comparisons was stripped), so only this summary of the affected modules is kept.]
i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(67294)),a=c(r(71893)),o=r(93416),i=r(54576),l=r(12460),u=c(r(47201));function c(e){return e&&e.__esModule?e:{default:e}}function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}var d=(0,a.default)(o.Flex).attrs((function(e){return{round:!0,border:{side:"all",color:"elementBackground"},width:{min:"196px",max:e.maxWidth?e.maxWidth+"px":"80vw"},background:"dropdown",column:!0,padding:[4],gap:1}})).withConfig({displayName:"labels__Container",componentId:"sc-5eymlk-0"})(["box-shadow:0px 8px 12px rgba(9,30,66,0.15),0px 0px 1px rgba(9,30,66,0.31);"]),s=(0,a.default)(l.BaseColorBar).attrs({position:"absolute",top:1,left:0,backgroundOpacity:.4,round:.5}).withConfig({displayName:"labels__ColorBackground",componentId:"sc-5eymlk-1"})([""]),p=a.default.div.withConfig({displayName:"labels__Grid",componentId:"sc-5eymlk-2"})(["display:grid;width:100%;grid-template-columns:auto 2fr;column-gap:8px;align-items:center;"]),m=(0,n.forwardRef)((function(e,t){var r=e.index,a=e.label,l=e.groupLabel,c=e.data,f=e.id,m=(0,i.useChart)(),g=m.getAttribute("viewDimensions"),v=(0,i.useAttributeValue)("min"),b=(0,i.useAttributeValue)("max"),h=.9*m.getUI().getChartWidth(),y=m.getRowDimensionValue(f,c),w=(0,i.useConverted)(y,{valueKey:"percent"});return n.default.createElement(d,{"data-testid":"chartPopover-labels",maxWidth:h,gap:2,ref:t},n.default.createElement(o.Flex,{column:!0,gap:1},n.default.createElement(o.TextMicro,null,l),n.default.createElement(o.TextMicro,{strong:!0},a),n.default.createElement(o.Flex,{alignItems:"center",position:"relative"},n.default.createElement(s,{value:y,min:v,max:b,bg:m.getThemeAttribute("themeGroupBoxesMax"),height:"18px"}),n.default.createElement(o.TextMicro,{padding:[1.5,2],strong:!0},w,"-"!==w&&"%"))),!(null==g||!g.labels)&&n.default.createElement(p,{gap:1,column:!0},Object.keys(g.labels).map((function(e){var t;return n.default.createElement(u.default,{key:e,label:e,value:null==(t=g.labels[e])?void 0:t[r]})}))))}));t.default=(0,n.memo)(m)},78927:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=r(54576);t.default=function(e){var t=(0,n.useChart)(),r=(0,n.useForceUpdate)();return(0,n.useImmediateListener)((function(){return t.getUI(e).on("groupBoxChanged",r)}),[t]),t.getUI(e).getGroupBox()}},51653:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=r(54576);t.default=function(e){var t=(0,n.useChart)(),r=(0,n.useForceUpdate)();return(0,n.useImmediateListener)((function(){return t.getUI(e).on("groupBoxRowDataChanged",r)}),[t]),t.getUI(e).getGroupBoxRowData()}},31859:function(e,t){t.__esModule=!0,t.getYPosition=t.getXPosition=t.getRows=t.getOffsetPosition=t.getFullWidth=t.getFullHeight=t.getColumns=t.getCellBoxSize=t.defaultPadding=t.defaultCellSize=t.defaultAspectRatio=void 0;var r=t.defaultCellSize=17,n=t.defaultPadding=1,a=t.defaultAspectRatio=Math.round(16/9);t.getCellBoxSize=function(e,t){return void 0===e&&(e=r),void 0===t&&(t=n),e-t},t.getRows=function(e,t){return void 0===t&&(t=a),Math.sqrt(e.length/t)||1},t.getColumns=function(e,t){return void 0===t&&(t=a),e*t||1},t.getXPosition=function(e,t,n){return void 0===n&&(n=r),Math.floor(t%e)*n},t.getYPosition=function(e,t,n){return void 0===n&&(n=r),Math.floor(t/e)*n},t.getFullWidth=function(e,t){return void 0===t&&(t=r),Math.ceil(e)*t},t.getFullHeight=function(e,t,n){return void 0===t&&(t=r),void 
0===n&&(n=r),Math.ceil(e)*t+n},t.getOffsetPosition=function(e,t){return void 0===t&&(t=r),Math.floor(e/t)}},79779:function(e,t,r){t.__esModule=!0,t.default=t.Container=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(67294)),a=r(93416),o=r(54576),i=c(r(2713)),l=c(r(21040)),u=r(96808);function c(e){return e&&e.__esModule?e:{default:e}}function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}function d(){return d=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,i),d=(0,n.useState)(""),s=d[0],p=d[1],m=(0,n.useState)(),g=m[0],v=m[1];return(0,n.useEffect)((function(){if(g){for(var e=g.offsetWidth,r=0;g.scrollWidth>e;)g.textContent=(0,a.default)(g.textContent,r),r+=1;g.textContent!==t&&p(t)}}),[t,g]),n.default.createElement(o.default,{content:!u&&s?t:"",align:"bottom",isBasic:!0},n.default.createElement(l,c({truncate:!0,ref:v},f),t))}},3640:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=r(71893),a=(0,n.keyframes)(["from{opacity:0.4;}to{opacity:1;}"]),o=(0,n.css)(["animation:"," 1.6s ease-in infinite;"],a);t.default=o},58802:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=r(67294),a=r(63840);t.default=function(){var e=(0,n.useState)(!1),t=e[0],r=e[1],o=(0,n.useRef)(),i=(0,n.useRef)(),l=(0,n.useCallback)((function(){o.current&&((0,a.unstable_cancelCallback)(o.current),o.current=void 0,r(!1)),i.current&&(i.current(),i.current=void 0)}),[]),u=(0,n.useCallback)((function(e,t){void 0===t&&(t=a.unstable_IdlePriority),l();var n=e(),u=(0,a.unstable_runWithPriority)(t,(function e(){var t=n.next();if(i.current=t.value,!t.done)return e;r(!1)}));u&&((0,a.unstable_runWithPriority)(a.unstable_ImmediatePriority,(function(){return r(!0)})),o.current=(0,a.unstable_scheduleCallback)(t,u))}),[]);return(0,n.useEffect)((function(){return l}),[]),[t,u,l]}},70486:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=c(r(67294)),a=r(54576),o=c(r(2855)),i=c(r(92767)),l=c(r(91272)),u=c(r(86954));function c(e){return e&&e.__esModule?e:{default:e}}t.default=function(e,t){void 0===t&&(t={});var r=(0,i.default)((0,o.default)((0,l.default)(e)));t.tile&&(r=(0,u.default)(r)),r=(0,a.withChartProvider)(r);return function(e){return n.default.createElement(r,e)}}},2855:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=o(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var i in e)if("default"!==i&&Object.prototype.hasOwnProperty.call(e,i)){var l=a?Object.getOwnPropertyDescriptor(e,i):null;l&&(l.get||l.set)?Object.defineProperty(n,i,l):n[i]=e[i]}return n.default=e,r&&r.set(e,n),n}(r(67294)),a=r(54576);function o(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(o=function(e){return e?r:t})(e)}function i(){return i=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return 
a}(t,o),m=(0,a.useChart)();return(0,a.useImmediateListener)((function(){if(u&&(!p.uiName||"default"===p.uiName)){var e=window.requestAnimationFrame(m.activate);return function(){window.cancelAnimationFrame(e),m.deactivate()}}}),[u,m,p.uiName]),n.default.createElement(e,l({ref:r,height:f,width:s},p))}))}},92767:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=i(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var l=a?Object.getOwnPropertyDescriptor(e,o):null;l&&(l.get||l.set)?Object.defineProperty(n,o,l):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(67294)),a=r(93416),o=r(54576);function i(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(i=function(e){return e?r:t})(e)}function l(){return l=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}var j=(0,a.default)(i.Text).withConfig({displayName:"withTile__Label",componentId:"sc-1oake2r-0"})(["line-height:1;font-size:",";"],(function(e){return e.fontSize})),E=(0,a.default)(i.Flex).attrs((function(e){var t=e.size,r=_(e,g);return O({background:"panelBg",round:!0,fontSize:parseInt(t/3,10),height:"100%",width:"100%"},r)})).withConfig({displayName:"withTile__ChartHeadWrapper",componentId:"sc-1oake2r-1"})(["font-size:","px;"],(function(e){return e.fontSize>11?11:e.fontSize<8?8:e.fontSize})),P=t.Title=function(){var e=(0,d.useChart)(),t=(0,d.useTitle)();return n.default.createElement(j,{fontSize:"1em",textAlign:"center",color:"sectionDescription",width:"80%",onClick:function(t){t.preventDefault(),e.sdk.trigger("goToLink",e)},cursor:"pointer",padding:[2,0,0]},t)},k=t.HeadWrapper=function(e){var t,r=e.children,a=e.uiName,g=_(e,v),b=(0,d.useOnResize)().parentWidth,h=(0,d.useAttributeValue)("focused"),w=null==(t=(0,d.useDimensionIds)())?void 0:t[0],O=(0,o.default)(a).width,x=parseInt((b||O)/30,10);x=x<20?20:x>50?50:x;var j=(0,d.useChart)(),k=(0,f.default)({onHover:j.focus,onBlur:j.blur,isOut:function(e){return!e||!e.closest("[data-toolbox]")&&!e.closest("[data-testid=chart]")}},[j]);return n.default.createElement(E,y({size:x},g,{ref:k}),n.default.createElement(i.Flex,{column:!0,width:5,padding:[2,0]},n.default.createElement(c.default,{plain:!0}),n.default.createElement(i.Collapsible,{open:h,column:!0,width:5},n.default.createElement(s.default,{column:!0,background:"elementBackground",border:"none",justifyContent:"start",plain:!0}))),n.default.createElement(i.Flex,{column:!0,alignItems:"center",justifyContent:"center",padding:[1,0,2],height:"100%",width:"100%",position:"relative",overflow:"hidden"},n.default.createElement(P,null),r),n.default.createElement(i.Flex,{column:!0,width:5,alignItems:"center",padding:[4,0],gap:2},"selected"===w&&n.default.createElement(n.default.Fragment,null,n.default.createElement(i.Flex,{column:!0,height:"100%",width:"2px",background:"neutralHighlight",justifyContent:"end"},n.default.createElement(p.ColorBar,{id:"selected",valueKey:"arp",width:"2px",styleDimension:"height",round:.5})),n.default.createElement(m.default,{content:"Anomaly rate for this metric"},n.default.createElement(u.default,{svg:l.default,color:"anomalyTextLite",size:"14px"})))))};t.ChartWrapper=(0,a.default)(i.Flex).attrs((function(e){return 
O({column:!0,justifyContent:"center",alignContent:"center",gap:2,position:"relative",width:"100%",height:"100%",overflow:"hidden"},e)})).withConfig({displayName:"withTile__ChartWrapper",componentId:"sc-1oake2r-2"})([""]),t.default=function(e){return function(t){var r=t.count,a=t.tile,o=void 0===a||a,i=t.height,l=void 0===i?"100%":i,u=t.width,c=void 0===u?"100%":u,f=_(t,b);return o?n.default.createElement(k,{count:r,uiName:f.uiName,height:l,width:c},n.default.createElement(e,f)):n.default.createElement(E,{size:20,height:l,width:c},n.default.createElement(e,f))}}},44473:function(e,t,r){t.__esModule=!0,t.default=t.ContentWrapper=t.Container=void 0;var n=h(r(67294)),a=h(r(71893)),o=r(93416),i=r(54576),l=r(36657),u=v(r(58607)),c=v(r(98079)),f=v(r(44945)),d=v(r(13828)),s=v(r(35815)),p=r(29270),m=v(r(22810)),g=v(r(18839));function v(e){return e&&e.__esModule?e:{default:e}}function b(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(b=function(e){return e?r:t})(e)}function h(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=b(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function y(){return y=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}var h=t.Color=(0,o.default)(i.Flex).attrs((function(e){return function(e){for(var t=1;t0?u<0?0:u:s,g=(0,l.useLatestValue)(t,{valueKey:r})||0;return a.default.createElement(y,m({value:g,min:p,max:s>f?s:f,valueKey:r,bg:i},n))},t.default=function(e){var t=e.id,r=b(e,s),n=(0,l.useChart)().selectDimensionColor(t);return n?a.default.createElement(h,m({bg:n},r)):null}},43946:function(e,t,r){t.__esModule=!0,t.default=t.Name=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(67294)),o=r(93416),i=r(54576),l=(n=r(39434))&&n.__esModule?n:{default:n},u=["children"],c=["id"];function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}function d(){return d=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}var p=t.Name=(0,a.memo)((0,a.forwardRef)((function(e,t){var r=e.children,n=s(e,u);return a.default.createElement(l.default,d({text:r,Component:o.TextMicro,color:"textDescription",whiteSpace:"nowrap",ref:t,"data-testid":"chartDimensions-name"},n))})));t.default=function(e){var t=e.id,r=s(e,c),n=(0,i.useChart)().getDimensionName(t);return a.default.createElement(p,r,n)}},9033:function(e,t,r){t.__esModule=!0,t.default=t.Value=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=l(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in 
e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(67294)),a=r(93416),o=r(54576),i=["visible"];function l(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(l=function(e){return e?r:t})(e)}function u(){return u=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,i),a=(0,o.useUnitSign)();return t?n.default.createElement(c,r,a):null};t.default=(0,n.memo)(f)},58358:function(e,t,r){t.__esModule=!0,t.default=t.Value=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=l(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(67294)),a=r(93416),o=r(54576),i=["id","visible","valueKey","period","objKey","Component"];function l(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(l=function(e){return e?r:t})(e)}function u(){return u=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,i),v=(0,o.useConvertedValue)(r,d,{valueKey:l,objKey:s,allowNull:!0});return a?n.default.createElement(m,u({},g,{ref:t}),v):null}));t.default=f},13589:function(e,t){t.__esModule=!0,t.tabs=t.actions=void 0;t.actions={values:"values",drillDown:"drillDown",compare:"compare",correlate:"correlate"},t.tabs={window:"window",selectedArea:"selectedArea"}},25830:function(e,t,r){t.__esModule=!0,t.valueColumn=t.minColumn=t.maxColumn=t.labelColumn=t.avgColumn=t.anomalyColumn=void 0;var n=v(r(67294)),a=r(93416),o=v(r(71893)),i=g(r(12460)),l=v(r(43946)),u=v(r(9033)),c=g(r(58358)),f=r(54576),d=v(r(88811)),s=r(19151),p=["children","fractionDigits"];function m(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(m=function(e){return e?r:t})(e)}function g(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=m(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function v(e){return e&&e.__esModule?e:{default:e}}function b(){return b=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,p),l=t.toString().split("."),u=l[0],f=l[1];return n.default.createElement(a.Flex,{alignItems:"center",justifyContent:"start"},n.default.createElement(c.Value,b({},i,{flex:!1,basis:3*1.6,textAlign:"right"}),u),"undefined"!==typeof f&&n.default.createElement(c.Value,i,"."),n.default.createElement(c.Value,b({as:a.Flex,flex:!1,width:1.6*o},i,{textAlign:"left"}),f))});t.valueColumn=function(){return{id:"value",header:n.default.createElement(a.TextMicro,null,"Value ",n.default.createElement(u.default,{visible:!0})),size:45,minSize:45,cell:function(e){var 
t=e.row,r=t.original,a=(t.depth,t.getCanExpand,t.getToggleExpandedHandler,t.getIsExpanded,(0,f.useVisibleDimensionId)(r)),o=(0,f.useChart)().getAttribute("unitsConversionFractionDigits");return n.default.createElement(c.default,{period:"latest",id:r,visible:a,Component:x,fractionDigits:o})},sortingFn:"basic"}},t.anomalyColumn=function(e){var t=e.period,r=e.objKey;return{id:r?r+"-arp":"arp",header:n.default.createElement(a.TextMicro,null,"AR %"),size:45,minSize:45,cell:function(e){var a=e.row,o=a.original,i=(a.depth,a.getCanExpand,a.getToggleExpandedHandler,a.getIsExpanded,(0,f.useVisibleDimensionId)(o));return n.default.createElement(c.default,{period:t,objKey:r,textAlign:"right",id:o,visible:i,valueKey:"arp",Component:x,fractionDigits:2,color:"anomalyTextFocus"})},sortingFn:"basic"}},t.minColumn=function(e){var t=e.period,r=e.objKey;return{id:r?r+"-min":"min",header:n.default.createElement(a.TextMicro,null,"Min ",n.default.createElement(u.default,{visible:!0})),size:45,minSize:45,cell:function(e){var a=e.row,o=a.original,i=(a.depth,a.getCanExpand,a.getToggleExpandedHandler,a.getIsExpanded,(0,f.useVisibleDimensionId)(o));return n.default.createElement(c.default,{period:t,objKey:r,textAlign:"right",id:o,visible:i,valueKey:"min",Component:x,fractionDigits:2})},sortingFn:"basic"}},t.avgColumn=function(e){var t=e.period,r=e.objKey;return{id:r?r+"-avg":"avg",header:n.default.createElement(a.TextMicro,null,"Avg ",n.default.createElement(u.default,{visible:!0})),size:45,minSize:45,cell:function(e){var a=e.row,o=a.original,i=(a.depth,a.getCanExpand,a.getToggleExpandedHandler,a.getIsExpanded,(0,f.useVisibleDimensionId)(o));return n.default.createElement(c.default,{period:t,objKey:r,textAlign:"right",id:o,visible:i,valueKey:"avg",Component:x,fractionDigits:2})},sortingFn:"basic"}},t.maxColumn=function(e){var t=e.period,r=e.objKey;return{id:r?r+"-max":"max",header:n.default.createElement(a.TextMicro,null,"Max ",n.default.createElement(u.default,{visible:!0})),size:45,minSize:45,cell:function(e){var a=e.row,o=a.original,i=(a.depth,a.getCanExpand,a.getToggleExpandedHandler,a.getIsExpanded,(0,f.useVisibleDimensionId)(o));return n.default.createElement(c.default,{period:t,objKey:r,textAlign:"right",id:o,visible:i,valueKey:"max",Component:x,fractionDigits:2})},sortingFn:"basic"}}},45275:function(e,t,r){t.__esModule=!0,t.default=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(67294)),o=r(93416),i=r(60773),l=r(41145),u=(n=r(1981))&&n.__esModule?n:{default:n},c=r(25830);function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}function d(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function s(e){for(var 
t=1;t0&&{backgroundOpacity:.4}),e.depth>0&&0===r&&{border:{side:"left",size:"4px"}}),headStyles:{height:"32px"},styles:{verticalAlign:"middle"},bulkActionsStyles:{padding:[2,0]},searchContainerStyles:{width:"100%",padding:[0,2,0,2]},searchStyles:{inputContainerStyles:{height:"20px",border:{side:"all",size:"1px",color:"inputBg"},background:"inputBg",round:!0,padding:[1,2],_hover:{border:{side:"all",size:"1px",color:"borderSecondary"}}}}}};t.default=function(){var e=(0,l.useDimensionIds)(),t=function(e,t){void 0===t&&(t={});var r=(0,l.useAttributeValue)("hoverX");return(0,a.useMemo)((function(){var n=s({period:e},t),l=s(s({},n),{},{objKey:"dbDimensions",unitsKey:"dbUnits"});return[{id:"Dimensions",header:function(){return a.default.createElement(o.TextSmall,null,"Dimension (",r?"hovering":"latest"," value)")},columns:[(0,c.labelColumn)(),(0,c.valueColumn)()]},{id:"visible",header:function(){return a.default.createElement(o.TextSmall,null,(0,i.uppercase)(e)," points")},columns:[(0,c.minColumn)(n),(0,c.avgColumn)(n),(0,c.maxColumn)(n),(0,c.anomalyColumn)(n)]},{id:"aggregated",header:function(){return a.default.createElement(o.TextSmall,null,"Aggregated points")},columns:[(0,c.minColumn)(l),(0,c.avgColumn)(l),(0,c.maxColumn)(l),(0,c.anomalyColumn)(l)]}]}),[e,!!r])}((0,l.useAttributeValue)("weightsTab")),r=(0,l.useChart)();return(0,a.useMemo)((function(){return r.makeChartUI("custom","d3pie")}),[]),a.default.createElement(o.Flex,{gap:2},a.default.createElement(o.Table,{enableSorting:!0,dataColumns:t,data:e,meta:m,width:"100%"}),a.default.createElement(u.default,{chart:r,uiName:"custom"}))}},41865:function(e,t,r){t.__esModule=!0,t.valueColumn=t.minColumn=t.maxColumn=t.labelColumn=t.avgColumn=t.anomalyColumn=void 0;var n=v(r(67294)),a=r(93416),o=v(r(71893)),i=g(r(12460)),l=v(r(43946)),u=v(r(9033)),c=g(r(58358)),f=r(54576),d=v(r(88811)),s=r(19151),p=["children","fractionDigits"];function m(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(m=function(e){return e?r:t})(e)}function g(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=m(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function v(e){return e&&e.__esModule?e:{default:e}}function b(){return b=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,p),l=t.toString().split("."),u=l[0],f=l[1];return n.default.createElement(a.Flex,{alignItems:"center",justifyContent:"start"},n.default.createElement(c.Value,b({},i,{flex:!1,basis:3*1.6,textAlign:"right"}),u),"undefined"!==typeof f&&n.default.createElement(c.Value,i,"."),n.default.createElement(c.Value,b({as:a.Flex,flex:!1,width:1.6*o},i,{textAlign:"left"}),f))});t.valueColumn=function(){return{id:"value",header:n.default.createElement(a.TextMicro,null,"Value ",n.default.createElement(u.default,{visible:!0})),size:45,minSize:45,cell:function(e){var t=e.row,r=t.original,a=(t.depth,t.getCanExpand,t.getToggleExpandedHandler,t.getIsExpanded,(0,f.useVisibleDimensionId)(r)),o=(0,f.useChart)().getAttribute("unitsConversionFractionDigits");return n.default.createElement(c.default,{period:"latest",id:r,visible:a,Component:x,fractionDigits:o})},sortingFn:"basic"}},t.anomalyColumn=function(e){var 
t=e.period,r=e.objKey;return{id:r?r+"-arp":"arp",header:n.default.createElement(a.TextMicro,null,"AR %"),size:45,minSize:45,cell:function(e){var a=e.row,o=a.original,i=(a.depth,a.getCanExpand,a.getToggleExpandedHandler,a.getIsExpanded,(0,f.useVisibleDimensionId)(o));return n.default.createElement(c.default,{period:t,objKey:r,textAlign:"right",id:o,visible:i,valueKey:"arp",Component:x,fractionDigits:2,color:"anomalyTextFocus"})},sortingFn:"basic"}},t.minColumn=function(e){var t=e.period,r=e.objKey;return{id:r?r+"-min":"min",header:n.default.createElement(a.TextMicro,null,"Min ",n.default.createElement(u.default,{visible:!0})),size:45,minSize:45,cell:function(e){var a=e.row,o=a.original,i=(a.depth,a.getCanExpand,a.getToggleExpandedHandler,a.getIsExpanded,(0,f.useVisibleDimensionId)(o));return n.default.createElement(c.default,{period:t,objKey:r,textAlign:"right",id:o,visible:i,valueKey:"min",Component:x,fractionDigits:2})},sortingFn:"basic"}},t.avgColumn=function(e){var t=e.period,r=e.objKey;return{id:r?r+"-avg":"avg",header:n.default.createElement(a.TextMicro,null,"Avg ",n.default.createElement(u.default,{visible:!0})),size:45,minSize:45,cell:function(e){var a=e.row,o=a.original,i=(a.depth,a.getCanExpand,a.getToggleExpandedHandler,a.getIsExpanded,(0,f.useVisibleDimensionId)(o));return n.default.createElement(c.default,{period:t,objKey:r,textAlign:"right",id:o,visible:i,valueKey:"avg",Component:x,fractionDigits:2})},sortingFn:"basic"}},t.maxColumn=function(e){var t=e.period,r=e.objKey;return{id:r?r+"-max":"max",header:n.default.createElement(a.TextMicro,null,"Max ",n.default.createElement(u.default,{visible:!0})),size:45,minSize:45,cell:function(e){var a=e.row,o=a.original,i=(a.depth,a.getCanExpand,a.getToggleExpandedHandler,a.getIsExpanded,(0,f.useVisibleDimensionId)(o));return n.default.createElement(c.default,{period:t,objKey:r,textAlign:"right",id:o,visible:i,valueKey:"max",Component:x,fractionDigits:2})},sortingFn:"basic"}}},65190:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=u(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(67294)),a=r(93416),o=r(60773),i=r(41145),l=r(41865);function u(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(u=function(e){return e?r:t})(e)}function c(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function f(e){for(var t=1;t0&&{backgroundOpacity:.4}),e.depth>0&&0===r&&{border:{side:"left",size:"4px"}}),headStyles:{height:"32px"},bulkActionsStyles:{padding:[2,0]},searchContainerStyles:{width:"100%",padding:[0,2,0,2]},searchStyles:{inputContainerStyles:{height:"20px",border:{side:"all",size:"1px",color:"inputBg"},background:"inputBg",round:!0,padding:[1,2],_hover:{border:{side:"all",size:"1px",color:"borderSecondary"}}}}}};t.default=function(){var e=(0,i.useDimensionIds)(),t=function(e,t){void 0===t&&(t={});var r=(0,i.useAttributeValue)("hoverX");return(0,n.useMemo)((function(){var 
i=f({period:e},t),u=f(f({},i),{},{objKey:"dbDimensions",unitsKey:"dbUnits"});return[{id:"Dimensions",header:function(){return n.default.createElement(a.TextSmall,null,"Dimension (",r?"hovering":"latest"," value)")},columns:[(0,l.labelColumn)(),(0,l.valueColumn)()]},{id:"visible",header:function(){return n.default.createElement(a.TextSmall,null,(0,o.uppercase)(e)," points")},columns:[(0,l.minColumn)(i),(0,l.avgColumn)(i),(0,l.maxColumn)(i),(0,l.anomalyColumn)(i)]},{id:"aggregated",header:function(){return n.default.createElement(a.TextSmall,null,"Aggregated points")},columns:[(0,l.minColumn)(u),(0,l.avgColumn)(u),(0,l.maxColumn)(u),(0,l.anomalyColumn)(u)]}]}),[e,!!r])}((0,i.useAttributeValue)("weightsTab"));return n.default.createElement(a.Flex,null,n.default.createElement(a.Table,{enableSorting:!0,dataColumns:t,data:e,meta:s,width:"100%"}))}},30483:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=u(r(67294)),a=(u(r(71893)),r(93416)),o=r(54576),i=r(13589),l=["onClick"];function u(e){return e&&e.__esModule?e:{default:e}}function c(){return c=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,l),r=(0,o.useChart)(),u=(0,o.useAttributeValue)("weightsAction"),f=(0,o.useAttributeValue)("weightsTab");return n.default.createElement(a.Flex,c({justifyContent:"between"},t),n.default.createElement(a.Flex,{gap:6},n.default.createElement(a.Flex,{gap:1},n.default.createElement(a.Button,{tiny:!0,neutral:i.actions.values!==u,icon:"line_chart",onClick:function(){return r.updateAttribute("weightsAction",i.actions.values)}}),n.default.createElement(a.Button,{tiny:!0,neutral:i.actions.drillDown!==u,icon:"weights_drill_down",onClick:function(){return r.updateAttribute("weightsAction",i.actions.drillDown)}}),n.default.createElement(a.Button,{tiny:!0,neutral:i.actions.compare!==u,icon:"weights_compare",onClick:function(){return r.updateAttribute("weightsAction",i.actions.compare)}}),n.default.createElement(a.Button,{tiny:!0,neutral:i.actions.correlate!==u,icon:"correlation_inv",onClick:function(){return r.updateAttribute("weightsAction",i.actions.correlate)}})),n.default.createElement(a.Flex,{gap:1},n.default.createElement(a.Button,{tiny:!0,neutral:i.tabs.window!==f,label:"Window",onClick:function(){return r.updateAttribute("weightsTab",i.tabs.window)}}),n.default.createElement(a.Button,{tiny:!0,neutral:i.tabs.selectedArea!==f,label:"Selected area",disabled:!0,onClick:function(){return r.updateAttribute("weightsTab",i.tabs.selectedArea)}}))))}},50657:function(e,t,r){t.__esModule=!0,t.default=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=d(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(67294)),o=r(93416),i=r(54576),l=f(r(30483)),u=f(r(45275)),c=(f(r(65190)),r(13589));function f(e){return e&&e.__esModule?e:{default:e}}function d(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(d=function(e){return e?r:t})(e)}var s=((n={})[c.actions.values]=u.default,n[c.actions.drillDown]=u.default,n[c.actions.compare]=u.default,n[c.actions.correlate]=u.default,n);t.default=function(){var 
e=(0,i.useAttributeValue)("expandedHeight"),t=(0,i.useAttributeValue)("weightsAction"),r=(0,a.useMemo)((function(){return s[t]||s.values}),[t]);return a.default.createElement(o.Flex,{height:e+"px",column:!0,gap:2,padding:[4]},a.default.createElement(l.default,null),a.default.createElement(r,null))}},58768:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=c(r(67294)),a=c(r(71893)),o=r(93416),i=c(r(59884)),l=r(54576),u=c(r(3701));function c(e){return e&&e.__esModule?e:{default:e}}function f(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function d(e,t,r){return(t=function(e){var t=function(e,t){if("object"!==typeof e||null===e)return e;var r=e[Symbol.toPrimitive];if(void 0!==r){var n=r.call(e,t||"default");if("object"!==typeof n)return n;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"===typeof t?t:String(t)}(t))in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}var s=(0,a.default)(o.Flex).attrs((function(e){return function(e){for(var t=1;t *{color:",";fill:",";}"],(0,o.getColor)("text"),(0,o.getColor)("text"));t.default=function(){var e=(0,l.useChart)(),t=(0,l.useAttributeValue)("expanded");return n.default.createElement(s,{cursor:"pointer",onClick:function(){return e.updateAttribute("expanded",!t)},alignSelf:"center"},n.default.createElement(u.default,{svg:i.default,color:"textLite",width:"7.5px",height:"5px",rotate:t?2:0}),n.default.createElement(o.TextSmall,{color:"textLite"},t?"Collapse":"Expand"),n.default.createElement(u.default,{svg:i.default,color:"textLite",width:"7.5px",height:"5px",rotate:t?2:0}))}},13289:function(e,t,r){t.__esModule=!0,t.default=t.Container=void 0;var n=p(r(67294)),a=r(93416),o=p(r(817)),i=p(r(39757)),l=p(r(38566)),u=r(41145),c=p(r(78122)),f=p(r(50657)),d=p(r(58768)),s=r(93501);function p(e){return e&&e.__esModule?e:{default:e}}function m(){return m=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,m),x=(0,l.useChart)(),_=(0,l.useAttributeValue)("showingInfo"),j=(0,l.useAttributeValue)("sparkline"),E=(0,o.default)({onHover:x.focus,onBlur:x.blur,isOut:function(e){return!e||!e.closest("[data-toolbox]")&&!e.closest("[data-testid=chart]")}},[x]),P=(0,a.default)((function(e){E.current=e,t&&(t.current=e)}))[1];return n.default.createElement(s.default,h({ref:P},j&&{border:!1},O),i&&n.default.createElement(u.default,null),y&&n.default.createElement(d.default,null),n.default.createElement(f.ContentWrapper,null,_?n.default.createElement(c.default,null):n.default.createElement(f.default,{uiName:w})),v&&n.default.createElement(p.default,null))}));t.ZP=(0,i.default)(y)},78122:function(e,t,r){t.__esModule=!0,t.getDateDiff=t.default=void 0;var n,a=(n=r(67294))&&n.__esModule?n:{default:n},o=r(93416),i=r(54576);function l(){return l=Object.assign?Object.assign.bind():function(e){for(var t=1;t=p&&!!n}},{value:o,unit:"h",check:function(){return r>=m&&!!o}},{value:i,unit:"m",check:function(){return!!i}},{value:a-=60*i,unit:"s",check:function(){return r9?t:"0"+t:r)+n}(c(c({},t),{},{hasPrev:!!e[e.length-1]}));return r&&e.push(r),e}),[])},v=function(e){var t=e.date,r=e.after,n=e.before,l=(0,i.useFormatTime)(1e3*r),u=(0,i.useFormatTime)(1e3*n),c=g(r,n);return 
a.default.createElement(o.Flex,{gap:1},a.default.createElement(o.TextNano,{color:"textDescription"},t," \u2022"),a.default.createElement(o.TextNano,{color:"textLite"},l," \u2192 ",u),a.default.createElement(o.TextNano,{color:"textDescription"},"\u2022 ",c))},b=function(e){var t=e.afterDate,r=e.beforeDate,n=e.after,l=e.before,u=(0,i.useFormatTime)(1e3*n),c=(0,i.useFormatTime)(1e3*l),f=g(n,l);return a.default.createElement(o.Flex,{gap:1},a.default.createElement(o.TextNano,{color:"textDescription"},t," \u2022"),a.default.createElement(o.TextNano,{color:"textLite"},u," \u2192"),a.default.createElement(o.TextNano,{color:"textDescription"},r," \u2022"),a.default.createElement(o.TextNano,{color:"textLite"},c),a.default.createElement(o.TextNano,{color:"textDescription"},"\u2022 ",f))},h=function(e){var t=e.after,r=e.before,n=(0,i.useFormatDate)(1e3*r),o=(0,i.useFormatDate)(1e3*t);return n===o?a.default.createElement(v,{date:o,after:t,before:r}):a.default.createElement(b,{afterDate:o,beforeDate:n,after:t,before:r})};t.default=function(e){var t,r=(0,i.useChart)(),n=(0,i.useAttributeValue)("overlays").highlight,u=null==n?void 0:n.range,c=null!=(t=null==n?void 0:n.moveX)?t:{},f=c.after,d=c.before;return a.default.createElement(o.Flex,l({padding:[1],gap:1,justifyContent:"between",flex:!0},e),u?a.default.createElement(o.Flex,{onClick:function(){d&&f&&r.moveX(f,d)},cursor:"pointer",gap:1,padding:[0,11,0]},a.default.createElement(o.TextNano,{color:"textLite"},"Highlight:"),a.default.createElement(h,{after:u[0],before:u[1]})):a.default.createElement("div",null),a.default.createElement(s,null))}},3171:function(e,t,r){t.__esModule=!0,t.default=t.SkeletonDimension=t.EmptyDimension=void 0;var n,a=g(r(67294)),o=r(71893),i=r(93416),l=g(r(12460)),u=g(r(43946)),c=g(r(58358)),f=g(r(81743)),d=(n=r(9033))&&n.__esModule?n:{default:n},s=r(54576),p=r(93501);function m(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(m=function(e){return e?r:t})(e)}function g(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=m(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function v(){return v=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,c),l=(0,i.useAttributeValue)(t),f=(0,i.useChart)();return r&&(l=r(l,f.getAttributes())),l?n.default.createElement(u.default,{content:l},n.default.createElement(s,d({color:"key",text:l,Component:o.TextSmall},a))):null}},59204:function(e,t,r){t.__esModule=!0,t.default=t.alignment=void 0;var n,a,o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(67294)),i=(n=r(71893))&&n.__esModule?n:{default:n},l=r(93416),u=r(54576),c=["id","align","right","fixed","children","uiName"];function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return 
e?r:t})(e)}function d(){return d=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,c),v=(0,o.useRef)(),b=(0,o.useState)(),h=b[0],y=b[1],w=(0,u.useChart)(),O=function(e){if(w&&w.getUI(f)&&e&&v.current){var t=function(e,t,r,n,a){void 0===e&&(e=s.elementMiddle);var o=r.from,i=r.width,l=t.getUI(a).getChartWidth();return(p[e]||p.elementMiddle)({from:o,width:i,chartWidth:l,element:n})}(r,w,e,v.current,f),n=t[1];v.current.style.right="calc(100% - "+(n+a)+"px)"}};return(0,o.useLayoutEffect)((function(){return!i&&w.getUI(f).on("overlayedAreaChanged:"+t,(function(e){O(e),y((function(t){return!!t!==!!e?e:t}))}))}),[]),(0,o.useLayoutEffect)((function(){return!i&&O(h)}),[h]),h||i?o.default.createElement(m,d({ref:v},g),l):null};t.default=(0,o.memo)(g)},10253:function(e,t,r){t.__esModule=!0,t.default=t.Period=void 0;var n=s(r(67294)),a=s(r(71893)),o=r(93416),i=s(r(99141)),l=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=d(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(3701)),u=s(r(81743)),c=r(54576),f=r(78122);function d(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(d=function(e){return e?r:t})(e)}function s(e){return e&&e.__esModule?e:{default:e}}var p=(0,a.default)(l.Button).withConfig({displayName:"correlation__CorrelationButton",componentId:"sc-a0l0u6-0"})(["pointer-events:all;"]);t.Period=function(e){var t=e.id,r=(0,c.useAttributeValue)("overlays")[t].range,a=r[0],i=r[1],l=(0,f.getDateDiff)(a,i);return n.default.createElement(o.TextNano,{strong:!0},l)},t.default=function(e){var t=e.id,r=(0,c.useAttributeValue)("overlays")[t].range,a=r[0],f=function(e){return e<15?"requires 15 secs minimum selection":""}(r[1]-a),d=(0,c.useChart)();return n.default.createElement(u.default,{content:f?"Metrics correlation: "+f:"Run metrics correlation"},n.default.createElement(o.Flex,null,n.default.createElement(p,{"data-track":d.track("metrics-correlation"),icon:n.default.createElement(l.default,{svg:i.default,size:"20px"}),onClick:function(){return d.sdk.trigger("correlation",d,r)},"data-testid":"highlight-correlations",disabled:!!f})))}},9745:function(e,t,r){t.__esModule=!0,t.default=t.Divider=void 0;var n,a=f(r(67294)),o=(n=r(71893))&&n.__esModule?n:{default:n},i=r(93416),l=f(r(10253)),u=r(54576);function c(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(c=function(e){return e?r:t})(e)}function f(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=c(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function d(){return d=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(n,l),f=i.default[o];return a.default.createElement(f,c({key:e,id:e,uiName:t},u))})))};t.default=(0,a.memo)(f)},51399:function(e,t,r){t.__esModule=!0,t.default=void 0;var 
n=u(r(67294)),a=u(r(71893)),o=r(93416),i=r(54576),l=["dimensionId","textProps"];function u(e){return e&&e.__esModule?e:{default:e}}function c(){return c=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,l),o=(0,i.useUnitSign)(),u=(0,i.useLatestConvertedValue)(t);return u?n.default.createElement(d,c({column:!0},a),n.default.createElement(f,c({fontSize:"2.1em",lineHeight:"1.1em",strong:!0},s,r),u),n.default.createElement(f,c({fontSize:"1.1em",strong:!0},s,{color:"textLite"},r),o)):n.default.createElement(f,c({fontSize:"2.5em",strong:!0},s,r,a),"string"!==typeof u?"Loading...":"No data")}},29270:function(e,t,r){t.__esModule=!0,t.default=t.Processing=void 0;var n=u(r(67294)),a=u(r(71893)),o=r(93416),i=r(54576),l=["defaultValue","uiName"];function u(e){return e&&e.__esModule?e:{default:e}}var c=(0,a.default)(o.Flex).attrs({column:!0,round:!0,border:{side:"all",color:"borderSecondary"},gap:1,padding:[1,2],flex:!1}).withConfig({displayName:"proceeded__ProceededContainer",componentId:"sc-ac5spo-0"})(["direction:initial;"]),f=function(e){var t=e.defaultValue,r=e.uiName,a=function(e,t){if(null==e)return{};var r,n,a={},o=Object.keys(e);for(n=0;n=0||(a[r]=e[r]);return a}(e,l),u=(0,i.useChart)().getUI(r).getChartWidth(),f=(0,i.useChartError)();return u<240?null:f&&t?n.default.createElement(c,a,n.default.createElement(o.Text,{textAlign:"center",textTransform:"firstLetter"},f||t)):null},d=(0,a.default)(o.Flex).withConfig({displayName:"proceeded__CenterContainer",componentId:"sc-ac5spo-1"})(["z-index:60000;position:absolute;top:50%;left:50%;transform:translate(-50%,-50%);"]);t.Processing=function(){return n.default.createElement(d,null,n.default.createElement(f,{defaultValue:"Processing"}))},t.default=f},67148:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=w(r(67294)),a=w(r(71893)),o=w(r(47088)),i=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=y(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(59204)),l=w(r(13247)),u=w(r(55823)),c=w(r(9745)),f=w(r(29270)),d=w(r(23708)),s=w(r(51399)),p=r(54576),m=["id"],g=["id"],v=["id"],b=["id","uiName"],h=["containerProps"];function y(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(y=function(e){return e?r:t})(e)}function w(e){return e&&e.__esModule?e:{default:e}}function O(){return O=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}var _=(0,a.default)(o.default).withConfig({displayName:"types__NoEventsContainer",componentId:"sc-19vz950-0"})(["pointer-events:none;"]);t.default={alarm:function(e){var t=e.id,r=x(e,m);return n.default.createElement(i.default,O({id:t,top:"20px",margin:[0,2,0,0],align:i.alignment.elementLeft},r),n.default.createElement(l.default,{id:t}))},alarmRange:function(e){var t=e.id,r=x(e,g);return n.default.createElement(i.default,O({id:t,top:"20px",margin:[0,2,0,0],align:i.alignment.elementLeft},r),n.default.createElement(u.default,{id:t}))},highlight:function(e){var 
t=e.id,r=x(e,v);return(0,p.useAttributeValue)("sparkline")?null:n.default.createElement(i.default,O({id:t,align:i.alignment.elementRight,bottom:"25px",right:100},r,{noEvents:!0}),n.default.createElement(c.default,{id:t}))},proceeded:function(e){var t=e.id,r=e.uiName,a=x(e,b);return n.default.createElement(i.default,O({id:t,top:"50%",align:i.alignment.chartMiddle,uiName:r},a),n.default.createElement(f.default,{id:t,uiName:r}))},name:function(e){var t=e.containerProps,r=x(e,h);return n.default.createElement(_,O({isAbsolute:!0,position:"top",margin:[2,0,0,0]},t),n.default.createElement(d.default,r))},latestValue:function(e){return n.default.createElement(_,{isAbsolute:!0,position:"center"},n.default.createElement(s.default,e))}}},32931:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=b(r(67294)),a=b(r(71893)),o=r(93416),i=v(r(12460)),l=b(r(43946)),u=v(r(58358)),c=r(54576),f=r(35890),d=r(93501),s=r(19151),p=["children","fractionDigits"],m=["children","showFull"];function g(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(g=function(e){return e?r:t})(e)}function v(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=g(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function b(e){return e&&e.__esModule?e:{default:e}}function h(){return h=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}var w=(0,a.default)(o.Flex).attrs({position:"relative","data-testid":"chartPopover-dimension",padding:[1,0]}).withConfig({displayName:"dimension__GridRow",componentId:"sc-adzl1v-0"})(["display:contents;"]),O=(0,a.default)(i.ColorBar).attrs({position:"absolute",top:1,left:0,backgroundOpacity:.4,round:.5}).withConfig({displayName:"dimension__ColorBackground",componentId:"sc-adzl1v-1"})([""]),x={ANOMALY_RATE:"arp",default:"value"},_=function(e){var t=e.children,r=e.fractionDigits,a=void 0===r?0:r,i=y(e,p),l=t.toString().split("."),c=l[0],f=l[1];return n.default.createElement(o.Flex,{alignItems:"center",justifyContent:"end"},n.default.createElement(u.Value,h({},i,{textAlign:"right"}),c),"undefined"!==typeof f&&n.default.createElement(u.Value,i,"."),n.default.createElement(u.Value,h({as:o.Flex,flex:!1,width:1.6*a},i,{textAlign:"left"}),f))},j=function(e){var t=e.children,r=e.showFull,a=y(e,m);return n.default.createElement(o.Flex,{gap:1,justifyContent:"end"},Object.keys(t).map((function(e){return n.default.createElement(o.Flex,{key:e,border:{size:"1px",side:"all",color:t[e]},round:!0,flex:!1,padding:[0,.5]},n.default.createElement(u.Value,h({},a,{color:t[e]}),r&&f.labels[e]||e))})))};t.default=function(e){var t=e.id,r=e.strong,a=e.rowFlavour,f=(0,c.useVisibleDimensionId)(t),p=(0,c.useChart)().getAttribute("unitsConversionFractionDigits"),m=(0,d.useIsHeatmap)();return 
n.default.createElement(w,{opacity:f?null:"weak"},n.default.createElement(o.Flex,{alignItems:"center",gap:1,position:"relative"},n.default.createElement(O,{id:t,valueKey:x[a]||x.default,height:"18px"},!m&&n.default.createElement(i.default,{id:t})),n.default.createElement(l.default,{padding:[1,2],flex:!0,id:t,strong:r,noTooltip:!0,color:r?"textFocus":"text"})),n.default.createElement(u.default,{id:t,strong:r,visible:f,Component:_,fractionDigits:p,color:a===s.rowFlavours.default?r?"textFocus":"text":"textLite"}),n.default.createElement(u.default,{id:t,strong:r,visible:f,valueKey:"arp",Component:_,fractionDigits:2,color:a===s.rowFlavours.ANOMALY_RATE?"anomalyTextFocus":"anomalyText"}),n.default.createElement(u.default,{textAlign:"right",id:t,strong:r,visible:f,valueKey:"pa",Component:j,color:a===s.rowFlavours.ANNOTATIONS?r?"textFocus":"text":"textLite",showFull:a===s.rowFlavours.ANNOTATIONS}))}},19151:function(e,t,r){t.__esModule=!0,t.rowFlavours=t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=s(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(67294)),a=d(r(71893)),o=r(93416),i=r(54576),l=d(r(9033)),u=d(r(8051)),c=d(r(60616)),f=d(r(32931));function d(e){return e&&e.__esModule?e:{default:e}}function s(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(s=function(e){return e?r:t})(e)}var p=(0,a.default)(o.Flex).attrs({round:!0,width:{min:"196px",max:"80vw"},background:"dropdown",column:!0,padding:[4],gap:1}).withConfig({displayName:"dimensions__Container",componentId:"sc-172on4u-0"})(["box-shadow:0px 8px 12px rgba(9,30,66,0.15),0px 0px 1px rgba(9,30,66,0.31);"]),m=a.default.div.withConfig({displayName:"dimensions__Grid",componentId:"sc-172on4u-1"})(["display:grid;width:100%;grid-template-columns:minmax(150px,max-content) 60px 60px minmax(80px,auto);align-items:center;"]),g=a.default.div.withConfig({displayName:"dimensions__GridHeader",componentId:"sc-172on4u-2"})(["display:contents;"]),v=[null,null],b=function(){var e=Math.floor((window.innerHeight-500)/15)||16;return e<5?5:10},h=function(){return b()/2},y=t.rowFlavours={ANOMALY_RATE:"ANOMALY_RATE",ANNOTATIONS:"ANNOTATIONS",default:"VALUE"},w={ANOMALY_RATE:"anomalyDesc",ANNOTATIONS:"annotationsDesc",default:"valueDesc"},O=function(e){var t=e.uiName,r=(0,i.useChart)(),a=(0,i.useAttributeValue)("hoverX")||v,d=a[0],s=a[1],O=(0,n.useMemo)((function(){var e=r.getClosestRow(d),t=r.onHoverSortDimensions(e,w[s]||w.default)||[];r.getAttribute("selectedDimensions").length>0&&(t=t.filter((function(e){return r.isDimensionVisible(e)})));var n=t.findIndex((function(e){return e===s})),a=t.length,o=Math.floor(function(e,t){return ee-h()?t-(h()+(e-t)):t-h()}(a,n)),i=Math.ceil(function(e,t){return ee-h()?e:t+h()}(a,n));return[o,i,a,t.slice(o,i)]}),[r,s,d]),x=O[0],_=O[1],j=O[2],E=O[3],P=(0,i.useOnResize)(t),k=(P.parentWidth,P.width,y[s]||y.default);return 
n.default.createElement(p,{"data-testid":"chartPopover-dimensions",gap:2},n.default.createElement(o.Flex,{column:!0,gap:1},d&&n.default.createElement(c.default,{value:d}),n.default.createElement(u.default,null)),n.default.createElement(o.Flex,{flex:!1,height:3},x>0&&n.default.createElement(o.TextNano,{color:"textLite"},"\u2191",x," more values")),n.default.createElement(m,{gap:1,column:!0},n.default.createElement(g,null,n.default.createElement(o.TextMicro,{strong:!0},"Dimension"),n.default.createElement(o.TextMicro,{color:k===y.default?"text":"textLite",textAlign:"right"},"Value","heatmap"!==r.getAttribute("chartType")&&n.default.createElement(n.default.Fragment,null," ",n.default.createElement(l.default,{visible:!0,strong:k===y.default,color:k===y.default?"text":"textLite"}))),n.default.createElement(o.TextMicro,{strong:k===y.ANOMALY_RATE,color:k===y.ANOMALY_RATE?"text":"textLite",textAlign:"right"},"AR %"),n.default.createElement(o.TextMicro,{strong:k===y.ANNOTATIONS,color:k===y.ANNOTATIONS?"text":"textLite",textAlign:"right"},"Info")),E.map((function(e){return n.default.createElement(f.default,{key:e,id:e,strong:s===e,rowFlavour:k})}))),n.default.createElement(o.Flex,{flex:!1,height:3},_o,n+u>a))}}})),r.getUI(t).on("mouseout",(function(){return j(!1)})),r.onAttributeChange("panning",(function(e){return e&&j(!1)})),r.onAttributeChange("highlighting",(function(e){return e&&j(!1)})))}),[r]);var M=(0,u.default)();return _?n.default.createElement(n.Fragment,null,n.default.createElement(a.Flex,{ref:function(e){return y(e)},position:"absolute"}),o.default.createPortal(n.default.createElement(i.default,{"data-toolbox":!0,margin:[P.top?2:-2,P.right?-2:2],ref:s,width:{max:"100%"},column:!0,"data-testid":"drop",sx:{pointerEvents:"none"}},n.default.createElement(d.default,{uiName:t,"data-testid":"chartPopover"})),M)):null}},60616:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=i(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var l=a?Object.getOwnPropertyDescriptor(e,o):null;l&&(l.get||l.set)?Object.defineProperty(n,o,l):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(67294)),a=r(93416),o=r(54576);function i(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(i=function(e){return e?r:t})(e)}t.default=function(e){var t=e.value,r=(0,o.useChart)(),i=(0,n.useMemo)((function(){return r.formatDate(t)+" \u2022 "+r.formatTime(t)}),[t]);return n.default.createElement(a.TextMicro,{color:"textDescription","data-testid":"chartPopover-timestamp"},i)}},8051:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=i(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var l=a?Object.getOwnPropertyDescriptor(e,o):null;l&&(l.get||l.set)?Object.defineProperty(n,o,l):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(67294)),a=r(93416),o=r(54576);function i(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(i=function(e){return e?r:t})(e)}t.default=function(){var 
e=(0,o.useAttributeValue)("viewUpdateEvery"),t=(0,o.useAttributeValue)("updateEvery"),r=(0,o.useAttributeValue)("groupingMethod");return n.default.createElement(n.Fragment,null,n.default.createElement(a.Flex,{gap:1,"data-testid":"chartPopover-collection"},n.default.createElement(a.TextMicro,{color:"textLite"},"Granularity:"),n.default.createElement(a.TextMicro,{color:"textDescription"},t,"s")),e!==t&&n.default.createElement(a.Flex,{gap:1,"data-testid":"chartPopover-collection"},n.default.createElement(a.TextMicro,{color:"textLite"},"View point:"),n.default.createElement(a.TextMicro,{color:"textDescription"},r," ",e,"s")))}},21040:function(e,t,r){t.__esModule=!0,t.default=void 0;var n,a=(n=r(67294))&&n.__esModule?n:{default:n},o=r(93416);t.default=function(e){var t=e.disabled;return a.default.createElement(o.Flex,{width:"1px",background:t?"disabled":"borderSecondary"})}},13828:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=d(r(67294)),a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(71893)),o=r(93416),i=r(54576),l=d(r(3701)),u=d(r(41330)),c=["height"];function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}function d(e){return e&&e.__esModule?e:{default:e}}function s(){return s=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,c),l=(0,i.useLoadingColor)();return n.default.createElement(o.Flex,s({flex:!0,padding:[0,0,0,10]},a),n.default.createElement(m,{color:l,height:r}))}},44945:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=b(r(67294)),a=g(r(71893)),o=r(93416),i=g(r(14232)),l=g(r(80550)),u=g(r(11743)),c=g(r(87609)),f=g(r(57348)),d=b(r(3701)),s=r(54576),p=g(r(14821)),m=g(r(13319));function g(e){return e&&e.__esModule?e:{default:e}}function v(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(v=function(e){return e?r:t})(e)}function b(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=v(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function h(){return h=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,d),m=s.icon,g=s.value,v=s.title;return n.default.createElement(a.Flex,h({ref:t,alignItems:"end"},p),n.default.createElement(f.Button,{icon:m,title:v,active:r===g,onClick:function(){return o(g)},padding:"2px",small:!0}),n.default.createElement(f.Button,{icon:n.default.createElement(f.default,{svg:c?l.default:u.default,size:"12px"}),onClick:i,padding:"2px",stroked:!0,small:!0}))})),w=function(e){var t=e.onItemClick,r=e.items[0],o=r.icon,i=r.value,l=r.title;return 
n.default.createElement(a.Flex,{background:"dropdown",round:{side:"bottom"},border:{side:"bottom",color:"borderSecondary"},padding:[1,0],"data-toolbox":!0},n.default.createElement(f.Button,{title:l,icon:o,onClick:function(){return t(i)},padding:"2px",small:!0}))},O=function(){var e=(0,c.useChart)(),t=(0,c.useAttribute)("navigation"),r=t[0],l=t[1],u=function(e){return(0,n.useMemo)((function(){return[{value:"select",title:"Select and zoom",icon:n.default.createElement(f.default,{svg:o.default,size:"16px"}),"data-track":e.track("selectHorizontal")},{value:"selectVertical",title:"Select vertical and zoom",icon:n.default.createElement(f.default,{svg:i.default,size:"16px"}),"data-track":e.track("selectVertical")}]}),[e])}(e),d=(0,n.useMemo)((function(){return u.reduce((function(e,t){return t.value===r?v(v({},e),{},{selectedItem:t}):v(v({},e),{},{remainingItems:[t]})}),{selectedItem:u[0],remainingItems:[]})}),[r]),s=d.selectedItem,p=d.remainingItems;return n.default.createElement(a.Menu,{value:r,onChange:l,items:p,Dropdown:w,"data-track":"select"},n.default.createElement(y,{value:r,onChange:l,item:s}))};t.default=(0,n.memo)(O)},18892:function(e,t,r){t.ZP=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=s(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(67294)),a=d(r(71893)),o=r(93416),i=d(r(58607)),l=r(54576),u=d(r(70486)),c=r(86954),f=["uiName"];function d(e){return e&&e.__esModule?e:{default:e}}function s(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(s=function(e){return e?r:t})(e)}function p(){return p=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,f);return n.default.createElement(c.ChartWrapper,{ref:t},n.default.createElement(i.default,p({uiName:r,column:!0,alignItems:"center",justifyContent:"center",position:"relative"},a),n.default.createElement(g,null),n.default.createElement(v,null)))}));t.ZP=(0,u.default)(b,{tile:!0})},96808:function(e,t,r){t.__esModule=!0,t.default=t.Title=void 0;var n,a=(n=r(67294))&&n.__esModule?n:{default:n},o=r(93416),i=r(54576);function l(){return l=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,i))})).withConfig({displayName:"container__Container",componentId:"sc-l6u9ms-0"})(["left:-99999px;"," "," "," backface-visibility:hidden;perspective:1000;transform:translate3d(0,0,0);will-change:left,top,transform;"],(function(e){return e.animation&&d}),(function(e){return!e.hideShadow&&"box-shadow: 0 2px 6px rgba(0, 0, 0, 0.15);"}),(function(e){return!!e.noEvents&&"pointer-events: none;"}));t.default=s},11226:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=r(67294),a=function e(t,r,n,a){void 0===a&&(a=!0);var o=function(e,t,r){return"left"===e.left?t.left:"right"===e.left?t.right:"right"===e.right?t.right-r.width:"left"===e.right?t.left-r.width:t.left+t.width/2-r.width/2}(t,r,n),i=Math.max(0,o);return o=Math.min(window.innerWidth-n.width,i),a||i===o?o:e(function(e){return"left"===e.left?{right:"right"}:"right"===e.left?{right:"left"}:"right"===e.right?{left:"left"}:"left"===e.right?{left:"right"}:void 0}(t),r,n)},o=function e(t,r,n,a){void 0===a&&(a=!0);var 
o=function(e,t,r){if("top"===e.top)return t.top;if("bottom"===e.top)return t.bottom;if("bottom"===e.bottom)return t.bottom-r.height;if("top"===e.bottom){var n=t.top-r.height;return n<0&&t.bottom+r.height4)return a;var r=t.map((function(t){return(0,n.getDimension)(e,t)}));return 1===r.length?{top:r[0],right:r[0],bottom:r[0],left:r[0]}:2===r.length?{top:r[0],right:r[1],bottom:r[0],left:r[1]}:3===r.length?{top:r[0],right:r[1],bottom:r[2],left:r[1]}:{top:r[0],right:r[1],bottom:r[2],left:r[3]}}},80539:function(e,t){t.__esModule=!0,t.default=void 0;var r={start:"flex-start",center:"center",end:"flex-end",between:"space-between",around:"space-around",stretch:"stretch"};t.default=function(e){var t=e.alignContent;return t in r?"align-content: "+r[t]+";":""}},51287:function(e,t){t.__esModule=!0,t.default=void 0;var r={start:"flex-start",center:"center",end:"flex-end",baseline:"baseline",stretch:"stretch"};t.default=function(e){var t=e.alignItems;return t in r?"align-items: "+r[t]+";":""}},12904:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=r(52612);t.default=function(e){var t=e.theme,r=e.background,a=e.backgroundOpacity;if(!r)return"";var o=a?(0,n.getRgbColor)(r,a)({theme:t}):(0,n.getColor)(r)({theme:t});return o&&"background-color: "+o+";"}},96327:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=r(52612);function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function o(e){for(var t=1;t *:not(:last-child) {\n margin-"+i+": "+t*r+"px;\n }\n "}},57971:function(e,t){t.__esModule=!0,t.default=void 0;t.default=function(e){var t=e.theme.constants.SIZE_SUB_UNIT,r=e.height;if("object"===typeof r){var n=r.min,a=void 0===n?"":n,o=r.max,i=void 0===o?"":o;return"\n "+(a&&"min-height: "+("number"===typeof a?t*a+"px":a)+";")+"\n "+(i&&"max-height: "+("number"===typeof i?t*i+"px":i)+";")+"\n "}return r&&"height: "+("number"===typeof r?t*r+"px":r)+";"}},43881:function(e,t){t.__esModule=!0,t.default=void 0;var r={start:"flex-start",center:"center",end:"flex-end",between:"space-between",around:"space-around",evenly:"space-evenly",stretch:"stretch"};t.default=function(e){var t=e.justifyContent;return t in r?"justify-content: "+r[t]+";":""}},26529:function(e,t){t.__esModule=!0,t.default=void 0;t.default=function(e){var t=e.overflow;if(!t)return"";if("string"===typeof t)return"overflow: "+t+";";var r=t.vertical,n=void 0===r?"":r,a=t.horizontal,o=void 0===a?"":a;return"\n "+(n&&"overflow-y: "+n+";")+"\n "+(o&&"overflow-x: "+o+";")+"\n "}},46275:function(e,t,r){t.__esModule=!0,t.pseudoSelectors=t.default=t.calculateStyles=void 0;var n=f(r(12904)),a=f(r(96327)),o=f(r(74465)),i=r(52612),l=f(r(51287)),u=["theme"],c=["theme"];function f(e){return e&&e.__esModule?e:{default:e}}function d(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function s(e){for(var t=1;t=0||(a[r]=e[r]);return a}var g={boxShadow:o.default,border:a.default,background:n.default,color:function(e){var t=e.theme,r=e.color;return r?"color: "+(0,i.getColor)(r)({theme:t})+";":""},alignItems:l.default},v=t.calculateStyles=function(e){var t=e.theme,r=m(e,u),n="";for(var a in r)if(void 0===g[a]){n=n+(a+":")+r[a]+";"}else{var o=g[a];n+=o&&"function"===typeof o?o(s({theme:t},r)):""}return 
n},b=t.pseudoSelectors={_before:"&::before",_after:"&::after",_hover:"&:hover, &[data-hover]",_active:"&:active, &[data-active]",_focus:"&:focus, &[data-focus]",_focusWithin:"&:focus-within",_visited:"&:visited",_empty:"&:empty",_even:"&:nth-of-type(even)",_odd:"&:nth-of-type(odd)",_disabled:"&[disabled], &[aria-disabled=true], &[data-disabled]",_checked:"&[aria-checked=true]",_mixed:"&[aria-checked=mixed]",_selected:"&[aria-selected=true], [data-selected] > &",_invalid:"&[aria-invalid=true]",_pressed:"&[aria-pressed=true]",_readOnly:"&[aria-readonly=true], &[readonly]",_first:"&:first-of-type",_last:"&:last-of-type",_expanded:"&[aria-expanded=true]",_grabbed:"&[aria-grabbed=true]",_notFirst:"&:not(:first-of-type)",_notLast:"&:not(:last-of-type)",_groupHover:"[role=group]:hover &",_autofill:"&:-webkit-autofill",_placeholder:"&::placeholder"};t.default=function(e){var t=e.theme,r=m(e,c),n="";for(var a in r)if(a in b){var o=a,i=r[o],l=v(s({theme:t},i));n=n+"\n "+b[o]+"{ \n "+l+" \n }"}return n.replace(/^(?=\n)$|^\s*|\s*$|\n\n+/gm,"")}},74465:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=r(52612);t.default=function(e){var t=e.boxShadow,r=e.theme;if(!t)return"";var a=t.color?(0,n.getColor)(t.color)({theme:r}):"",o=t.size?t.size:"";return o?"box-shadow:"+o+" "+a+";":""}},46905:function(e,t){t.__esModule=!0,t.default=void 0;t.default=function(e){var t=e.theme.constants.SIZE_SUB_UNIT,r=e.width;if("object"===typeof r){var n=r.min,a=void 0===n?"":n,o=r.max,i=void 0===o?"":o,l=r.base,u=void 0===l?"":l;return"\n "+(a&&"min-width: "+("number"===typeof a?t*a+"px":a)+";")+"\n "+(i&&"max-width: "+("number"===typeof i?t*i+"px":i)+";")+"\n "+(u&&"width: "+("number"===typeof u?t*u+"px":u)+";")+"\n "}return r&&"width: "+("number"===typeof r?t*r+"px":r)+";"}},42732:function(e,t){t.__esModule=!0,t.default=void 0;t.default=function(e){var t=function(e){return!0===e?"wrap":!1===e?"nowrap":"reverse"===e?e:""}(e.flexWrap);return t&&"flex-wrap: "+t+";"}},69595:function(e,t){t.__esModule=!0,t.default=void 0;var r=new Set(["left","center","right"]);t.default=function(e){var t=e.textAlign;return r.has(t)&&"text-align: "+t+";"}},15537:function(e,t){t.__esModule=!0,t.default=void 0;var r=new Set(["underline","none","line-through"]);t.default=function(e){var t=e.textDecoration;return r.has(t)&&"text-decoration: "+t+";"}},33368:function(e,t){t.__esModule=!0,t.default=void 0;t.default=function(e){return e.truncate&&"\n white-space: nowrap;\n text-overflow: ellipsis;\n overflow: hidden;\n"}},14942:function(e,t){t.__esModule=!0,t.default=void 0;var r=new Set(["normal","nowrap","pre-line"]);t.default=function(e){var t=e.whiteSpace;return r.has(t)&&"white-space: "+t+";"}},5722:function(e,t){t.__esModule=!0,t.default=void 0;var r=new Set(["normal","break-all","keep-all","break-word"]);t.default=function(e){var t=e.wordBreak;return r.has(t)&&"word-break: "+t+";"}},26949:function(e,t,r){t.__esModule=!0,t.makeTypography=t.makeText=t.makeSmall=t.makeNano=t.makeMicro=t.makeHuge=t.makeH6=t.makeH5=t.makeH4=t.makeH3=t.makeH2=t.makeH1=t.makeH0=t.makeFemto=t.makeBigger=t.makeBig=t.fontColor=t.fontCode=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=b(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&Object.prototype.hasOwnProperty.call(e,o)){var 
i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(71893)),a=r(52612),o=v(r(15986)),i=v(r(38249)),l=v(r(89281)),u=v(r(90105)),c=v(r(69595)),f=v(r(15537)),d=v(r(33368)),s=v(r(14942)),p=v(r(5722)),m=v(r(43080)),g=v(r(61430));function v(e){return e&&e.__esModule?e:{default:e}}function b(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(b=function(e){return e?r:t})(e)}var h=t.fontColor=function(e){var t=e.theme,r=e.color,n=void 0===r?"text":r;return"color: "+(0,a.getColor)(n)({theme:t})+";"},y=t.fontCode=function(e){var t=e.background,r=void 0===t?"text":t,n=e.code,o=e.color,i=void 0===o?"elementBackground":o,l=e.theme;return n&&"\n background-color: "+(0,a.getColor)(r)({theme:l})+";\n border-radius: 4px;\n color: "+(0,a.getColor)(i)({theme:l})+";\n padding: 0 6px;\n "},w=(0,n.css)([""," "," "," "," "," "," "," "," "," "," "," "," "," "," ",""],h,y,o.default,c.default,f.default,u.default,d.default,s.default,p.default,i.default,l.default,m.default,g.default,(function(e){var t=e.fontSize;return t&&"\n font-size: "+t+";\n line-height: "+t+";\n "}),(function(e){var t=e.lineHeight;return t&&"\n line-height: "+t+";\n "})),O=t.makeTypography=function(e,t){var r,a,o=t.fontSize,i=t.lineHeight,l=t.strong;return(0,n.default)(e).withConfig({displayName:"typography",componentId:"sc-1lwqv72-0"})(['font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Ubuntu,"Helvetica Neue",sans-serif;font-style:normal;'," "," "," ",""],(a=o,function(){return"font-size: "+a+";"}),function(e){return function(){return"line-height: "+e+";"}}(i),(r=l,function(e){var t=e.strong;return"font-weight: "+((void 0===t?r:t)?"bold":"normal")+";"}),w)};t.makeH0=function(e){return O(e,{fontSize:"26px",lineHeight:"32px",strong:!0})},t.makeH1=function(e){return O(e,{fontSize:"24px",lineHeight:"28px",strong:!0})},t.makeH2=function(e){return O(e,{fontSize:"22px",lineHeight:"24px",strong:!0})},t.makeH3=function(e){return O(e,{fontSize:"20px",lineHeight:"24px",strong:!0})},t.makeH4=function(e){return O(e,{fontSize:"16px",lineHeight:"21px",strong:!0})},t.makeH5=function(e){return O(e,{fontSize:"14px",lineHeight:"18px",strong:!0})},t.makeH6=function(e){return O(e,{fontSize:"12px",lineHeight:"14px",strong:!0})},t.makeFemto=function(e){return O(e,{fontSize:"7px",lineHeight:"8px"})},t.makeNano=function(e){return O(e,{fontSize:"8px",lineHeight:"10px"})},t.makeMicro=function(e){return O(e,{fontSize:"10px",lineHeight:"13px"})},t.makeSmall=function(e){return O(e,{fontSize:"11px",lineHeight:"14px"})},t.makeText=function(e){return O(e,{fontSize:"12px",lineHeight:"16px"})},t.makeBig=function(e){return O(e,{fontSize:"14px",lineHeight:"20px"})},t.makeBigger=function(e){return O(e,{fontSize:"16px",lineHeight:"18px"})},t.makeHuge=function(e){return O(e,{fontSize:"24px",lineHeight:"32px"})}},85862:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=r(67294);t.default=function(){var e=(0,n.useMemo)((function(){var e=document.createElement("div");return document.body.append(e),e}),[]);return(0,n.useLayoutEffect)((function(){return function(){return document.body.removeChild(e)}}),[]),e}},32839:function(e,t,r){t.__esModule=!0,t.default=void 0;var n,a=r(67294),o=(n=r(47833))&&n.__esModule?n:{default:n};t.default=function(e){var t=(0,a.useRef)(),r=(0,a.useCallback)((function(r){t.current=r,(0,o.default)(e,r)}),[]);return[t,r]}},43080:function(e,t){t.__esModule=!0,t.default=void 0;var 
r={weak:.3,medium:.4,strong:.8};t.default=function(e){var t=e.opacity,n=t&&r[t]||t;return n?"opacity: "+n+";":""}},89281:function(e,t,r){t.__esModule=!0,t.default=void 0;var n=r(22837);t.default=function(e){var t=e.theme,r=e.padding;return r?Array.isArray(r)&&r.length>=1&&r.length<=4?"padding: "+(0,n.getDimensions)(t,r)+";":(console.error("Please provide an array (max 4 elements) for `padding` style helper."),""):""}},52203:function(e,t){t.__esModule=!0,t.default=void 0;var r={static:"static",absolute:"absolute",fixed:"fixed",relative:"relative",sticky:"sticky",initial:"initial",inherit:"inherit"};t.default=function(e){var t=e.position;return t in r?"position: "+t+";":""}},46930:function(e,t){t.__esModule=!0,t.default=void 0;var r=function(e,t){return!0===t?e+"px":"number"===typeof t?e*t+"px":"string"===typeof t?t:""},n=function(e,t){return"border-top-left-radius: "+r(e,t)+";"},a=function(e,t){return"border-top-right-radius: "+r(e,t)+";"},o=function(e,t){return"border-bottom-left-radius: "+r(e,t)+";"},i=function(e,t){return"border-bottom-right-radius: "+r(e,t)+";"},l={top:function(e,t){return"\n "+n(e,t)+"\n "+a(e,t)+"\n "},left:function(e,t){return"\n "+n(e,t)+"\n "+o(e,t)+"\n "},bottom:function(e,t){return"\n "+o(e,t)+"\n "+i(e,t)+"\n "},right:function(e,t){return"\n "+a(e,t)+"\n "+i(e,t)+"\n "},"top-left":n,"top-right":a,"bottom-left":o,"bottom-right":i};t.default=function(e){var t=e.theme.constants.SIZE_SUB_UNIT,n=e.round;if(!n)return"";var a=r(t,n);if(a)return"border-radius: "+a+";";var o=n.side,i=n.size,u=void 0===i?1:i;return o in l?""+l[o](t,u):""}},47833:function(e,t){t.__esModule=!0,t.default=void 0;t.default=function(e,t){"function"===typeof e?e(t):e&&(e.current=t)}},90105:function(e,t){t.__esModule=!0,t.default=void 0;var r={none:"none",capitalize:"capitalize",uppercase:"uppercase",lowercase:"lowercase",firstLetter:"firstLetter",fullWidth:"full-width"};t.default=function(e){var t=(void 0===e?{}:e).textTransform,n=void 0===t?r.none:t;return n===r.firstLetter?"text-transform: lowercase;\n &::first-letter {\n text-transform: uppercase;\n }\n":n in r?"text-transform: "+r[n]+";":"text-transform: "+r.none+";"}},71817:function(e,t){t.__esModule=!0,t.default=void 0;t.default=function(e){var t=e.zIndex;if(t&&"number"===typeof t)return"z-index: "+t+";"}},22810:function(e,t,r){r.r(t)},41330:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"skeleton",use:"skeleton-usage",viewBox:"0 0 1225 192",content:''});i().add(l);t.default=l},13100:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"D",use:"D-usage",viewBox:"0 0 16 16",content:''});i().add(l);t.default=l},34034:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"I",use:"I-usage",viewBox:"0 0 16 16",content:''});i().add(l);t.default=l},93506:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"L",use:"L-usage",viewBox:"0 0 16 16",content:''});i().add(l);t.default=l},17371:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"N",use:"N-usage",viewBox:"0 0 16 16",content:''});i().add(l);t.default=l},49234:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"anomaly_badge",use:"anomaly_badge-usage",viewBox:"0 0 15 16",content:''});i().add(l);t.default=l},41523:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"checkmark_s",use:"checkmark_s-usage",viewBox:"0 0 16 
16",content:''});i().add(l);t.default=l},36131:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"chevron_down",use:"chevron_down-usage",viewBox:"0 0 12 12",content:''});i().add(l);t.default=l},70075:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"chevron_down_thin",use:"chevron_down_thin-usage",viewBox:"0 0 16 24",content:''});i().add(l);t.default=l},59884:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"chevron_expand",use:"chevron_expand-usage",viewBox:"0 0 8 6",content:''});i().add(l);t.default=l},16029:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"chevron_up_thin",use:"chevron_up_thin-usage",viewBox:"0 0 16 24",content:''});i().add(l);t.default=l},99141:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"correlations",use:"correlations-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},9837:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"database",use:"database-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},97369:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"dot",use:"dot-usage",viewBox:"0 0 10 10",content:''});i().add(l);t.default=l},84492:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"drag_horizontal",use:"drag_horizontal-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},75177:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"drag_vertical",use:"drag_vertical-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},58e3:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"metrics",use:"metrics-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},33207:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"nav_left",use:"nav_left-usage",viewBox:"0 0 8 10",content:''});i().add(l);t.default=l},62491:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"nav_right",use:"nav_right-usage",viewBox:"0 0 8 10",content:''});i().add(l);t.default=l},14232:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"pan_tool",use:"pan_tool-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},85265:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"plugins",use:"plugins-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},80550:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"selected_area",use:"selected_area-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},31043:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"sort_ascending",use:"sort_ascending-usage",viewBox:"0 0 18 18",content:''});i().add(l);t.default=l},62074:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"sort_descending",use:"sort_descending-usage",viewBox:"0 0 18 18",content:''});i().add(l);t.default=l},64275:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"warning_triangle_hollow",use:"warning_triangle_hollow-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},11743:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"zoom_in",use:"zoom_in-usage",viewBox:"0 0 24 
24",content:''});i().add(l);t.default=l},87609:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"zoom_out",use:"zoom_out-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},57348:function(e,t,r){r.r(t);var n=r(87854),a=r.n(n),o=r(95348),i=r.n(o),l=new(a())({id:"zoom_reset",use:"zoom_reset-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l}}]); \ No newline at end of file diff --git a/web/gui/v2/1277.884a6e09fbb50c214d8e.chunk.js.LICENSE.txt b/web/gui/v2/1277.884a6e09fbb50c214d8e.chunk.js.LICENSE.txt new file mode 100644 index 00000000000000..ae386fb79c9744 --- /dev/null +++ b/web/gui/v2/1277.884a6e09fbb50c214d8e.chunk.js.LICENSE.txt @@ -0,0 +1 @@ +/*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */ diff --git a/web/gui/v2/1282.8f29fe3a597e5ef695e5.js b/web/gui/v2/1282.8f29fe3a597e5ef695e5.js new file mode 100644 index 00000000000000..e80af221547d01 --- /dev/null +++ b/web/gui/v2/1282.8f29fe3a597e5ef695e5.js @@ -0,0 +1 @@ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="f42898bc-54f5-46e5-81e5-7ae1d07cb032",e._sentryDebugIdIdentifier="sentry-dbid-f42898bc-54f5-46e5-81e5-7ae1d07cb032")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[1282],{90026:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var i=o(r(58960)),n=r(24298),s=o(r(93492));function o(e){return e&&e.__esModule?e:{default:e}}function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);t&&(i=i.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,i)}return r}function l(e){for(var t=1;te.x+e.w||t.x+t.we.y+e.h)},getColorShade:function(e,t){(e=String(e).replace(/[^0-9a-f]/gi,"")).length<6&&(e=e[0]+e[0]+e[1]+e[1]+e[2]+e[2]),t=t||0;for(var r="#",i=0;i<3;i++){var n=parseInt(e.substr(2*i,2),16);r+=("00"+(n=Math.round(Math.min(Math.max(0,n+n*t),255)).toString(16))).substr(n.length)}return r},initSegmentColors:function(e){for(var t=e.options.data.content,r=e.options.misc.colors.segments,i=[],n=0;n=t.value?(i.push(e[c]),n+=e[c].value,s++):(e[c].isGrouped=!1,r.push(e[c]))}else if("percentage"===t.valueType){var d=l.getTotalPieSize(e);for(c=0;c99?99:o)<0?0:o;var l=i0&&(l-=u)}a=Math.floor(l/100*o)/2}else a=parseInt(t.pieOuterRadius,10);/%/.test(t.pieInnerRadius)?(o=(o=(o=parseInt(t.pieInnerRadius.replace(/[\D]/,""),10))>99?99:o)<0?0:o,s=Math.floor(a/100*o)):s=parseInt(t.pieInnerRadius,10),e.innerRadius=s,e.outerRadius=a},getTotalPieSize:function(e){for(var t=0,r=0;rt.label.toLowerCase()?1:-1}));break;case"label-desc":t.sort((function(e,t){return e.label.toLowerCase()n.truncation.truncateLength&&(r=e.label.substring(0,n.truncation.truncateLength)+"..."),r})).style("font-size",n.mainLabel.fontSize).style("font-family",n.mainLabel.font).style("font-weight",n.mainLabel.fontWeight).style("fill",(function(t,r){return"segment"===n.mainLabel.color?e.options.colors[r]:n.mainLabel.color})),i.percentage&&o.append("text").attr("id",(function(r,i){return 
e.cssPrefix+"segmentPercentage"+i+"-"+t})).attr("class",e.cssPrefix+"segmentPercentage-"+t).text((function(e,t){var r=e.percentage;return n.formatter?(a.index=t,a.part="percentage",a.value=e.value,a.label=e.percentage,r=n.formatter(a)):r+="%",r})).style("font-size",n.percentage.fontSize).style("font-family",n.percentage.font).style("font-weight",n.percentage.fontWeight).style("fill",n.percentage.color),i.value&&o.append("text").attr("id",(function(r,i){return e.cssPrefix+"segmentValue"+i+"-"+t})).attr("class",e.cssPrefix+"segmentValue-"+t).text((function(e,t){return a.index=t,a.part="value",a.value=e.value,a.label=e.value,a.realLabel=e.label,n.formatter?n.formatter(a,e.value):e.value})).style("font-size",n.value.fontSize).style("font-family",n.value.font).style("font-weight",n.value.fontWeight).style("fill",n.value.color)},positionLabelElements:function(e,t,r){c["dimensions-"+t]=[],e.__labels[t].each((function(r,i){var n=o.select(this).selectAll("."+e.cssPrefix+"segmentMainLabel-"+t),s=o.select(this).selectAll("."+e.cssPrefix+"segmentPercentage-"+t),a=o.select(this).selectAll("."+e.cssPrefix+"segmentValue-"+t);c["dimensions-"+t].push({mainLabel:null!==n.node()?n.node().getBBox():null,percentage:null!==s.node()?s.node().getBBox():null,value:null!==a.node()?a.node().getBBox():null})}));var i=5,n=c["dimensions-"+t];switch(r){case"label-value1":e.svg.selectAll("."+e.cssPrefix+"segmentValue-"+t).attr("dx",(function(e,t){return n[t].mainLabel.width+i}));break;case"label-value2":e.svg.selectAll("."+e.cssPrefix+"segmentValue-"+t).attr("dy",(function(e,t){return n[t].mainLabel.height}));break;case"label-percentage1":e.svg.selectAll("."+e.cssPrefix+"segmentPercentage-"+t).attr("dx",(function(e,t){return n[t].mainLabel.width+i}));break;case"label-percentage2":e.svg.selectAll("."+e.cssPrefix+"segmentPercentage-"+t).attr("dx",(function(e,t){return n[t].mainLabel.width/2-n[t].percentage.width/2})).attr("dy",(function(e,t){return n[t].mainLabel.height}))}},computeLabelLinePositions:function(e){e.lineCoordGroups=[],e.__labels.outer.each((function(t,r){return c.computeLinePosition(e,r)}))},computeLinePosition:function(e,t){var r,i,n,s,o=f.getSegmentAngle(t,e.options.data.content,e.totalSize,{midpoint:!0}),a=l.rotate(e.pieCenter.x,e.pieCenter.y-e.outerRadius,e.pieCenter.x,e.pieCenter.y,o),u=e.outerLabelGroupData[t].h/5,c=6,d=Math.floor(o/90),p=4;switch(2===d&&180===o&&(d=1),d){case 0:r=e.outerLabelGroupData[t].x-c-(e.outerLabelGroupData[t].x-c-a.x)/2,i=e.outerLabelGroupData[t].y+(a.y-e.outerLabelGroupData[t].y)/p,n=e.outerLabelGroupData[t].x-c,s=e.outerLabelGroupData[t].y-u;break;case 1:r=a.x+(e.outerLabelGroupData[t].x-a.x)/p,i=a.y+(e.outerLabelGroupData[t].y-a.y)/p,n=e.outerLabelGroupData[t].x-c,s=e.outerLabelGroupData[t].y-u;break;case 2:var m=e.outerLabelGroupData[t].x+e.outerLabelGroupData[t].w+c;r=a.x-(a.x-m)/p,i=a.y+(e.outerLabelGroupData[t].y-a.y)/p,n=e.outerLabelGroupData[t].x+e.outerLabelGroupData[t].w+c,s=e.outerLabelGroupData[t].y-u;break;case 3:var g=e.outerLabelGroupData[t].x+e.outerLabelGroupData[t].w+c;r=g+(a.x-g)/p,i=e.outerLabelGroupData[t].y+(a.y-e.outerLabelGroupData[t].y)/p,n=e.outerLabelGroupData[t].x+e.outerLabelGroupData[t].w+c,s=e.outerLabelGroupData[t].y-u}"straight"===e.options.labels.lines.style?e.lineCoordGroups[t]=[{x:a.x,y:a.y},{x:n,y:s}]:e.lineCoordGroups[t]=[{x:a.x,y:a.y},{x:r,y:i},{x:n,y:s}]},addLabelLines:function(e){var 
t=e.svg.insert("g","."+e.cssPrefix+"pieChart").attr("class",e.cssPrefix+"lineGroups").style("opacity",1).selectAll("."+e.cssPrefix+"lineGroup").data(e.lineCoordGroups).enter().append("g").attr("class",e.cssPrefix+"lineGroup"),r=o.line().curve(o.curveBasis).x((function(e){return e.x})).y((function(e){return e.y}));t.append("path").attr("d",r).attr("stroke",(function(t,r){return"segment"===e.options.labels.lines.color?e.options.colors[r]:e.options.labels.lines.color})).attr("stroke-width",1).attr("fill","none").style("opacity",(function(t,r){var i=e.options.labels.outer.hideWhenLessThanPercentage;return null!==i&&t.percentage0){var c=f.getSegmentAngle(i,e.options.data.content,e.totalSize,{midpoint:!0}),d=l.translate(e.pieCenter.x,e.pieCenter.y,e.innerRadius,c);u.x=d.x,u.y=d.y}var p=s.getDimensions(e.cssPrefix+"labelGroup"+i+"-inner"),m=p.w/2,g=p.h/4;n=u.x+(e.lineCoordGroups[i][0].x-u.x)/1.8,o=u.y+(e.lineCoordGroups[i][0].y-u.y)/1.8,n-=m,o+=g}return"translate("+n+","+o+")"}))},getIncludes:function(e){var t=!1,r=!1,i=!1;switch(e){case"label":t=!0;break;case"value":r=!0;break;case"percentage":i=!0;break;case"label-value1":case"label-value2":t=!0,r=!0;break;case"label-percentage1":case"label-percentage2":t=!0,i=!0}return{mainLabel:t,value:r,percentage:i}},computeOuterLabelCoords:function(e){e.__labels.outer.each((function(t,r){return c.getIdealOuterLabelPositions(e,r)})),c.resolveOuterLabelCollisions(e)},resolveOuterLabelCollisions:function(e){if("none"!==e.options.labels.outer.format){var t=e.options.data.content.length;c.checkConflict(e,0,"clockwise",t),c.checkConflict(e,t-1,"anticlockwise",t)}},checkConflict:function(e,t,r,i){var n,o;if(!(i<=1)){var a=e.outerLabelGroupData[t].hs;if(("clockwise"!==r||"right"===a)&&("anticlockwise"!==r||"left"===a)){var l="clockwise"===r?t+1:t-1,u=e.outerLabelGroupData[t],d=e.outerLabelGroupData[l],f={labelHeights:e.outerLabelGroupData[0].h,center:e.pieCenter,lineLength:e.outerRadius+e.options.labels.outer.pieDistance,heightChange:e.outerLabelGroupData[0].h+1};if("clockwise"===r){for(n=0;n<=t;n++)if(o=e.outerLabelGroupData[n],!c.isLabelHidden(e,n)&&s.rectIntersect(o,d)){c.adjustLabelPos(e,l,u,f);break}}else for(n=i-1;n>=t;n--)if(o=e.outerLabelGroupData[n],!c.isLabelHidden(e,n)&&s.rectIntersect(o,d)){c.adjustLabelPos(e,l,u,f);break}c.checkConflict(e,l,r,i)}}},isLabelHidden:function(e,t){var r=e.options.labels.outer.hideWhenLessThanPercentage;return null!==r&&d.percentageMath.abs(s)?Math.sqrt(i.lineLength*i.lineLength-s*s):Math.sqrt(s*s-i.lineLength*i.lineLength),o="right"===r.hs?i.center.x+n:i.center.x-n-e.outerLabelGroupData[t].w,e.outerLabelGroupData[t].x=o,e.outerLabelGroupData[t].y=a},getIdealOuterLabelPositions:function(e,t){var r=e.svg.select("#"+e.cssPrefix+"labelGroup"+t+"-outer").node();if(r){var i=r.getBBox(),n=f.getSegmentAngle(t,e.options.data.content,e.totalSize,{midpoint:!0}),s=e.pieCenter.x,o=e.pieCenter.y-(e.outerRadius+e.options.labels.outer.pieDistance),a=l.rotate(s,o,e.pieCenter.x,e.pieCenter.y,n),u="right";n>180?(a.x-=i.width+8,u="left"):a.x+=8,e.outerLabelGroupData[t]={x:a.x,y:a.y,w:i.width,h:i.height,hs:u}}}},f={effectMap:{none:o.easeLinear,bounce:o.easeBounce,linear:o.easeLinear,sin:o.easeSin,elastic:o.easeElastic,back:o.easeBack,quad:o.easeQuad,circle:o.easeCircle,exp:o.easeExp},create:function(e){var t=e.pieCenter,r=e.options.colors,i=(e.options.effects.load,e.options.misc.colors.segmentStroke),n=e.svg.insert("g","#"+e.cssPrefix+"title").attr("transform",(function(){return 
l.getPieTranslateCenter(t)})).attr("class",e.cssPrefix+"pieChart"),s=o.arc().innerRadius(e.innerRadius).outerRadius(e.outerRadius).startAngle(0).endAngle((function(t){return t.value/e.totalSize*2*Math.PI}));n.selectAll("."+e.cssPrefix+"arc").data(e.options.data.content).enter().append("g").attr("class",e.cssPrefix+"arc").append("path").attr("id",(function(t,r){return e.cssPrefix+"segment"+r})).attr("fill",(function(t,i){var n=r[i];return e.options.misc.gradient.enabled&&(n="url(#"+e.cssPrefix+"grad"+i+")"),n})).style("stroke",i).style("stroke-width",1).attr("data-index",(function(e,t){return t})).attr("d",s),e.svg.selectAll("g."+e.cssPrefix+"arc").attr("transform",(function(t,r){var i=0;return r>0&&(i=f.getSegmentAngle(r-1,e.options.data.content,e.totalSize)),"rotate("+i+")"})),e.arc=s},addGradients:function(e){var t=e.svg.append("defs").selectAll("radialGradient").data(e.options.data.content).enter().append("radialGradient").attr("gradientUnits","userSpaceOnUse").attr("cx",0).attr("cy",0).attr("r","120%").attr("id",(function(t,r){return e.cssPrefix+"grad"+r}));t.append("stop").attr("offset","0%").style("stop-color",(function(t,r){return e.options.colors[r]})),t.append("stop").attr("offset",e.options.misc.gradient.percentage+"%").style("stop-color",e.options.misc.gradient.color)},addSegmentEventHandlers:function(e){var t=e.svg.selectAll("."+e.cssPrefix+"arc");(t=t.merge(e.__labels.inner.merge(e.__labels.outer))).on("click",(function(){var t,r=o.select(this);if(r.attr("class")===e.cssPrefix+"arc")t=r.select("path");else{var i=r.attr("data-index");t=o.select("#"+e.cssPrefix+"segment"+i)}var n=t.attr("class")===e.cssPrefix+"expanded";f.onSegmentEvent(e,e.options.callbacks.onClickSegment,t,n),"none"!==e.options.effects.pullOutSegmentOnClick.effect&&(n?f.closeSegment(e,t.node()):f.openSegment(e,t.node()))})),t.on("mouseover",(function(){var t,r,i=o.select(this);if(i.attr("class")===e.cssPrefix+"arc"?t=i.select("path"):(r=i.attr("data-index"),t=o.select("#"+e.cssPrefix+"segment"+r)),e.options.effects.highlightSegmentOnMouseover){r=t.attr("data-index");var n=e.options.colors[r];t.style("fill",s.getColorShade(n,e.options.effects.highlightLuminosity))}e.options.tooltips.enabled&&(r=t.attr("data-index"),m.showTooltip(e,r));var a=t.attr("class")===e.cssPrefix+"expanded";f.onSegmentEvent(e,e.options.callbacks.onMouseoverSegment,t,a)})),t.on("mousemove",(function(){m.moveTooltip(e)})),t.on("mouseout",(function(){var t,r,i=o.select(this);if(i.attr("class")===e.cssPrefix+"arc"?t=i.select("path"):(r=i.attr("data-index"),t=o.select("#"+e.cssPrefix+"segment"+r)),e.options.effects.highlightSegmentOnMouseover){r=t.attr("data-index");var n=e.options.colors[r];e.options.misc.gradient.enabled&&(n="url(#"+e.cssPrefix+"grad"+r+")"),t.style("fill",n)}e.options.tooltips.enabled&&(r=t.attr("data-index"),m.hideTooltip(e,r));var s=t.attr("class")===e.cssPrefix+"expanded";f.onSegmentEvent(e,e.options.callbacks.onMouseoutSegment,t,s)}))},onSegmentEvent:function(e,t,r,i){if(s.isFunction(t)){var n=parseInt(r.attr("data-index"),10);t({segment:r.node(),index:n,expanded:i,data:e.options.data.content[n]})}},openSegment:function(e,t){e.isOpeningSegment||(e.isOpeningSegment=!0,f.maybeCloseOpenSegment(e),o.select(t).transition().ease(f.effectMap[e.options.effects.pullOutSegmentOnClick.effect]).duration(e.options.effects.pullOutSegmentOnClick.speed).attr("transform",(function(t,r){var 
i=e.arc.centroid(t),n=i[0],s=i[1],o=Math.sqrt(n*n+s*s),a=parseInt(e.options.effects.pullOutSegmentOnClick.size,10);return"translate("+n/o*a+","+s/o*a+")"})).on("end",(function(r,i){e.currentlyOpenSegment=t,e.isOpeningSegment=!1,o.select(t).attr("class",e.cssPrefix+"expanded")})))},maybeCloseOpenSegment:function(e){"undefined"!==typeof e&&e.svg.selectAll("."+e.cssPrefix+"expanded").size()>0&&f.closeSegment(e,e.svg.select("."+e.cssPrefix+"expanded").node())},closeSegment:function(e,t){o.select(t).transition().duration(400).attr("transform","translate(0,0)").on("end",(function(r,i){o.select(t).attr("class",""),e.currentlyOpenSegment=null}))},getCentroid:function(e){var t=e.getBBox();return{x:t.x+t.width/2,y:t.y+t.height/2}},getSegmentAngle:function(e,t,r,i){var n,s=a({compounded:!0,midpoint:!1},i),o=t[e].value;if(s.compounded){n=0;for(var l=0;l<=e;l++)n+=t[l].value}"undefined"===typeof n&&(n=o);var u=n/r*360;return s.midpoint&&(u-=o/r*360/2),u}},p={offscreenCoord:-1e4,addTitle:function(e){e.__title=e.svg.selectAll("."+e.cssPrefix+"title").data([e.options.header.title]).enter().append("text").text((function(e){return e.text})).attr("id",e.cssPrefix+"title").attr("class",e.cssPrefix+"title").attr("x",p.offscreenCoord).attr("y",p.offscreenCoord).attr("text-anchor",(function(){return"top-center"===e.options.header.location||"pie-center"===e.options.header.location?"middle":"left"})).attr("fill",(function(e){return e.color})).style("font-size",(function(e){return e.fontSize})).style("font-weight",(function(e){return e.fontWeight})).style("font-family",(function(e){return e.font}))},positionTitle:function(e){var t,r=e.textComponents,i=e.options.header.location,n=e.options.misc.canvasPadding,s=e.options.size.canvasWidth,o=e.options.header.titleSubtitlePadding;t="top-left"===i?n.left:(s-n.right)/2+n.left,t+=e.options.misc.pieCenterOffset.x;var a=n.top+r.title.h;"pie-center"===i&&(a=e.pieCenter.y,r.subtitle.exists?a=a-(r.title.h+o+r.subtitle.h)/2+r.title.h:a+=r.title.h/4),e.__title.attr("x",t).attr("y",a)},addSubtitle:function(e){var t=e.options.header.location;e.__subtitle=e.svg.selectAll("."+e.cssPrefix+"subtitle").data([e.options.header.subtitle]).enter().append("text").text((function(e){return e.text})).attr("x",p.offscreenCoord).attr("y",p.offscreenCoord).attr("id",e.cssPrefix+"subtitle").attr("class",e.cssPrefix+"subtitle").attr("text-anchor",(function(){return"top-center"===t||"pie-center"===t?"middle":"left"})).attr("fill",(function(e){return e.color})).style("font-size",(function(e){return e.fontSize})).style("font-weight",(function(e){return e.fontWeight})).style("font-family",(function(e){return e.font}))},positionSubtitle:function(e){var t,r=e.options.misc.canvasPadding,i=e.options.size.canvasWidth;t="top-left"===e.options.header.location?r.left:(i-r.right)/2+r.left,t+=e.options.misc.pieCenterOffset.x;var n=p.getHeaderHeight(e);e.__subtitle.attr("x",t).attr("y",n)},addFooter:function(e){e.__footer=e.svg.selectAll("."+e.cssPrefix+"footer").data([e.options.footer]).enter().append("text").text((function(e){return e.text})).attr("x",p.offscreenCoord).attr("y",p.offscreenCoord).attr("id",e.cssPrefix+"footer").attr("class",e.cssPrefix+"footer").attr("text-anchor",(function(){var t="left";return"bottom-center"===e.options.footer.location?t="middle":"bottom-right"===e.options.footer.location&&(t="left"),t})).attr("fill",(function(e){return e.color})).style("font-size",(function(e){return e.fontSize})).style("font-weight",(function(e){return e.fontWeight})).style("font-family",(function(e){return 
e.font}))},positionFooter:function(e){var t,r=e.options.footer.location,i=e.textComponents.footer.w,n=e.options.size.canvasWidth,s=e.options.size.canvasHeight,o=e.options.misc.canvasPadding;t="bottom-left"===r?o.left:"bottom-right"===r?n-i-o.right:n/2,e.__footer.attr("x",t).attr("y",s-o.bottom)},getHeaderHeight:function(e){var t;if(e.textComponents.title.exists){var r=e.textComponents.title.h+e.options.header.titleSubtitlePadding+e.textComponents.subtitle.h;t="pie-center"===e.options.header.location?e.pieCenter.y-r/2+r:r+e.options.misc.canvasPadding.top}else if("pie-center"===e.options.header.location){var i=e.options.misc.canvasPadding.bottom+e.textComponents.footer.h;t=(e.options.size.canvasHeight-i)/2+e.options.misc.canvasPadding.top+e.textComponents.subtitle.h/2}else t=e.options.misc.canvasPadding.top+e.textComponents.subtitle.h;return t}},m={addTooltips:function(e){var t=e.svg.insert("g").attr("class",e.cssPrefix+"tooltips");t.selectAll("."+e.cssPrefix+"tooltip").data(e.options.data.content).enter().append("g").attr("class",e.cssPrefix+"tooltip").attr("id",(function(t,r){return e.cssPrefix+"tooltip"+r})).style("opacity",0).append("rect").attr("rx",e.options.tooltips.styles.borderRadius).attr("ry",e.options.tooltips.styles.borderRadius).attr("x",-e.options.tooltips.styles.padding).attr("opacity",e.options.tooltips.styles.backgroundOpacity).style("fill",e.options.tooltips.styles.backgroundColor),t.selectAll("."+e.cssPrefix+"tooltip").data(e.options.data.content).append("text").attr("fill",(function(t){return e.options.tooltips.styles.color})).style("font-size",(function(t){return e.options.tooltips.styles.fontSize})).style("font-weight",(function(t){return e.options.tooltips.styles.fontWeight})).style("font-family",(function(t){return e.options.tooltips.styles.font})).text((function(t,r){var i=e.options.tooltips.string;return"caption"===e.options.tooltips.type&&(i=t.caption),m.replacePlaceholders(e,i,r,{label:t.label,value:t.value,percentage:t.percentage})})),t.selectAll("."+e.cssPrefix+"tooltip rect").attr("width",(function(t,r){return s.getDimensions(e.cssPrefix+"tooltip"+r).w+2*e.options.tooltips.styles.padding})).attr("height",(function(t,r){return s.getDimensions(e.cssPrefix+"tooltip"+r).h+2*e.options.tooltips.styles.padding})).attr("y",(function(t,r){return-s.getDimensions(e.cssPrefix+"tooltip"+r).h/2+1}))},showTooltip:function(e,t){var r=e.options.tooltips.styles.fadeInSpeed;m.currentTooltip===t&&(r=1),m.currentTooltip=t,o.select("#"+e.cssPrefix+"tooltip"+t).transition().duration(r).style("opacity",(function(){return 1})),m.moveTooltip(e)},moveTooltip:function(e){o.selectAll("#"+e.cssPrefix+"tooltip"+m.currentTooltip).attr("transform",(function(t){var r=o.pointer(this.parentNode);return"translate("+(r[0]+e.options.tooltips.styles.padding+2)+","+(r[1]-2*e.options.tooltips.styles.padding-2)+")"}))},hideTooltip:function(e,t){o.select("#"+e.cssPrefix+"tooltip"+t).style("opacity",(function(){return 0})),o.select("#"+e.cssPrefix+"tooltip"+m.currentTooltip).attr("transform",(function(t,r){return"translate("+(e.options.size.canvasWidth+1e3)+","+(e.options.size.canvasHeight+1e3)+")"}))},replacePlaceholders:function(e,t,r,i){s.isFunction(e.options.tooltips.placeholderParser)&&e.options.tooltips.placeholderParser(r,i);var n=function(){return function(e){var t=arguments[1];return i.hasOwnProperty(t)?i[arguments[1]]:arguments[0]}};return t.replace(/\{(\w+)\}/g,n(i))}},g=function(s,l){if(this.element=s,"string"===typeof s){var u=s.replace(/^#/,"");this.element=document.getElementById(u)}var 
c={};a(!0,c,i,l),this.options=c,null!==this.options.misc.cssPrefix?this.cssPrefix=this.options.misc.cssPrefix:(this.cssPrefix="p"+r+"_",r++),n.initialCheck(this)&&(o.select(this.element).attr(e,t),b.call(this),h.call(this))};g.prototype.recreate=function(){n.initialCheck(this)&&(b.call(this),h.call(this))},g.prototype.redraw=function(){this.element.innerHTML="",h.call(this)},g.prototype.destroy=function(){this.element.innerHTML="",o.select(this.element).attr(e,null)},g.prototype.getOpenSegment=function(){var e=this.currentlyOpenSegment;if(null!==e&&"undefined"!==typeof e){var t=parseInt(o.select(e).attr("data-index"),10);return{element:e,index:t,data:this.options.data.content[t]}}return null},g.prototype.openSegment=function(e){(e=parseInt(e,10))<0||e>this.options.data.content.length-1||f.openSegment(this,o.select("#"+this.cssPrefix+"segment"+e).node())},g.prototype.closeSegment=function(){f.maybeCloseOpenSegment(this)},g.prototype.updateProp=function(e,t){switch(e){case"header.title.text":var r=s.processObj(this.options,e);s.processObj(this.options,e,t),o.select("#"+this.cssPrefix+"title").html(t),(""===r&&""!==t||""!==r&&""===t)&&this.redraw();break;case"header.subtitle.text":var i=s.processObj(this.options,e);s.processObj(this.options,e,t),o.select("#"+this.cssPrefix+"subtitle").html(t),(""===i&&""!==t||""!==i&&""===t)&&this.redraw();break;case"callbacks.onload":case"callbacks.onMouseoverSegment":case"callbacks.onMouseoutSegment":case"callbacks.onClickSegment":case"effects.pullOutSegmentOnClick.effect":case"effects.pullOutSegmentOnClick.speed":case"effects.pullOutSegmentOnClick.size":case"effects.highlightSegmentOnMouseover":case"effects.highlightLuminosity":s.processObj(this.options,e,t);break;default:s.processObj(this.options,e,t),this.destroy(),this.recreate()}};var b=function(){this.options.data.content=l.sortPieData(this),this.options.data.smallSegmentGrouping.enabled&&(this.options.data.content=s.applySmallSegmentGrouping(this.options.data.content,this.options.data.smallSegmentGrouping)),this.options.colors=s.initSegmentColors(this),this.totalSize=l.getTotalPieSize(this.options.data.content);for(var e=this.options.labels.percentage.decimalPlaces,t=0;te.getDygraph().getArea().h-10?{seriesName:"ANNOTATIONS"}:i<15?{seriesName:"ANOMALY_RATE"}:e.chart.getAttribute("chartType")?e.getDygraph().findStackedPoint(t.offsetX,t.offsetY):e.getDygraph().findClosestPoint(t.offsetX,t.offsetY)}(t,r),n=i.seriesName;if(n){var s=e.getDygraph().getPropertiesForSeries(n);if(s){var o=e.chart.getPayloadDimensionIds();if(null!=o&&o.length)return o[s.column-1]||s.name}}},o=function(o,a,l){if(n!==a){i=l,n=a,t=o.offsetX,r=o.offsetY;var u=s(o,l);u&&(e.sdk.trigger("highlightHover",e.chart,a,u),e.chart.trigger("highlightHover",a,u))}},a=function(o,a,l){if(n!==a){i=l,n=a,t=o.offsetX,r=o.offsetY;var u=s(o,l);e.sdk.trigger("highlightClick",e.chart,a,u),e.chart.trigger("highlightClick",a,u)}},l=function(o){if(!(Math.abs(o.offsetX-t)<5&&Math.abs(o.offsetY-r)<5)){t=o.offsetX,r=o.offsetY;var a=s(o,i);a&&(e.sdk.trigger("highlightHover",e.chart,n,a),e.chart.trigger("highlightHover",n,a))}},u=function(){e.sdk.trigger("highlightBlur",e.chart),e.chart.trigger("highlightBlur")},c=function(){r=null,i=null,n=null,e.off("highlightCallback",o),e.off("mousemove",l),e.off("mouseout",u),e.off("click",a)};return{toggle:function(t){return t?e.on("highlightCallback",o).on("mousemove",l).on("mouseout",u).on("click",a):c()},destroy:c}}},67514:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var 
i=m(r(7494)),n=m(r(58960)),s=m(r(81220)),o=m(r(93492)),a=r(93501),l=r(79635),u=r(56778),c=m(r(97962)),d=m(r(60579)),f=m(r(34613)),p=m(r(49021));function m(e){return e&&e.__esModule?e:{default:e}}function g(e){return function(e){if(Array.isArray(e))return b(e)}(e)||function(e){if("undefined"!==typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(!e)return;if("string"===typeof e)return b(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);"Object"===r&&e.constructor&&(r=e.constructor.name);if("Map"===r||"Set"===r)return Array.from(e);if("Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r))return b(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function b(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,i=new Array(t);r1&&j.length>1,stackedGraphNaNFill:"none",plotter:c,errorBars:v,axes:{x:o?{ticker:i.default.dateTicker,axisLabelFormatter:t.formatXAxis,axisLabelWidth:60}:{drawAxis:!1},y:l?y(y(y({},P&&{ticker:P}),{},{axisLabelFormatter:w},u&&{axisLabelWidth:u}),{},{pixelsPerLabel:15}):{drawAxis:!1}},ylabel:t.getAttribute("hasYlabel")&&t.getUnitSign({long:!0,withoutConversion:(0,a.isHeatmap)(n)})}},S=function(){var e=h.chart.getThemeAttribute("themeGridColor");return{axisLineColor:e,gridLineColor:e}},M=function(){var e=t.getPayloadDimensionIds(),r=t.getPayload().labels;if(null==e||!e.length||null==r||!r.length)return{visibility:!1};var i=r.length-e.length,n=Array(i>0?i:0).fill(!0),s=t.getAttribute("selectedLegendDimensions");return{visibility:[].concat(g(e.map(s.length?t.isDimensionVisible:function(){return!0})),g(n))}},D=function(){var e=t.getAttributes(),r=e.outOfLimits,i=e.getValueRange,n=e.staticValueRange,s=e.chartType,o=t.getPayload(),l=o.data,u=o.labels,c=t.getDateWindow(),d=r||0===l.length;return{file:d?[[0]]:l,labels:d?["X"]:u,dateWindow:c,valueRange:n||((0,a.isHeatmap)(s)?[0,t.getVisibleDimensionIds().length]:i(t,{dygraph:!0}))}},B=function(){return t.isSparkline()?{drawGrid:!1,drawAxis:!1,ylabel:void 0,yLabelWidth:0,highlightCircleSize:3,fillAlpha:1,strokeWidth:0}:null},E=function(){var e=t.getPayloadDimensionIds();return e.length?{colors:e.map(t.selectDimensionColor)}:{}},T=y(y({},h),{},{getChartWidth:function(){return v?v.getArea().w:h.getChartWidth()},getChartHeight:function(){return v?v.getArea().h:100},getPreceded:function(){if(!v)return-1;if(1e3*t.getFirstEntry()u+c)){i.zoomMoved=!0,i.dragEndX=(0,s.dragGetX_)(e,i);var d=i.dragStartX,f=i.dragEndX;n.clearRect(0,0,o.width,o.height),n.fillStyle="rgba(128,128,128,0.3)",n.fillRect(Math.min(d,f),a.y,Math.abs(f-d),a.h),i.prevEndX=f,r=f}}},o=function n(s,o,a){if(a.isZooming){o.clearZoomRect_(),a.destroy();var l=function(e){return-1===r||Math.abs(t-r)<5?null:[Math.round(e.toDataXCoord(t)/1e3),Math.round(e.toDataXCoord(r)/1e3)].sort((function(e,t){return e-t}))}(o);e.sdk.trigger("highlightEnd",e.chart,l),e.chart.trigger("highlightEnd",l)}e.off("mousemove",i),e.off("mouseup",n)};return e.on("mousedown",(function(s,a,l){e.sdk.trigger("highlightStart",e.chart),l.initializeMouseDown(s,a,l),n.default.startZoom(s,a,l),t=l.dragStartX,r=-1,e.on("mousemove",i).on("mouseup",o)}))}},37108:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var i,n=(i=r(7494))&&i.__esModule?i:{default:i},s=r(72247);t.default=function(e){var t,r,i=function(e,t,i){if(i.isZooming){var 
n=t.canvas_ctx_,o=t.canvas_,a=o.getBoundingClientRect();if(!(e.pageYa.bottom)){i.zoomMoved=!0,i.dragEndY=(0,s.dragGetY_)(e,i);var l=i.dragStartY,u=i.dragEndY,c=t.getArea();n.clearRect(0,0,o.width,o.height),n.fillStyle="rgba(128,128,128,0.3)",n.fillRect(c.x,Math.min(l,u),c.w,Math.abs(u-l)),i.prevEndY=u,r=u}}},o=function n(s,o,a){if(a.isZooming){o.clearZoomRect_(),a.destroy();var l=-1===r||Math.abs(t-r)<5?null:[o.toDataYCoord(t),o.toDataYCoord(r)].sort((function(e,t){return e-t}));e.sdk.trigger("highlightVerticalEnd",e.chart,l)}e.off("mousemove",i),e.off("mouseup",n)};return e.on("mousedown",(function(s,a,l){e.sdk.trigger("highlightVerticalStart",e.chart),l.initializeMouseDown(s,a,l),n.default.startZoom(s,a,l),t=l.dragStartY,r=-1,e.on("mousemove",i).on("mouseup",o)}))}},1991:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var i=r(41062),n={warning:"#F9A825",critical:"#FF4136",clear:"#00AB44"};t.default=function(e,t){var r=e.chart.getAttribute("overlays")[t],s=r.when,o=r.status,a=e.getDygraph(),l=a.getArea().h,u=a.hidden_ctx_,c=(0,i.getArea)(a,[s,s]);if(!c)return(0,i.trigger)(e,t);var d=c.from;(0,i.trigger)(e,t,c),u.save(),u.beginPath(),u.moveTo(d-1,0),u.lineTo(d-1,l),u.globalAlpha=1,u.lineWidth=2,u.setLineDash([4,4]),u.strokeStyle=n[o],u.stroke(),u.closePath(),u.restore()}},3288:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var i=r(41062),n={warning:"#FFF8E1",critical:"#FFEBEF",clear:"#E5F5E8"},s={warning:"#FFC300",critical:"#F59B9B",clear:"#68C47D"},o={warning:"#F9A825",critical:"#FF4136",clear:"#00AB44"};t.default=function(e,t){var r=e.chart.getAttribute("overlays")[t],a=r.whenTriggered,l=r.whenLast,u=void 0===l?Math.floor((new Date).getTime()/1e3):l,c=r.status,d=e.getDygraph(),f=d.getArea().h,p=d.hidden_ctx_,m=(0,i.getArea)(d,[a,u]);if(!m)return(0,i.trigger)(e,t);var g=m.from,b=m.width,h=m.to;(0,i.trigger)(e,t,m),p.save(),p.beginPath(),p.rect(g,0,b,f-1),p.fillStyle=s[c],p.globalAlpha=.1,p.fill();p.beginPath(),p.moveTo(g,0),p.lineTo(g,f),p.globalAlpha=1,p.lineWidth=2,p.setLineDash([4,4]),p.strokeStyle=n[c],p.stroke(),p.beginPath(),p.moveTo(h-2,0),p.lineTo(h-2,f),p.strokeStyle=o[c],p.stroke(),p.closePath(),p.restore()}},41062:function(e,t){"use strict";t.__esModule=!0,t.trigger=t.getArea=void 0;t.getArea=function(e,t){var r=e.xAxisRange(),i=r[0],n=r[1],s=1e3*t[0],o=1e3*t[1];if(on)return null;var a=Math.max(i,s),l=Math.min(n,o),u=e.toDomXCoord(a),c=e.toDomXCoord(l);return{from:u,to:c,width:c-u}},t.trigger=function(e,t,r){return requestAnimationFrame((function(){return e.trigger("overlayedAreaChanged:"+t,r)}))}},4241:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var i=r(41062);t.default=function(e,t){var r=e.chart.getAttribute("overlays")[t].range;if(r){var n=e.getDygraph(),s=n.getArea().h,o=n.hidden_ctx_,a=(0,i.getArea)(n,r);if(!a)return(0,i.trigger)(e,t);var l=a.from,u=a.width;(0,i.trigger)(e,t,a),o.save(),o.beginPath(),o.rect(l,0,u,s-1),o.fillStyle="rgba(207, 213, 218, 0.12)",o.fill(),o.beginPath(),o.rect(l,0,0,s-1),o.rect(l+u,0,0,s-1),o.fill(),o.setLineDash([2,7]),o.lineWidth=1,o.strokeStyle="#CFD5DA",o.stroke(),o.stroke(),o.closePath(),o.restore()}}},34613:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var i,n=(i=r(82415))&&i.__esModule?i:{default:i};t.default=function(e){var t=null,r=function(t){var r=e.chart.getAttribute("overlays")[t].type,i=n.default[r];i&&i(e,t)},i=function(){var t=e.chart.getAttribute("overlays");Object.keys(t).forEach(r)},s=function(){var 
t=e.getDygraph();t&&t.renderGraph_(!1)},o=function(){t&&(s(),t(),t=null)};return{toggle:function(){var r=e.chart.getAttribute("overlays");if(!Object.keys(r).length)return o();t?s():t=e.on("underlayCallback",i)},destroy:o}}},85362:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var i=r(41062);t.default=function(e,t){var r=e.getDygraph(),n=r.xAxisRange()[1],s=n/1e3,o=e.chart.getFirstEntry(),a=e.chart.getAttributes(),l=a.outOfLimits,u=a.error;if(l||o&&!(o>s)||u){var c=l||u?[n,n]:[o,o],d=(0,i.getArea)(r,c);(0,i.trigger)(e,t,d)}}},82415:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var i=a(r(1991)),n=a(r(3288)),s=a(r(4241)),o=a(r(85362));function a(e){return e&&e.__esModule?e:{default:e}}t.default={alarm:i.default,alarmRange:n.default,highlight:s.default,proceeded:o.default}},9806:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var i=r(35890);function n(e){return function(e){if(Array.isArray(e))return s(e)}(e)||function(e){if("undefined"!==typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(!e)return;if("string"===typeof e)return s(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);"Object"===r&&e.constructor&&(r=e.constructor.name);if("Map"===r||"Set"===r)return Array.from(e);if("Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r))return s(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function s(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,i=new Array(t);r(i||0)?e:i||0:e}),0);r.strokeStyle=r.fillStyle=a(s),r.fillRect(i-o/2,0,o,15),r.strokeRect(i-o/2,0,o,15)}))}}}},43492:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var i=r(93501);t.default=function(e){return function(t){if(e&&0===t.seriesIndex){var r=e.chart.getVisibleDimensionIds(),n=e.chart.getVisibleDimensionIndexesById(),s=t.dygraph,o=t.drawingContext,a=t.allSeriesPoints,l=s.layout_.setNames,u=1/0;a.forEach((function(e){var t=e[1].canvasx-e[0].canvasx;t1?n.length-1:1));r.fillRect(i,e.canvasy,a/n.length,s-e.canvasy),r.strokeRect(i,e.canvasy,a/n.length,s-e.canvasy)}))}))}}}},60810:function(e,t){"use strict";t.__esModule=!0,t.default=void 0;t.default=function(){return function(e){var t=e.drawingContext,r=e.points,i=e.dygraph.toDomYCoord(0);t.fillStyle=e.color;var n=r[1].canvasx-r[0].canvasx,s=Math.floor(2/3*n);r.forEach((function(e){var r=e.canvasx;t.fillRect(r-s/2,e.canvasy,s,i-e.canvasy),t.strokeRect(r-s/2,e.canvasy,s,i-e.canvasy)}))}}},35484:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var i=r(93501);function n(e){return function(e){if(Array.isArray(e))return s(e)}(e)||function(e){if("undefined"!==typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(!e)return;if("string"===typeof e)return s(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);"Object"===r&&e.constructor&&(r=e.constructor.name);if("Map"===r||"Set"===r)return Array.from(e);if("Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r))return s(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function s(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,i=new Array(t);re.length)&&(t=e.length);for(var r=0,i=new Array(t);rd));l++);for(m>g&&(p*=-1),a=0;a<=c;a++)u=m+a*p,f.push({v:u})}var 
O=n("axisLabelFormatter");for(a=0;a.08}))),[{label_v:w+k,label:"i"}])}},76081:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var i=a(r(26712)),n=a(r(58960)),s=r(24298),o=a(r(93492));function a(e){return e&&e.__esModule?e:{default:e}}function l(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);t&&(i=i.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,i)}return r}function u(e){for(var t=1;te.length)&&(t=e.length);for(var r=0,i=new Array(t);rt?t:e.clientHeight);e.firstChild.G__height=r,e.firstChild.style.height=r+"px";var i=t;e.firstChild.G__width=i,e.firstChild.style.width=i+"px",p.setOptions({}),p.update(!0),f.trigger("resize")}));var h=t.getAttributes().loaded;r=(0,s.unregister)(t.onAttributeChange("hoverX",b),!h&&t.onceAttributeChange("loaded",b),t.onAttributeChange("theme",(function(){var e=m(),t=e.color,r=e.strokeColor;p.setOptions({strokeColor:r,pointer:{color:t}})})));var y=e.clientWidth,v=.9*(e.clientHeight>y?y:e.clientHeight);e.firstChild.G__height=v,e.firstChild.style.height=v+"px";var _=y;e.firstChild.G__width=_,e.firstChild.style.width=_+"px",p.setOptions({}),f.trigger("resize"),b()}},unmount:function(){r&&r(),c&&c(),p=null,a=null,u=null,f.unmount()},render:b,getMinMax:g})}},63902:function(e,t){"use strict";t.__esModule=!0,t.default=void 0;var r=function(e,t){for(var r in t.prototype)t.hasOwnProperty(r)&&(e[r]=t[r]);function i(){this.constructor=e}return i.prototype=t.prototype,e.prototype=new i,e.__super__=t.prototype,e},i=function(e,t){var r={};for(var i in e)e.hasOwnProperty(i)&&(r[i]=e[i]);for(var n in t)t.hasOwnProperty(n)&&(r[n]=t[n]);return r},n=function(e){return"#"===e.charAt(0)?e.substring(1,7):e};function s(e,t){null==e&&(e=!0),this.clear=null==t||t,e&&u.add(this)}function o(){return o.__super__.constructor.apply(this,arguments)}function a(e){if(this.gauge=e,void 0===this.gauge)throw new Error("The element isn't defined.");this.ctx=this.gauge.ctx,this.canvas=this.gauge.canvas,a.__super__.constructor.call(this,!1,!1),this.setOptions()}function l(e){var t,r;this.canvas=e,l.__super__.constructor.call(this),this.percentColors=null,"undefined"!==typeof G_vmlCanvasManager&&(this.canvas=window.G_vmlCanvasManager.initElement(this.canvas)),this.ctx=this.canvas.getContext("2d"),t=this.canvas.clientHeight,r=this.canvas.clientWidth,this.canvas.height=t,this.canvas.width=r,this.gp=[new a(this)],this.setOptions()}s.prototype.animationSpeed=32,s.prototype.update=function(e){var t;return null==e&&(e=!1),!(!e&&this.displayedValue===this.value)&&(this.ctx&&this.clear&&this.ctx.clearRect(0,0,this.canvas.width,this.canvas.height),t=this.value-this.displayedValue,Math.abs(t/this.animationSpeed)<=.001?this.displayedValue=this.value:this.displayedValue=this.displayedValue+t/this.animationSpeed,this.render(),!0)},r(o,s),o.prototype.displayScale=1,o.prototype.forceUpdate=!0,o.prototype.setMinValue=function(e,t){var r,i,n,s,o;if(this.minValue=e,null==t&&(t=!0),t){for(this.displayedValue=this.minValue,o=[],i=0,n=(s=this.gp||[]).length;i.5&&(this.options.angle=.5),this.configDisplayScale(),this},o.prototype.configDisplayScale=function(){var e,t,r,i;return!1===this.options.highDpiSupport?delete 
this.displayScale:(t=window.devicePixelRatio||1,e=this.ctx.webkitBackingStorePixelRatio||this.ctx.mozBackingStorePixelRatio||this.ctx.msBackingStorePixelRatio||this.ctx.oBackingStorePixelRatio||this.ctx.backingStorePixelRatio||1,this.displayScale=t/e),i=this.canvas.G__width||this.canvas.width,r=this.canvas.G__height||this.canvas.height,this.canvas.width=i*this.displayScale,this.canvas.height=r*this.displayScale,this.canvas.style.width=i+"px",this.canvas.style.height=r+"px",this.canvas.G__width=i,this.canvas.G__height=r,this},o.prototype.parseValue=function(e){return e=parseFloat(e)||Number(e),isFinite(e)?e:0},r(a,s),a.prototype.displayedValue=0,a.prototype.value=0,a.prototype.options={strokeWidth:.035,length:.1,color:"#000000",iconPath:null,iconScale:1,iconAngle:0},a.prototype.img=null,a.prototype.setOptions=function(e){if(null==e&&(e=null),this.options=i(this.options,e),this.length=2*this.gauge.radius*this.gauge.options.radiusScale*this.options.length,this.strokeWidth=this.canvas.height*this.options.strokeWidth,this.maxValue=this.gauge.maxValue,this.minValue=this.gauge.minValue,this.animationSpeed=this.gauge.animationSpeed,this.options.angle=this.gauge.options.angle,this.options.iconPath)return this.img=new Image,this.img.src=this.options.iconPath},a.prototype.render=function(){var e,t,r,i,n,s,o,a,l;if(e=this.gauge.getAngle.call(this,this.displayedValue),a=Math.round(this.length*Math.cos(e)),l=Math.round(this.length*Math.sin(e)),s=Math.round(this.strokeWidth*Math.cos(e-Math.PI/2)),o=Math.round(this.strokeWidth*Math.sin(e-Math.PI/2)),t=Math.round(this.strokeWidth*Math.cos(e+Math.PI/2)),r=Math.round(this.strokeWidth*Math.sin(e+Math.PI/2)),this.ctx.beginPath(),this.ctx.fillStyle=this.options.color,this.ctx.arc(0,0,this.strokeWidth,0,2*Math.PI,!1),this.ctx.fill(),this.ctx.beginPath(),this.ctx.moveTo(s,o),this.ctx.lineTo(a,l),this.ctx.lineTo(t,r),this.ctx.fill(),this.img)return i=Math.round(this.img.width*this.options.iconScale),n=Math.round(this.img.height*this.options.iconScale),this.ctx.save(),this.ctx.translate(a,l),this.ctx.rotate(e+Math.PI/180*(90+this.options.iconAngle)),this.ctx.drawImage(this.img,-i/2,-n/2,i,n),this.ctx.restore()},r(l,o),l.prototype.elem=null,l.prototype.value=[20],l.prototype.maxValue=80,l.prototype.minValue=0,l.prototype.displayedAngle=0,l.prototype.displayedValue=0,l.prototype.lineWidth=40,l.prototype.paddingTop=.1,l.prototype.paddingBottom=.1,l.prototype.percentColors=null,l.prototype.options={colorStart:"#6fadcf",colorStop:void 0,gradientType:0,strokeColor:"#e0e0e0",pointer:{length:.8,strokeWidth:.035,iconScale:1},angle:.15,lineWidth:.44,radiusScale:1,limitMax:!1,limitMin:!1},l.prototype.setOptions=function(e){var t,r,i,n,s;for(null==e&&(e=null),l.__super__.setOptions.call(this,e),this.configPercentColors(),this.extraPadding=0,this.options.angle<0&&(n=Math.PI*(1+this.options.angle),this.extraPadding=Math.sin(n)),this.availableHeight=this.canvas.height*(1-this.paddingTop-this.paddingBottom),this.lineWidth=this.availableHeight*this.options.lineWidth,this.radius=(this.availableHeight-this.lineWidth/2)/(1+this.extraPadding),this.ctx.clearRect(0,0,this.canvas.width,this.canvas.height),r=0,i=(s=this.gp).length;r=s;r=0<=s?++i:--i)a=parseInt(n(this.options.percentColors[r][1]).substring(0,2),16),t=parseInt(n(this.options.percentColors[r][1]).substring(2,4),16),e=parseInt(n(this.options.percentColors[r][1]).substring(4,6),16),o.push(this.percentColors[r]={pct:this.options.percentColors[r][0],color:{r:a,g:t,b:e}});return o}},l.prototype.set=function(e){var 
t,r,i,n,s,o,l,c,d;for(e instanceof Array||(e=[e]),r=i=0,l=e.length-1;0<=l?i<=l:i>=l;r=0<=l?++i:--i)e[r]=this.parseValue(e[r]);if(e.length>this.gp.length)for(r=n=0,c=e.length-this.gp.length;0<=c?nc;r=0<=c?++n:--n)(t=new a(this)).setOptions(this.options.pointer),this.gp.push(t);else e.lengththis.maxValue?this.options.limitMax?d=this.maxValue:this.maxValue=d+1:d=a;n=0<=a?++s:--s)if(e<=this.percentColors[n].pct){!0===t?(l=this.percentColors[n-1]||this.percentColors[0],i=this.percentColors[n],o=(e-l.pct)/(i.pct-l.pct),r={r:Math.floor(l.color.r*(1-o)+i.color.r*o),g:Math.floor(l.color.g*(1-o)+i.color.g*o),b:Math.floor(l.color.b*(1-o)+i.color.b*o)}):r=this.percentColors[n].color;break}return"rgb("+[r.r,r.g,r.b].join(",")+")"},l.prototype.getColorForValue=function(e,t){var r;return r=(e-this.minValue)/(this.maxValue-this.minValue),this.getColorForPercentage(r,t)},l.prototype.renderTicks=function(e,t,r,i){var n,s,o,a,l,u,c,d,f,p,m,g,b,h,y,v,_,O,A,w;if(e!=={}){for(u=e.divisions||0,O=e.subDivisions||0,o=e.divColor||"#fff",h=e.subColor||"#fff",a=e.divLength||.7,v=e.subLength||.2,f=parseFloat(this.maxValue)-parseFloat(this.minValue),p=parseFloat(f)/parseFloat(e.divisions),y=parseFloat(p)/parseFloat(e.subDivisions),n=parseFloat(this.minValue),s=0+y,l=(d=f/400)*(e.divWidth||1),_=d*(e.subWidth||1),g=[],A=c=0,m=u+1;c0?g.push(function(){var e,t,r;for(r=[],e=0,t=O-1;ethis.maxValue&&(a=this.maxValue),p=this.radius*this.options.radiusScale,g.height&&(this.ctx.lineWidth=this.lineWidth*g.height,f=this.lineWidth/2*(g.offset||1-g.height),p=this.radius*this.options.radiusScale+f),this.ctx.strokeStyle=g.strokeStyle,this.ctx.beginPath(),this.ctx.arc(0,0,p,this.getAngle(l),this.getAngle(a),!1),this.ctx.stroke();else void 0!==this.options.customFillStyle?t=this.options.customFillStyle(this):null!==this.percentColors?t=this.getColorForValue(this.displayedValue,this.options.generateGradient):void 0!==this.options.colorStop?((t=0===this.options.gradientType?this.ctx.createRadialGradient(m,r,9,m,r,70):this.ctx.createLinearGradient(0,0,m,0)).addColorStop(0,this.options.colorStart),t.addColorStop(1,this.options.colorStop)):t=this.options.colorStart,this.ctx.strokeStyle=t,this.ctx.beginPath(),this.ctx.arc(m,r,u,(1+this.options.angle)*Math.PI,e,!1),this.ctx.lineWidth=this.lineWidth,this.ctx.stroke(),this.ctx.strokeStyle=this.options.strokeColor,this.ctx.beginPath(),this.ctx.arc(m,r,u,e,(2-this.options.angle)*Math.PI,!1),this.ctx.stroke(),this.ctx.save(),this.ctx.translate(m,r);for(this.options.renderTicks&&this.renderTicks(this.options.renderTicks,m,r,u),this.ctx.restore(),this.ctx.translate(m,r),n=0,o=(d=this.gp).length;n=0;n+=-1)i=a[n],u.elements.splice(i,1);return u.animId=t?null:requestAnimationFrame(u.run)}if(!1===e)return!0===u.animId&&cancelAnimationFrame(u.animId),u.animId=requestAnimationFrame(u.run)}};t.default=l},1271:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var i,n=r(24298),s=(i=r(58960))&&i.__esModule?i:{default:i};function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);t&&(i=i.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,i)}return r}function a(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,a);return u(u({cursor:"pointer"},o),{},{children:t||o.children,active:o.active||o["aria-expanded"],hoverIndicator:i,padding:s})})).withConfig({displayName:"button__Button",componentId:"sc-oqiqxn-0"})(["border:initial;padding:",";height:auto;line-height:0;background:",";"," color:",";svg{fill:",";stroke:",";}"," 
&:hover{",";color:",";svg{fill:",";stroke:",";}}"],(function(e){return e.padding}),(function(e){var t=e.theme;return e.active?(0,s.getColor)("borderSecondary")({theme:t}):"initial"}),s.cursor,(function(e){var t=e.active,r=e.disabled,i=e.theme;return(0,s.getColor)(d({active:t,disabled:r}))({theme:i})}),(function(e){var t=e.active,r=e.disabled,i=e.theme;return e.stroked?"none":(0,s.getColor)(d({active:t,disabled:r}))({theme:i})}),(function(e){var t=e.active,r=e.disabled,i=e.theme;return e.stroked?(0,s.getColor)(d({active:t,disabled:r}))({theme:i}):"none"}),(function(e){var t=e.active,r=e.hoverIndicator;return(t||r)&&"\n border-radius: 4px;\n "}),(function(e){var t=e.theme,r=e.hoverIndicator,i=e.disabled;return r&&!i&&"background: "+(0,s.getColor)("mainChartTboxHover")({theme:t})+";"}),(function(e){var t=e.active,r=e.disabled,i=e.theme;return(0,s.getColor)(d({active:t,disabled:r}))({theme:i})}),(function(e){var t=e.theme,r=e.stroked,i=e.disabled;return r?"none":(0,s.getColor)(d({defaultColor:"text",disabled:i}))({theme:t})}),(function(e){var t=e.theme,r=e.stroked,i=e.disabled;return r?(0,s.getColor)(d({defaultColor:"text",disabled:i}))({theme:t}):"none"}));t.default=(0,o.withTooltip)(f)},3701:function(e,t,r){"use strict";t.__esModule=!0,t.default=t.Button=void 0;var i=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=u(t);if(r&&r.has(e))return r.get(e);var i={__proto__:null},n=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var s in e)if("default"!==s&&Object.prototype.hasOwnProperty.call(e,s)){var o=n?Object.getOwnPropertyDescriptor(e,s):null;o&&(o.get||o.set)?Object.defineProperty(i,s,o):i[s]=e[s]}return i.default=e,r&&r.set(e,i),i}(r(67294)),n=l(r(2568)),s=r(66557),o=l(r(71896));t.Button=o.default;var a=["svg","size","width","height"];function l(e){return e&&e.__esModule?e:{default:e}}function u(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(u=function(e){return e?r:t})(e)}function c(){return c=Object.assign?Object.assign.bind():function(e){for(var t=1;t
    `;return u}z.prototype.select=function(e){var t=e.selectedX,n=e.selectedPoints,r=e.selectedRow,i=e.dygraph.getOption("legend");if("never"!==i){var a=z.generateLegendHTML(e.dygraph,t,n,this.one_em_width_,r);if(a instanceof Node&&a.nodeType===Node.DOCUMENT_FRAGMENT_NODE?(this.legend_div_.innerHTML="",this.legend_div_.appendChild(a)):this.legend_div_.innerHTML=a,this.legend_div_.style.display="","follow"===i){var o,s=e.dygraph.plotter_.area,l=this.legend_div_.offsetWidth,c=e.dygraph.getOptionForAxis("axisLabelWidth","y"),u=e.dygraph.getHighlightSeries();u&&(o=n.find((e=>e.name===u)))||(o=n[0]);const t=e.dygraph.getNumericOption("legendFollowOffsetX"),r=e.dygraph.getNumericOption("legendFollowOffsetY");var d=o.x*s.w+t,h=o.y*s.h+r;d+l+1>s.w&&(d=d-2*t-l-(c-s.x)),this.legend_div_.style.left=c+d+"px",this.legend_div_.style.top=h+"px"}else if("onmouseover"===i&&this.is_generated_div_){s=e.dygraph.plotter_.area,l=this.legend_div_.offsetWidth;this.legend_div_.style.left=s.x+s.w-l-1+"px",this.legend_div_.style.top=s.y+"px"}}else this.legend_div_.style.display="none"},z.prototype.deselect=function(e){"always"!==e.dygraph.getOption("legend")&&(this.legend_div_.style.display="none");var t=function(e){var t=document.createElement("span");t.setAttribute("style","margin: 0; padding: 0 0 0 1em; border: 0;"),e.appendChild(t);var n=t.offsetWidth;return e.removeChild(t),n}(this.legend_div_);this.one_em_width_=t;var n=z.generateLegendHTML(e.dygraph,void 0,void 0,t,null);n instanceof Node&&n.nodeType===Node.DOCUMENT_FRAGMENT_NODE?(this.legend_div_.innerHTML="",this.legend_div_.appendChild(n)):this.legend_div_.innerHTML=n},z.prototype.didDrawChart=function(e){this.deselect(e)},z.prototype.predraw=function(e){if(this.is_generated_div_){e.dygraph.graphDiv.appendChild(this.legend_div_);var t=e.dygraph.plotter_.area,n=this.legend_div_.offsetWidth;this.legend_div_.style.left=t.x+t.w-n-1+"px",this.legend_div_.style.top=t.y+"px"}},z.prototype.destroy=function(){this.legend_div_=null},z.generateLegendHTML=function(e,t,n,i,a){var o,s={dygraph:e,x:t,i:a,series:[]},l={},c=e.getLabels();if(c)for(var u=1;u/g,">")),isVisible:d.visible,color:d.color};s.series.push(h),l[c[u]]=h}if("undefined"!==typeof t){var f=e.optionsViewForAxis_("x"),p=f("valueFormatter");s.xHTML=p.call(e,t,f,c[0],e,a,0);var g=[],v=e.numAxes();for(u=0;u":" "),n+=`${a.dashHTML} ${a.labelHTML}`)}return n}n=e.xHTML+":";for(i=0;i"),n+=` ${a.labelHTML}: ${a.yHTML}`}return n};var X=z,Z=function(){this.hasTouchInterface_="undefined"!=typeof TouchEvent,this.isMobileDevice_=/mobile|android/gi.test(navigator.appVersion),this.interfaceCreated_=!1};Z.prototype.toString=function(){return"RangeSelector Plugin"},Z.prototype.activate=function(e){return this.dygraph_=e,this.getOption_("showRangeSelector")&&this.createInterface_(),{layout:this.reserveSpace_,predraw:this.renderStaticLayer_,didDrawChart:this.renderInteractiveLayer_}},Z.prototype.destroy=function(){this.bgcanvas_=null,this.fgcanvas_=null,this.leftZoomHandle_=null,this.rightZoomHandle_=null},Z.prototype.getOption_=function(e,t){return this.dygraph_.getOption(e,t)},Z.prototype.setDefaultOption_=function(e,t){this.dygraph_.attrs_[e]=t},Z.prototype.createInterface_=function(){this.createCanvases_(),this.createZoomHandles_(),this.initInteraction_(),this.getOption_("animatedZooms")&&(console.warn("Animated zooms and range selector are not compatible; disabling animatedZooms."),this.dygraph_.updateOptions({animatedZooms:!1},!0)),this.interfaceCreated_=!0,this.addToGraph_()},Z.prototype.addToGraph_=function(){var 
e=this.graphDiv_=this.dygraph_.graphDiv;e.appendChild(this.bgcanvas_),e.appendChild(this.fgcanvas_),e.appendChild(this.leftZoomHandle_),e.appendChild(this.rightZoomHandle_)},Z.prototype.removeFromGraph_=function(){var e=this.graphDiv_;e.removeChild(this.bgcanvas_),e.removeChild(this.fgcanvas_),e.removeChild(this.leftZoomHandle_),e.removeChild(this.rightZoomHandle_),this.graphDiv_=null},Z.prototype.reserveSpace_=function(e){this.getOption_("showRangeSelector")&&e.reserveSpaceBottom(this.getOption_("rangeSelectorHeight")+4)},Z.prototype.renderStaticLayer_=function(){this.updateVisibility_()&&(this.resize_(),this.drawStaticLayer_())},Z.prototype.renderInteractiveLayer_=function(){this.updateVisibility_()&&!this.isChangingRange_&&(this.placeZoomHandles_(),this.drawInteractiveLayer_())},Z.prototype.updateVisibility_=function(){var e=this.getOption_("showRangeSelector");if(e)this.interfaceCreated_?this.graphDiv_&&this.graphDiv_.parentNode||this.addToGraph_():this.createInterface_();else if(this.graphDiv_){this.removeFromGraph_();var t=this.dygraph_;setTimeout((function(){t.width_=0,t.resize()}),1)}return e},Z.prototype.resize_=function(){function e(e,t,n,i){var a=i||r.getContextPixelRatio(t);e.style.top=n.y+"px",e.style.left=n.x+"px",e.width=n.w*a,e.height=n.h*a,e.style.width=n.w+"px",e.style.height=n.h+"px",1!=a&&t.scale(a,a)}var t=this.dygraph_.layout_.getPlotArea(),n=0;this.dygraph_.getOptionForAxis("drawAxis","x")&&(n=this.getOption_("xAxisHeight")||this.getOption_("axisLabelFontSize")+2*this.getOption_("axisTickSize")),this.canvasRect_={x:t.x,y:t.y+t.h+n+4,w:t.w,h:this.getOption_("rangeSelectorHeight")};var i=this.dygraph_.getNumericOption("pixelRatio");e(this.bgcanvas_,this.bgcanvas_ctx_,this.canvasRect_,i),e(this.fgcanvas_,this.fgcanvas_ctx_,this.canvasRect_,i)},Z.prototype.createCanvases_=function(){this.bgcanvas_=r.createCanvas(),this.bgcanvas_.className="dygraph-rangesel-bgcanvas",this.bgcanvas_.style.position="absolute",this.bgcanvas_.style.zIndex=9,this.bgcanvas_ctx_=r.getContext(this.bgcanvas_),this.fgcanvas_=r.createCanvas(),this.fgcanvas_.className="dygraph-rangesel-fgcanvas",this.fgcanvas_.style.position="absolute",this.fgcanvas_.style.zIndex=9,this.fgcanvas_.style.cursor="default",this.fgcanvas_ctx_=r.getContext(this.fgcanvas_)},Z.prototype.createZoomHandles_=function(){var e=new Image;e.className="dygraph-rangesel-zoomhandle",e.style.position="absolute",e.style.zIndex=10,e.style.visibility="hidden",e.style.cursor="col-resize",e.width=9,e.height=16,e.src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAkAAAAQCAYAAADESFVDAAAAAXNSR0IArs4c6QAAAAZiS0dEANAAzwDP4Z7KegAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAd0SU1FB9sHGw0cMqdt1UwAAAAZdEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAAaElEQVQoz+3SsRFAQBCF4Z9WJM8KCDVwownl6YXsTmCUsyKGkZzcl7zkz3YLkypgAnreFmDEpHkIwVOMfpdi9CEEN2nGpFdwD03yEqDtOgCaun7sqSTDH32I1pQA2Pb9sZecAxc5r3IAb21d6878xsAAAAAASUVORK5CYII=",this.isMobileDevice_&&(e.width*=2,e.height*=2),this.leftZoomHandle_=e,this.rightZoomHandle_=e.cloneNode(!1)},Z.prototype.initInteraction_=function(){var e,t,n,i,a,o,s,l,c,u,h,f,p,g,y=this,m=document,b=0,_=null,x=!1,w=!1,S=!this.isMobileDevice_,C=new v;e=function(e){var t=y.dygraph_.xAxisExtremes(),n=(t[1]-t[0])/y.canvasRect_.w;return[t[0]+(e.leftHandlePos-y.canvasRect_.x)*n,t[0]+(e.rightHandlePos-y.canvasRect_.x)*n]},t=function(e){return 
r.cancelEvent(e),x=!0,b=e.clientX,_=e.target?e.target:e.srcElement,"mousedown"!==e.type&&"dragstart"!==e.type||(r.addEvent(m,"mousemove",n),r.addEvent(m,"mouseup",i)),y.fgcanvas_.style.cursor="col-resize",C.cover(),!0},n=function(e){if(!x)return!1;r.cancelEvent(e);var t=e.clientX-b;if(Math.abs(t)<4)return!0;b=e.clientX;var n,i=y.getZoomHandleStatus_();_==y.leftZoomHandle_?(n=i.leftHandlePos+t,n=Math.min(n,i.rightHandlePos-_.width-3),n=Math.max(n,y.canvasRect_.x)):(n=i.rightHandlePos+t,n=Math.min(n,y.canvasRect_.x+y.canvasRect_.w),n=Math.max(n,i.leftHandlePos+_.width+3));var o=_.width/2;return _.style.left=n-o+"px",y.drawInteractiveLayer_(),S&&a(),!0},i=function(e){return!!x&&(x=!1,C.uncover(),r.removeEvent(m,"mousemove",n),r.removeEvent(m,"mouseup",i),y.fgcanvas_.style.cursor="default",S||a(),!0)},a=function(){try{var t=y.getZoomHandleStatus_();if(y.isChangingRange_=!0,t.isZoomed){var n=e(t);y.dygraph_.doZoomXDates_(n[0],n[1])}else y.dygraph_.resetZoom()}finally{y.isChangingRange_=!1}},o=function(e){var t=y.leftZoomHandle_.getBoundingClientRect(),n=t.left+t.width/2,r=(t=y.rightZoomHandle_.getBoundingClientRect()).left+t.width/2;return e.clientX>n&&e.clientX=y.canvasRect_.x+y.canvasRect_.w?i=(a=y.canvasRect_.x+y.canvasRect_.w)-o:(i+=t,a+=t);var s=y.leftZoomHandle_.width/2;return y.leftZoomHandle_.style.left=i-s+"px",y.rightZoomHandle_.style.left=a-s+"px",y.drawInteractiveLayer_(),S&&u(),!0},c=function(e){return!!w&&(w=!1,r.removeEvent(m,"mousemove",l),r.removeEvent(m,"mouseup",c),S||u(),!0)},u=function(){try{y.isChangingRange_=!0,y.dygraph_.dateWindow_=e(y.getZoomHandleStatus_()),y.dygraph_.drawGraph_(!1)}finally{y.isChangingRange_=!1}},h=function(e){if(!x&&!w){var t=o(e)?"move":"default";t!=y.fgcanvas_.style.cursor&&(y.fgcanvas_.style.cursor=t)}},f=function(e){"touchstart"==e.type&&1==e.targetTouches.length?t(e.targetTouches[0])&&r.cancelEvent(e):"touchmove"==e.type&&1==e.targetTouches.length?n(e.targetTouches[0])&&r.cancelEvent(e):i(e)},p=function(e){"touchstart"==e.type&&1==e.targetTouches.length?s(e.targetTouches[0])&&r.cancelEvent(e):"touchmove"==e.type&&1==e.targetTouches.length?l(e.targetTouches[0])&&r.cancelEvent(e):c(e)},g=function(e,t){for(var n=["touchstart","touchend","touchmove","touchcancel"],r=0;r1&&(p=h.rollingAverage(p,t.rollPeriod(),f,e)),d.push(p)}var g=[];for(e=0;e0)&&(_=Math.min(_,w),x=Math.max(x,w))}var S=.25;if(n)for(x=r.log10(x),x+=x*S,_=r.log10(_),e=0;ethis.canvasRect_.x||n+1=0;a--){var o=i[a][0];if(i[a][1].call(o,n),n.propagationStopped)break}return n.defaultPrevented},q.prototype.getPluginInstance_=function(e){for(var t=0;t!!e.valueRange)).indexOf(!0)>=0;if(null===e||void 0===e)return t||n;if("y"===e)return n;throw new Error(`axis parameter is [${e}] must be null, 'x' or 'y'.`)},q.prototype.toString=function(){var e=this.maindiv_;return"[Dygraph "+(e&&e.id?e.id:e)+"]"},q.prototype.attr_=function(e,t){return t?this.attributes_.getForSeries(e,t):this.attributes_.get(e)},q.prototype.getOption=function(e,t){return this.attr_(e,t)},q.prototype.getNumericOption=function(e,t){return this.getOption(e,t)},q.prototype.getStringOption=function(e,t){return this.getOption(e,t)},q.prototype.getBooleanOption=function(e,t){return this.getOption(e,t)},q.prototype.getFunctionOption=function(e,t){return this.getOption(e,t)},q.prototype.getOptionForAxis=function(e,t){return this.attributes_.getForAxis(e,t)},q.prototype.optionsViewForAxis_=function(e){var t=this;return function(n){var r=t.user_attrs_.axes;return 
r&&r[e]&&r[e].hasOwnProperty(n)?r[e][n]:("x"!==e||"logscale"!==n)&&("undefined"!=typeof t.user_attrs_[n]?t.user_attrs_[n]:(r=t.attrs_.axes)&&r[e]&&r[e].hasOwnProperty(n)?r[e][n]:"y"==e&&t.axes_[0].hasOwnProperty(n)?t.axes_[0][n]:"y2"==e&&t.axes_[1].hasOwnProperty(n)?t.axes_[1][n]:t.attr_(n))}},q.prototype.rollPeriod=function(){return this.rollPeriod_},q.prototype.xAxisRange=function(){return this.dateWindow_?this.dateWindow_:this.xAxisExtremes()},q.prototype.xAxisExtremes=function(){var e=this.getNumericOption("xRangePad")/this.plotter_.area.w;if(0===this.numRows())return[0-e,1+e];var t=this.rawData_[0][0],n=this.rawData_[this.rawData_.length-1][0];if(e){var r=n-t;t-=r*e,n+=r*e}return[t,n]},q.prototype.yAxisExtremes=function(){const e=this.gatherDatasets_(this.rolledSeries_,null),{extremes:t}=e,n=this.axes_;this.computeYAxisRanges_(t);const r=this.axes_;return this.axes_=n,r.map((e=>e.extremeRange))},q.prototype.yAxisRange=function(e){if("undefined"==typeof e&&(e=0),e<0||e>=this.axes_.length)return null;var t=this.axes_[e];return[t.computedValueRange[0],t.computedValueRange[1]]},q.prototype.yAxisRanges=function(){for(var e=[],t=0;t=this.rawData_.length||t<0||t>=this.rawData_[e].length?null:this.rawData_[e][t]},q.prototype.createInterface_=function(){var e=this.maindiv_;this.graphDiv=document.createElement("div"),this.graphDiv.style.textAlign="left",this.graphDiv.style.position="relative",e.appendChild(this.graphDiv),this.canvas_=r.createCanvas(),this.canvas_.style.position="absolute",this.canvas_.style.top=0,this.canvas_.style.left=0,this.hidden_=this.createPlotKitCanvas_(this.canvas_),this.canvas_ctx_=r.getContext(this.canvas_),this.hidden_ctx_=r.getContext(this.hidden_),this.resizeElements_(),this.graphDiv.appendChild(this.hidden_),this.graphDiv.appendChild(this.canvas_),this.mouseEventElement_=this.createMouseEventElement_(),this.layout_=new a(this);var t=this;if(this.mouseMoveHandler_=function(e){t.mouseMove_(e)},this.mouseOutHandler_=function(e){var n=e.target||e.fromElement,i=e.relatedTarget||e.toElement;r.isNodeContainedBy(n,t.graphDiv)&&!r.isNodeContainedBy(i,t.graphDiv)&&t.mouseOut_(e)},this.addAndTrackEvent(window,"mouseout",this.mouseOutHandler_),this.addAndTrackEvent(this.mouseEventElement_,"mousemove",this.mouseMoveHandler_),!this.resizeHandler_){this.resizeHandler_=function(e){t.resize()},this.addAndTrackEvent(window,"resize",this.resizeHandler_),this.resizeObserver_=null;var n=this.getStringOption("resizable");if("undefined"===typeof ResizeObserver&&"no"!==n&&(console.error("ResizeObserver unavailable; ignoring resizable property"),n="no"),"horizontal"===n||"vertical"===n||"both"===n?e.style.resize=n:"passive"!==n&&(n="no"),"no"!==n){window.getComputedStyle(e).overflow;"visible"===window.getComputedStyle(e).overflow&&(e.style.overflow="hidden"),this.resizeObserver_=new ResizeObserver(this.resizeHandler_),this.resizeObserver_.observe(e)}}},q.prototype.resizeElements_=function(){this.graphDiv.style.width=this.width_+"px",this.graphDiv.style.height=this.height_+"px";var e=this.getNumericOption("pixelRatio"),t=e||r.getContextPixelRatio(this.canvas_ctx_);this.canvas_.width=this.width_*t,this.canvas_.height=this.height_*t,this.canvas_.style.width=this.width_+"px",this.canvas_.style.height=this.height_+"px",1!==t&&this.canvas_ctx_.scale(t,t);var 
n=e||r.getContextPixelRatio(this.hidden_ctx_);this.hidden_.width=this.width_*n,this.hidden_.height=this.height_*n,this.hidden_.style.width=this.width_+"px",this.hidden_.style.height=this.height_+"px",1!==n&&this.hidden_ctx_.scale(n,n)},q.prototype.destroy=function(){this.canvas_ctx_.restore(),this.hidden_ctx_.restore();for(var e=this.plugins_.length-1;e>=0;e--){var t=this.plugins_.pop();t.plugin.destroy&&t.plugin.destroy()}var n=function(e){for(;e.hasChildNodes();)n(e.firstChild),e.removeChild(e.firstChild)};this.removeTrackedEvents_(),r.removeEvent(window,"mouseout",this.mouseOutHandler_),r.removeEvent(this.mouseEventElement_,"mousemove",this.mouseMoveHandler_),this.resizeObserver_&&(this.resizeObserver_.disconnect(),this.resizeObserver_=null),r.removeEvent(window,"resize",this.resizeHandler_),this.resizeHandler_=null,n(this.maindiv_);var i=function(e){for(var t in e)"object"===typeof e[t]&&(e[t]=null)};i(this.layout_),i(this.plotter_),i(this)},q.prototype.createPlotKitCanvas_=function(e){var t=r.createCanvas();return t.style.position="absolute",t.style.top=e.style.top,t.style.left=e.style.left,t.width=this.width_,t.height=this.height_,t.style.width=this.width_+"px",t.style.height=this.height_+"px",t},q.prototype.createMouseEventElement_=function(){return this.canvas_},q.prototype.setColors_=function(){var e=this.getLabels(),t=e.length-1;this.colors_=[],this.colorsMap_={};for(var n=this.getNumericOption("colorSaturation")||1,i=this.getNumericOption("colorValue")||.5,a=Math.ceil(t/2),o=this.getOption("colors"),s=this.visibility(),l=0;l{e.valueRange&&delete e.valueRange})),this.drawGraph_(),void(o&&o.call(this,r,i,this.yAxisRanges()));var s=null,l=null,c=null,u=null;e&&(s=this.xAxisRange(),l=[r,i]),t&&(c=this.yAxisRanges(),u=this.yAxisExtremes());const d=this;this.doAnimatedZoom(s,l,c,u,(function(){d.dateWindow_=null,d.axes_.forEach((e=>{e.valueRange&&delete e.valueRange})),o&&o.call(d,r,i,d.yAxisRanges())}))},q.prototype.doAnimatedZoom=function(e,t,n,i,a){var o,s,l=this.getBooleanOption("animatedZooms")?q.ANIMATION_STEPS:1,c=[],u=[];if(null!==e&&null!==t)for(o=1;o<=l;o++)s=q.zoomAnimationFunction(o,l),c[o-1]=[e[0]*(1-s)+s*t[0],e[1]*(1-s)+s*t[1]];if(null!==n&&null!==i)for(o=1;o<=l;o++){s=q.zoomAnimationFunction(o,l);for(var d=[],h=0;h=0;--d)for(var h=this.layout_.points[d],f=0;f=l.length)){var c=l[s];if(r.isValidPoint(c)){var u=c.canvasy;if(e>c.canvasx&&s+10)u+=(e-c.canvasx)/h*(d.canvasy-c.canvasy)}else if(e0){var h,f=l[s-1];if(r.isValidPoint(f))if((h=c.canvasx-f.canvasx)>0)u+=(c.canvasx-e)/h*(f.canvasy-c.canvasy)}(0===o||u=0){var o=0,s=this.attr_("labels");for(t=1;to&&(o=l)}var c=this.previousVerticalX_;n.clearRect(c-o-1,0,2*o+2,this.height_)}if(this.selPoints_.length>0){var u=this.selPoints_[0].canvasx;for(n.save(),t=0;t=0){e!=this.lastRow_&&(i=!0),this.lastRow_=e;for(var a=0;a=0&&s=0&&(i=!0),this.lastRow_=-1;if(this.selPoints_.length?this.lastx_=this.selPoints_[0].xval:this.lastx_=null,void 0!==t&&(this.highlightSet_!==t&&(i=!0),this.highlightSet_=t),void 0!==n&&(this.lockedSet_=n),i&&(this.updateSelection_(void 0),r)){var u=this.getFunctionOption("highlightCallback");if(u){u.call(this,{},this.lastx_,this.selPoints_,this.lastRow_,this.highlightSet_)}}return 
i},q.prototype.mouseOut_=function(e){this.getFunctionOption("unhighlightCallback")&&this.getFunctionOption("unhighlightCallback").call(this,e),this.getBooleanOption("hideOverlayOnMouseOut")&&!this.lockedSet_&&this.clearSelection()},q.prototype.clearSelection=function(){this.cascadeEvents_("deselect",{}),this.lockedSet_=!1,this.fadeLevel?this.animateSelection_(-1):(this.canvas_ctx_.clearRect(0,0,this.width_,this.height_),this.fadeLevel=0,this.selPoints_=[],this.lastx_=null,this.lastRow_=-1,this.highlightSet_=null)},q.prototype.getSelection=function(){if(!this.selPoints_||this.selPoints_.length<1)return-1;for(var e=0;e1&&(n=this.dataHandler_.rollingAverage(n,this.rollPeriod_,this.attributes_,t)),this.rolledSeries_.push(n)}this.drawGraph_();var r=new Date;this.drawingTimeMs_=r-e},q.PointType=void 0,q.stackPoints_=function(e,t,n,r){for(var i=null,a=null,o=null,s=-1,l=function(t){if(!(s>=t))for(var n=t;nn[1]&&(n[1]=f),f=1;n--)if(this.visibility()[n-1]){if(t){s=e[n];var h=t[0],f=t[1];for(i=null,a=null,r=0;r=h&&null===i&&(i=r),s[r][0]<=f&&(a=r);null===i&&(i=0);for(var p=i,g=!0;g&&p>0;)g=null===s[--p][1];null===a&&(a=s.length-1);var v=a;for(g=!0;g&&v0;){this.readyFns_.pop()(this)}},q.prototype.computeYAxes_=function(){var e,t,n;for(this.axes_=[],e=0;e0&&(v=0),y<0&&(y=0)),v==1/0&&(v=0),y==-1/0&&(y=1),0===(n=y-v)&&(0!==y?n=Math.abs(y):(y=1,n=1));var b=y,_=v;t&&(d?(b=y+a*n,_=v):((_=v-a*n)<0&&v>=0&&(_=0),(b=y+a*n)>0&&y<=0&&(b=0))),u.extremeRange=[_,b]}if(u.valueRange){var x=s(u.valueRange[0])?u.extremeRange[0]:u.valueRange[0],w=s(u.valueRange[1])?u.extremeRange[1]:u.valueRange[1];u.computedValueRange=[x,w]}else u.computedValueRange=u.extremeRange;if(!t){if((x=u.computedValueRange[0])===(w=u.computedValueRange[1]))if(0===x)w=1;else{var S=Math.abs(x/10);x-=S,w+=S}if(d){var C=a/(2*a-1),O=(a-1)/(2*a-1);u.computedValueRange[0]=r.logRangeFraction(x,w,C),u.computedValueRange[1]=r.logRangeFraction(x,w,O)}else n=w-x,u.computedValueRange[0]=x-n*a,u.computedValueRange[1]=w+n*a}if(f){u.independentTicks=f;var A=(k=this.optionsViewForAxis_("y"+(c?"2":"")))("ticker");u.ticks=A(u.computedValueRange[0],u.computedValueRange[1],this.plotter_.area.h,k,this),o||(o=u)}}if(void 0===o)throw'Configuration Error: At least one axis has to have the "independentTicks" option activated.';for(c=0;c0&&"e"!=e[n-1]&&"E"!=e[n-1]||e.indexOf("/")>=0||isNaN(parseFloat(e)))&&(t=!0),this.setXAxisOptions_(t)},q.prototype.setXAxisOptions_=function(e){e?(this.attrs_.xValueParser=r.dateParser,this.attrs_.axes.x.valueFormatter=r.dateValueFormatter,this.attrs_.axes.x.ticker=l.cc,this.attrs_.axes.x.axisLabelFormatter=r.dateAxisLabelFormatter):(this.attrs_.xValueParser=function(e){return parseFloat(e)},this.attrs_.axes.x.valueFormatter=function(e){return e},this.attrs_.axes.x.ticker=l.Dv,this.attrs_.axes.x.axisLabelFormatter=this.attrs_.axes.x.valueFormatter)},q.prototype.parseCSV_=function(e){var t,n,i=[],a=r.detectLineDelimiter(e),o=e.split(a||"\n"),s=this.getStringOption("delimiter");-1==o[0].indexOf(s)&&o[0].indexOf("\t")>=0&&(s="\t");var l=0;"labels"in this.user_attrs_||(l=1,this.attrs_.labels=o[0].split(s),this.attributes_.reparseSeries());for(var c,u=!1,d=this.attr_("labels").length,h=!1,f=l;f0&&v[0]0;)t=String.fromCharCode(65+(e-1)%26)+t.toLowerCase(),e=Math.floor((e-1)/26);return 
t},n=e.getNumberOfColumns(),i=e.getNumberOfRows(),a=e.getColumnType(0);if("date"==a||"datetime"==a)this.attrs_.xValueParser=r.dateParser,this.attrs_.axes.x.valueFormatter=r.dateValueFormatter,this.attrs_.axes.x.ticker=l.cc,this.attrs_.axes.x.axisLabelFormatter=r.dateAxisLabelFormatter;else{if("number"!=a)throw new Error("only 'date', 'datetime' and 'number' types are supported for column 1 of DataTable input (Got '"+a+"')");this.attrs_.xValueParser=function(e){return parseFloat(e)},this.attrs_.axes.x.valueFormatter=function(e){return e},this.attrs_.axes.x.ticker=l.Dv,this.attrs_.axes.x.axisLabelFormatter=this.attrs_.axes.x.valueFormatter}var o,s,c=[],u={},d=!1;for(o=1;o0&&m[0]0&&this.setAnnotations(y,!0),this.attributes_.reparseSeries()},q.prototype.cascadeDataDidUpdateEvent_=function(){this.cascadeEvents_("dataDidUpdate",{})},q.prototype.start_=function(){var e=this.file_;"function"==typeof e&&(e=e());const t=r.typeArrayLike(e);if("array"==t)this.rawData_=this.parseArray_(e),this.cascadeDataDidUpdateEvent_(),this.predraw_();else if("object"==t&&"function"==typeof e.getColumnRange)this.parseDataTable_(e),this.cascadeDataDidUpdateEvent_(),this.predraw_();else if("string"==t){if(r.detectLineDelimiter(e))this.loadedEvent_(e);else{var n;n=window.XMLHttpRequest?new XMLHttpRequest:new ActiveXObject("Microsoft.XMLHTTP");var i=this;n.onreadystatechange=function(){4==n.readyState&&(200!==n.status&&0!==n.status||i.loadedEvent_(n.responseText))},n.open("GET",e,!0),n.send(null)}}else console.error("Unknown data format: "+t)},q.prototype.updateOptions=function(e,t){"undefined"==typeof t&&(t=!1);var n=e.file,i=q.copyUserAttrs_(e),a=this.attributes_.numAxes();"rollPeriod"in i&&(this.rollPeriod_=i.rollPeriod),"dateWindow"in i&&(this.dateWindow_=i.dateWindow);var o=r.isPixelChangingOptionList(this.attr_("labels"),i);r.updateDeep(this.user_attrs_,i),this.attributes_.reparseSeries(),a=n.length?console.warn("Invalid series number in setVisibility: "+i):n[i]=e[i]);else for(i=0;i=n.length?console.warn("Invalid series number in setVisibility: "+i):n[i]=e[i]:e[i]<0||e[i]>=n.length?console.warn("Invalid series number in setVisibility: "+e[i]):n[e[i]]=t;this.predraw_()},q.prototype.size=function(){return{width:this.width_,height:this.height_}},q.prototype.setAnnotations=function(e,t){this.annotations_=e,this.layout_?(this.layout_.setAnnotations(this.annotations_),t||this.predraw_()):console.warn("Tried to setAnnotations before dygraph was ready. Try setting them in a ready() block. 
See dygraphs.com/tests/annotation.html")},q.prototype.annotations=function(){return this.annotations_},q.prototype.getLabels=function(){var e=this.attr_("labels");return e?e.slice():null},q.prototype.indexFromSetName=function(e){return this.setIndexByName_[e]},q.prototype.getRowForX=function(e){for(var t=0,n=this.numRows()-1;t<=n;){var r=n+t>>1,i=this.getValue(r,0);if(ie)n=r-1;else{if(t==r)return r;n=r}}return null},q.prototype.ready=function(e){this.is_initial_draw_?this.readyFns_.push(e):e.call(this,this)},q.prototype.addAndTrackEvent=function(e,t,n){r.addEvent(e,t,n),this.registeredEvents_.push({elem:e,type:t,fn:n})},q.prototype.removeTrackedEvents_=function(){if(this.registeredEvents_)for(var e=0;ee.y&&l>t.y?(l=Math.max(e.y,t.y),u=2*t.y-l):lt.y&&u>n.y?(u=Math.max(t.y,n.y),l=2*t.y-u):u1&&(a=window.devicePixelRatio,r.style.width=r.style.height=[t.size,"px"].join(""),r.width=r.height=t.size*a,i.scale(a,a)),i.translate(t.size/2,t.size/2),i.rotate((t.rotate/180-.5)*Math.PI);var o=(t.size-t.lineWidth)/2;t.scaleColor&&t.scaleLength&&(o-=t.scaleLength+2),Date.now=Date.now||function(){return+new Date};var s=function(e,t,n){var r=(n=Math.min(Math.max(-1,n||0),1))<=0;i.beginPath(),i.arc(0,0,o,0,2*Math.PI*n,r),i.strokeStyle=e,i.lineWidth=t,i.stroke()},l=function(){var e,n;i.lineWidth=1,i.fillStyle=t.scaleColor,i.save();for(var r=24;r>0;--r)r%6===0?(n=t.scaleLength,e=0):(n=.6*t.scaleLength,e=t.scaleLength-n),i.fillRect(-t.size/2+e,0,n,1),i.rotate(Math.PI/12);i.restore()},c=window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||function(e){window.setTimeout(e,1e3/60)},u=function(){t.scaleColor&&l(),t.trackColor&&s(t.trackColor,t.trackWidth||t.lineWidth,1)};this.getCanvas=function(){return r},this.getCtx=function(){return i},this.clear=function(){i.clearRect(t.size/-2,t.size/-2,t.size,t.size)},this.draw=function(e){var r;t.scaleColor||t.trackColor?i.getImageData&&i.putImageData?n?i.putImageData(n,0,0):(u(),n=i.getImageData(0,0,t.size*a,t.size*a)):(this.clear(),u()):this.clear(),i.lineCap=t.lineCap,r="function"===typeof t.barColor?t.barColor(e):t.barColor,s(r,t.lineWidth,e/100)}.bind(this),this.animate=function(e,n){var r=Date.now();t.onStart(e,n);var i=function(){var a=Math.min(Date.now()-r,t.animate.duration),o=t.easing(this,a,e,n-e,t.animate.duration);this.draw(o),t.onStep(e,n,o),a>=t.animate.duration?t.onStop(e,n):c(i)}.bind(this);c(i)}.bind(this)};return function(t,n){var r={barColor:"#ef1e25",trackColor:"#f9f9f9",scaleColor:"#dfe0e0",scaleLength:5,lineCap:"round",lineWidth:3,trackWidth:void 0,size:110,rotate:0,animate:{duration:1e3,enabled:!0},easing:function(e,t,n,r,i){return(t/=i/2)<1?r/2*t*t+n:-r/2*(--t*(t-2)-1)+n},onStart:function(e,t){},onStep:function(e,t,n){},onStop:function(e,t){}};if("undefined"!==typeof e)r.renderer=e;else{if("undefined"===typeof SVGRenderer)throw new Error("Please load either the SVG- or the CanvasRenderer");r.renderer=SVGRenderer}var i={},a=0,o=function(){for(var e in this.el=t,this.options=i,r)r.hasOwnProperty(e)&&(i[e]=n&&"undefined"!==typeof n[e]?n[e]:r[e],"function"===typeof i[e]&&(i[e]=i[e].bind(this)));"string"===typeof i.easing&&"undefined"!==typeof jQuery&&jQuery.isFunction(jQuery.easing[i.easing])?i.easing=jQuery.easing[i.easing]:i.easing=r.easing,"number"===typeof i.animate&&(i.animate={duration:i.animate,enabled:!0}),"boolean"!==typeof i.animate||i.animate||(i.animate={duration:1e3,enabled:i.animate}),this.renderer=new 
i.renderer(t,i),this.renderer.draw(a),t.dataset&&t.dataset.percent?this.update(parseFloat(t.dataset.percent)):t.getAttribute&&t.getAttribute("data-percent")&&this.update(parseFloat(t.getAttribute("data-percent")))}.bind(this);this.update=function(e){return e=parseFloat(e),i.animate.enabled?this.renderer.animate(a,e):this.renderer.draw(e),a=e,this}.bind(this),this.disableAnimation=function(){return i.animate.enabled=!1,this},this.enableAnimation=function(){return i.animate.enabled=!0,this},o()}}()}.apply(t,[]))||(e.exports=n)},92806:function(e){"use strict";e.exports=function(e,t){for(var n={},r=Object.keys(e),i=Array.isArray(t),a=0;a>1;if(t>0&&"number"!==typeof e[0])throw new Error("Expected coords to contain numbers.");this.coords=e;const n=Math.max(2*t-5,0);this._triangles=new Uint32Array(3*n),this._halfedges=new Int32Array(3*n),this._hashSize=Math.ceil(Math.sqrt(t)),this._hullPrev=new Uint32Array(t),this._hullNext=new Uint32Array(t),this._hullTri=new Uint32Array(t),this._hullHash=new Int32Array(this._hashSize).fill(-1),this._ids=new Uint32Array(t),this._dists=new Float64Array(t),this.update()}update(){const{coords:e,_hullPrev:t,_hullNext:n,_hullTri:a,_hullHash:o}=this,l=e.length>>1;let d=1/0,h=1/0,f=-1/0,p=-1/0;for(let r=0;rf&&(f=t),n>p&&(p=n),this._ids[r]=r}const g=(d+f)/2,v=(h+p)/2;let y,m,b,_=1/0;for(let r=0;r0&&(m=r,_=t)}let S=e[2*m],C=e[2*m+1],O=1/0;for(let r=0;rr&&(t[n++]=i,r=this._dists[i])}return this.hull=t.subarray(0,n),this.triangles=new Uint32Array(0),void(this.halfedges=new Uint32Array(0))}if((0,r.IW)(x,w,S,C,A,k)<0){const e=m,t=S,n=C;m=b,S=A,C=k,b=e,A=t,k=n}const T=function(e,t,n,r,i,a){const o=n-e,s=r-t,l=i-e,c=a-t,u=o*o+s*s,d=l*l+c*c,h=.5/(o*c-s*l);return{x:e+(c*u-s*d)*h,y:t+(o*d-l*u)*h}}(x,w,S,C,A,k);this._cx=T.x,this._cy=T.y;for(let r=0;r0&&Math.abs(d-s)<=i&&Math.abs(h-c)<=i)continue;if(s=d,c=h,l===y||l===m||l===b)continue;let f=0;for(let e=0,t=this._hashKey(d,h);e=0;)if(g=p,g===f){g=-1;break}if(-1===g)continue;let v=this._addTriangle(g,l,n[g],-1,-1,a[g]);a[l]=this._legalize(v+2),a[g]=v,D++;let _=n[g];for(;p=n[_],(0,r.IW)(d,h,e[2*_],e[2*_+1],e[2*p],e[2*p+1])<0;)v=this._addTriangle(_,l,p,a[l],-1,a[_]),a[l]=this._legalize(v+2),n[_]=_,D--,_=p;if(g===f)for(;p=t[g],(0,r.IW)(d,h,e[2*p],e[2*p+1],e[2*g],e[2*g+1])<0;)v=this._addTriangle(p,l,g,-1,a[g],a[p]),this._legalize(v+2),a[p]=v,n[g]=g,D--,g=p;this._hullStart=t[l]=g,n[g]=t[_]=l,n[l]=_,o[this._hashKey(d,h)]=l,o[this._hashKey(e[2*g],e[2*g+1])]=g}this.hull=new Uint32Array(D);for(let r=0,i=this._hullStart;r0?3-n:1+n)/4}(e-this._cx,t-this._cy)*this._hashSize)%this._hashSize}_legalize(e){const{_triangles:t,_halfedges:n,coords:r}=this;let i=0,o=0;for(;;){const s=n[e],c=e-e%3;if(o=c+(e+2)%3,-1===s){if(0===i)break;e=a[--i];continue}const u=s-s%3,d=c+(e+1)%3,h=u+(s+2)%3,f=t[o],p=t[e],g=t[d],v=t[h];if(l(r[2*f],r[2*f+1],r[2*p],r[2*p+1],r[2*g],r[2*g+1],r[2*v],r[2*v+1])){t[e]=v,t[s]=f;const r=n[h];if(-1===r){let t=this._hullStart;do{if(this._hullTri[t]===h){this._hullTri[t]=e;break}t=this._hullPrev[t]}while(t!==this._hullStart)}this._link(e,r),this._link(s,n[o]),this._link(o,h);const l=u+(s+1)%3;i=n&&t[e[o]]>a;)e[o+1]=e[o--];e[o+1]=r}else{let i=n+1,a=r;d(e,n+r>>1,i),t[e[n]]>t[e[r]]&&d(e,n,r),t[e[i]]>t[e[r]]&&d(e,i,r),t[e[n]]>t[e[i]]&&d(e,n,i);const o=e[i],s=t[o];for(;;){do{i++}while(t[e[i]]s);if(a=a-n?(u(e,t,i,r),u(e,t,n,a-1)):(u(e,t,n,a-1),u(e,t,i,r))}}function d(e,t,n){const r=e[t];e[t]=e[n],e[n]=r}function h(e){return e[0]}function f(e){return e[1]}}}]); \ No newline at end of file diff --git 
a/web/gui/v2/8099.4838cb5caf85574b5c9d.js.LICENSE.txt b/web/gui/v2/8099.4838cb5caf85574b5c9d.js.LICENSE.txt new file mode 100644 index 00000000000000..1d685db67590b8 --- /dev/null +++ b/web/gui/v2/8099.4838cb5caf85574b5c9d.js.LICENSE.txt @@ -0,0 +1,20 @@ +/** + * @license + * Copyright 2011 Dan Vanderkam (danvdk@gmail.com) + * MIT-licenced: https://opensource.org/licenses/MIT + */ + +/** + * @license + * Part of dygraphs, see top-level LICENSE.txt file + * MIT-licenced: https://opensource.org/licenses/MIT + */ + +/**! + * easy-pie-chart + * Lightweight plugin to render simple, animated and retina optimized pie charts + * + * @license + * @author Robert Fleischmann (http://robert-fleischmann.de) + * @version 2.1.7 + **/ diff --git a/web/gui/v2/8102.3b2b80fe00d67e577367.chunk.js b/web/gui/v2/8102.3b2b80fe00d67e577367.chunk.js new file mode 100644 index 00000000000000..3d10f828877456 --- /dev/null +++ b/web/gui/v2/8102.3b2b80fe00d67e577367.chunk.js @@ -0,0 +1 @@ +!function(){try{var C="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(C._sentryDebugIds=C._sentryDebugIds||{},C._sentryDebugIds[t]="3e126cf7-e4e8-4dd2-9b0c-0efdfac74681",C._sentryDebugIdIdentifier="sentry-dbid-3e126cf7-e4e8-4dd2-9b0c-0efdfac74681")}catch(C){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[8102],{48102:function(C,t,e){e.r(t),e.d(t,{default:function(){return g}});var n=e(29439),i=e(67294),l=e(93416),a=e(92432),o=e(87854),d=e.n(o),r=e(95348),c=e.n(r),p=new(d())({id:"terms.svg",use:"terms.svg-usage",viewBox:"0 0 240 240",content:'\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'}),f=(c().add(p),p),H=function(){return i.createElement("svg",{height:"240px",width:"240px",viewBox:f.viewBox},i.createElement("use",{xlinkHref:"#".concat(f.id)}))},s=e(78312),V=e(97096),h=function(C){var t=C.checked,e=C.error,n=C.onChange,a=C.tagging,o=C.children;return i.createElement(l.Flex,{alignItems:"center","data-testid":"termsAndConditions"},i.createElement(s.P2,{checked:t,onChange:n,error:e,"data-ga":"signinup::click-checkbox::".concat(a),"data-testid":"termsAndConditions-checkbox"}),o||i.createElement(l.TextBig,null,"By signing up, you agree to the Netdata ",i.createElement(V.w,{tagging:a})," and"," ",i.createElement(V.F,{tagging:a})))},E=e(13477),u=e(46667),g=function(){var C=(0,E.ec)("termsAccepted",{shouldPersist:!0}),t=(0,n.Z)(C,2),e=t[0],o=t[1],d=(0,u.Z)(),r=(0,n.Z)(d,2),c=r[0],p=r[1],f=(0,i.useCallback)((function(){(0,a.L)("","","","","","terms-accept"),o(c)}),[c]);return e?null:i.createElement(l.ConfirmationDialog,{confirmLabel:"Accept","data-ga":"accept-terms-dialog","data-testid":"acceptTermsDialog",handleConfirm:f,hideIcon:!0,hideDecline:!0,isConfirmPositive:!0,isConfirmDisabled:!c,message:i.createElement(l.Flex,{gap:4,alignItems:"center",column:!0},i.createElement(H,null),i.createElement(h,{checked:c,onChange:p,tagging:"modal-view"})),title:"To use Netdata you need to read and accept our terms and conditions"})}},78312:function(C,t,e){e.d(t,{Fg:function(){return o},P2:function(){return r},Sn:function(){return c},U5:function(){return d},Yb:function(){return p},xG:function(){return a}});var 
n=e(71893),i=e(93416),l=e(16772),a=n.default.div.withConfig({displayName:"styled__SvgContainer",componentId:"sc-16ytcl4-0"})(["width:42px;height:42px;flex-shrink:0;display:flex;justify-content:center;align-items:center;border-radius:2px;background:white;"]),o=n.default.a.withConfig({displayName:"styled__StyledLink",componentId:"sc-16ytcl4-1"})(["display:inline-flex;align-items:center;text-decoration:none;color:",";cursor:pointer;&:hover{text-decoration:underline;color:"," !important;}&:visited{color:",";}> svg{fill:",";padding-right:",";}"],(0,i.getColor)("success"),(0,i.getColor)("success"),(0,i.getColor)("success"),(0,i.getColor)("main"),(0,i.getSizeBy)(1)),d=(0,n.default)(l.Z).withConfig({displayName:"styled__EmailInput",componentId:"sc-16ytcl4-2"})(["",""],(function(C){return C.isLastSignInMethod?"border: 2px solid green;":""})),r=(0,n.default)(i.Checkbox).withConfig({displayName:"styled__StyledCheckbox",componentId:"sc-16ytcl4-3"})(["margin:0 "," 0 0;& div:last-child{border-color:",";}"],(0,i.getSizeBy)(2),(function(C){return C.error&&(0,i.getColor)("error")})),c=(0,n.default)(i.Button).withConfig({displayName:"styled__StyledButton",componentId:"sc-16ytcl4-4"})(["&&{height:44px;}"]),p=(0,n.default)(i.Flex).attrs((function(C){var t=C.gap;return{column:!0,gap:void 0===t?8:t,alignSelf:"center",padding:[0,0,8,0],border:{side:"bottom",color:"disabled"},width:{max:"320px"}}})).withConfig({displayName:"styled__FormContainer",componentId:"sc-16ytcl4-5"})(["width:100%;"])},97096:function(C,t,e){e.d(t,{F:function(){return d},w:function(){return o}});e(92222);var n=e(67294),i=e(78312),l={link:"https://www.netdata.cloud/terms",title:"Terms And Conditions",dataGa:"signinup::click-terms::"},a=function(C){var t=C.link,e=C.title,l=C.dataGa;return function(C){var a=C.tagging;return n.createElement(i.Fg,{href:t,target:"_blank",rel:"noopener noreferrer","data-ga":"".concat(l).concat(a)},e)}},o=a({link:"https://www.netdata.cloud/privacy",title:"Privacy Policy",dataGa:"signinup::click-privacy::"}),d=a(l)}}]); \ No newline at end of file diff --git a/web/gui/v2/8276.cb877f5ee79184a9e0fb.chunk.js b/web/gui/v2/8276.cb877f5ee79184a9e0fb.chunk.js new file mode 100644 index 00000000000000..e14e3784fd0605 --- /dev/null +++ b/web/gui/v2/8276.cb877f5ee79184a9e0fb.chunk.js @@ -0,0 +1 @@ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="48e99b0e-10dd-42e5-9cd5-7a8355cab813",e._sentryDebugIdIdentifier="sentry-dbid-48e99b0e-10dd-42e5-9cd5-7a8355cab813")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[8276],{51091:function(e,t,n){"use strict";var a=n(87462),r=n(45987),l=(n(57327),n(41539),n(88449),n(2490),n(59849),n(21249),n(57640),n(9924),n(92222),n(71893)),o=n(10928),i=n.n(o),c=n(67294),s=n(93416),u=["items","isBig","showBackButton","testid"],m=(0,l.default)(s.Box).withConfig({displayName:"breadcrumbs__StyledItemContainer",componentId:"sc-3u39st-0"})([""]),d=(0,l.css)(["&:hover{color:",";}"],(function(e){var t=e.withHover,n=e.theme;return 
t&&(0,s.getColor)("success")({theme:n})})),p=(0,l.default)(s.Text).withConfig({displayName:"breadcrumbs__StyledText",componentId:"sc-3u39st-1"})(["",""],d),g=(0,l.default)(s.TextSmall).withConfig({displayName:"breadcrumbs__StyledTextSmall",componentId:"sc-3u39st-2"})(["",""],d);t.Z=function(e){var t=e.items,n=e.isBig,l=e.showBackButton,o=void 0===l||l,d=e.testid,f=void 0===d?"":d,b=(0,r.Z)(e,u),h=(0,c.useMemo)((function(){return null!==t&&void 0!==t&&t.length&&o?i()(t.filter((function(e){return!!e.onClick}))).onClick:null}),[t,o]);if(null===t||void 0===t||!t.length)return null;var E=n?p:g;return c.createElement(s.Flex,(0,a.Z)({gap:4},b),o&&c.createElement(s.Button,{onClick:h,icon:"chevron_left",label:"Back",neutral:!0,flavour:"hollow",small:!0,padding:[0,2,0,1],textTransform:"uppercase","data-testid":"".concat(f,"-breadcrumbs-backButton")}),c.createElement(s.Flex,{gap:2,alignItems:"center"},t.map((function(e,t){var n=e.isDisabled,r=e.name,l=e.onClick;return c.createElement(m,(0,a.Z)({key:t,alignItems:"center"},l&&{cursor:"pointer",onClick:l},{"data-testid":"".concat(f,"-breadcrumbs-level-").concat(t)}),c.createElement(E,{color:n&&"textLite","data-testid":"".concat(f,"-breadcrumbs-level-").concat(t),withHover:!!l},0!==t&&" / ",r))}))))}},80959:function(e,t,n){"use strict";var a=n(87462),r=n(45987),l=n(67294),o=n(93416),i=["message","title","footer"];t.Z=function(e){var t=e.message,n=e.title,c=e.footer,s=(0,r.Z)(e,i),u=(null===s||void 0===s?void 0:s["data-testid"])||"functionError";return l.createElement(o.Flex,(0,a.Z)({alignItems:"center",column:!0,"data-testid":u,flex:!0,gap:3,justifyContent:"center",padding:[0,20]},s),l.createElement(o.H3,{"data-testid":"".concat(u,"-title")},n),l.createElement(o.TextBig,{color:"textDescription","data-testid":"".concat(u,"-message")},t),c)}},27308:function(e,t,n){"use strict";var a=n(87462),r=n(67294),l=n(79655),o=n(93416),i=n(91008),c=n(33335),s=n(99826);t.Z=function(e){var t=(0,c.gI)("billing:ReadAll"),n=(0,s.Z)().url;return n?r.createElement(o.Flex,{background:"sideBarMini",border:{side:"all",color:"border"},padding:[1,2],round:!0},r.createElement(i.Z,(0,a.Z)({align:"bottom",as:l.rU,boxProps:{as:o.Flex},color:"text",Component:o.TextMicro,content:"Upgrade your plan in order to use this feature",disabled:!t,hoverColor:"textFocus",showToolTip:!0,strong:!0,to:n},e),"Upgrade now!")):null}},36412:function(e,t,n){"use strict";n.d(t,{Z:function(){return S}});var a=n(45987),r=n(67294),l=n(87462),o=(n(82526),n(41817),n(93416)),i=n(82351),c=["description","isRequired","title"],s=function(e){var t=e.description,n=e.isRequired,l=e.title,s=(0,a.Z)(e,c),u=s["data-testid"]?"".concat(s["data-testid"],"-label"):"fieldLabel";return r.createElement(o.Flex,{"data-testid":"".concat(u,"-container"),gap:1},r.createElement(o.TextSmall,{color:"textLite","data-testid":u},l,n&&" *"),t&&r.createElement(i.Z,{align:"top",content:t,"data-testid":"".concat(u,"-info"),plain:!0},r.createElement(o.Icon,{color:"textLite",name:"information",size:"small"})))},u=["data-testid","description","isRequired","onChange","title"],m=function(e){var t=e["data-testid"],n=void 0===t?"input":t,i=e.description,c=e.isRequired,m=e.onChange,d=e.title,p=(0,a.Z)(e,u);return r.createElement(o.Flex,{column:!0,"data-testid":n,flex:"grow",gap:1},r.createElement(s,{"data-testid":n,description:i,isRequired:c,title:d}),r.createElement(o.TextInput,(0,l.Z)({"data-testid":"".concat(n,"-field"),size:"tiny",onChange:function(e){var t=e.target;return 
m(t.value)}},p)))},d=(n(21249),n(57640),n(9924),n(26833),n(92222),["data-testid","description","getDataGa","fields","id","isRequired","onChange","secrets","setSecrets","title"]),p=["getValue","id","onChange"],g=function(e){var t,n=e["data-testid"],i=void 0===n?"select":n,c=e.description,u=e.getDataGa,m=e.fields,g=e.id,f=e.isRequired,b=e.onChange,h=e.secrets,E=e.setSecrets,v=e.title,y=(0,a.Z)(e,d);return r.createElement(o.Flex,{column:!0,"data-testid":i,flex:"grow",gap:1},r.createElement(s,{"data-testid":i,description:c,isRequired:f,title:v}),r.createElement(o.Select,(0,l.Z)({"data-testid":"".concat(i,"-field"),menuPortalTarget:document.body,onChange:function(e){return b(e)},styles:{size:"tiny"},menuPlacement:"auto"},y)),Object.values((null===m||void 0===m?void 0:m[null===(t=y.value)||void 0===t?void 0:t.value])||{}).map((function(e){var t=e.getValue,n=e.id,o=e.onChange,c=(0,a.Z)(e,p);return r.createElement(T,(0,l.Z)({"data-ga":u("".concat(g,"-").concat(n,"-").concat(c.component)),"data-testid":"".concat(i,"-").concat(n),key:n,id:n,onChange:o({id:g,setSecrets:E,subsetId:n}),value:t({id:g,secrets:h,subsetId:n})},c))})))},f=n(4942),b=n(29439),h=(n(85827),n(41539),n(25387),n(2490),n(72608),n(47941),n(69720),n(57327),n(88449),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(57557)),E=n.n(h),v=n(73955),y=n.n(v),x=n(31790),w=n(25185);function C(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function O(e){for(var t=1;t span{font-weight:bold;margin-left:4px;}"]),m=(0,r.default)(l.TextSmall).withConfig({displayName:"styled__LearnMoreTest",componentId:"sc-wc4x41-3"})(["position:relative;"]),d=(0,r.default)(l.ModalContent).attrs({background:"dropdown",height:{base:150,max:150,min:45},overflow:{vertical:"auto"},width:{base:121,max:140,min:70}}).withConfig({displayName:"styled__ModalContent",componentId:"sc-wc4x41-4"})(["box-shadow:0 18px 28px rgb(9 30 66 / 15%),0 0 1px rgb(9 30 66 / 31%);"]),p=(0,r.default)(l.ModalCloseButton).attrs({color:"text",height:"16px",width:"16px"}).withConfig({displayName:"styled__ModalClose",componentId:"sc-wc4x41-5"})(["&:hover{fill:",";}"],(0,l.getColor)("selected")),g=(0,r.default)(l.Flex).attrs((function(e){return i(i({},e.hasBorder?{border:{side:"bottom",color:"disabled"}}:{}),{},{column:!0,padding:[3,4]})})).withConfig({displayName:"styled__ModalSection",componentId:"sc-wc4x41-6"})([""]),f=(0,r.default)(l.Icon).attrs({name:"warning_triangle",height:"18px",width:"18px",color:["yellow","amber"]}).withConfig({displayName:"styled__WarningIcon",componentId:"sc-wc4x41-7"})(["position:absolute;top:0;right:0;z-index:1;"]),b=(0,r.default)(l.Icon).attrs({height:"12px",width:"12px",color:"white"}).withConfig({displayName:"styled__SystemIcon",componentId:"sc-wc4x41-8"})(["position:relative;top:2px;"])},62195:function(e,t,n){"use strict";var a=n(29439),r=(n(41539),n(88674),n(17727),n(67294)),l=n(50308),o=n.n(l),i=n(93416),c=n(36285),s=n(93017),u=n(39979),m=n(54005),d=n(63346),p=n(46667),g=n(23154),f=(0,u.Z)(i.Button);t.Z=function(e){var t=e.onConfirm,n=e.onDecline,l=void 0===n?o():n,u=e.onCancellingEnd,b=void 0===u?o():u,h=(0,c.Zn)(),E=(0,s.Z)(),v=(0,a.Z)(E,2),y=v[0],x=v[1],w=(0,g.Z)().id,C=(0,m.Z)(),O=C.sendLog,P=C.isReady,Z=(0,p.Z)(),T=(0,a.Z)(Z,4),S=T[0],k=T[2],I=T[3],j=(0,r.useCallback)((function(){k(),h({productId:w}).then((function(){y({header:"Successfully canceled subscription",text:"You 
are now on Community plan"}),O({feature:"TrialOptOut",isSuccess:!0})})).catch((function(){x({header:"Failed to cancel the subscription",text:"Remained on Business plan"}),O({feature:"TrialOptOut",isFailure:!0,error:"Failed to cancel the subscription"})})).finally((function(){b(),I()}))}),[w,P]),F=(0,r.useCallback)((function(){t?t():j()}),[t,j]);return r.createElement(i.Modal,{backdropProps:{backdropBlur:!0}},r.createElement(d.ZP,{feature:"TrialOptOut"},r.createElement(i.ModalContent,{width:{base:140}},r.createElement(i.ModalHeader,null,r.createElement(i.Flex,{gap:2,alignItems:"center"},r.createElement(i.H4,null,"Go to Community plan"))),r.createElement(i.ModalBody,null,S?r.createElement(i.Flex,{height:"100px"},r.createElement(i.TextBig,null,"Changing billing plan...")):r.createElement(i.Flex,{gap:2,column:!0},r.createElement(i.H3,null,"Are You Sure?"),r.createElement(i.TextBig,null,"It looks like you have chosen to opt-out of your free 30-day business trial. Are you sure you do not want to experience all the features Netdata has to offer?"),r.createElement(i.TextBig,null,"By opting out, you will switch to the community plan immediately."))),r.createElement(i.ModalFooter,null,r.createElement(i.Flex,{justifyContent:"end",gap:4,padding:[1,2]},r.createElement(f,{feature:"TrialOptOut",label:"Yes, I am sure!",flavour:"hollow",small:!0,onClick:F,disabled:!w||S,textTransform:""}),r.createElement(f,{feature:"TrialOptOut",label:"No, I want the trial!",small:!0,onClick:l,disabled:S,textTransform:""}))))))}},96009:function(e,t,n){"use strict";n.d(t,{Bk:function(){return o},NY:function(){return r},_j:function(){return l},fe:function(){return i},v8:function(){return a}});var a={default:"successBackground",warning:"warningBackground",critical:"errorBackground"},r={default:{background:"successSemi",border:"success"},warning:{background:"warningSemi",border:"warning"},critical:{background:"errorSemi",border:"error"}},l=[30,15,3,2,1],o="dismissedTrialWelcome",i="dismissedTrialWarningDate"},66987:function(e,t,n){"use strict";var a=n(87462),r=n(67294),l=n(93416),o=n(50308),i=n.n(o),c=n(60511),s={banner:{color:"main"},sidebar:{lineHeight:"1.6",color:"main"},freePlanUpgrade:{lineHeight:"1.6",color:"main"},billing:{color:"textLite"}};t.Z=function(e){var t=e.flavour,n=e.onUpdateClick,o=void 0===n?i():n,u=(0,c.Z)(),m=u.daysRemaining,d=u.canUpgrade,p=u.trialEndsAt,g=(0,r.useMemo)((function(){return{isBanner:"banner"==t,isSidebar:"sidebar"==t,isBilling:"billing"==t,isFreePlanUpgrade:"freePlanUpgrade"==t}}),[t]);return r.createElement(l.Flex,{justifyContent:g.isBilling?"start":"center",alignItems:"center",width:"100%",gap:2},g.isFreePlanUpgrade?r.createElement(l.Text,s[t],"Upgrade your plan for unlimited access and Business features."):r.createElement(l.Text,s[t],"You have ",r.createElement(l.Text,(0,a.Z)({strong:!0},s[t]),"".concat(m," days"))," ","left to explore all the features of Netdata Business."," ",g.isBilling&&r.createElement(r.Fragment,null,"Trial ends at"," ",r.createElement(l.Text,(0,a.Z)({strong:!0},s[t]),p),"."," "),g.isBanner&&d?r.createElement(l.Box,{"data-testid":"upgrade-to-business-banner",onClick:o,as:l.Text,cursor:"pointer",textDecoration:"underline",color:"main"},"Consider upgrading for unlimited access."):r.createElement(r.Fragment,null,"Consider upgrading for unlimited access.")))}},29292:function(e,t,n){"use strict";var a=n(45987),r=n(67294),l=n(50308),o=n.n(l),i=n(93416),c=n(54005),s=["flavour","onOptOutClick"],u={default:"Or you can opt to downgrade immediately",billing:"Or you can opt to 
downgrade immediately"};t.Z=function(e){var t=e.flavour,n=void 0===t?"default":t,l=e.onOptOutClick,m=void 0===l?o():l,d=(0,a.Z)(e,s),p=(0,c.Z)(),g=p.sendLog,f=p.isReady,b=(0,r.useCallback)((function(){m(),g({feature:"TrialOptOut",isStart:!0})}),[f]);return r.createElement(i.Text,d,"After the trial, you'll automatically switch to the free Community plan."," ",r.createElement(i.Box,{"data-testid":"upgrade-to-business-banner",onClick:b,as:i.Text,cursor:"pointer",textDecoration:"underline",color:"primary"},u[n]),".")}},60511:function(e,t,n){"use strict";n.d(t,{Z:function(){return f}});var a=n(29439),r=n(67294),l=n(33335),o=n(33937),i=n(13477),c=(n(41539),n(64211),n(2490),n(41874),n(33926)),s=n(36285),u=n(80699),m=n(96009),d=n(38314),p=n(14808),g=function(e){var t=new Date(e||void 0).toLocaleDateString();return"Invalid Date"!==t?t:null},f=function(){var e,t=(0,s.RP)(),n=t.loaded,f=t.value,b=t.refresh,h=(0,u.Z)().isFailure,E=f||{},v=E.slug,y=E.trialEndsAt,x=(0,r.useMemo)((function(){return function(e){if(!e)return null;var t=new Date(e)-new Date;return Math.ceil(t/864e5)}(y)}),[y]),w=n&&!!y,C=(0,l.gI)("billing:Manage"),O=(0,o.pI)("trialModalDismissed"),P=(0,a.Z)(O,1)[0],Z=p.Z3&&C&&(w||h)&&!localStorage.getItem(m.Bk)&&!P,T=(0,r.useState)(localStorage.getItem(m.fe)),S=(0,a.Z)(T,2),k=S[0],I=S[1],j=(0,r.useMemo)((function(){return p.Z3&&w}),[w]),F=(0,r.useState)(),D=(0,a.Z)(F,2),A=D[0],M=D[1],N=(e=x)>15?"default":e>5?"warning":"critical",B=!(0,i.Iy)("isAnonymous")&&C;return(0,r.useEffect)((function(){var e=function(e,t,n,a){if(t<0)return!1;var r=new Date(e||void 0);if("Invalid Date"==r)return!1;var l=new Date(a||void 0);if("Invalid Date"==l)return!0;var o=(0,c.Z)({start:l,end:r}).days;return n.some((function(e){return e>=t&&e1&&void 0!==b[1]?b[1]:{}).onSuccess,s=i.onFail,t.next=3,n.getPromise((0,E.Ad)({id:e,key:"ids"}));case 3:return u=t.sent,m=l.map((function(e){return e.id})),d=u.filter((function(e){return!m.includes(e)})),a((0,E.Ad)({id:e,key:"ids"}),d),t.next=9,Promise.all(d.map((function(e){return n.getPromise((0,k.n2)({id:e}))})));case 9:return p=t.sent,t.prev=10,t.next=13,F(e,m);case 13:a(I.IV,(0,j.rZ)(p)),l.forEach((function(t){var n=t.id,a=t.slug;r((0,k.n2)({id:n})),r((0,I.xh)([e,a]))})),c&&c(),l.length>1?(0,T.$T)("Rooms were successfully deleted from Space!"):(g=(0,o.Z)(l,1),f=g[0],(0,T.$T)("Room ".concat(f.name," was successfully deleted!"))),t.next=23;break;case 19:t.prev=19,t.t0=t.catch(10),a((0,E.Ad)({id:e,key:"ids"}),u),s&&s(t.t0);case 23:case"end":return t.stop()}}),t,null,[[10,19]])})));return function(e){return t.apply(this,arguments)}}()}),[e])}(t),c=(0,h.gI)("space:Delete"),s=(0,h.gI)("room:Create"),u=function(e,t){if(e){var n=Array.isArray(e)?e.map((function(e){return{id:e.id,name:e.name}})):[{id:e.id,name:e.name}];i(n,{onSuccess:t.resetRowSelection})}},m=function(e,t){var n=t.length;return n?1===n?(0,D.CE)(t[0].name):(0,D.CE)(n):""},d=function(e,t){var r=t.length;if(!r)return"";var l=M(M({},1===r?{name:t[0].name}:{roomsLength:r}),{},{spaceName:n});return a.createElement(D.M5,l)},p=(0,r.TH)().pathname,g=(0,r.s0)(),f=(0,a.useCallback)((function(e){return g("".concat(p,"/").concat(e))}),[p,g]),b=(0,a.useMemo)((function(){return{goto:{handleAction:function(e){var t=e.slug;return f(t)},icon:"chevron_right",tooltipText:"Room settings"},delete:{confirmLabel:"Yes, delete",confirmationMessage:function(e){return a.createElement(D.M5,{name:e.name,spaceName:n})},confirmationTitle:function(e){return(0,D.R4)(e.name)},dataGa:function(e){var 
t=e.slug;return"manage-rooms::click-delete::".concat(t)},declineLabel:"Cancel",handleAction:u,isDisabled:function(e){return e.untouchable||!c},tooltipText:"Delete room"}}}),[c,f]);return{bulkActions:(0,a.useMemo)((function(){return{addEntry:{dataGa:"manage-rooms::click::add-room",handleAction:l,isDisabled:!s,tooltipText:"Create room"},delete:{confirmationMessage:d,confirmationTitle:m,confirmLabel:"Yes, delete",dataGa:"manage-rooms::delete-bulk",declineLabel:"Cancel",handleAction:u,isDisabled:!c,tooltipText:"Delete rooms"}}}),[s,c,d,m]),rowActions:b,onClickRow:f}};function B(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function L(e){for(var t=1;t1&&void 0!==u[1]?u[1]:{}).onSuccess,i=l.onError,t.next=3,n.getPromise((0,ie.WA)({id:e,key:"ids"}));case 3:return c=t.sent,s=c.filter((function(e){return!e.includes(r)})),a((0,ie.WA)({id:e,key:"ids"}),s),t.prev=6,t.next=9,(0,Je.Sz)(e,r);case 9:o&&o(),t.next=16;break;case 12:t.prev=12,t.t0=t.catch(6),a((0,ie.WA)({id:e,key:"ids"}),c),i&&i();case 16:case"end":return t.stop()}}),t,null,[[6,12]])})));return function(e){return t.apply(this,arguments)}}()}),[e])}(t),l=function(e){return(0,Z._8)((function(t){var n=t.snapshot,a=t.set;return function(){var t=(0,C.Z)(P().mark((function t(r,l){var o,i,s,u,m,d,p,g=arguments;return P().wrap((function(t){for(;;)switch(t.prev=t.next){case 0:return i=(o=g.length>2&&void 0!==g[2]?g[2]:{}).onSuccess,s=o.onError,t.next=3,n.getPromise((0,ce.W3)(r));case 3:return u=t.sent,t.next=6,Promise.all(r.map(function(){var t=(0,C.Z)(P().mark((function t(a){var r;return P().wrap((function(t){for(;;)switch(t.prev=t.next){case 0:return t.next=2,n.getPromise((0,ie.ZN)({id:a,spaceId:e}));case 2:return r=t.sent,t.abrupt("return",{mId:a,role:r});case 4:case"end":return t.stop()}}),t)})));return function(e){return t.apply(this,arguments)}}()));case 6:return m=t.sent,d=l.role,p=(0,c.Z)(l,et),r.forEach((function(e){a((0,ce.mX)({id:e}),(function(e){return at(at({},e),p)}))})),d&&r.forEach((function(t){a((0,ie.ZN)({id:t,spaceId:e}),d)})),t.prev=10,t.next=13,rt(e,r,l);case 13:i&&i(),t.next=21;break;case 16:t.prev=16,t.t0=t.catch(10),u.forEach((function(e){var t=e.id,n=(0,c.Z)(e,tt);return a((0,ce.mX)({id:t}),at({id:t},n))})),d&&m.forEach((function(t){var n=t.mId,r=t.role;return a((0,ie.ZN)({id:n,spaceId:e}),r)})),s&&s();case 21:case"end":return t.stop()}}),t,null,[[10,16]])})));return function(e,n){return t.apply(this,arguments)}}()}),[e])}(t),o=(0,h.gI)("space:InviteUser"),s=(0,ct.HK)(),u=(0,a.useCallback)((function(e){var t=e.role,n=e.members;l(n,{role:t},{onSuccess:s})}),[]),m=function(e){return function(t){if(e){var n=Array.isArray(e)?e.map((function(e){return e.user.id})):[null===e||void 0===e?void 0:e.user.id];u({role:t,members:n})}}},d=function(e,t){if(e){var n=Array.isArray(e)?e.map((function(e){return e.user.id})):[null===e||void 0===e?void 0:e.user.id];r(n,{onSuccess:t.resetRowSelection})}},p=(0,a.useCallback)((function(){return"Delete User"}),[]),g=(0,a.useCallback)((function(e){return a.createElement(a.Fragment,null,"You are about to delete ",a.createElement("strong",null,e.name),".",a.createElement("br",null),"Are you sure you want to continue?")}),[]),f=(0,a.useCallback)((function(e,t){var n=t.length;return n?1===n?"Delete User":"Delete Users":""}),[]),b=(0,a.useCallback)((function(e,t){var n=t.length;return n?a.createElement(a.Fragment,null,"You are about to 
delete"," ",a.createElement("strong",null,1===n?t[0].name:"".concat(t.length," users")),".",a.createElement("br",null),"Are you sure you want to continue?"):""}),[]);return{rowActions:(0,a.useMemo)((function(){return{userSettings:{CustomUIAction:function(e){var t=e.data,n=(0,c.Z)(e,st);return a.createElement(it,(0,i.Z)({ids:[t.user.id]},n))},handleAction:m,tooltipText:"Change Role",isDisabled:function(e){return e.disabled},disabledTooltipText:"Changing roles is disabled"},delete:{handleAction:d,confirmationTitle:p,confirmationMessage:g,isDisabled:function(e){return e.disabled},disabledTooltipText:"Delete is disabled"}}}),[]),bulkActions:(0,a.useMemo)((function(){return{addEntry:{handleAction:n,tooltipText:"Invite user",isDisabled:function(){return!o},disabledTooltipText:"Invitations are disabled"},userSettings:{CustomUIAction:function(e){var t=e.data,n=(0,c.Z)(e,ut);return a.createElement(it,(0,i.Z)({ids:t.map((function(e){return e.user.id}))},n))},handleAction:m,tooltipText:"Change Roles",disabledTooltipText:"Changing roles is disabled"},delete:{handleAction:d,confirmationTitle:f,confirmationMessage:b,disabledTooltipText:"Delete is disabled"}}}),[o,f,b])}},dt=function(){var e=(0,v.th)(),t=(0,v.vu)(e,"name"),n=(0,ie.Qk)(),r=(0,je.Iy)("id"),l=(0,y.Z)(),i=(0,o.Z)(l,4),c=i[0],s=i[2],u=i[3],m=(0,a.useState)(""),d=(0,o.Z)(m,2),p=d[0],g=d[1],f=(0,a.useState)([]),b=(0,o.Z)(f,2),E=b[0],x=b[1],w=mt({selectedRows:E,spaceId:e,startIsInviting:s}),C=w.rowActions,O=w.bulkActions,P=(0,h.gI)("space:RemoveUser"),Z=(0,h.gI)("user:ChangeRoles"),T=(0,h.M2)(),S=P||Z,k=(0,a.useMemo)((function(){return function(e){var t=e.fromRolePermissions,n=e.userList,a=e.canModifyUser,r=e.currentUserId;return n.reduce((function(e,n){var l=r===(null===n||void 0===n?void 0:n.id),o=t.includes(n.role);return e.push({name:n.name,email:n.email,user:{avatarURL:n.avatarURL,deactivated:n.deactivated,name:n.name,email:n.email,id:n.id},type:n.role,disabled:!o||!a||l}),e}),[])}({userList:n,currentUserId:r,canModifyUser:S,fromRolePermissions:T})}),[n]);return{columns:$e,spaceName:t,members:n,currentUserId:r,data:k,isInviting:c,globalFilter:p,rowActions:C,bulkActions:O,columnVisibility:{email:!1,name:!1},onRowSelected:x,setGlobalFilter:g,startIsInviting:s,stopIsInviting:u,canModifyUser:S}},pt=function(e){var t=dt(),n=t.columns,r=t.spaceName,l=t.data,o=t.isInviting,c=t.rowActions,s=t.bulkActions,u=t.columnVisibility,m=t.stopIsInviting,p=t.setGlobalFilter,g=t.onRowSelected;return a.createElement(_.ZP,{tab:"Users"},a.createElement(d.Flex,(0,i.Z)({column:!0,height:"100%",overflow:"hidden"},e),a.createElement(d.H3,null,"Members of ",r),a.createElement(d.TextSmall,{margin:[1,0,3]},"Learn more about Netdata role-based access model on"," ",a.createElement(Oe.Z,{href:Pe.R,target:"_blank",rel:"noopener noreferrer",Component:d.TextSmall},"our documentation")),a.createElement(d.Table,{onSearch:p,data:l,dataColumns:n,enableSorting:!0,enableSelection:!0,onRowSelected:g,bulkActions:s,rowActions:c,columnVisibility:u,testPrefixCallback:function(e){return e.name}})),o&&a.createElement(Ke.l,{onClose:m,isSubmodal:!0}))},gt=n(33582),ft=n(43656),bt=(0,a.memo)((function(e){var t=(0,v.th)(),n=(0,v.OS)("name"),r=(0,ft.Z)().nodes;(0,gt.Z)(t);var l="Connect Nodes to ".concat(n);return 
a.createElement(_.ZP,{tab:"Nodes"},a.createElement(d.Flex,(0,i.Z)({column:!0,"data-testid":"manageClaimedNodes",height:"100%",overflow:"hidden",gap:3},e),a.createElement(d.H3,{"data-testid":"manageClaimedNodes-header"},l),a.createElement($.Z,{showClaimNodeOnEmptySpace:!0,showClaimModalWithRoomSelection:!0,enableSelection:!0,customNodes:r,isSpace:!0})))})),ht=bt,Et=(n(91058),n(77184)),vt=n(45736),yt=n(3689),xt=function(e){var t=e.name,n=e.spaceName;return a.createElement(a.Fragment,null,"You are about to delete ",a.createElement("strong",null,t)," channel from ",a.createElement("strong",null,n)," space.",a.createElement("br",null),"This cannot be undone. Are you sure you want to continue?")},wt=n(31790),Ct=(n(64211),n(41874),n(26833),n(25185)),Ot=["integration","testId"],Pt=["testId","tooltipContent"],Zt=["data-testid","enabled","id","integration","isAvailable","name","spaceId","tooltipContent"];function Tt(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function St(e){for(var t=1;t1?e.filter((function(e){return e.value})):e;G(n),H(a)}},options:B,placeholder:"Select rooms",title:"Rooms",value:S}),a.createElement(zt.Z,{component:"select","data-ga":j("notification-select"),"data-testid":"".concat(Qt,"-notifications"),isRequired:!0,onChange:function(e){var t=e.value;U(t)},options:Object.values(yt.Nq),placeholder:"Select notifications",title:"Notifications",value:yt.Nq[g]}))),a.createElement(Ct.QD,{gap:2,"data-testid":"".concat(Qt,"-dynamicFields"),hasBorder:!0},a.createElement(d.Text,{"data-testid":"".concat(Qt,"-dynamicFields-header")},"Integration configuration"),a.createElement(d.Flex,{column:!0,"data-testid":"".concat(Qt,"-dynamicFields-body"),gap:3},Object.values(y).map((function(e){var t=e.id,n=e.getValue,r=e.onChange,l=(0,c.Z)(e,Yt);return a.createElement(zt.Z,(0,i.Z)({"data-ga":j("".concat(t,"-").concat(l.component)),"data-testid":"".concat(Qt,"-").concat(t),getDataGa:j,key:t,id:t,onChange:null===r||void 0===r?void 0:r({id:t,setRequiredSubsets:Q,setSecrets:W}),secrets:k,setSecrets:W,value:null===n||void 0===n?void 0:n({id:t,secrets:k})},l))}))))),a.createElement(Ct.QD,{alignItems:"end","data-testid":"".concat(Qt,"-footer"),justifyContent:"center"},a.createElement(d.Button,{"data-ga":j("new"===n?"create-click":"edit-click"),"data-testid":"".concat(Qt,"-confirmButton"),disabled:!ee,label:"OK",onClick:ne,textTransform:"uppercase"}))))},$t=["data-testid"],Jt=function(e){var t=e["data-testid"],n=void 0===t?"channelList":t,r=(0,c.Z)(e,$t),l=Wt(),o=l.bulkActions,s=l.channelData,u=l.currentChannelId,m=l.columns,p=l.dataGa,g=l.onFilter,f=l.onModalClose,b=l.rowActions,h=l.search,E=l.setSearch;return a.createElement(_.ZP,{tab:"Notifications::Channels"},a.createElement(d.Flex,(0,i.Z)({column:!0,"data-testid":n,gap:4,height:"100%",width:"100%",margin:[3,0]},r),a.createElement(d.Table,{bulkActions:o,data:s,dataColumns:m,dataGa:p,globalFilter:h,globalFilterFn:g,onSearch:E,rowActions:b,testPrefix:"channelList",testPrefixCallback:function(e){return e.name}}),!!u&&a.createElement(Xt,{"data-testid":"editChannelModal",id:u,onClose:f})))},en=n(58591);function tn(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function nn(e){for(var t=1;t0&&void 
0!==arguments[0]?arguments[0]:{};return[(0,d.useInputValue)({maxChars:20,value:e.name||""})||{},(0,d.useInputValue)({maxChars:50,value:e.description||""})||{}]},Tn=n(14809),Sn=n(54131),kn=n(15418),In=["nameInput","descriptionInput"],jn=function(e){var t=e.nameInput,n=e.descriptionInput,r=(0,c.Z)(e,In),l=(0,o.Z)(t,4),s=l[0],u=l[1],m=l[2],p=l[3],g=(0,o.Z)(n,4),f=g[0],b=g[1],h=g[2],E=g[3],v=(0,a.useMemo)((function(){return(0,Sn.x3)(s)}),[s]),y=(0,a.useMemo)((function(){return(0,Sn.uB)(f)}),[f]);return a.createElement(d.Flex,(0,i.Z)({column:!0,gap:4},r),a.createElement(kn.Z,{Component:d.TextInput,permission:"space:UpdateMeta",label:"Name",value:s,onChange:u,error:Sn.Qj[v],isDirty:p,instantFeedback:"all",fieldIndicator:m,"data-testid":"textInputs-spaceName",containerStyles:{width:{base:150}}}),a.createElement(kn.Z,{Component:d.TextInput,permission:"space:UpdateMeta",label:"Description",value:f,onChange:b,error:Sn.Qj[y],isDirty:E,instantFeedback:"all",fieldIndicator:h,"data-testid":"textInputs-spaceDescription",containerStyles:{width:{base:150}}}))},Fn=n(57387),Dn=["id"],An=function(e){var t=e.id,n=(0,c.Z)(e,Dn);return a.createElement(d.Flex,(0,i.Z)({column:!0,gap:1},n),a.createElement(d.Text,{strong:!0},"Space Id"),a.createElement(Fn.ZP,{confirmationText:"Space ID copied to your clipboard."},t))},Mn=n(72671),Nn=n(20428),Bn=["nameInput","descriptionInput","onClose"],Ln=function(e){var t=e.nameInput,n=e.descriptionInput,r=e.onClose,l=(0,c.Z)(e,Bn),s=(0,v.OS)(),u=(0,Nn.Z)(s.id),m=(0,a.useState)(!1),p=(0,o.Z)(m,2),g=p[0],f=p[1],b=(0,o.Z)(t,1)[0],h=(0,o.Z)(n,1)[0],E=function(){r(),f(!1)},y=function(){return f(!1)},x=(0,a.useCallback)((function(){f(!0),u({name:b,description:h},{onSuccess:E,onFail:y})}),[b,h]),w=(0,a.useMemo)((function(){var e=(0,Sn.x3)(b),t=(0,Sn.uB)(h);return!!(g||e||t)||s.name===b&&(s.description===h||!s.description&&!h)}),[s,b,h,g]);return a.createElement(d.Button,(0,i.Z)({label:"SAVE",onClick:x,isLoading:g,disabled:w,"data-testid":"saveSpace-button"},l))},Rn=["onClose"],_n=(0,a.memo)((function(e){var t=e.onClose,n=(0,c.Z)(e,Rn),r=(0,v.OS)(),l=(0,v.GM)("ids"),s=(0,y.Z)(),u=(0,o.Z)(s,4),m=u[0],p=u[2],g=u[3],f=Zn({name:r.name,description:r.description}),b=(0,o.Z)(f,2),h=b[0],E=b[1];if(!r.id)return null;var x=1===l.filter((function(e){return!(0,Pn.Ly)(e)})).length;return a.createElement(_.ZP,{tab:"Info"},a.createElement(d.Flex,(0,i.Z)({column:!0,"data-testid":"manageSpace",flex:"grow",justifyContent:"between",padding:[0,0,6,0],width:{max:150}},n),a.createElement(d.Flex,{column:!0,"data-testid":"manageSpace-settings"},a.createElement(d.H3,{margin:[0,0,4,0]},"Info"),a.createElement(d.Flex,{column:!0,gap:4},a.createElement(jn,{"data-testid":"manageSpace-nameInputs",nameInput:h,descriptionInput:E}),a.createElement(An,{"data-testid":"manageSpace-spaceIdInput",id:r.id,width:{base:150}}),a.createElement(d.TextInput,{label:"Your role in 
space",value:(0,Qe.fm)(r.roleInSpace),disabled:!0,containerStyles:{width:{base:150}}}),a.createElement(d.TextInput,{label:"Plan",value:(0,Qe.db)(r.planName),disabled:!0,containerStyles:{width:{base:150}}}))),a.createElement(d.Flex,{"data-testid":"manageSpace-actions",justifyContent:"between",alignItems:"center"},a.createElement(d.Flex,{"data-testid":"manageSpace-deleteLeaveActions",gap:4},a.createElement(Xe.Z,{align:"top",content:x&&On.T$.leave,isBasic:!0,stretch:"align"},a.createElement(d.Flex,{alignItems:"center"},a.createElement(d.Button,{"data-ga":"manage-space-tab::click-leave-space::manage-space-modal","data-testid":"manageSpace-leave",disabled:x,icon:"switch_off",flavour:"borderless",label:"Leave space",neutral:!0,padding:[0],width:"fit-content",onClick:p}))),a.createElement(Mn.Z,{"data-testid":"manageSpace-delete",id:r.id,isLastSpace:x,name:r.name,onClose:t})),a.createElement(Ln,{"data-testid":"manageSpace-save",nameInput:h,descriptionInput:E,onClose:t}))),m&&a.createElement(Tn.Z,{id:r.id,name:r.name,onClose:g}))}),(function(){return!0})),Un=n(51091);function Vn(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function Hn(e){for(var t=1;t0?{id:"space",name:"Space",price:ma.format(s),qty:1,month:d,total:ma.format(i)}:null;return n?[isNaN(m)?null:{id:"nodes",name:"Nodes",price:ma.format(c),qty:u||0,month:d,total:ma.format(m)},e].filter(Boolean):[e]}),[n,s,i,m,d]);return a.createElement(a.Fragment,null,!g&&a.createElement(a.Fragment,null,a.createElement(va,{dataColumns:ha,data:f}),a.createElement(Kn,null)),a.createElement(ya,{needsCommitment:n,price:t,spaceTotal:i,commitment:u,currentCommitment:r,onlyCommitment:l,balance:o,nodesTotal:m,zeroSubscriptionTotal:g,checkoutIsDisabled:p}))},wa=n(93433);n(23157);function Ca(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function Oa(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:{}).title,t=void 0===e?"Checkout":e;return function(e){return function(n){return a.createElement(g.u6,{onClose:n.onClose},a.createElement(f.x,{onClose:n.onClose,title:t}),a.createElement(d.Flex,{column:!0,padding:[4,4,0,4],justifyContent:"between",height:"calc(100vh - 60px)",overflow:"auto"},a.createElement(e,n)))}}};function Ya(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}var qa=3e5,Ka=Ga({title:"Update plan"})((function(e){var t=e.title,n=e.onClose,r=e.prices,c=e.currentPlan,s=void 0===c?{}:c,u=e.children,m=za({prices:r,currentPlan:s}),p=m.recurringIndex,g=m.setRecurringIndex,f=m.price,b=m.needsCommitment,E=m.commitment,v=m.handleCommitmentChange,y=(0,a.useState)(""),x=(0,o.Z)(y,2),w=x[0],C=x[1],O=(0,a.useState)(0),P=(0,o.Z)(O,2),Z=P[0],T=P[1],S=(0,a.useState)(qa),k=(0,o.Z)(S,2),I=k[0],j=k[1],F=(0,h.gI)("billing:Manage"),D=(0,Gn.fG)(function(e){for(var t=1;t0;return 0==r?a.createElement(d.TextSmall,null,"You are using all your committed nodes."):l?a.createElement(d.TextSmall,null,"You are using"," ",a.createElement(d.TextSmall,{strong:!0,color:"error"},r," ",(0,Ja.V6)(r).toLowerCase()," more")," ","than your committed nodes."):a.createElement(d.TextSmall,null,"You are using"," 
",a.createElement(d.TextSmall,{strong:!0,color:"success"},t," out of ",n)," ","committed ",(0,Ja.V6)(n).toLowerCase(),".")},rr=function(e){var t=e.interval,n=e.currentPeriodTo,r=e.committedNodes,l=e.nodesLastPeriod,o=e.showPromotion,i=e.cancelling,c=e.onCancelPlan,s=(0,Xa.rA)().localeDateString;return a.createElement(d.Flex,{column:!0,gap:1},!!n&&a.createElement(d.TextSmall,null,a.createElement(d.TextSmall,{strong:!0,"data-testid":"billingPricingDetails-interval"},"Billing ".concat(Aa.go[t]))," ","(renews ",s(new Date(n),{long:!1}),")"),o&&"month"===t&&a.createElement(Oe.Z,{onClick:c,disabled:i},a.createElement(nr,null,"Save 25% by changing your billing frequency to yearly")),"year"===t&&a.createElement(a.Fragment,null,a.createElement(d.TextSmall,null,"Committed Nodes:"," ",a.createElement(d.Text,{strong:!0,"data-testid":"billingPricingDetails-committedNodesNumber"},r)),"number"===typeof l&&a.createElement(ar,{nodesLastPeriod:l,committedNodes:r})))},lr=function(e){var t=e.features,n=e.showViewDetails;return a.createElement(d.Flex,{column:!0,gap:1},t.map((function(e){return a.createElement(d.Flex,{gap:2,key:e},a.createElement(d.Box,{width:5},a.createElement(d.Icon,{name:"check",width:"20px",height:"20px",color:"primary"})),a.createElement(d.TextSmall,null,e))})),n&&a.createElement(d.Flex,{gap:2},a.createElement(d.Box,{width:5}),a.createElement(Oe.Z,{Component:d.Flex,as:on.rU,cursor:"pointer",alignItems:"center",gap:1,color:"text",hoverColor:"textFocus",to:"all-plans"},a.createElement(d.Text,{strong:!0},"View full details"),a.createElement(d.Icon,{name:"chevron_right",width:"16px",height:"16px"}))))},or=function(e){return function(t){var n=t.inModal,r=t.recurringIndex,l=t.setRecurringIndex,o=(0,c.Z)(t,er);return n?a.createElement(d.Flex,{width:"100%",justifyContent:"between",alignItems:"center"},a.createElement(e,o),a.createElement(d.Flex,{gap:3,padding:[0,10]},Aa.FC.map((function(e,t){return a.createElement(d.RadioButton,{key:e,checked:r===t,onChange:function(){return l(t)},"data-testid":"billingPaidPlans-".concat(e,"-radioButton")},a.createElement(d.Text,{color:"textDescription"},(0,Qe.fm)(Aa.go[e])))})))):a.createElement(e,o)}}((function(e){var t=e.children;return a.createElement(d.H3,{"data-testid":"billingPricingDetails-activePlanName"},t)})),ir=function(e){var t,n=e.slug,r=e.version,l=e.prices,o=e.recurringIndex,c=void 0===o?0:o,s=e.setRecurringIndex,u=e.currentPlan,m=void 0===u?{}:u,p=e.isActive,g=e.showAllDetails,f=e.showTeaserDetails,b=e.cancelling,h=e.onCancelPlan,E=e.title,v=e.features,y=e.nodesLastPeriod,x=e.inModal,w=(0,Fa.Z)().onTrial,C=p?m.pricing:(null===(t=l[c])||void 0===t?void 0:t.pricing)||{},O=(0,Qn.BT)(n);return a.createElement(d.Flex,{column:!0,gap:2},a.createElement(d.Flex,{column:!0},a.createElement(d.Flex,{alignItems:"center",gap:2},a.createElement(or,{inModal:x,recurringIndex:c,setRecurringIndex:s},E),p&&a.createElement(a.Fragment,null,a.createElement(d.Pill,{flavour:"success","data-testid":"active-plan",icon:"checkmark_s"},w?"Trial":"Active"),w&&!g&&a.createElement($a.Z,null,a.createElement(d.TextBig,{color:"primary"},"Upgrade Now!")))),r&&a.createElement(d.TextBig,null,"(",r,")")),(g||!p||"free"===n)&&a.createElement(tr,(0,i.Z)({recurringIndex:c,slug:n},C)),f&&!(null===v||void 
0===v||!v[r])&&a.createElement(lr,{features:v[r],showViewDetails:!O}),p&&!(0,Qn.BT)(n)&&!w&&a.createElement(rr,(0,i.Z)({},m,{showPromotion:!0,cancelling:b,onCancelPlan:h,nodesLastPeriod:y})))},cr=n(16294),sr=n(80697),ur=n(66987),mr=n(29292),dr=n(23154),pr=["slug","version","prices","isSmall","isActive","currentPlan","showAllPlans","showTeaserDetails","allPlansView","nodesLastPeriod"];function gr(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function fr(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:{}).label;T({label:e},!0)}),[T,S]),M=(0,Qn.$s)({currentPlan:m,slug:t,version:n,onTrial:k}),N=j||u&&(0,Qn.BT)(t)||"earlyBird"==m.slug&&"free"==t||"free"==m.slug&&"2023.02"==m.version&&"free"==t&&"2023.11"==n,B=(0,a.useState)({}),L=(0,o.Z)(B,2),R=L[0],_=L[1],U=(0,y.Z)(),V=(0,o.Z)(U,4),H=V[0],W=V[2],z=V[3],G=(0,we.Z)(),Y=(0,o.Z)(G,2),q=Y[0],K=Y[1],Q=(0,Gn.Zn)(),X="cancel"===R.type&&!(0,Qn.BT)(m.slug)&&!H,$="cancel"===R.type&&"free"==t&&"2023.11"==n,J=(0,dr.Z)().id,ee=(0,ct.HK)(),te=(0,a.useCallback)((function(){W(),F(),Q({productId:J}).then((function(){q({header:"Successfully canceled subscription",text:"You are now on Community plan"}),Z({isSuccess:!0,description:"cancel-subscription"})})).catch((function(){K({header:"Failed to cancel the subscription",text:"Remained on ".concat(w," plan")}),Z({isFailure:!0,description:"cancel-subscription",error:"Failed to cancel the subscription"})})).finally((function(){D(),ee()}))}),[Z,S]),ne=(0,a.useCallback)((function(e){if(!j&&!N){if((0,Qn.BT)(t))return _({type:"cancel"}),void Z(e,!0);if((0,Qn.BT)(m.slug))return se("checkout"),void Z(e,!0);se("update"),Z(e,!0)}}),[Z,S]),ae=!!m.billingEmail,re=ae?Ka:Qa,le=(0,en.m$)(),oe=le.billingModalType,ie=le.billingModalSlug,ce=["checkout","update"].includes(oe)&&t==ie,se=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"",n=(0,en.m$)();(0,en.xs)(fr(fr({},n),{},{billingModalType:e,billingModalSlug:""==e?e:t}))},ue=(0,y.Z)(),me=(0,o.Z)(ue,4),de=me[0],pe=me[2],ge=me[3];return a.createElement(br,(0,i.Z)({column:!0,height:s?"auto":p?50:75,flex:s,padding:s?[6,10]:[0,10],gap:2,justifyContent:"between",showAllPlans:p},v),a.createElement(ir,{slug:t,prices:l,isActive:u,currentPlan:m,cancelling:j,showAllDetails:p,showTeaserDetails:g,title:w,version:n,features:C,nodesLastPeriod:E,onCancelPlan:function(){return ne({action:cr.Nw.buttonClicked,label:w})}}),!b&&a.createElement(kn.Z,{Component:d.Button,permission:"billing:Manage","data-ga":"upgrade-button::click-".concat(ca({slug:t}),"::current-").concat(ca(m),"::billing"),"data-testid":"billingPlan-".concat(t,"-").concat(u?"currentPlan":"upgradeButton"),label:M,flavour:(0,Qn.rQ)(null===m||void 0===m?void 0:m.slug,t),disabled:N,width:"100%",onClick:function(){return ne({action:cr.Nw.buttonClicked,label:M,dataGa:"upgrade-button::click-".concat(ca({slug:t}),"::current-").concat(ca(m),"::billing")})},isLoading:j&&u,loadingLabel:"Cancelling"}),!p&&a.createElement(a.Fragment,null,k&&a.createElement(ur.Z,{flavour:"billing"}),(k||!(0,Qn.BT)(m.slug))&&a.createElement(d.Flex,{column:!0,gap:4},a.createElement(d.Flex,{gap:6,alignItems:"baseline"},k&&a.createElement(sr.Z,null),!(0,Qn.BT)(m.slug)&&a.createElement(Oe.Z,{Component:d.Flex,as:on.rU,cursor:"pointer",alignItems:"center",gap:1,color:"text",hoverColor:"textFocus",to:"all-plans",padding:[4,0,0],disabled:j,onClick:function(){return A({label:O?"Change 
plan":"View plans"})},"data-testid":"billingPlan-viewMorePlansLink","data-ga":"change-plan::click::billing"},a.createElement("span",null,O?"Change plan":"View plans"),a.createElement(d.Icon,{name:"chevron_right",width:"16px",height:"16px"}))),k&&a.createElement(a.Fragment,null,a.createElement(mr.Z,{flavour:"billing",color:"textLite",onOptOutClick:pe}),de&&a.createElement(ja.Z,{onDecline:ge,onCancellingEnd:function(){}})))),(X||$)&&a.createElement(Da,{title:w,onConfirm:te,onClose:function(){z(),_({})}}),ce&&a.createElement(re,{title:w,onClose:function(){z(),se()},prices:l,currentPlan:m,showProrations:ae},a.createElement(ir,{slug:t,prices:l,currentPlan:m,title:w,inModal:!0})))},Er=(n(69720),["slug","isSmall","currentPlan","showAllPlans","allPlansView","showTeaserDetails"]),vr=(0,m.default)(d.Flex).withConfig({displayName:"enterprise__StyledWrapper",componentId:"sc-1an0ywv-0"})(["width:","};min-width:280px;place-self:",";"],(function(e){return e.showAllPlans?"25vw":"auto"}),(function(e){return e.showAllPlans?"center":"auto"})),yr=function(e){var t=e.slug,n=e.isSmall,r=e.currentPlan,l=e.showAllPlans,o=e.allPlansView,s=void 0!==o&&o,u=e.showTeaserDetails,m=(0,c.Z)(e,Er),p=(0,Be.Z)().sendButtonClickedLog,g=(0,Gn.x1)().cancelling,f=Aa.Vc.enterprise.features,b=(0,a.useCallback)((function(){window.open("https://www.netdata.cloud/contact-us/?subject=on-prem","_blank","noopener,noreferrer"),p({feature:"OnPremContact"})}),[p]);return a.createElement(vr,(0,i.Z)({column:!0,height:n?"auto":l?50:75,flex:n,padding:n?[6,10]:[0,10],gap:2,justifyContent:"between",showAllPlans:l},m),a.createElement(d.Flex,{column:!0,gap:2},a.createElement(d.Flex,{column:!0},a.createElement(d.Flex,{alignItems:"center",gap:2},a.createElement(d.H3,{"data-testid":"billingPricingDetails-activePlanName"},"Enterprise On-Premise"))),a.createElement(d.H0,null,ma.format(3),a.createElement(d.TextSmall,{color:"textLite"},"/",a.createElement(Oe.Z,{Component:d.TextSmall,href:"https://www.netdata.cloud/pricing/#do-i-have-to-pay-for-nodes-that-are-no-longer-online",rel:"noopener noreferrer",target:"_blank"},"Node"),"/month (billed yearly)")),u&&a.createElement(lr,{features:f[2023.11]||[]})),!s&&a.createElement(kn.Z,{Component:d.Button,permission:"billing:Manage","data-ga":"upgrade-button::click-".concat(ca({slug:t}),"::current-").concat(ca(r),"::billing"),"data-testid":"billingPlan-".concat(t,"-upgradeButton"),label:"Contact us",flavour:"hollow",disabled:g,width:"100%",onClick:b}))},xr=function(e){var t=e.plans,n=e.isSmall,r=e.currentPlan,l=e.showAllPlans,i=e.showTeaserDetails,c=e.isPro;return a.createElement(a.Fragment,null,c&&(Object.entries(t.pro)||[]).map((function(e){var t=(0,o.Z)(e,2),c=t[0],s=t[1];return a.createElement(hr,{key:c,slug:"pro",version:c,prices:s,isActive:"pro"===r.slug&&r.version==c,isSmall:n,currentPlan:r,showAllPlans:l,showTeaserDetails:i})})),(Object.entries(t.business)||[]).map((function(e){var t=(0,o.Z)(e,2),c=t[0],s=t[1];return a.createElement(hr,{key:c,slug:"business",version:c,prices:s,isActive:"business"===r.slug&&r.version==c,isSmall:n,currentPlan:r,showAllPlans:l,showTeaserDetails:i})})),a.createElement(yr,{slug:"enterprise",isSmall:n,currentPlan:r,showAllPlans:l,showTeaserDetails:i}))},wr=(n(43290),m.default.div.withConfig({displayName:"styled__Container",componentId:"sc-1oa2kv1-0"})(["display:grid;",""],(function(e){var t=e.numberOfPlans,n=[e.showAllPlans?"200px":"1fr"].concat((0,wa.Z)(Array(t).fill("1.5fr")));return"grid-template-columns: ".concat(n.join(" 
"),";")}))),Cr=m.default.div.withConfig({displayName:"styled__Header",componentId:"sc-1oa2kv1-1"})(["display:contents;> *{background:",";border-bottom:1px solid ",";}"],(0,d.getColor)("panelBg"),(0,d.getColor)("placeholder")),Or=(0,m.default)(d.Collapsible).withConfig({displayName:"styled__Content",componentId:"sc-1oa2kv1-2"})(["display:contents;"]),Pr=function(){var e=(0,Gn.Gl)(),t=e.loaded,n=e.value,a=(0,Gn.RP)(),r=a.loaded,l=a.value,o=(0,Qn.Gj)({currentPlan:l,plans:n});return t&&r?o:0},Zr=function(e){var t,n,r,l,o=e.isSmall,i=e.plans,c=e.currentPlan,s=e.showAllPlans,u=void 0!==s&&s,m=e.showTeaserDetails,d=void 0!==m&&m,p=e.children,g="earlyBird"===c.slug,f="pro"===c.slug,b=Pr(),h=(0,dr.Z)().version,E=null===(t=(null===(n=i.free)||void 0===n?void 0:n[null===c||void 0===c?void 0:c.version])||(null===(r=i.free)||void 0===r?void 0:r[h]))||void 0===t?void 0:t[0],v=i.earlyBird?null===(l=Object.values(i.earlyBird)[0])||void 0===l?void 0:l[0]:{};return a.createElement(wr,{numberOfPlans:b,showAllPlans:u},p,g?a.createElement(hr,{slug:"earlyBird",version:null===v||void 0===v?void 0:v.version,prices:null===v||void 0===v?void 0:v.prices,isSmall:o,isActive:"earlyBird"===c.slug,currentPlan:c,showAllPlans:u}):a.createElement(hr,{slug:"free",version:null===E||void 0===E?void 0:E.version,prices:null===E||void 0===E?void 0:E.prices,isSmall:o,isActive:"free"===c.slug&&c.version==(null===E||void 0===E?void 0:E.version),currentPlan:c,showAllPlans:u,showTeaserDetails:d}),a.createElement(xr,{plans:i,isSmall:o,currentPlan:c,showAllPlans:u,showTeaserDetails:d,isPro:f}))},Tr=["children"],Sr=function(e){var t=e.children,n=(0,c.Z)(e,Tr);return a.createElement(d.Flex,(0,i.Z)({alignItems:"center",gap:2},n),t)},kr=function(){return a.createElement(d.Flex,{column:!0,gap:2,width:65},a.createElement(d.TextMicro,{color:"bright"},a.createElement(d.TextMicro,{color:"bright",strong:!0},"Available credit")," ","for you to use on any plan subscriptions with us."),a.createElement(d.TextMicro,{color:"bright"},"It is ok to change your mind, we will give you full flexibility! You can change the plan level, billing frequency or committed nodes, we won't hold you to any choice. 
When applicable, we'll credit you back on any unused amount."))},Ir=function(e){var t=e.currentPlan,n=(0,Gn.SH)(),r=(0,o.Z)(n,2),l=r[0],i=r[1],c=!!t.billingEmail&&!i,s=(0,h.gI)("billing:ReadAll");return a.createElement(d.Flex,{column:!0,gap:2},a.createElement(d.H3,null,"Plan & Billing"),s&&a.createElement(a.Fragment,null,a.createElement(Sr,null,a.createElement(d.Flex,{gap:1,alignItems:"center"},a.createElement(d.Text,null,"Credit:"),a.createElement(d.Text,{"data-testid":"billingHeader-credits",strong:!0},ma.format(t.balance.amount||0)),a.createElement(Xe.Z,{content:kr,align:"bottom",isBasic:!0},a.createElement(d.Icon,{name:"information",width:"16px",height:"16px",color:"textLite"}))),a.createElement(qn,null),a.createElement(d.Text,null,"Billing email:"," ",a.createElement(d.Text,{strong:!0,"data-testid":"billingHeader-email"},t.billingEmail||"-")),a.createElement(qn,null),a.createElement(Oe.Z,{Component:d.Flex,cursor:c?"pointer":"default",disabled:!c,onClick:l,alignItems:"center",gap:1,"data-ga":"billing-options::click::billing","data-testid":"billingHeader-goToPortal"},a.createElement("span",null,"Billing options and Invoices"),a.createElement(d.Icon,{name:"nav_arrow_goto",width:"12px",height:"12px"}))),a.createElement(qn,{height:1,width:"100%",sx:{borderWidth:"1px 0px 0px 0px",borderColor:"borderSecondary",borderStyle:"solid"}})))};n(34553),n(2707),n(69810),n(86035);var jr=n(69119),Fr=n(71002),Dr=n(7069),Ar=n(54559),Mr=n(13882),Nr=n(83946);var Br=n(28789),Lr=n(23148),Rr=n(36337),_r=n(26495),Ur=n(95665),Vr=n(92501),Hr=n(4822),Wr=n(69019);function zr(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function Gr(e){for(var t=1;t360?(this._resolveAnimations().update(this,{opacity:0}),!1):(0===this.opacity&&this._resolveAnimations().update(this,{opacity:1}),{x:t.x,y:t.y})};var Kr=function(e){return{nodes:{label:"Daily count",type:"bar",color:(0,d.getColor)("text")({theme:e}),backgroundColor:(0,d.getColor)("primary")({theme:e}),borderColor:(0,d.getColor)("primary")({theme:e}),borderWidth:2,pointStyle:"rectangle",usePointStyle:!0},p90:{label:"Period count",type:"line",color:(0,d.getColor)("text")({theme:e}),borderColor:(0,d.getColor)(["purple","lilac"])({theme:e}),borderWidth:2,fill:!1,stepped:!0},committed_nodes:{label:"Committed nodes",type:"line",color:(0,d.getColor)("text")({theme:e}),borderColor:(0,d.getColor)(["blue","aquamarine"])({theme:e}),borderWidth:2,fill:!1,borderDash:[1,2],borderDashOffset:1,pointStyle:!1}}},Qr={border:{side:"all",color:"inputBorder",padding:[3]},round:!0},Xr=function(){var e=new Date,t=e.getFullYear(),n=e.getMonth(),a=e.getDate(),r=new Date(0);return r.setFullYear(t,n,a-1),r.setHours(23,59,59,999),r}(),$r=(0,jr.default)(function(e,t){if((0,Mr.Z)(2,arguments),!t||"object"!==(0,Fr.Z)(t))return new Date(NaN);var n=t.years?(0,Nr.Z)(t.years):0,a=t.months?(0,Nr.Z)(t.months):0,r=t.weeks?(0,Nr.Z)(t.weeks):0,l=t.days?(0,Nr.Z)(t.days):0,o=t.hours?(0,Nr.Z)(t.hours):0,i=t.minutes?(0,Nr.Z)(t.minutes):0,c=t.seconds?(0,Nr.Z)(t.seconds):0,s=(0,Ar.default)(e,a+12*n),u=(0,Dr.default)(s,l+7*r),m=1e3*(c+60*(i+60*o));return new Date(u.getTime()-m)}(Xr,{months:1})),Jr=function(e){var 
t=e.onNodesLastPeriodFetch,n=(0,Hr.by)("offset"),r=(0,v.th)(),i=(0,a.useState)((function(){return{start:$r,end:Xr}})),c=(0,o.Z)(i,2),s=c[0],u=c[1],p=(0,Nt.Z)((function(){return{enabled:!!r&&!!s.start&&!!s.end,fetch:function(){return(0,Wr.fA)(r,{after:Math.floor((0,Br.default)((0,Xa.tb)(s.start,n))/1e3),before:Math.floor((0,Br.default)((0,Xa.tb)(s.end,n))/1e3)})},initialValue:{labels:[],data:[]},onSuccess:function(e){var n=e.data,a=e.labels;if(n.length){var r=n.at(-1),l=r[a.indexOf("nodes")],o=r[a.indexOf("timestamp")];t((function(e){return e.timestamp>o?e:{timestamp:o,value:l}}))}}}}),[r,s,t]),g=(0,o.Z)(p,3),f=g[0],b=g[1],h=g[2],E=(0,a.useContext)(m.ThemeContext),y=(0,a.useMemo)((function(){var e=f.data.map((function(e){return(0,o.Z)(e,1)[0]})),t=f.labels.reduce((function(t,n,a){return a?[Gr(Gr({},Kr(E)[n]),{},{data:e.map((function(e,t){return f.data[t][a]})),order:Yr[n]})].concat((0,wa.Z)(t)):t}),[]);return{labels:e,datasets:t}}),[f]),x=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.data,r=t.annotations,i=void 0===r?[]:r,c=(0,Xa.rA)().localeDateString,s=(0,a.useRef)(!1),u=(0,a.useRef)(),m=(0,a.useMemo)((function(){return{interaction:{axis:"x"},plugins:{legend:{position:"bottom",align:"start",onClick:function(e,t,n){var a=n.legendItems.findIndex((function(e){return e.text===t.text}));n.chart.isDatasetVisible(a)?n.chart.hide(a):n.chart.show(a)},labels:{generateLabels:function(e){return e.data.datasets.map((function(t,n){return{fontColor:t.color,text:t.label,fillStyle:t.backgroundColor,strokeStyle:t.borderColor,pointStyle:"Daily count"===t.label?"rect":"line",hidden:!e.isDatasetVisible(n),order:t.order}})).sort((function(e,t){return e.order-t.order}))},usePointStyle:!0},onHover:function(e,t){!s.current&&u.current&&(s.current=!0,u.current.innerHTML=qr[t.text],u.current.style.left=e.x+"px",u.current.style.top=e.y+"px",u.current.style.visibility="visible",u.current.style.transform="translateY(-100%)")},onLeave:function(){u.current&&(s.current=!1,u.current.innerHTML="",u.current.style.visibility="hidden")}},tooltip:{enabled:!0,mode:"nearest",intersect:!1,yAlign:"bottom",usePointStyle:!0,position:"follow",backgroundColor:(0,d.getColor)("tooltip")({theme:e}),color:(0,d.getColor)("bright")({theme:e}),callbacks:{title:function(e){var t=(0,o.Z)(e,1)[0];return t?c(1e3*t.label,{month:"2-digit",day:"2-digit",year:"numeric",long:!1,dateStyle:void 0}):""},labelPointStyle:function(e){return{pointStyle:"Total Nodes"===e.dataset.label?"rect":"line"}},label:function(e){return" ".concat(e.formattedValue," ").concat(e.dataset.label||"")}}},annotation:{annotations:i.reduce((function(t,a,r){return Gr(Gr({},t),{},(0,l.Z)({},"annotation".concat(r),{type:"line",value:n.findIndex((function(e){return e[0]===a.timestamp})),borderColor:(0,d.getRgbColor)("attention",.3)({theme:e}),borderDashOffset:0,borderWidth:10,drawTime:"afterDatasetsDraw",label:{drawTime:"afterDatasetsDraw",display:!1,backgroundColor:(0,d.getRgbColor)("attention",.8)({theme:e}),borderWidth:0,color:"white",content:a.name,textAlign:"center"},scaleID:"x",enter:function(e,t){var n=e.chart,a=n.options.plugins.annotation.annotations["annotation".concat(r)];a.label.display=!0,a.label.position=t.y/e.chart.chartArea.height>.5?"start":"end",n.update()},leave:function(e){var t=e.chart;t.options.plugins.annotation.annotations["annotation".concat(r)].label.display=!1,t.update()}}))}),{})}},responsive:!0,maintainAspectRatio:!1,scales:{x:{ticks:{callback:function(e){return 
c(1e3*this.getLabelForValue(e),{month:"2-digit",day:"2-digit",year:"numeric",long:!1,dateStyle:void 0})},color:(0,d.getColor)("textLite")({theme:e})}},y:{beginAtZero:!0,ticks:{color:(0,d.getColor)("textLite")({theme:e})}}}}}),[e,n]);return[m,u]}(E,f),w=(0,o.Z)(x,2),C=w[0],O=w[1];return a.createElement(d.Flex,{column:!0,gap:4},a.createElement(d.Flex,{alignItems:"center",justifyContent:"between"},a.createElement(d.H3,null,"Usage"),a.createElement(Ur.Z,{values:s,utc:n,onChange:u,tagging:"billing-usage",isPlaying:!1,onlyDates:!0,accessorProps:Qr,padding:[4,0],width:"auto",maxDate:Xr})),a.createElement(d.Flex,{position:"relative",height:90},b?a.createElement(cn.Z,{title:"Loading billing data..."}):h?a.createElement(Vr.NY,{title:"Chart couldn't be loaded"}):a.createElement(_r.kL,{type:"bar",data:y,options:C}),a.createElement(d.Flex,{ref:O,background:"main",color:"generic",position:"absolute",round:!0,padding:[3],width:{max:75},sx:{visibility:"hidden"},onMouseOver:function(){return O.current.style.visibility="hidden"}})))},el=function(e){var t=e.isSmall,n=e.currentPlan,r=e.loaded,l=e.currentLoaded,i=e.plans,c=(0,a.useState)({timestamp:null,value:null}),s=(0,o.Z)(c,2),u=s[0],m=s[1],d=i[n.slug]?i[n.slug][n.version]:[];return a.createElement(a.Fragment,null,a.createElement(Ir,{currentPlan:n}),r&&l?a.createElement(a.Fragment,null,(0,Qn.BT)(n.slug)?a.createElement(Zr,{isSmall:t,plans:i,currentPlan:n,showTeaserDetails:!0}):a.createElement(hr,{slug:n.slug,version:n.version,prices:d,width:t?"auto":120,padding:[0],height:"auto",isSmall:t,isActive:!0,currentPlan:n,allPlansView:!0,nodesLastPeriod:u.value}),a.createElement(qn,{height:1,width:"100%",sx:{borderWidth:"1px 0px 0px 0px",borderColor:"borderSecondary",borderStyle:"solid"}}),a.createElement(Jr,{onNodesLastPeriodFetch:m})):a.createElement(cn.Z,{title:r?"Loading billing info...":"Loading Netdata plans..."}))},tl=n(92432),nl={year:"Yearly",month:"Monthly"},al=function(e){var t,n=e.currentPlan,r=(0,v.uk)(),l=(0,Gn.SH)(),i=(0,o.Z)(l,2),c=i[0],s=i[1],u=(0,Gn.x1)(),m=u.cancelling,p=u.startCancelling,g=u.stopCancelling,f=!!n.billingEmail&&!s,b=(0,y.Z)(),E=(0,o.Z)(b,2),x=E[0],w=E[1],C=(0,we.Z)(),O=(0,o.Z)(C,2),P=O[0],Z=O[1],T=(0,Gn.Zn)(),S=(Aa.Vc[n.slug]||Aa.Vc.free).title,k=(0,dr.Z)().id,I=(0,h.gI)("billing:Manage"),j=(0,h.gI)("billing:ReadAll"),F=(Aa.Vc[null===n||void 0===n?void 0:n.slug]||Aa.Vc.free).title,D=nl[null===n||void 0===n?void 0:n.interval];return a.createElement(d.Flex,{column:!0,gap:2},a.createElement(d.Flex,{gap:2,alignItems:"center"},a.createElement(Oe.Z,{as:on.rU,to:"/spaces/".concat(r,"/settings/billing"),disabled:m,color:"text",hoverColor:"textLite",showToolTip:!0,content:"Back to Plan & Billing",align:"bottom",isBasic:!0},a.createElement(d.Icon,{name:"arrow_left",width:"20px",height:"20px",margin:[1.5,0,0]})),a.createElement(d.H3,null,"All Plans")),a.createElement(Sr,null,a.createElement(d.Flex,{gap:1,alignItems:"center"},a.createElement(d.Text,null,"Active plan:"),a.createElement(d.Text,{strong:!0},F)),a.createElement(qn,null),D&&a.createElement(a.Fragment,null,a.createElement(d.Flex,{gap:1,alignItems:"center"},a.createElement(d.Text,null,"Billing frequency:"),a.createElement(d.Text,{strong:!0},D)),a.createElement(qn,null)),"Yearly"===D&&a.createElement(d.Flex,{gap:1,alignItems:"center"},a.createElement(d.Text,null,"Committed Nodes:"),a.createElement(d.Text,{strong:!0},(null===n||void 0===n?void 
0:n.committedNodes)||0)),a.createElement(qn,null),j&&a.createElement(a.Fragment,null,a.createElement(d.Flex,{gap:1,alignItems:"center"},a.createElement(d.Text,null,"Credit:"),a.createElement(d.Text,{strong:!0},ma.format((null===n||void 0===n||null===(t=n.balance)||void 0===t?void 0:t.amount)||0)),a.createElement(Xe.Z,{content:kr,align:"bottom",isBasic:!0},a.createElement(d.Icon,{name:"information",width:"16px",height:"16px",color:"textLite"}))),a.createElement(qn,null),a.createElement(d.Flex,{gap:1,alignItems:"center"},a.createElement(d.Text,null,"Billing email:"),a.createElement(d.Text,{strong:!0},(null===n||void 0===n?void 0:n.billingEmail)||"-")),a.createElement(qn,null)),a.createElement(Oe.Z,{Component:d.Flex,cursor:f?"pointer":"default",disabled:!f,onClick:c,alignItems:"center",gap:1},a.createElement("span",null,"Billing options and Invoices"),a.createElement(d.Icon,{name:"nav_arrow_goto",width:"12px",height:"12px"})),!(0,Qn.BT)(null===n||void 0===n?void 0:n.slug)&&a.createElement(a.Fragment,null,a.createElement(qn,null),a.createElement(Oe.Z,{Component:d.Flex,cursor:"pointer",onClick:w,alignItems:"center",gap:1,disabled:m||!I,"data-ga":"cancel-plan::click::billing"},a.createElement(d.Text,{textDecoration:"underline"},m?"Canceling plan...":"Cancel plan")))),a.createElement(qn,{height:1,width:"100%",sx:{borderWidth:"1px 0px 0px 0px",borderColor:"borderSecondary",borderStyle:"solid"}}),x&&a.createElement(Da,{title:"Community",onConfirm:function(){p(),T({productId:k}).then((function(){return(0,tl.L)("billing","cancel-plan","global-view",{slug:null===n||void 0===n?void 0:n.slug,interval:null===n||void 0===n?void 0:n.interval,success:!0}),P({header:"Successfully canceled subscription",text:"You are now on Community plan"})})).catch((function(){return(0,tl.L)("billing","cancel-plan","global-view",{slug:null===n||void 0===n?void 0:n.slug,interval:null===n||void 0===n?void 0:n.interval,success:!1}),Z({header:"Failed to cancel the subscription",text:"Remained on ".concat(S," plan")})})).finally((function(){g()}))},onClose:w}))},rl=["center","end"],ll=["children"],ol=function(){return a.createElement(d.Icon,{name:"check",color:"primary"})},il=function(){return a.createElement(d.Icon,{name:"checkmark_partial_s",color:"textLite"})},cl=function(){return a.createElement(d.Icon,{name:"chevron_down_thin",color:"text"})},sl=function(){return a.createElement(d.Icon,{name:"chevron_up_thin",color:"text"})},ul=function(e){var t=e.center,n=e.end,r=(0,c.Z)(e,rl);return a.createElement(d.Flex,{alignItems:"center",justifyContent:t?"center":n?"end":"start",padding:[2]},a.createElement(d.Text,r))},ml=function(e){var t=e.children,n=(0,c.Z)(e,ll);return a.createElement(ul,(0,i.Z)({center:!0},n),t?a.createElement(d.Flex,{alignItems:"center",gap:2},t,a.createElement(ol,null)):a.createElement(ol,null))},dl=function(e){return a.createElement(ul,(0,i.Z)({center:!0},e),"UNLIMITED")},pl=function(e){return a.createElement(ul,(0,i.Z)({center:!0},e),a.createElement(il,null))},gl=function(e){return a.createElement(ul,(0,i.Z)({center:!0},e),"SOON")},fl=(n(91038),function(e){return function(t){var n=t.index,r=t.title,l=void 0===r?"":r,o=t.showAllPlans,i=t.onToggle,c=t.collapsed,s=Pr();return a.createElement(wr,{numberOfPlans:s,showAllPlans:o},a.createElement(Cr,{onClick:function(){return i(n)}},a.createElement(ul,{strong:!0},l),Array.from(Array(s-1).keys()).map((function(e){return 
a.createElement(ul,{key:e})})),a.createElement(ul,{end:!0},c[n]?a.createElement(sl,null):a.createElement(cl,null))),a.createElement(Or,{open:!c[n]},a.createElement(e,t)))}}),bl=(0,a.memo)(fl((function(e){var t=e.isPro;return a.createElement(a.Fragment,null,a.createElement(ul,null,"Scalability"),a.createElement(ul,{center:!0},"Vertical and Horizontal"),t&&a.createElement(ul,{center:!0},"Vertical and Horizontal"),a.createElement(ul,{center:!0},"Vertical and Horizontal"),a.createElement(ul,{center:!0},"Vertical and Horizontal"),a.createElement(ul,null,"High Availability"),a.createElement(ml,null),t&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,"Data Retention"),a.createElement(dl,null),t&&a.createElement(dl,null),a.createElement(dl,null),a.createElement(dl,null),a.createElement(ul,null,"Data Privacy"),a.createElement(ul,{center:!0},"Data stored On-Prem and visualized on Netdata Cloud"),t&&a.createElement(ul,{center:!0},"Data stored On-Prem and visualized on Netdata Cloud"),a.createElement(ul,{center:!0},"Data stored On-Prem and visualized on Netdata Cloud"),a.createElement(ul,{center:!0},"Data stored and visualized On-Prem"),a.createElement(ul,null,"Configuration"),a.createElement(ul,{center:!0},"Manual, IaC or in App(UI)"),t&&a.createElement(ul,{center:!0},"Manual, IaC or in App(UI)"),a.createElement(ul,{center:!0},"Manual, IaC or in App(UI)"),a.createElement(ul,{center:!0},"Manual, IaC or in App(UI)"))}))),hl=(0,a.memo)(fl((function(e){var t=e.isPro,n=e.freePlanLimitattions;return a.createElement(a.Fragment,null,a.createElement(ul,null,"Customizable charts"),a.createElement(ml,null),t&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,"Infrastructure wide Dashboards"),a.createElement(ml,null),t&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,"Centralized Alerts Management"),a.createElement(ml,null),t&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,"Infrastrucure Organization (Rooms)"),a.createElement(ml,null),t&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,"Custom Dashboards"),a.createElement(ml,null,null!==n&&void 0!==n&&n.maxDashboards?a.createElement(d.Text,null,"(Limited to ",n.maxDashboards," per Room)"):null),t&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,"Access dashboards from anywhere in the world"),a.createElement(ml,null),t&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,"Role Based Access Control (RBAC)"),a.createElement(ml,null),t&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,"Auditing"),a.createElement(ml,null),t&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,"Invite Team Members"),a.createElement(ml,null),t&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,"Functions"),a.createElement(ml,null),t&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,"Netdata Assistant"),a.createElement(ml,null),t&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,"Mobile 
App"),a.createElement(pl,null),t&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(pl,null),a.createElement(ul,null,"Centralized Management of Integrations"),a.createElement(gl,null),t&&a.createElement(gl,null),a.createElement(gl,null),a.createElement(gl,null))}))),El=(0,a.memo)(fl((function(e){var t=e.isPro,n=e.freePlanLimitattions;return a.createElement(a.Fragment,null,a.createElement(ul,null,"Active Connected Nodes"),null!==n&&void 0!==n&&n.maxNodes?a.createElement(ul,{center:!0},"Max of ",n.maxNodes):a.createElement(dl,null),t&&a.createElement(dl,null),a.createElement(dl,null),a.createElement(dl,null),a.createElement(ul,null,"Active Custom Dashboards"),null!==n&&void 0!==n&&n.maxDashboards?a.createElement(ul,{center:!0},"Max of ",n.maxDashboards," per Room"):a.createElement(dl,null),t&&a.createElement(dl,null),a.createElement(dl,null),a.createElement(dl,null),a.createElement(ul,null,"Infrastructure metrics"),a.createElement(dl,null),t&&a.createElement(dl,null),a.createElement(dl,null),a.createElement(dl,null),a.createElement(ul,null,"APM metrics"),a.createElement(dl,null),t&&a.createElement(dl,null),a.createElement(dl,null),a.createElement(dl,null),a.createElement(ul,null,"Custom metrics"),a.createElement(dl,null),t&&a.createElement(dl,null),a.createElement(dl,null),a.createElement(dl,null),a.createElement(ul,null,"Synthetic checks"),a.createElement(dl,null),t&&a.createElement(dl,null),a.createElement(dl,null),a.createElement(dl,null),a.createElement(ul,null,"Monitor system journal logs"),a.createElement(dl,null),t&&a.createElement(dl,null),a.createElement(dl,null),a.createElement(dl,null),a.createElement(ul,null,"Auditing Events"),a.createElement(ul,{center:!0},"4 hours"),t&&a.createElement(ul,{center:!0},"7 days"),a.createElement(ul,{center:!0},"90 days"),a.createElement(ul,{center:!0},"As required"),a.createElement(ul,null,"Topology Events"),a.createElement(ul,{center:!0},"4 hours"),t&&a.createElement(ul,{center:!0},"7 days"),a.createElement(ul,{center:!0},"14 days"),a.createElement(ul,{center:!0},"As required"),a.createElement(ul,null,"Alert Events"),a.createElement(ul,{center:!0},"4 hours"),t&&a.createElement(ul,{center:!0},"7 days"),a.createElement(ul,{center:!0},"60 days"),a.createElement(ul,{center:!0},"As required"),a.createElement(ul,null,"Alert Notification Integrations"),a.createElement(ul,{center:!0},"Email, Discord"),t&&a.createElement(ul,{center:!0},"Email, Discord, Webhook"),a.createElement(ul,{center:!0},"Email, Discord, Webhook, Mattermost, Opsgenie, PagerDuty, RocketChat, Slack"),a.createElement(ul,{center:!0},a.createElement(d.Text,null,"Same as Business plan"),a.createElement("br",null),a.createElement(d.TextSmall,null,"(Custom requests can be handled)")),a.createElement(ul,null,"User Administration"),a.createElement(ul,{center:!0},"Basic"),t&&a.createElement(ul,{center:!0},"Intermediate"),a.createElement(ul,{center:!0},"Advanced"),a.createElement(ul,{center:!0},"Advanced"))}))),vl=(0,a.memo)(fl((function(e){var t=e.isEarlyBird,n=e.isPro;return a.createElement(a.Fragment,null,a.createElement(ul,null,a.createElement(d.Text,{id:"administrators"},"Administrators"),a.createElement("br",null),a.createElement(d.TextSmall,{color:"textDescription",id:"same-as-managers-but-unable-to-manage-users-or-rooms"},"Users with this role can control Spaces, War Rooms, Nodes, Users and Billing. 
They can also access any Room in the Space.")),a.createElement(ml,null),n&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,a.createElement(d.Text,{id:"troubleshooters"},"Troubleshooters"),a.createElement("br",null),a.createElement(d.TextSmall,{color:"textDescription"},"Users with this role can use Netdata to troubleshoot, not manage entities. They can access any Room in the Space.")),a.createElement(pl,null),n&&a.createElement(ml,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,a.createElement(d.Text,{id:"managers"},"Managers"),a.createElement("br",null),a.createElement(d.TextSmall,{color:"textDescription"},"Users with this role can manage Rooms and Users. They can access any Room in the Space.")),a.createElement(pl,null),n&&a.createElement(pl,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,a.createElement(d.Text,{id:"observers"},"Observers"),a.createElement("br",null),a.createElement(d.TextSmall,{color:"textDescription"},"Users with this role can only view data in specific Rooms.")),a.createElement(pl,null),n&&a.createElement(pl,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,a.createElement(d.Text,{id:"billing"},"Billing"),a.createElement("br",null),a.createElement(d.TextSmall,{color:"textDescription"},"Users with this role can handle billing options and invoices.")),a.createElement(pl,null),n&&a.createElement(pl,null),a.createElement(ml,null),a.createElement(ml,null),a.createElement(ul,null,a.createElement(d.Text,{id:"billing"},"Member"),a.createElement("br",null),a.createElement(d.TextSmall,{color:"textDescription"},"This role allows users to manage rooms and invite fellow Member teammates. These users cannot see all rooms in the Space but can see all Nodes since they are always on the All Nodes.")),t?a.createElement(ml,null):a.createElement(pl,null),n&&a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null))}))),yl=(0,a.memo)(fl((function(e){var t=e.isPro;return a.createElement(a.Fragment,null,a.createElement(ul,null,"Service Availability"),a.createElement(ul,{center:!0},"Best Effort (99.5% in last 12 months)"),t&&a.createElement(ul,{center:!0},"Best Effort (99.5% in last 12 months)"),a.createElement(ul,{center:!0},"99.9% annually (excl. 
scheduled maintenance)"),a.createElement(ul,{center:!0},"Same as Business plan"),a.createElement(ul,null,"Technical Support"),a.createElement(ul,{center:!0},"Public Forums, Tickets & Chat"),t&&a.createElement(ul,{center:!0},"Public Forums, Tickets & Chat"),a.createElement(ul,{center:!0},"Public Forums, Tickets & Chat"),a.createElement(ul,{center:!0},"Custom Design to Meet Requirements"),a.createElement(ul,null,"Initial Deployment Consulting Services"),a.createElement(ul,{center:!0},"Public Forums, Tickets & Chat"),t&&a.createElement(ul,{center:!0},"Public Forums, Tickets & Chat"),a.createElement(ul,{center:!0},"Public Forums, Tickets & Chat"),a.createElement(ul,{center:!0},"Remote or On Site Consultation and Training During Deployment"))}))),xl=function(e){var t,n=e.plans,r=e.currentPlan,l="free"==(null===r||void 0===r?void 0:r.slug)&&"2023.02"==(null===r||void 0===r?void 0:r.version),c="earlyBird"===(null===r||void 0===r?void 0:r.slug),s="pro"===(null===r||void 0===r?void 0:r.slug),u=null===(t=Object.values((null===n||void 0===n?void 0:n.free)||{}))||void 0===t||null===(t=t[0])||void 0===t||null===(t=t[0])||void 0===t?void 0:t.planLimitations,m=(0,a.useState)((function(){return[!1,!0,!0,!0,!0]})),d=(0,o.Z)(m,2),p=d[0],g=d[1],f={isOldCommunity:l,isEarlyBird:c,isPro:s,freePlanLimitattions:u,showAllPlans:!0,onToggle:function(e){return g((function(t){return t[e]=!t[e],(0,wa.Z)(t)}))},collapsed:p};return a.createElement(a.Fragment,null,a.createElement(bl,(0,i.Z)({title:"General",index:0},f)),a.createElement(hl,(0,i.Z)({title:"Features",index:1},f)),a.createElement(El,(0,i.Z)({title:"Usage Allowances",index:2},f)),a.createElement(vl,(0,i.Z)({title:"User Roles",index:3},f)),a.createElement(yl,(0,i.Z)({title:"Support",index:4},f)))},wl=function(e){var t=e.plans,n=e.loaded,r=e.currentLoaded,l=e.currentPlan;return a.createElement(d.Flex,{column:!0,height:"100%",gap:3},a.createElement(al,{currentPlan:l}),n&&r?a.createElement(d.Flex,{column:!0,gap:5,height:"100%",padding:[0,0,20,0]},a.createElement(Zr,{plans:t,currentPlan:l,showAllPlans:!0},a.createElement(d.Flex,{column:!0,height:50,justifyContent:"center"},a.createElement(d.Text,{strong:!0},"Pricing"))),a.createElement(d.Flex,{column:!0,gap:5,height:"100%",overflow:{vertical:"auto"}},a.createElement(xl,{plans:t,currentPlan:l}))):a.createElement(cn.Z,{title:n?"Loading billing info...":"Loading Netdata plans..."}))},Cl={Community:0,Pro:1,Business:2},Ol={month:"Monthly",year:"Yearly"},Pl=function(e){var t=e.from,n=e.to,a=e.fromNodes,r=e.toNodes,l=e.interval,o=e.success,i=void 0===o||o,c=Ol[l]||l;if(t===n&&a===r)return{header:i?"Successful billing cycle change":"Failed to update billing cycle",text:i?"Subscription billing cycle changed to ".concat(c):"Subscription is still on ".concat(c," billing cycle")};if(t===n)return{header:i?"Successful change of commitment":"Failed to change the commitment",text:i?"Commitment ".concat(aCl[n];return{header:i?"Successfully ".concat(s?"downgraded":"upgraded"," plan"):"Failed to ".concat(s?"downgrade":"upgrade"," plan"),text:i?"Subsrciprion ".concat(s?"downgraded":"upgraded"," from ").concat(t," to ").concat(n," plan (").concat(c,")"):"Remained on ".concat(t," plan")}},Zl=function(){!function(){var e=(0,on.lr)(),t=(0,o.Z)(e,2),n=t[0],r=t[1],l=(0,we.Z)(),i=(0,o.Z)(l,2),c=i[0],s=i[1];(0,a.useEffect)((function(){var e=n.get("from"),t=n.get("from_committed_nodes"),a=n.get("interval"),l=n.get("result"),o=n.get("to"),i=n.get("to_committed_nodes");switch(l){case"success":var 
u={from:e,fromNodes:t,interval:a,to:o,toNodes:i,success:!0};c(Pl(u)),(0,tl.L)("billing","callback","global-view",u);break;case"failure":var m={from:e,fromNodes:t,interval:a,to:o,toNodes:i,success:!1};s(Pl(m)),(0,tl.L)("billing","callback","global-view",m)}n&&l&&(n.delete("from"),n.delete("fromNodes"),n.delete("interval"),n.delete("result"),n.delete("to"),n.delete("toNodes"),r(n))}),[n])}();var e=(0,Gn.Gl)(),t=e.loaded,n=e.value,l=(0,Gn.RP)(),i=l.loaded,c=l.value,s=(0,zn.m)();return a.createElement(_.ZP,{feature:"Billing"},a.createElement(d.Flex,{column:!0,height:"calc(100% - 30px)",gap:3},a.createElement(r.Z5,null,a.createElement(r.AW,{path:"/all-plans",element:a.createElement(wl,{isSmall:s,currentPlan:c,loaded:t,currentLoaded:i,plans:n})}),a.createElement(r.AW,{path:"/",element:a.createElement(el,{isSmall:s,currentPlan:c,loaded:t,currentLoaded:i,plans:n})}))))},Tl=n(41898),Sl=["containerProps"],kl=["settingsTab"];function Il(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function jl(e){for(var t=1;t1?arguments[1]:void 0,n),c=o>2?arguments[2]:void 0,s=void 0===c?n:r(c,n);s>i;)t[i++]=e;return t}},43290:function(e,t,n){"use strict";var a=n(82109),r=n(21285),l=n(51223);a({target:"Array",proto:!0},{fill:r}),l("fill")},73955:function(e,t,n){var a=n(79833),r=0;e.exports=function(e){var t=++r;return a(e)+t}}}]); \ No newline at end of file diff --git a/web/gui/v2/8459.576da4e194a7e4007f03.css b/web/gui/v2/8459.576da4e194a7e4007f03.css new file mode 100644 index 00000000000000..d46b4b2ba4f784 --- /dev/null +++ b/web/gui/v2/8459.576da4e194a7e4007f03.css @@ -0,0 +1,2 @@ +.react-datepicker__year-read-view--down-arrow,.react-datepicker__month-read-view--down-arrow,.react-datepicker__month-year-read-view--down-arrow,.react-datepicker__navigation-icon::before{border-color:#ccc;border-style:solid;border-width:3px 3px 0 0;content:"";display:block;height:9px;position:absolute;top:6px;width:9px}.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle,.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle{margin-left:-4px;position:absolute;width:0}.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle::before,.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle::before,.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle::after,.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle::after{box-sizing:content-box;position:absolute;border:8px solid transparent;height:0;width:1px;content:"";z-index:-1;border-width:8px;left:-8px}.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle::before,.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle::before{border-bottom-color:#aeaeae}.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle{top:0;margin-top:-8px}.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle::before,.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle::after{border-top:none;border-bottom-color:#f0f0f0}.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle::after{top:0}.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle::before{top:-1px;border-bottom-color:#aeaeae}.react-datepicker-popper[data-placement^=top] 
.react-datepicker__triangle{bottom:0;margin-bottom:-8px}.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle::before,.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle::after{border-bottom:none;border-top-color:#fff}.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle::after{bottom:0}.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle::before{bottom:-1px;border-top-color:#aeaeae}.react-datepicker-wrapper{display:inline-block;padding:0;border:0}.react-datepicker{font-family:"Helvetica Neue", helvetica, arial, sans-serif;font-size:0.8rem;background-color:#fff;color:#000;border:1px solid #aeaeae;border-radius:0.3rem;display:inline-block;position:relative}.react-datepicker--time-only .react-datepicker__triangle{left:35px}.react-datepicker--time-only .react-datepicker__time-container{border-left:0}.react-datepicker--time-only .react-datepicker__time,.react-datepicker--time-only .react-datepicker__time-box{border-bottom-left-radius:0.3rem;border-bottom-right-radius:0.3rem}.react-datepicker__triangle{position:absolute;left:50px}.react-datepicker-popper{z-index:1}.react-datepicker-popper[data-placement^=bottom]{padding-top:10px}.react-datepicker-popper[data-placement=bottom-end] .react-datepicker__triangle,.react-datepicker-popper[data-placement=top-end] .react-datepicker__triangle{left:auto;right:50px}.react-datepicker-popper[data-placement^=top]{padding-bottom:10px}.react-datepicker-popper[data-placement^=right]{padding-left:8px}.react-datepicker-popper[data-placement^=right] .react-datepicker__triangle{left:auto;right:42px}.react-datepicker-popper[data-placement^=left]{padding-right:8px}.react-datepicker-popper[data-placement^=left] .react-datepicker__triangle{left:42px;right:auto}.react-datepicker__header{text-align:center;background-color:#f0f0f0;border-bottom:1px solid #aeaeae;border-top-left-radius:0.3rem;padding:8px 0;position:relative}.react-datepicker__header--time{padding-bottom:8px;padding-left:5px;padding-right:5px}.react-datepicker__header--time:not(.react-datepicker__header--time--only){border-top-left-radius:0}.react-datepicker__header:not(.react-datepicker__header--has-time-select){border-top-right-radius:0.3rem}.react-datepicker__year-dropdown-container--select,.react-datepicker__month-dropdown-container--select,.react-datepicker__month-year-dropdown-container--select,.react-datepicker__year-dropdown-container--scroll,.react-datepicker__month-dropdown-container--scroll,.react-datepicker__month-year-dropdown-container--scroll{display:inline-block;margin:0 
15px}.react-datepicker__current-month,.react-datepicker-time__header,.react-datepicker-year-header{margin-top:0;color:#000;font-weight:bold;font-size:0.944rem}.react-datepicker-time__header{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.react-datepicker__navigation{align-items:center;background:none;display:flex;justify-content:center;text-align:center;cursor:pointer;position:absolute;top:2px;padding:0;border:none;z-index:1;height:32px;width:32px;text-indent:-999em;overflow:hidden}.react-datepicker__navigation--previous{left:2px}.react-datepicker__navigation--next{right:2px}.react-datepicker__navigation--next--with-time:not(.react-datepicker__navigation--next--with-today-button){right:85px}.react-datepicker__navigation--years{position:relative;top:0;display:block;margin-left:auto;margin-right:auto}.react-datepicker__navigation--years-previous{top:4px}.react-datepicker__navigation--years-upcoming{top:-4px}.react-datepicker__navigation:hover *::before{border-color:#a6a6a6}.react-datepicker__navigation-icon{position:relative;top:-1px;font-size:20px;width:0}.react-datepicker__navigation-icon--next{left:-2px}.react-datepicker__navigation-icon--next::before{transform:rotate(45deg);left:-7px}.react-datepicker__navigation-icon--previous{right:-2px}.react-datepicker__navigation-icon--previous::before{transform:rotate(225deg);right:-7px}.react-datepicker__month-container{float:left}.react-datepicker__year{margin:0.4rem;text-align:center}.react-datepicker__year-wrapper{display:flex;flex-wrap:wrap;max-width:180px}.react-datepicker__year .react-datepicker__year-text{display:inline-block;width:4rem;margin:2px}.react-datepicker__month{margin:0.4rem;text-align:center}.react-datepicker__month .react-datepicker__month-text,.react-datepicker__month .react-datepicker__quarter-text{display:inline-block;width:4rem;margin:2px}.react-datepicker__input-time-container{clear:both;width:100%;float:left;margin:5px 0 10px 15px;text-align:left}.react-datepicker__input-time-container .react-datepicker-time__caption{display:inline-block}.react-datepicker__input-time-container .react-datepicker-time__input-container{display:inline-block}.react-datepicker__input-time-container .react-datepicker-time__input-container .react-datepicker-time__input{display:inline-block;margin-left:10px}.react-datepicker__input-time-container .react-datepicker-time__input-container .react-datepicker-time__input input{width:auto}.react-datepicker__input-time-container .react-datepicker-time__input-container .react-datepicker-time__input input[type=time]::-webkit-inner-spin-button,.react-datepicker__input-time-container .react-datepicker-time__input-container .react-datepicker-time__input input[type=time]::-webkit-outer-spin-button{-webkit-appearance:none;margin:0}.react-datepicker__input-time-container .react-datepicker-time__input-container .react-datepicker-time__input input[type=time]{-moz-appearance:textfield}.react-datepicker__input-time-container .react-datepicker-time__input-container .react-datepicker-time__delimiter{margin-left:5px;display:inline-block}.react-datepicker__time-container{float:right;border-left:1px solid #aeaeae;width:85px}.react-datepicker__time-container--with-today-button{display:inline;border:1px solid #aeaeae;border-radius:0.3rem;position:absolute;right:-87px;top:0}.react-datepicker__time-container .react-datepicker__time{position:relative;background:white;border-bottom-right-radius:0.3rem}.react-datepicker__time-container .react-datepicker__time 
.react-datepicker__time-box{width:85px;overflow-x:hidden;margin:0 auto;text-align:center;border-bottom-right-radius:0.3rem}.react-datepicker__time-container .react-datepicker__time .react-datepicker__time-box ul.react-datepicker__time-list{list-style:none;margin:0;height:calc(195px + 1.7rem / 2);overflow-y:scroll;padding-right:0;padding-left:0;width:100%;box-sizing:content-box}.react-datepicker__time-container .react-datepicker__time .react-datepicker__time-box ul.react-datepicker__time-list li.react-datepicker__time-list-item{height:30px;padding:5px 10px;white-space:nowrap}.react-datepicker__time-container .react-datepicker__time .react-datepicker__time-box ul.react-datepicker__time-list li.react-datepicker__time-list-item:hover{cursor:pointer;background-color:#f0f0f0}.react-datepicker__time-container .react-datepicker__time .react-datepicker__time-box ul.react-datepicker__time-list li.react-datepicker__time-list-item--selected{background-color:#216ba5;color:white;font-weight:bold}.react-datepicker__time-container .react-datepicker__time .react-datepicker__time-box ul.react-datepicker__time-list li.react-datepicker__time-list-item--selected:hover{background-color:#216ba5}.react-datepicker__time-container .react-datepicker__time .react-datepicker__time-box ul.react-datepicker__time-list li.react-datepicker__time-list-item--disabled{color:#ccc}.react-datepicker__time-container .react-datepicker__time .react-datepicker__time-box ul.react-datepicker__time-list li.react-datepicker__time-list-item--disabled:hover{cursor:default;background-color:transparent}.react-datepicker__week-number{color:#ccc;display:inline-block;width:1.7rem;line-height:1.7rem;text-align:center;margin:0.166rem}.react-datepicker__week-number.react-datepicker__week-number--clickable{cursor:pointer}.react-datepicker__week-number.react-datepicker__week-number--clickable:hover{border-radius:0.3rem;background-color:#f0f0f0}.react-datepicker__day-names,.react-datepicker__week{white-space:nowrap}.react-datepicker__day-names{margin-bottom:-8px}.react-datepicker__day-name,.react-datepicker__day,.react-datepicker__time-name{color:#000;display:inline-block;width:1.7rem;line-height:1.7rem;text-align:center;margin:0.166rem}.react-datepicker__day,.react-datepicker__month-text,.react-datepicker__quarter-text,.react-datepicker__year-text{cursor:pointer}.react-datepicker__day:hover,.react-datepicker__month-text:hover,.react-datepicker__quarter-text:hover,.react-datepicker__year-text:hover{border-radius:0.3rem;background-color:#f0f0f0}.react-datepicker__day--today,.react-datepicker__month-text--today,.react-datepicker__quarter-text--today,.react-datepicker__year-text--today{font-weight:bold}.react-datepicker__day--highlighted,.react-datepicker__month-text--highlighted,.react-datepicker__quarter-text--highlighted,.react-datepicker__year-text--highlighted{border-radius:0.3rem;background-color:#3dcc4a;color:#fff}.react-datepicker__day--highlighted:hover,.react-datepicker__month-text--highlighted:hover,.react-datepicker__quarter-text--highlighted:hover,.react-datepicker__year-text--highlighted:hover{background-color:#32be3f}.react-datepicker__day--highlighted-custom-1,.react-datepicker__month-text--highlighted-custom-1,.react-datepicker__quarter-text--highlighted-custom-1,.react-datepicker__year-text--highlighted-custom-1{color:magenta}.react-datepicker__day--highlighted-custom-2,.react-datepicker__month-text--highlighted-custom-2,.react-datepicker__quarter-text--highlighted-custom-2,.react-datepicker__year-text--highlighted-custom-2{color:green
}.react-datepicker__day--holidays,.react-datepicker__month-text--holidays,.react-datepicker__quarter-text--holidays,.react-datepicker__year-text--holidays{position:relative;border-radius:0.3rem;background-color:#ff6803;color:#fff}.react-datepicker__day--holidays .holiday-overlay,.react-datepicker__month-text--holidays .holiday-overlay,.react-datepicker__quarter-text--holidays .holiday-overlay,.react-datepicker__year-text--holidays .holiday-overlay{position:absolute;bottom:100%;left:50%;transform:translateX(-50%);background-color:#333;color:#fff;padding:4px;border-radius:4px;white-space:nowrap;visibility:hidden;opacity:0;transition:visibility 0s, opacity 0.3s ease-in-out}.react-datepicker__day--holidays:hover,.react-datepicker__month-text--holidays:hover,.react-datepicker__quarter-text--holidays:hover,.react-datepicker__year-text--holidays:hover{background-color:#cf5300}.react-datepicker__day--holidays:hover .holiday-overlay,.react-datepicker__month-text--holidays:hover .holiday-overlay,.react-datepicker__quarter-text--holidays:hover .holiday-overlay,.react-datepicker__year-text--holidays:hover .holiday-overlay{visibility:visible;opacity:1}.react-datepicker__day--selected,.react-datepicker__day--in-selecting-range,.react-datepicker__day--in-range,.react-datepicker__month-text--selected,.react-datepicker__month-text--in-selecting-range,.react-datepicker__month-text--in-range,.react-datepicker__quarter-text--selected,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__quarter-text--in-range,.react-datepicker__year-text--selected,.react-datepicker__year-text--in-selecting-range,.react-datepicker__year-text--in-range{border-radius:0.3rem;background-color:#216ba5;color:#fff}.react-datepicker__day--selected:hover,.react-datepicker__day--in-selecting-range:hover,.react-datepicker__day--in-range:hover,.react-datepicker__month-text--selected:hover,.react-datepicker__month-text--in-selecting-range:hover,.react-datepicker__month-text--in-range:hover,.react-datepicker__quarter-text--selected:hover,.react-datepicker__quarter-text--in-selecting-range:hover,.react-datepicker__quarter-text--in-range:hover,.react-datepicker__year-text--selected:hover,.react-datepicker__year-text--in-selecting-range:hover,.react-datepicker__year-text--in-range:hover{background-color:#1d5d90}.react-datepicker__day--keyboard-selected,.react-datepicker__month-text--keyboard-selected,.react-datepicker__quarter-text--keyboard-selected,.react-datepicker__year-text--keyboard-selected{border-radius:0.3rem;background-color:#bad9f1;color:#000}.react-datepicker__day--keyboard-selected:hover,.react-datepicker__month-text--keyboard-selected:hover,.react-datepicker__quarter-text--keyboard-selected:hover,.react-datepicker__year-text--keyboard-selected:hover{background-color:#1d5d90}.react-datepicker__day--in-selecting-range:not(.react-datepicker__day--in-range,.react-datepicker__month-text--in-range,.react-datepicker__quarter-text--in-range,.react-datepicker__year-text--in-range),.react-datepicker__month-text--in-selecting-range:not(.react-datepicker__day--in-range,.react-datepicker__month-text--in-range,.react-datepicker__quarter-text--in-range,.react-datepicker__year-text--in-range),.react-datepicker__quarter-text--in-selecting-range:not(.react-datepicker__day--in-range,.react-datepicker__month-text--in-range,.react-datepicker__quarter-text--in-range,.react-datepicker__year-text--in-range),.react-datepicker__year-text--in-selecting-range:not(.react-datepicker__day--in-range,.react-datepicker__month-text--in-range,.react
-datepicker__quarter-text--in-range,.react-datepicker__year-text--in-range){background-color:rgba(33,107,165,0.5)}.react-datepicker__month--selecting-range .react-datepicker__day--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range),.react-datepicker__year--selecting-range .react-datepicker__day--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range),.react-datepicker__month--selecting-range .react-datepicker__month-text--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range),.react-datepicker__year--selecting-range .react-datepicker__month-text--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range),.react-datepicker__month--selecting-range .react-datepicker__quarter-text--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range),.react-datepicker__year--selecting-range .react-datepicker__quarter-text--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range),.react-datepicker__month--selecting-range .react-datepicker__year-text--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range),.react-datepicker__year--selecting-range .react-datepicker__year-text--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range){background-color:#f0f0f0;color:#000}.react-datepicker__day--disabled,.react-datepicker__month-text--disabled,.react-datepicker__quarter-text--disabled,.react-datepicker__year-text--disabled{cursor:default;color:#ccc}.react-datepicker__day--disabled:hover,.react-datepicker__month-text--disabled:hover,.react-datepicker__quarter-text--disabled:hover,.react-datepicker__year-text--disabled:hover{background-color:transparent}.react-datepicker__input-container{position:relative;display:inline-block;width:100%}.react-datepicker__input-container .react-datepicker__calendar-icon{position:absolute;padding:0.5rem;box-sizing:content-box}.react-datepicker__view-calendar-icon input{padding:6px 10px 5px 25px}.react-datepicker__year-read-view,.react-datepicker__month-read-view,.react-datepicker__month-year-read-view{border:1px solid transparent;border-radius:0.3rem;position:relative}.react-datepicker__year-read-view:hover,.react-datepicker__month-read-view:hover,.react-datepicker__month-year-read-view:hover{cursor:pointer}.react-datepicker__year-read-view:hover .react-datepicker__year-read-view--down-arrow,.react-datepicker__year-read-view:hover 
.react-datepicker__month-read-view--down-arrow,.react-datepicker__month-read-view:hover .react-datepicker__year-read-view--down-arrow,.react-datepicker__month-read-view:hover .react-datepicker__month-read-view--down-arrow,.react-datepicker__month-year-read-view:hover .react-datepicker__year-read-view--down-arrow,.react-datepicker__month-year-read-view:hover .react-datepicker__month-read-view--down-arrow{border-top-color:#b3b3b3}.react-datepicker__year-read-view--down-arrow,.react-datepicker__month-read-view--down-arrow,.react-datepicker__month-year-read-view--down-arrow{transform:rotate(135deg);right:-16px;top:0}.react-datepicker__year-dropdown,.react-datepicker__month-dropdown,.react-datepicker__month-year-dropdown{background-color:#f0f0f0;position:absolute;width:50%;left:25%;top:30px;z-index:1;text-align:center;border-radius:0.3rem;border:1px solid #aeaeae}.react-datepicker__year-dropdown:hover,.react-datepicker__month-dropdown:hover,.react-datepicker__month-year-dropdown:hover{cursor:pointer}.react-datepicker__year-dropdown--scrollable,.react-datepicker__month-dropdown--scrollable,.react-datepicker__month-year-dropdown--scrollable{height:150px;overflow-y:scroll}.react-datepicker__year-option,.react-datepicker__month-option,.react-datepicker__month-year-option{line-height:20px;width:100%;display:block;margin-left:auto;margin-right:auto}.react-datepicker__year-option:first-of-type,.react-datepicker__month-option:first-of-type,.react-datepicker__month-year-option:first-of-type{border-top-left-radius:0.3rem;border-top-right-radius:0.3rem}.react-datepicker__year-option:last-of-type,.react-datepicker__month-option:last-of-type,.react-datepicker__month-year-option:last-of-type{-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;border-bottom-left-radius:0.3rem;border-bottom-right-radius:0.3rem}.react-datepicker__year-option:hover,.react-datepicker__month-option:hover,.react-datepicker__month-year-option:hover{background-color:#ccc}.react-datepicker__year-option:hover .react-datepicker__navigation--years-upcoming,.react-datepicker__month-option:hover .react-datepicker__navigation--years-upcoming,.react-datepicker__month-year-option:hover .react-datepicker__navigation--years-upcoming{border-bottom-color:#b3b3b3}.react-datepicker__year-option:hover .react-datepicker__navigation--years-previous,.react-datepicker__month-option:hover .react-datepicker__navigation--years-previous,.react-datepicker__month-year-option:hover .react-datepicker__navigation--years-previous{border-top-color:#b3b3b3}.react-datepicker__year-option--selected,.react-datepicker__month-option--selected,.react-datepicker__month-year-option--selected{position:absolute;left:15px}.react-datepicker__close-icon{cursor:pointer;background-color:transparent;border:0;outline:0;padding:0 6px 0 0;position:absolute;top:0;right:0;height:100%;display:table-cell;vertical-align:middle}.react-datepicker__close-icon::after{cursor:pointer;background-color:#216ba5;color:#fff;border-radius:50%;height:16px;width:16px;padding:2px;font-size:12px;line-height:1;text-align:center;display:table-cell;vertical-align:middle;content:"×"}.react-datepicker__today-button{background:#f0f0f0;border-top:1px solid #aeaeae;cursor:pointer;text-align:center;font-weight:bold;padding:5px 0;clear:left}.react-datepicker__portal{position:fixed;width:100vw;height:100vh;background-color:rgba(0,0,0,0.8);left:0;top:0;justify-content:center;align-items:center;display:flex;z-index:2147483647}.react-datepicker__portal 
.react-datepicker__day-name,.react-datepicker__portal .react-datepicker__day,.react-datepicker__portal .react-datepicker__time-name{width:3rem;line-height:3rem}@media (max-width: 400px), (max-height: 550px){.react-datepicker__portal .react-datepicker__day-name,.react-datepicker__portal .react-datepicker__day,.react-datepicker__portal .react-datepicker__time-name{width:2rem;line-height:2rem}}.react-datepicker__portal .react-datepicker__current-month,.react-datepicker__portal .react-datepicker-time__header{font-size:1.44rem}.react-datepicker__children-container{width:13.8rem;margin:0.4rem;padding-right:0.2rem;padding-left:0.2rem;height:auto}.react-datepicker__aria-live{position:absolute;clip-path:circle(0);border:0;height:1px;margin:-1px;overflow:hidden;padding:0;width:1px;white-space:nowrap}.react-datepicker__calendar-icon{width:1em;height:1em;vertical-align:-0.125em} + diff --git a/web/gui/v2/8459.add89d7bb0434b110cd3.chunk.js b/web/gui/v2/8459.add89d7bb0434b110cd3.chunk.js new file mode 100644 index 00000000000000..e33052518d4f4b --- /dev/null +++ b/web/gui/v2/8459.add89d7bb0434b110cd3.chunk.js @@ -0,0 +1,2 @@ +/*! For license information please see 8459.add89d7bb0434b110cd3.chunk.js.LICENSE.txt */ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="dd03e7fa-79a5-447e-aedd-dfe0e1d0ddba",e._sentryDebugIdIdentifier="sentry-dbid-dd03e7fa-79a5-447e-aedd-dfe0e1d0ddba")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[8459],{66007:function(e,t){"use strict";var n="user-astronaut",r=[],a="f4fb",o="M370.7 96.1C346.1 39.5 289.7 0 224 0S101.9 39.5 77.3 96.1C60.9 97.5 48 111.2 48 128v64c0 16.8 12.9 30.5 29.3 31.9C101.9 280.5 158.3 320 224 320s122.1-39.5 146.7-96.1c16.4-1.4 29.3-15.1 29.3-31.9V128c0-16.8-12.9-30.5-29.3-31.9zM336 144v16c0 53-43 96-96 96H208c-53 0-96-43-96-96V144c0-26.5 21.5-48 48-48H288c26.5 0 48 21.5 48 48zM189.3 162.7l-6-21.2c-.9-3.3-3.9-5.5-7.3-5.5s-6.4 2.2-7.3 5.5l-6 21.2-21.2 6c-3.3 .9-5.5 3.9-5.5 7.3s2.2 6.4 5.5 7.3l21.2 6 6 21.2c.9 3.3 3.9 5.5 7.3 5.5s6.4-2.2 7.3-5.5l6-21.2 21.2-6c3.3-.9 5.5-3.9 5.5-7.3s-2.2-6.4-5.5-7.3l-21.2-6zM112.7 316.5C46.7 342.6 0 407 0 482.3C0 498.7 13.3 512 29.7 512H128V448c0-17.7 14.3-32 32-32H288c17.7 0 32 14.3 32 32v64l98.3 0c16.4 0 29.7-13.3 29.7-29.7c0-75.3-46.7-139.7-112.7-165.8C303.9 338.8 265.5 352 224 352s-79.9-13.2-111.3-35.5zM176 448c-8.8 0-16 7.2-16 16v48h32V464c0-8.8-7.2-16-16-16zm96 32a16 16 0 1 0 0-32 16 16 0 1 0 0 32z";t.DF={prefix:"fas",iconName:n,icon:[448,512,r,a,o]},t.KC=t.DF},70982:function(e,t,n){"use strict";function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function a(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n0;)t+=he[62*Math.random()|0];return t}function me(e){for(var t=[],n=(e||[]).length>>>0;n--;)t[n]=e[n];return t}function ge(e){return e.classList?me(e.classList):(e.getAttribute("class")||"").split(" ").filter((function(e){return e}))}function be(e){return"".concat(e).replace(/&/g,"&").replace(/"/g,""").replace(/'/g,"'").replace(//g,">")}function 
ye(e){return Object.keys(e||{}).reduce((function(t,n){return t+"".concat(n,": ").concat(e[n].trim(),";")}),"")}function we(e){return e.size!==de.size||e.x!==de.x||e.y!==de.y||e.rotate!==de.rotate||e.flipX||e.flipY}var xe=':root, :host {\n --fa-font-solid: normal 900 1em/1 "Font Awesome 6 Solid";\n --fa-font-regular: normal 400 1em/1 "Font Awesome 6 Regular";\n --fa-font-light: normal 300 1em/1 "Font Awesome 6 Light";\n --fa-font-thin: normal 100 1em/1 "Font Awesome 6 Thin";\n --fa-font-duotone: normal 900 1em/1 "Font Awesome 6 Duotone";\n --fa-font-sharp-solid: normal 900 1em/1 "Font Awesome 6 Sharp";\n --fa-font-sharp-regular: normal 400 1em/1 "Font Awesome 6 Sharp";\n --fa-font-sharp-light: normal 300 1em/1 "Font Awesome 6 Sharp";\n --fa-font-brands: normal 400 1em/1 "Font Awesome 6 Brands";\n}\n\nsvg:not(:root).svg-inline--fa, svg:not(:host).svg-inline--fa {\n overflow: visible;\n box-sizing: content-box;\n}\n\n.svg-inline--fa {\n display: var(--fa-display, inline-block);\n height: 1em;\n overflow: visible;\n vertical-align: -0.125em;\n}\n.svg-inline--fa.fa-2xs {\n vertical-align: 0.1em;\n}\n.svg-inline--fa.fa-xs {\n vertical-align: 0em;\n}\n.svg-inline--fa.fa-sm {\n vertical-align: -0.0714285705em;\n}\n.svg-inline--fa.fa-lg {\n vertical-align: -0.2em;\n}\n.svg-inline--fa.fa-xl {\n vertical-align: -0.25em;\n}\n.svg-inline--fa.fa-2xl {\n vertical-align: -0.3125em;\n}\n.svg-inline--fa.fa-pull-left {\n margin-right: var(--fa-pull-margin, 0.3em);\n width: auto;\n}\n.svg-inline--fa.fa-pull-right {\n margin-left: var(--fa-pull-margin, 0.3em);\n width: auto;\n}\n.svg-inline--fa.fa-li {\n width: var(--fa-li-width, 2em);\n top: 0.25em;\n}\n.svg-inline--fa.fa-fw {\n width: var(--fa-fw-width, 1.25em);\n}\n\n.fa-layers svg.svg-inline--fa {\n bottom: 0;\n left: 0;\n margin: auto;\n position: absolute;\n right: 0;\n top: 0;\n}\n\n.fa-layers-counter, .fa-layers-text {\n display: inline-block;\n position: absolute;\n text-align: center;\n}\n\n.fa-layers {\n display: inline-block;\n height: 1em;\n position: relative;\n text-align: center;\n vertical-align: -0.125em;\n width: 1em;\n}\n.fa-layers svg.svg-inline--fa {\n -webkit-transform-origin: center center;\n transform-origin: center center;\n}\n\n.fa-layers-text {\n left: 50%;\n top: 50%;\n -webkit-transform: translate(-50%, -50%);\n transform: translate(-50%, -50%);\n -webkit-transform-origin: center center;\n transform-origin: center center;\n}\n\n.fa-layers-counter {\n background-color: var(--fa-counter-background-color, #ff253a);\n border-radius: var(--fa-counter-border-radius, 1em);\n box-sizing: border-box;\n color: var(--fa-inverse, #fff);\n line-height: var(--fa-counter-line-height, 1);\n max-width: var(--fa-counter-max-width, 5em);\n min-width: var(--fa-counter-min-width, 1.5em);\n overflow: hidden;\n padding: var(--fa-counter-padding, 0.25em 0.5em);\n right: var(--fa-right, 0);\n text-overflow: ellipsis;\n top: var(--fa-top, 0);\n -webkit-transform: scale(var(--fa-counter-scale, 0.25));\n transform: scale(var(--fa-counter-scale, 0.25));\n -webkit-transform-origin: top right;\n transform-origin: top right;\n}\n\n.fa-layers-bottom-right {\n bottom: var(--fa-bottom, 0);\n right: var(--fa-right, 0);\n top: auto;\n -webkit-transform: scale(var(--fa-layers-scale, 0.25));\n transform: scale(var(--fa-layers-scale, 0.25));\n -webkit-transform-origin: bottom right;\n transform-origin: bottom right;\n}\n\n.fa-layers-bottom-left {\n bottom: var(--fa-bottom, 0);\n left: var(--fa-left, 0);\n right: auto;\n top: auto;\n -webkit-transform: 
scale(var(--fa-layers-scale, 0.25));\n transform: scale(var(--fa-layers-scale, 0.25));\n -webkit-transform-origin: bottom left;\n transform-origin: bottom left;\n}\n\n.fa-layers-top-right {\n top: var(--fa-top, 0);\n right: var(--fa-right, 0);\n -webkit-transform: scale(var(--fa-layers-scale, 0.25));\n transform: scale(var(--fa-layers-scale, 0.25));\n -webkit-transform-origin: top right;\n transform-origin: top right;\n}\n\n.fa-layers-top-left {\n left: var(--fa-left, 0);\n right: auto;\n top: var(--fa-top, 0);\n -webkit-transform: scale(var(--fa-layers-scale, 0.25));\n transform: scale(var(--fa-layers-scale, 0.25));\n -webkit-transform-origin: top left;\n transform-origin: top left;\n}\n\n.fa-1x {\n font-size: 1em;\n}\n\n.fa-2x {\n font-size: 2em;\n}\n\n.fa-3x {\n font-size: 3em;\n}\n\n.fa-4x {\n font-size: 4em;\n}\n\n.fa-5x {\n font-size: 5em;\n}\n\n.fa-6x {\n font-size: 6em;\n}\n\n.fa-7x {\n font-size: 7em;\n}\n\n.fa-8x {\n font-size: 8em;\n}\n\n.fa-9x {\n font-size: 9em;\n}\n\n.fa-10x {\n font-size: 10em;\n}\n\n.fa-2xs {\n font-size: 0.625em;\n line-height: 0.1em;\n vertical-align: 0.225em;\n}\n\n.fa-xs {\n font-size: 0.75em;\n line-height: 0.0833333337em;\n vertical-align: 0.125em;\n}\n\n.fa-sm {\n font-size: 0.875em;\n line-height: 0.0714285718em;\n vertical-align: 0.0535714295em;\n}\n\n.fa-lg {\n font-size: 1.25em;\n line-height: 0.05em;\n vertical-align: -0.075em;\n}\n\n.fa-xl {\n font-size: 1.5em;\n line-height: 0.0416666682em;\n vertical-align: -0.125em;\n}\n\n.fa-2xl {\n font-size: 2em;\n line-height: 0.03125em;\n vertical-align: -0.1875em;\n}\n\n.fa-fw {\n text-align: center;\n width: 1.25em;\n}\n\n.fa-ul {\n list-style-type: none;\n margin-left: var(--fa-li-margin, 2.5em);\n padding-left: 0;\n}\n.fa-ul > li {\n position: relative;\n}\n\n.fa-li {\n left: calc(var(--fa-li-width, 2em) * -1);\n position: absolute;\n text-align: center;\n width: var(--fa-li-width, 2em);\n line-height: inherit;\n}\n\n.fa-border {\n border-color: var(--fa-border-color, #eee);\n border-radius: var(--fa-border-radius, 0.1em);\n border-style: var(--fa-border-style, solid);\n border-width: var(--fa-border-width, 0.08em);\n padding: var(--fa-border-padding, 0.2em 0.25em 0.15em);\n}\n\n.fa-pull-left {\n float: left;\n margin-right: var(--fa-pull-margin, 0.3em);\n}\n\n.fa-pull-right {\n float: right;\n margin-left: var(--fa-pull-margin, 0.3em);\n}\n\n.fa-beat {\n -webkit-animation-name: fa-beat;\n animation-name: fa-beat;\n -webkit-animation-delay: var(--fa-animation-delay, 0s);\n animation-delay: var(--fa-animation-delay, 0s);\n -webkit-animation-direction: var(--fa-animation-direction, normal);\n animation-direction: var(--fa-animation-direction, normal);\n -webkit-animation-duration: var(--fa-animation-duration, 1s);\n animation-duration: var(--fa-animation-duration, 1s);\n -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n -webkit-animation-timing-function: var(--fa-animation-timing, ease-in-out);\n animation-timing-function: var(--fa-animation-timing, ease-in-out);\n}\n\n.fa-bounce {\n -webkit-animation-name: fa-bounce;\n animation-name: fa-bounce;\n -webkit-animation-delay: var(--fa-animation-delay, 0s);\n animation-delay: var(--fa-animation-delay, 0s);\n -webkit-animation-direction: var(--fa-animation-direction, normal);\n animation-direction: var(--fa-animation-direction, normal);\n -webkit-animation-duration: var(--fa-animation-duration, 1s);\n animation-duration: 
var(--fa-animation-duration, 1s);\n -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n -webkit-animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.28, 0.84, 0.42, 1));\n animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.28, 0.84, 0.42, 1));\n}\n\n.fa-fade {\n -webkit-animation-name: fa-fade;\n animation-name: fa-fade;\n -webkit-animation-delay: var(--fa-animation-delay, 0s);\n animation-delay: var(--fa-animation-delay, 0s);\n -webkit-animation-direction: var(--fa-animation-direction, normal);\n animation-direction: var(--fa-animation-direction, normal);\n -webkit-animation-duration: var(--fa-animation-duration, 1s);\n animation-duration: var(--fa-animation-duration, 1s);\n -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n -webkit-animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.4, 0, 0.6, 1));\n animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.4, 0, 0.6, 1));\n}\n\n.fa-beat-fade {\n -webkit-animation-name: fa-beat-fade;\n animation-name: fa-beat-fade;\n -webkit-animation-delay: var(--fa-animation-delay, 0s);\n animation-delay: var(--fa-animation-delay, 0s);\n -webkit-animation-direction: var(--fa-animation-direction, normal);\n animation-direction: var(--fa-animation-direction, normal);\n -webkit-animation-duration: var(--fa-animation-duration, 1s);\n animation-duration: var(--fa-animation-duration, 1s);\n -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n -webkit-animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.4, 0, 0.6, 1));\n animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.4, 0, 0.6, 1));\n}\n\n.fa-flip {\n -webkit-animation-name: fa-flip;\n animation-name: fa-flip;\n -webkit-animation-delay: var(--fa-animation-delay, 0s);\n animation-delay: var(--fa-animation-delay, 0s);\n -webkit-animation-direction: var(--fa-animation-direction, normal);\n animation-direction: var(--fa-animation-direction, normal);\n -webkit-animation-duration: var(--fa-animation-duration, 1s);\n animation-duration: var(--fa-animation-duration, 1s);\n -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n -webkit-animation-timing-function: var(--fa-animation-timing, ease-in-out);\n animation-timing-function: var(--fa-animation-timing, ease-in-out);\n}\n\n.fa-shake {\n -webkit-animation-name: fa-shake;\n animation-name: fa-shake;\n -webkit-animation-delay: var(--fa-animation-delay, 0s);\n animation-delay: var(--fa-animation-delay, 0s);\n -webkit-animation-direction: var(--fa-animation-direction, normal);\n animation-direction: var(--fa-animation-direction, normal);\n -webkit-animation-duration: var(--fa-animation-duration, 1s);\n animation-duration: var(--fa-animation-duration, 1s);\n -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n -webkit-animation-timing-function: var(--fa-animation-timing, linear);\n animation-timing-function: var(--fa-animation-timing, linear);\n}\n\n.fa-spin {\n -webkit-animation-name: fa-spin;\n animation-name: fa-spin;\n 
-webkit-animation-delay: var(--fa-animation-delay, 0s);\n animation-delay: var(--fa-animation-delay, 0s);\n -webkit-animation-direction: var(--fa-animation-direction, normal);\n animation-direction: var(--fa-animation-direction, normal);\n -webkit-animation-duration: var(--fa-animation-duration, 2s);\n animation-duration: var(--fa-animation-duration, 2s);\n -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n -webkit-animation-timing-function: var(--fa-animation-timing, linear);\n animation-timing-function: var(--fa-animation-timing, linear);\n}\n\n.fa-spin-reverse {\n --fa-animation-direction: reverse;\n}\n\n.fa-pulse,\n.fa-spin-pulse {\n -webkit-animation-name: fa-spin;\n animation-name: fa-spin;\n -webkit-animation-direction: var(--fa-animation-direction, normal);\n animation-direction: var(--fa-animation-direction, normal);\n -webkit-animation-duration: var(--fa-animation-duration, 1s);\n animation-duration: var(--fa-animation-duration, 1s);\n -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n animation-iteration-count: var(--fa-animation-iteration-count, infinite);\n -webkit-animation-timing-function: var(--fa-animation-timing, steps(8));\n animation-timing-function: var(--fa-animation-timing, steps(8));\n}\n\n@media (prefers-reduced-motion: reduce) {\n .fa-beat,\n.fa-bounce,\n.fa-fade,\n.fa-beat-fade,\n.fa-flip,\n.fa-pulse,\n.fa-shake,\n.fa-spin,\n.fa-spin-pulse {\n -webkit-animation-delay: -1ms;\n animation-delay: -1ms;\n -webkit-animation-duration: 1ms;\n animation-duration: 1ms;\n -webkit-animation-iteration-count: 1;\n animation-iteration-count: 1;\n -webkit-transition-delay: 0s;\n transition-delay: 0s;\n -webkit-transition-duration: 0s;\n transition-duration: 0s;\n }\n}\n@-webkit-keyframes fa-beat {\n 0%, 90% {\n -webkit-transform: scale(1);\n transform: scale(1);\n }\n 45% {\n -webkit-transform: scale(var(--fa-beat-scale, 1.25));\n transform: scale(var(--fa-beat-scale, 1.25));\n }\n}\n@keyframes fa-beat {\n 0%, 90% {\n -webkit-transform: scale(1);\n transform: scale(1);\n }\n 45% {\n -webkit-transform: scale(var(--fa-beat-scale, 1.25));\n transform: scale(var(--fa-beat-scale, 1.25));\n }\n}\n@-webkit-keyframes fa-bounce {\n 0% {\n -webkit-transform: scale(1, 1) translateY(0);\n transform: scale(1, 1) translateY(0);\n }\n 10% {\n -webkit-transform: scale(var(--fa-bounce-start-scale-x, 1.1), var(--fa-bounce-start-scale-y, 0.9)) translateY(0);\n transform: scale(var(--fa-bounce-start-scale-x, 1.1), var(--fa-bounce-start-scale-y, 0.9)) translateY(0);\n }\n 30% {\n -webkit-transform: scale(var(--fa-bounce-jump-scale-x, 0.9), var(--fa-bounce-jump-scale-y, 1.1)) translateY(var(--fa-bounce-height, -0.5em));\n transform: scale(var(--fa-bounce-jump-scale-x, 0.9), var(--fa-bounce-jump-scale-y, 1.1)) translateY(var(--fa-bounce-height, -0.5em));\n }\n 50% {\n -webkit-transform: scale(var(--fa-bounce-land-scale-x, 1.05), var(--fa-bounce-land-scale-y, 0.95)) translateY(0);\n transform: scale(var(--fa-bounce-land-scale-x, 1.05), var(--fa-bounce-land-scale-y, 0.95)) translateY(0);\n }\n 57% {\n -webkit-transform: scale(1, 1) translateY(var(--fa-bounce-rebound, -0.125em));\n transform: scale(1, 1) translateY(var(--fa-bounce-rebound, -0.125em));\n }\n 64% {\n -webkit-transform: scale(1, 1) translateY(0);\n transform: scale(1, 1) translateY(0);\n }\n 100% {\n -webkit-transform: scale(1, 1) translateY(0);\n transform: scale(1, 1) translateY(0);\n 
}\n}\n@keyframes fa-bounce {\n 0% {\n -webkit-transform: scale(1, 1) translateY(0);\n transform: scale(1, 1) translateY(0);\n }\n 10% {\n -webkit-transform: scale(var(--fa-bounce-start-scale-x, 1.1), var(--fa-bounce-start-scale-y, 0.9)) translateY(0);\n transform: scale(var(--fa-bounce-start-scale-x, 1.1), var(--fa-bounce-start-scale-y, 0.9)) translateY(0);\n }\n 30% {\n -webkit-transform: scale(var(--fa-bounce-jump-scale-x, 0.9), var(--fa-bounce-jump-scale-y, 1.1)) translateY(var(--fa-bounce-height, -0.5em));\n transform: scale(var(--fa-bounce-jump-scale-x, 0.9), var(--fa-bounce-jump-scale-y, 1.1)) translateY(var(--fa-bounce-height, -0.5em));\n }\n 50% {\n -webkit-transform: scale(var(--fa-bounce-land-scale-x, 1.05), var(--fa-bounce-land-scale-y, 0.95)) translateY(0);\n transform: scale(var(--fa-bounce-land-scale-x, 1.05), var(--fa-bounce-land-scale-y, 0.95)) translateY(0);\n }\n 57% {\n -webkit-transform: scale(1, 1) translateY(var(--fa-bounce-rebound, -0.125em));\n transform: scale(1, 1) translateY(var(--fa-bounce-rebound, -0.125em));\n }\n 64% {\n -webkit-transform: scale(1, 1) translateY(0);\n transform: scale(1, 1) translateY(0);\n }\n 100% {\n -webkit-transform: scale(1, 1) translateY(0);\n transform: scale(1, 1) translateY(0);\n }\n}\n@-webkit-keyframes fa-fade {\n 50% {\n opacity: var(--fa-fade-opacity, 0.4);\n }\n}\n@keyframes fa-fade {\n 50% {\n opacity: var(--fa-fade-opacity, 0.4);\n }\n}\n@-webkit-keyframes fa-beat-fade {\n 0%, 100% {\n opacity: var(--fa-beat-fade-opacity, 0.4);\n -webkit-transform: scale(1);\n transform: scale(1);\n }\n 50% {\n opacity: 1;\n -webkit-transform: scale(var(--fa-beat-fade-scale, 1.125));\n transform: scale(var(--fa-beat-fade-scale, 1.125));\n }\n}\n@keyframes fa-beat-fade {\n 0%, 100% {\n opacity: var(--fa-beat-fade-opacity, 0.4);\n -webkit-transform: scale(1);\n transform: scale(1);\n }\n 50% {\n opacity: 1;\n -webkit-transform: scale(var(--fa-beat-fade-scale, 1.125));\n transform: scale(var(--fa-beat-fade-scale, 1.125));\n }\n}\n@-webkit-keyframes fa-flip {\n 50% {\n -webkit-transform: rotate3d(var(--fa-flip-x, 0), var(--fa-flip-y, 1), var(--fa-flip-z, 0), var(--fa-flip-angle, -180deg));\n transform: rotate3d(var(--fa-flip-x, 0), var(--fa-flip-y, 1), var(--fa-flip-z, 0), var(--fa-flip-angle, -180deg));\n }\n}\n@keyframes fa-flip {\n 50% {\n -webkit-transform: rotate3d(var(--fa-flip-x, 0), var(--fa-flip-y, 1), var(--fa-flip-z, 0), var(--fa-flip-angle, -180deg));\n transform: rotate3d(var(--fa-flip-x, 0), var(--fa-flip-y, 1), var(--fa-flip-z, 0), var(--fa-flip-angle, -180deg));\n }\n}\n@-webkit-keyframes fa-shake {\n 0% {\n -webkit-transform: rotate(-15deg);\n transform: rotate(-15deg);\n }\n 4% {\n -webkit-transform: rotate(15deg);\n transform: rotate(15deg);\n }\n 8%, 24% {\n -webkit-transform: rotate(-18deg);\n transform: rotate(-18deg);\n }\n 12%, 28% {\n -webkit-transform: rotate(18deg);\n transform: rotate(18deg);\n }\n 16% {\n -webkit-transform: rotate(-22deg);\n transform: rotate(-22deg);\n }\n 20% {\n -webkit-transform: rotate(22deg);\n transform: rotate(22deg);\n }\n 32% {\n -webkit-transform: rotate(-12deg);\n transform: rotate(-12deg);\n }\n 36% {\n -webkit-transform: rotate(12deg);\n transform: rotate(12deg);\n }\n 40%, 100% {\n -webkit-transform: rotate(0deg);\n transform: rotate(0deg);\n }\n}\n@keyframes fa-shake {\n 0% {\n -webkit-transform: rotate(-15deg);\n transform: rotate(-15deg);\n }\n 4% {\n -webkit-transform: rotate(15deg);\n transform: rotate(15deg);\n }\n 8%, 24% {\n -webkit-transform: rotate(-18deg);\n transform: 
rotate(-18deg);\n }\n 12%, 28% {\n -webkit-transform: rotate(18deg);\n transform: rotate(18deg);\n }\n 16% {\n -webkit-transform: rotate(-22deg);\n transform: rotate(-22deg);\n }\n 20% {\n -webkit-transform: rotate(22deg);\n transform: rotate(22deg);\n }\n 32% {\n -webkit-transform: rotate(-12deg);\n transform: rotate(-12deg);\n }\n 36% {\n -webkit-transform: rotate(12deg);\n transform: rotate(12deg);\n }\n 40%, 100% {\n -webkit-transform: rotate(0deg);\n transform: rotate(0deg);\n }\n}\n@-webkit-keyframes fa-spin {\n 0% {\n -webkit-transform: rotate(0deg);\n transform: rotate(0deg);\n }\n 100% {\n -webkit-transform: rotate(360deg);\n transform: rotate(360deg);\n }\n}\n@keyframes fa-spin {\n 0% {\n -webkit-transform: rotate(0deg);\n transform: rotate(0deg);\n }\n 100% {\n -webkit-transform: rotate(360deg);\n transform: rotate(360deg);\n }\n}\n.fa-rotate-90 {\n -webkit-transform: rotate(90deg);\n transform: rotate(90deg);\n}\n\n.fa-rotate-180 {\n -webkit-transform: rotate(180deg);\n transform: rotate(180deg);\n}\n\n.fa-rotate-270 {\n -webkit-transform: rotate(270deg);\n transform: rotate(270deg);\n}\n\n.fa-flip-horizontal {\n -webkit-transform: scale(-1, 1);\n transform: scale(-1, 1);\n}\n\n.fa-flip-vertical {\n -webkit-transform: scale(1, -1);\n transform: scale(1, -1);\n}\n\n.fa-flip-both,\n.fa-flip-horizontal.fa-flip-vertical {\n -webkit-transform: scale(-1, -1);\n transform: scale(-1, -1);\n}\n\n.fa-rotate-by {\n -webkit-transform: rotate(var(--fa-rotate-angle, none));\n transform: rotate(var(--fa-rotate-angle, none));\n}\n\n.fa-stack {\n display: inline-block;\n vertical-align: middle;\n height: 2em;\n position: relative;\n width: 2.5em;\n}\n\n.fa-stack-1x,\n.fa-stack-2x {\n bottom: 0;\n left: 0;\n margin: auto;\n position: absolute;\n right: 0;\n top: 0;\n z-index: var(--fa-stack-z-index, auto);\n}\n\n.svg-inline--fa.fa-stack-1x {\n height: 1em;\n width: 1.25em;\n}\n.svg-inline--fa.fa-stack-2x {\n height: 2em;\n width: 2.5em;\n}\n\n.fa-inverse {\n color: var(--fa-inverse, #fff);\n}\n\n.sr-only,\n.fa-sr-only {\n position: absolute;\n width: 1px;\n height: 1px;\n padding: 0;\n margin: -1px;\n overflow: hidden;\n clip: rect(0, 0, 0, 0);\n white-space: nowrap;\n border-width: 0;\n}\n\n.sr-only-focusable:not(:focus),\n.fa-sr-only-focusable:not(:focus) {\n position: absolute;\n width: 1px;\n height: 1px;\n padding: 0;\n margin: -1px;\n overflow: hidden;\n clip: rect(0, 0, 0, 0);\n white-space: nowrap;\n border-width: 0;\n}\n\n.svg-inline--fa .fa-primary {\n fill: var(--fa-primary-color, currentColor);\n opacity: var(--fa-primary-opacity, 1);\n}\n\n.svg-inline--fa .fa-secondary {\n fill: var(--fa-secondary-color, currentColor);\n opacity: var(--fa-secondary-opacity, 0.4);\n}\n\n.svg-inline--fa.fa-swap-opacity .fa-primary {\n opacity: var(--fa-secondary-opacity, 0.4);\n}\n\n.svg-inline--fa.fa-swap-opacity .fa-secondary {\n opacity: var(--fa-primary-opacity, 1);\n}\n\n.svg-inline--fa mask .fa-primary,\n.svg-inline--fa mask .fa-secondary {\n fill: black;\n}\n\n.fad.fa-inverse,\n.fa-duotone.fa-inverse {\n color: var(--fa-inverse, #fff);\n}';function ke(){var e=N,t=A,n=ce.cssPrefix,r=ce.replacementClass,a=xe;if(n!==e||r!==t){var o=new RegExp("\\.".concat(e,"\\-"),"g"),i=new RegExp("\\--".concat(e,"\\-"),"g"),s=new RegExp("\\.".concat(t),"g");a=a.replace(o,".".concat(n,"-")).replace(i,"--".concat(n,"-")).replace(s,".".concat(r))}return a}var De=!1;function Ce(){ce.autoAddCss&&!De&&(!function(e){if(e&&O){var t=E.createElement("style");t.setAttribute("type","text/css"),t.innerHTML=e;for(var 
n=E.head.childNodes,r=null,a=n.length-1;a>-1;a--){var o=n[a],i=(o.tagName||"").toUpperCase();["STYLE","LINK"].indexOf(i)>-1&&(r=o)}E.head.insertBefore(t,r)}}(ke()),De=!0)}var Ee={mixout:function(){return{dom:{css:ke,insertCss:Ce}}},hooks:function(){return{beforeDOMElementCreation:function(){Ce()},beforeI2svg:function(){Ce()}}}},Se=C||{};Se[P]||(Se[P]={}),Se[P].styles||(Se[P].styles={}),Se[P].hooks||(Se[P].hooks={}),Se[P].shims||(Se[P].shims=[]);var _e=Se[P],Oe=[],Me=!1;function Pe(e){var t=e.tag,n=e.attributes,r=void 0===n?{}:n,a=e.children,o=void 0===a?[]:a;return"string"===typeof e?be(e):"<".concat(t," ").concat(function(e){return Object.keys(e||{}).reduce((function(t,n){return t+"".concat(n,'="').concat(be(e[n]),'" ')}),"").trim()}(r),">").concat(o.map(Pe).join(""),"")}function Te(e,t,n){if(e&&e[t]&&e[t][n])return{prefix:t,iconName:n,icon:e[t][n]}}O&&((Me=(E.documentElement.doScroll?/^loaded|^c/:/^loaded|^i|^c/).test(E.readyState))||E.addEventListener("DOMContentLoaded",(function e(){E.removeEventListener("DOMContentLoaded",e),Me=1,Oe.map((function(e){return e()}))})));var Ne=function(e,t,n,r){var a,o,i,s=Object.keys(e),l=s.length,u=void 0!==r?function(e,t){return function(n,r,a,o){return e.call(t,n,r,a,o)}}(t,r):t;for(void 0===n?(a=1,i=e[s[0]]):(a=0,i=n);a=55296&&a<=56319&&n2&&void 0!==arguments[2]?arguments[2]:{}).skipHooks,r=void 0!==n&&n,o=Ze(t);"function"!==typeof _e.hooks.addPack||r?_e.styles[e]=a(a({},_e.styles[e]||{}),o):_e.hooks.addPack(e,Ze(t)),"fas"===e&&Re("fa",t)}var je,Le,Fe,Ie=_e.styles,Ye=_e.shims,Be=(s(je={},z,Object.values(Q[z])),s(je,U,Object.values(Q[U])),je),He=null,ze={},Ue={},We={},qe={},Ke={},Ve=(s(Le={},z,Object.keys(K[z])),s(Le,U,Object.keys(K[U])),Le);function Qe(e,t){var n,r=t.split("-"),a=r[0],o=r.slice(1).join("-");return a!==e||""===o||(n=o,~ie.indexOf(n))?null:o}var Ge,Xe=function(){var e=function(e){return Ne(Ie,(function(t,n,r){return t[r]=Ne(n,e,{}),t}),{})};ze=e((function(e,t,n){(t[3]&&(e[t[3]]=n),t[2])&&t[2].filter((function(e){return"number"===typeof e})).forEach((function(t){e[t.toString(16)]=n}));return e})),Ue=e((function(e,t,n){(e[n]=n,t[2])&&t[2].filter((function(e){return"string"===typeof e})).forEach((function(t){e[t]=n}));return e})),Ke=e((function(e,t,n){var r=t[2];return e[n]=n,r.forEach((function(t){e[t]=n})),e}));var t="far"in Ie||ce.autoFetchSvg,n=Ne(Ye,(function(e,n){var r=n[0],a=n[1],o=n[2];return"far"!==a||t||(a="fas"),"string"===typeof r&&(e.names[r]={prefix:a,iconName:o}),"number"===typeof r&&(e.unicodes[r.toString(16)]={prefix:a,iconName:o}),e}),{names:{},unicodes:{}});We=n.names,qe=n.unicodes,He=rt(ce.styleDefault,{family:ce.familyDefault})};function $e(e,t){return(ze[e]||{})[t]}function Je(e,t){return(Ke[e]||{})[t]}function et(e){return We[e]||{prefix:null,iconName:null}}function tt(){return He}Ge=function(e){He=rt(e.styleDefault,{family:ce.familyDefault})},fe.push(Ge),Xe();var nt=function(){return{prefix:null,iconName:null,rest:[]}};function rt(e){var t=(arguments.length>1&&void 0!==arguments[1]?arguments[1]:{}).family,n=void 0===t?z:t,r=K[n][e],a=V[n][e]||V[n][r],o=e in _e.styles?e:null;return a||o||null}var at=(s(Fe={},z,Object.keys(Q[z])),s(Fe,U,Object.keys(Q[U])),Fe);function ot(e){var t,n=(arguments.length>1&&void 0!==arguments[1]?arguments[1]:{}).skipLookups,r=void 0!==n&&n,a=(s(t={},z,"".concat(ce.cssPrefix,"-").concat(z)),s(t,U,"".concat(ce.cssPrefix,"-").concat(U)),t),o=null,i=z;(e.includes(a[z])||e.some((function(e){return at[z].includes(e)})))&&(i=z),(e.includes(a[U])||e.some((function(e){return 
at[U].includes(e)})))&&(i=U);var l=e.reduce((function(e,t){var n=Qe(ce.cssPrefix,t);if(Ie[t]?(t=Be[i].includes(t)?G[i][t]:t,o=t,e.prefix=t):Ve[i].indexOf(t)>-1?(o=t,e.prefix=rt(t,{family:i})):n?e.iconName=n:t!==ce.replacementClass&&t!==a[z]&&t!==a[U]&&e.rest.push(t),!r&&e.prefix&&e.iconName){var s="fa"===o?et(e.iconName):{},l=Je(e.prefix,e.iconName);s.prefix&&(o=null),e.iconName=s.iconName||l||e.iconName,e.prefix=s.prefix||e.prefix,"far"!==e.prefix||Ie.far||!Ie.fas||ce.autoFetchSvg||(e.prefix="fas")}return e}),nt());return(e.includes("fa-brands")||e.includes("fab"))&&(l.prefix="fab"),(e.includes("fa-duotone")||e.includes("fad"))&&(l.prefix="fad"),l.prefix||i!==U||!Ie.fass&&!ce.autoFetchSvg||(l.prefix="fass",l.iconName=Je(l.prefix,l.iconName)||l.iconName),"fa"!==l.prefix&&"fa"!==o||(l.prefix=tt()||"fas"),l}var it=function(){function e(){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,e),this.definitions={}}var t,n,r;return t=e,n=[{key:"add",value:function(){for(var e=this,t=arguments.length,n=new Array(t),r=0;r0&&s.forEach((function(t){"string"===typeof t&&(e[a][t]=i)})),e[a][o]=i})),e}}],n&&i(t.prototype,n),r&&i(t,r),Object.defineProperty(t,"prototype",{writable:!1}),e}(),st=[],lt={},ut={},ct=Object.keys(ut);function ft(e,t){for(var n=arguments.length,r=new Array(n>2?n-2:0),a=2;a1?t-1:0),r=1;r0&&void 0!==arguments[0]?arguments[0]:{};return O?(pt("beforeI2svg",e),dt("pseudoElements2svg",e),dt("i2svg",e)):Promise.reject("Operation requires a DOM of some kind.")},watch:function(){var e,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},n=t.autoReplaceSvgRoot;!1===ce.autoReplaceSvg&&(ce.autoReplaceSvg=!0),ce.observeMutations=!0,e=function(){bt({autoReplaceSvgRoot:n}),pt("watch",t)},O&&(Me?setTimeout(e,0):Oe.push(e))}},gt={noAuto:function(){ce.autoReplaceSvg=!1,ce.observeMutations=!1,pt("noAuto")},config:ce,dom:mt,parse:{icon:function(e){if(null===e)return null;if("object"===o(e)&&e.prefix&&e.iconName)return{prefix:e.prefix,iconName:Je(e.prefix,e.iconName)||e.iconName};if(Array.isArray(e)&&2===e.length){var t=0===e[1].indexOf("fa-")?e[1].slice(3):e[1],n=rt(e[0]);return{prefix:n,iconName:Je(n,t)||t}}if("string"===typeof e&&(e.indexOf("".concat(ce.cssPrefix,"-"))>-1||e.match(X))){var r=ot(e.split(" "),{skipLookups:!0});return{prefix:r.prefix||tt(),iconName:Je(r.prefix,r.iconName)||r.iconName}}if("string"===typeof e){var a=tt();return{prefix:a,iconName:Je(a,e)||e}}}},library:vt,findIconDefinition:ht,toHtml:Pe},bt=function(){var e=(arguments.length>0&&void 0!==arguments[0]?arguments[0]:{}).autoReplaceSvgRoot,t=void 0===e?E:e;(Object.keys(_e.styles).length>0||ce.autoFetchSvg)&&O&&ce.autoReplaceSvg&>.dom.i2svg({node:t})};function yt(e,t){return Object.defineProperty(e,"abstract",{get:t}),Object.defineProperty(e,"html",{get:function(){return e.abstract.map((function(e){return Pe(e)}))}}),Object.defineProperty(e,"node",{get:function(){if(O){var t=E.createElement("div");return t.innerHTML=e.html,t.children}}}),e}function wt(e){var t=e.icons,n=t.main,r=t.mask,o=e.prefix,i=e.iconName,s=e.transform,l=e.symbol,u=e.title,c=e.maskId,f=e.titleId,p=e.extra,d=e.watchable,h=void 0!==d&&d,v=r.found?r:n,m=v.width,g=v.height,b="fak"===o,y=[ce.replacementClass,i?"".concat(ce.cssPrefix,"-").concat(i):""].filter((function(e){return-1===p.classes.indexOf(e)})).filter((function(e){return""!==e||!!e})).concat(p.classes).join(" 
"),w={children:[],attributes:a(a({},p.attributes),{},{"data-prefix":o,"data-icon":i,class:y,role:p.attributes.role||"img",xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 ".concat(m," ").concat(g)})},x=b&&!~p.classes.indexOf("fa-fw")?{width:"".concat(m/g*16*.0625,"em")}:{};h&&(w.attributes[Z]=""),u&&(w.children.push({tag:"title",attributes:{id:w.attributes["aria-labelledby"]||"title-".concat(f||ve())},children:[u]}),delete w.attributes.title);var k=a(a({},w),{},{prefix:o,iconName:i,main:n,mask:r,maskId:c,transform:s,symbol:l,styles:a(a({},x),p.styles)}),D=r.found&&n.found?dt("generateAbstractMask",k)||{children:[],attributes:{}}:dt("generateAbstractIcon",k)||{children:[],attributes:{}},C=D.children,E=D.attributes;return k.children=C,k.attributes=E,l?function(e){var t=e.prefix,n=e.iconName,r=e.children,o=e.attributes,i=e.symbol,s=!0===i?"".concat(t,"-").concat(ce.cssPrefix,"-").concat(n):i;return[{tag:"svg",attributes:{style:"display: none;"},children:[{tag:"symbol",attributes:a(a({},o),{},{id:s}),children:r}]}]}(k):function(e){var t=e.children,n=e.main,r=e.mask,o=e.attributes,i=e.styles,s=e.transform;if(we(s)&&n.found&&!r.found){var l={x:n.width/n.height/2,y:.5};o.style=ye(a(a({},i),{},{"transform-origin":"".concat(l.x+s.x/16,"em ").concat(l.y+s.y/16,"em")}))}return[{tag:"svg",attributes:o,children:t}]}(k)}function xt(e){var t=e.content,n=e.width,r=e.height,o=e.transform,i=e.title,s=e.extra,l=e.watchable,u=void 0!==l&&l,c=a(a(a({},s.attributes),i?{title:i}:{}),{},{class:s.classes.join(" ")});u&&(c[Z]="");var f=a({},s.styles);we(o)&&(f.transform=function(e){var t=e.transform,n=e.width,r=void 0===n?T:n,a=e.height,o=void 0===a?T:a,i=e.startCentered,s=void 0!==i&&i,l="";return l+=s&&M?"translate(".concat(t.x/pe-r/2,"em, ").concat(t.y/pe-o/2,"em) "):s?"translate(calc(-50% + ".concat(t.x/pe,"em), calc(-50% + ").concat(t.y/pe,"em)) "):"translate(".concat(t.x/pe,"em, ").concat(t.y/pe,"em) "),l+="scale(".concat(t.size/pe*(t.flipX?-1:1),", ").concat(t.size/pe*(t.flipY?-1:1),") "),l+"rotate(".concat(t.rotate,"deg) ")}({transform:o,startCentered:!0,width:n,height:r}),f["-webkit-transform"]=f.transform);var p=ye(f);p.length>0&&(c.style=p);var d=[];return d.push({tag:"span",attributes:c,children:[t]}),i&&d.push({tag:"span",attributes:{class:"sr-only"},children:[i]}),d}var kt=_e.styles;function Dt(e){var t=e[0],n=e[1],r=l(e.slice(4),1)[0];return{found:!0,width:t,height:n,icon:Array.isArray(r)?{tag:"g",attributes:{class:"".concat(ce.cssPrefix,"-").concat(ae.GROUP)},children:[{tag:"path",attributes:{class:"".concat(ce.cssPrefix,"-").concat(ae.SECONDARY),fill:"currentColor",d:r[0]}},{tag:"path",attributes:{class:"".concat(ce.cssPrefix,"-").concat(ae.PRIMARY),fill:"currentColor",d:r[1]}}]}:{tag:"path",attributes:{fill:"currentColor",d:r}}}}var Ct={found:!1,width:512,height:512};function Et(e,t){var n=t;return"fa"===t&&null!==ce.styleDefault&&(t=tt()),new Promise((function(r,o){dt("missingIconAbstract");if("fa"===n){var i=et(e)||{};e=i.iconName||e,t=i.prefix||t}if(e&&t&&kt[t]&&kt[t][e])return r(Dt(kt[t][e]));!function(e,t){H||ce.showMissingIcons||!e||console.error('Icon with name "'.concat(e,'" and prefix "').concat(t,'" is missing.'))}(e,t),r(a(a({},Ct),{},{icon:ce.showMissingIcons&&e&&dt("missingIconAbstract")||{}}))}))}var St=function(){},_t=ce.measurePerformance&&_&&_.mark&&_.measure?_:{mark:St,measure:St},Ot='FA "6.4.2"',Mt=function(e){_t.mark("".concat(Ot," ").concat(e," ends")),_t.measure("".concat(Ot," ").concat(e),"".concat(Ot," ").concat(e," begins"),"".concat(Ot," ").concat(e," 
ends"))},Pt={begin:function(e){return _t.mark("".concat(Ot," ").concat(e," begins")),function(){return Mt(e)}},end:Mt},Tt=function(){};function Nt(e){return"string"===typeof(e.getAttribute?e.getAttribute(Z):null)}function At(e){return E.createElementNS("http://www.w3.org/2000/svg",e)}function Zt(e){return E.createElement(e)}function Rt(e){var t=(arguments.length>1&&void 0!==arguments[1]?arguments[1]:{}).ceFn,n=void 0===t?"svg"===e.tag?At:Zt:t;if("string"===typeof e)return E.createTextNode(e);var r=n(e.tag);return Object.keys(e.attributes||[]).forEach((function(t){r.setAttribute(t,e.attributes[t])})),(e.children||[]).forEach((function(e){r.appendChild(Rt(e,{ceFn:n}))})),r}var jt={replace:function(e){var t=e[0];if(t.parentNode)if(e[1].forEach((function(e){t.parentNode.insertBefore(Rt(e),t)})),null===t.getAttribute(Z)&&ce.keepOriginalSource){var n=E.createComment(function(e){var t=" ".concat(e.outerHTML," ");return"".concat(t,"Font Awesome fontawesome.com ")}(t));t.parentNode.replaceChild(n,t)}else t.remove()},nest:function(e){var t=e[0],n=e[1];if(~ge(t).indexOf(ce.replacementClass))return jt.replace(e);var r=new RegExp("".concat(ce.cssPrefix,"-.*"));if(delete n[0].attributes.id,n[0].attributes.class){var a=n[0].attributes.class.split(" ").reduce((function(e,t){return t===ce.replacementClass||t.match(r)?e.toSvg.push(t):e.toNode.push(t),e}),{toNode:[],toSvg:[]});n[0].attributes.class=a.toSvg.join(" "),0===a.toNode.length?t.removeAttribute("class"):t.setAttribute("class",a.toNode.join(" "))}var o=n.map((function(e){return Pe(e)})).join("\n");t.setAttribute(Z,""),t.innerHTML=o}};function Lt(e){e()}function Ft(e,t){var n="function"===typeof t?t:Tt;if(0===e.length)n();else{var r=Lt;ce.mutateApproach===Y&&(r=C.requestAnimationFrame||Lt),r((function(){var t=!0===ce.autoReplaceSvg?jt.replace:jt[ce.autoReplaceSvg]||jt.replace,r=Pt.begin("mutate");e.map(t),r(),n()}))}}var It=!1;function Yt(){It=!0}function Bt(){It=!1}var Ht=null;function zt(e){if(S&&ce.observeMutations){var t=e.treeCallback,n=void 0===t?Tt:t,r=e.nodeCallback,a=void 0===r?Tt:r,o=e.pseudoElementsCallback,i=void 0===o?Tt:o,s=e.observeMutationsRoot,l=void 0===s?E:s;Ht=new S((function(e){if(!It){var t=tt();me(e).forEach((function(e){if("childList"===e.type&&e.addedNodes.length>0&&!Nt(e.addedNodes[0])&&(ce.searchPseudoElements&&i(e.target),n(e.target)),"attributes"===e.type&&e.target.parentNode&&ce.searchPseudoElements&&i(e.target.parentNode),"attributes"===e.type&&Nt(e.target)&&~re.indexOf(e.attributeName))if("class"===e.attributeName&&function(e){var t=e.getAttribute?e.getAttribute(L):null,n=e.getAttribute?e.getAttribute(F):null;return t&&n}(e.target)){var r=ot(ge(e.target)),o=r.prefix,s=r.iconName;e.target.setAttribute(L,o||t),s&&e.target.setAttribute(F,s)}else(l=e.target)&&l.classList&&l.classList.contains&&l.classList.contains(ce.replacementClass)&&a(e.target);var l}))}})),O&&Ht.observe(l,{childList:!0,attributes:!0,characterData:!0,subtree:!0})}}function Ut(e){var t,n,r=e.getAttribute("data-prefix"),a=e.getAttribute("data-icon"),o=void 0!==e.innerText?e.innerText.trim():"",i=ot(ge(e));return i.prefix||(i.prefix=tt()),r&&a&&(i.prefix=r,i.iconName=a),i.iconName&&i.prefix||(i.prefix&&o.length>0&&(i.iconName=(t=i.prefix,n=e.innerText,(Ue[t]||{})[n]||$e(i.prefix,Ae(e.innerText)))),!i.iconName&&ce.autoFetchSvg&&e.firstChild&&e.firstChild.nodeType===Node.TEXT_NODE&&(i.iconName=e.firstChild.data)),i}function Wt(e){var t=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:{styleParser:!0},n=Ut(e),r=n.iconName,o=n.prefix,i=n.rest,s=function(e){var t=me(e.attributes).reduce((function(e,t){return"class"!==e.name&&"style"!==e.name&&(e[t.name]=t.value),e}),{}),n=e.getAttribute("title"),r=e.getAttribute("data-fa-title-id");return ce.autoA11y&&(n?t["aria-labelledby"]="".concat(ce.replacementClass,"-title-").concat(r||ve()):(t["aria-hidden"]="true",t.focusable="false")),t}(e),l=ft("parseNodeAttributes",{},e),u=t.styleParser?function(e){var t=e.getAttribute("style"),n=[];return t&&(n=t.split(";").reduce((function(e,t){var n=t.split(":"),r=n[0],a=n.slice(1);return r&&a.length>0&&(e[r]=a.join(":").trim()),e}),{})),n}(e):[];return a({iconName:r,title:e.getAttribute("title"),titleId:e.getAttribute("data-fa-title-id"),prefix:o,transform:de,mask:{iconName:null,prefix:null,rest:[]},maskId:null,symbol:!1,extra:{classes:i,styles:u,attributes:s}},l)}var qt=_e.styles;function Kt(e){var t="nest"===ce.autoReplaceSvg?Wt(e,{styleParser:!1}):Wt(e);return~t.extra.classes.indexOf($)?dt("generateLayersText",e,t):dt("generateSvgReplacementMutation",e,t)}var Vt=new Set;function Qt(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:null;if(!O)return Promise.resolve();var n=E.documentElement.classList,r=function(e){return n.add("".concat(I,"-").concat(e))},a=function(e){return n.remove("".concat(I,"-").concat(e))},o=ce.autoFetchSvg?Vt:W.map((function(e){return"fa-".concat(e)})).concat(Object.keys(qt));o.includes("fa")||o.push("fa");var i=[".".concat($,":not([").concat(Z,"])")].concat(o.map((function(e){return".".concat(e,":not([").concat(Z,"])")}))).join(", ");if(0===i.length)return Promise.resolve();var s=[];try{s=me(e.querySelectorAll(i))}catch(Rn){}if(!(s.length>0))return Promise.resolve();r("pending"),a("complete");var l=Pt.begin("onTree"),u=s.reduce((function(e,t){try{var n=Kt(t);n&&e.push(n)}catch(Rn){H||"MissingIcon"===Rn.name&&console.error(Rn)}return e}),[]);return new Promise((function(e,n){Promise.all(u).then((function(n){Ft(n,(function(){r("active"),r("complete"),a("pending"),"function"===typeof t&&t(),l(),e()}))})).catch((function(e){l(),n(e)}))}))}function Gt(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:null;Kt(e).then((function(e){e&&Ft([e],t)}))}W.map((function(e){Vt.add("fa-".concat(e))})),Object.keys(K[z]).map(Vt.add.bind(Vt)),Object.keys(K[U]).map(Vt.add.bind(Vt)),Vt=u(Vt);var Xt=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.transform,r=void 0===n?de:n,o=t.symbol,i=void 0!==o&&o,s=t.mask,l=void 0===s?null:s,u=t.maskId,c=void 0===u?null:u,f=t.title,p=void 0===f?null:f,d=t.titleId,h=void 0===d?null:d,v=t.classes,m=void 0===v?[]:v,g=t.attributes,b=void 0===g?{}:g,y=t.styles,w=void 0===y?{}:y;if(e){var x=e.prefix,k=e.iconName,D=e.icon;return yt(a({type:"icon"},e),(function(){return pt("beforeDOMElementCreation",{iconDefinition:e,params:t}),ce.autoA11y&&(p?b["aria-labelledby"]="".concat(ce.replacementClass,"-title-").concat(h||ve()):(b["aria-hidden"]="true",b.focusable="false")),wt({icons:{main:Dt(D),mask:l?Dt(l.icon):{found:!1,width:null,height:null,icon:{}}},prefix:x,iconName:k,transform:a(a({},de),r),symbol:i,title:p,maskId:c,titleId:h,extra:{attributes:b,styles:w,classes:m}})}))}},$t={mixout:function(){return{icon:(e=Xt,function(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=(t||{}).icon?t:ht(t||{}),o=n.mask;return o&&(o=(o||{}).icon?o:ht(o||{})),e(r,a(a({},n),{},{mask:o}))})};var e},hooks:function(){return{mutationObserverCallbacks:function(e){return 
e.treeCallback=Qt,e.nodeCallback=Gt,e}}},provides:function(e){e.i2svg=function(e){var t=e.node,n=void 0===t?E:t,r=e.callback;return Qt(n,void 0===r?function(){}:r)},e.generateSvgReplacementMutation=function(e,t){var n=t.iconName,r=t.title,a=t.titleId,o=t.prefix,i=t.transform,s=t.symbol,u=t.mask,c=t.maskId,f=t.extra;return new Promise((function(t,p){Promise.all([Et(n,o),u.iconName?Et(u.iconName,u.prefix):Promise.resolve({found:!1,width:512,height:512,icon:{}})]).then((function(u){var p=l(u,2),d=p[0],h=p[1];t([e,wt({icons:{main:d,mask:h},prefix:o,iconName:n,transform:i,symbol:s,maskId:c,title:r,titleId:a,extra:f,watchable:!0})])})).catch(p)}))},e.generateAbstractIcon=function(e){var t,n=e.children,r=e.attributes,a=e.main,o=e.transform,i=ye(e.styles);return i.length>0&&(r.style=i),we(o)&&(t=dt("generateAbstractTransformGrouping",{main:a,transform:o,containerWidth:a.width,iconWidth:a.width})),n.push(t||a.icon),{children:n,attributes:r}}}},Jt={mixout:function(){return{layer:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.classes,r=void 0===n?[]:n;return yt({type:"layer"},(function(){pt("beforeDOMElementCreation",{assembler:e,params:t});var n=[];return e((function(e){Array.isArray(e)?e.map((function(e){n=n.concat(e.abstract)})):n=n.concat(e.abstract)})),[{tag:"span",attributes:{class:["".concat(ce.cssPrefix,"-layers")].concat(u(r)).join(" ")},children:n}]}))}}}},en={mixout:function(){return{counter:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.title,r=void 0===n?null:n,o=t.classes,i=void 0===o?[]:o,s=t.attributes,l=void 0===s?{}:s,c=t.styles,f=void 0===c?{}:c;return yt({type:"counter",content:e},(function(){return pt("beforeDOMElementCreation",{content:e,params:t}),function(e){var t=e.content,n=e.title,r=e.extra,o=a(a(a({},r.attributes),n?{title:n}:{}),{},{class:r.classes.join(" ")}),i=ye(r.styles);i.length>0&&(o.style=i);var s=[];return s.push({tag:"span",attributes:o,children:[t]}),n&&s.push({tag:"span",attributes:{class:"sr-only"},children:[n]}),s}({content:e.toString(),title:r,extra:{attributes:l,styles:f,classes:["".concat(ce.cssPrefix,"-layers-counter")].concat(u(i))}})}))}}}},tn={mixout:function(){return{text:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.transform,r=void 0===n?de:n,o=t.title,i=void 0===o?null:o,s=t.classes,l=void 0===s?[]:s,c=t.attributes,f=void 0===c?{}:c,p=t.styles,d=void 0===p?{}:p;return yt({type:"text",content:e},(function(){return pt("beforeDOMElementCreation",{content:e,params:t}),xt({content:e,transform:a(a({},de),r),title:i,extra:{attributes:f,styles:d,classes:["".concat(ce.cssPrefix,"-layers-text")].concat(u(l))}})}))}}},provides:function(e){e.generateLayersText=function(e,t){var n=t.title,r=t.transform,a=t.extra,o=null,i=null;if(M){var s=parseInt(getComputedStyle(e).fontSize,10),l=e.getBoundingClientRect();o=l.width/s,i=l.height/s}return ce.autoA11y&&!n&&(a.attributes["aria-hidden"]="true"),Promise.resolve([e,xt({content:e.innerHTML,width:o,height:i,transform:r,title:n,extra:a,watchable:!0})])}}},nn=new RegExp('"',"ug"),rn=[1105920,1112319];function an(e,t){var n="".concat(j).concat(t.replace(":","-"));return new Promise((function(r,o){if(null!==e.getAttribute(n))return r();var i=me(e.children).filter((function(e){return e.getAttribute(R)===t}))[0],s=C.getComputedStyle(e,t),l=s.getPropertyValue("font-family").match(J),u=s.getPropertyValue("font-weight"),c=s.getPropertyValue("content");if(i&&!l)return e.removeChild(i),r();if(l&&"none"!==c&&""!==c){var 
f=s.getPropertyValue("content"),p=~["Sharp"].indexOf(l[2])?U:z,d=~["Solid","Regular","Light","Thin","Duotone","Brands","Kit"].indexOf(l[2])?V[p][l[2].toLowerCase()]:ee[p][u],h=function(e){var t=e.replace(nn,""),n=function(e,t){var n,r=e.length,a=e.charCodeAt(t);return a>=55296&&a<=56319&&r>t+1&&(n=e.charCodeAt(t+1))>=56320&&n<=57343?1024*(a-55296)+n-56320+65536:a}(t,0),r=n>=rn[0]&&n<=rn[1],a=2===t.length&&t[0]===t[1];return{value:Ae(a?t[0]:t),isSecondary:r||a}}(f),v=h.value,m=h.isSecondary,g=l[0].startsWith("FontAwesome"),b=$e(d,v),y=b;if(g){var w=function(e){var t=qe[e],n=$e("fas",e);return t||(n?{prefix:"fas",iconName:n}:null)||{prefix:null,iconName:null}}(v);w.iconName&&w.prefix&&(b=w.iconName,d=w.prefix)}if(!b||m||i&&i.getAttribute(L)===d&&i.getAttribute(F)===y)r();else{e.setAttribute(n,y),i&&e.removeChild(i);var x={iconName:null,title:null,titleId:null,prefix:null,transform:de,symbol:!1,mask:{iconName:null,prefix:null,rest:[]},maskId:null,extra:{classes:[],styles:{},attributes:{}}},k=x.extra;k.attributes[R]=t,Et(b,d).then((function(o){var i=wt(a(a({},x),{},{icons:{main:o,mask:nt()},prefix:d,iconName:y,extra:k,watchable:!0})),s=E.createElementNS("http://www.w3.org/2000/svg","svg");"::before"===t?e.insertBefore(s,e.firstChild):e.appendChild(s),s.outerHTML=i.map((function(e){return Pe(e)})).join("\n"),e.removeAttribute(n),r()})).catch(o)}}else r()}))}function on(e){return Promise.all([an(e,"::before"),an(e,"::after")])}function sn(e){return e.parentNode!==document.head&&!~B.indexOf(e.tagName.toUpperCase())&&!e.getAttribute(R)&&(!e.parentNode||"svg"!==e.parentNode.tagName)}function ln(e){if(O)return new Promise((function(t,n){var r=me(e.querySelectorAll("*")).filter(sn).map(on),a=Pt.begin("searchPseudoElements");Yt(),Promise.all(r).then((function(){a(),Bt(),t()})).catch((function(){a(),Bt(),n()}))}))}var un=!1,cn=function(e){return e.toLowerCase().split(" ").reduce((function(e,t){var n=t.toLowerCase().split("-"),r=n[0],a=n.slice(1).join("-");if(r&&"h"===a)return e.flipX=!0,e;if(r&&"v"===a)return e.flipY=!0,e;if(a=parseFloat(a),isNaN(a))return e;switch(r){case"grow":e.size=e.size+a;break;case"shrink":e.size=e.size-a;break;case"left":e.x=e.x-a;break;case"right":e.x=e.x+a;break;case"up":e.y=e.y-a;break;case"down":e.y=e.y+a;break;case"rotate":e.rotate=e.rotate+a}return e}),{size:16,x:0,y:0,flipX:!1,flipY:!1,rotate:0})},fn={mixout:function(){return{parse:{transform:function(e){return cn(e)}}}},hooks:function(){return{parseNodeAttributes:function(e,t){var n=t.getAttribute("data-fa-transform");return n&&(e.transform=cn(n)),e}}},provides:function(e){e.generateAbstractTransformGrouping=function(e){var t=e.main,n=e.transform,r=e.containerWidth,o=e.iconWidth,i={transform:"translate(".concat(r/2," 256)")},s="translate(".concat(32*n.x,", ").concat(32*n.y,") "),l="scale(".concat(n.size/16*(n.flipX?-1:1),", ").concat(n.size/16*(n.flipY?-1:1),") "),u="rotate(".concat(n.rotate," 0 0)"),c={outer:i,inner:{transform:"".concat(s," ").concat(l," ").concat(u)},path:{transform:"translate(".concat(o/2*-1," -256)")}};return{tag:"g",attributes:a({},c.outer),children:[{tag:"g",attributes:a({},c.inner),children:[{tag:t.icon.tag,children:t.icon.children,attributes:a(a({},t.icon.attributes),c.path)}]}]}}}},pn={x:0,y:0,width:"100%",height:"100%"};function dn(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];return e.attributes&&(e.attributes.fill||t)&&(e.attributes.fill="black"),e}var hn={hooks:function(){return{parseNodeAttributes:function(e,t){var n=t.getAttribute("data-fa-mask"),r=n?ot(n.split(" 
").map((function(e){return e.trim()}))):nt();return r.prefix||(r.prefix=tt()),e.mask=r,e.maskId=t.getAttribute("data-fa-mask-id"),e}}},provides:function(e){e.generateAbstractMask=function(e){var t,n=e.children,r=e.attributes,o=e.main,i=e.mask,s=e.maskId,l=e.transform,u=o.width,c=o.icon,f=i.width,p=i.icon,d=function(e){var t=e.transform,n=e.containerWidth,r=e.iconWidth,a={transform:"translate(".concat(n/2," 256)")},o="translate(".concat(32*t.x,", ").concat(32*t.y,") "),i="scale(".concat(t.size/16*(t.flipX?-1:1),", ").concat(t.size/16*(t.flipY?-1:1),") "),s="rotate(".concat(t.rotate," 0 0)");return{outer:a,inner:{transform:"".concat(o," ").concat(i," ").concat(s)},path:{transform:"translate(".concat(r/2*-1," -256)")}}}({transform:l,containerWidth:f,iconWidth:u}),h={tag:"rect",attributes:a(a({},pn),{},{fill:"white"})},v=c.children?{children:c.children.map(dn)}:{},m={tag:"g",attributes:a({},d.inner),children:[dn(a({tag:c.tag,attributes:a(a({},c.attributes),d.path)},v))]},g={tag:"g",attributes:a({},d.outer),children:[m]},b="mask-".concat(s||ve()),y="clip-".concat(s||ve()),w={tag:"mask",attributes:a(a({},pn),{},{id:b,maskUnits:"userSpaceOnUse",maskContentUnits:"userSpaceOnUse"}),children:[h,g]},x={tag:"defs",children:[{tag:"clipPath",attributes:{id:y},children:(t=p,"g"===t.tag?t.children:[t])},w]};return n.push(x,{tag:"rect",attributes:a({fill:"currentColor","clip-path":"url(#".concat(y,")"),mask:"url(#".concat(b,")")},pn)}),{children:n,attributes:r}}}},vn={provides:function(e){var t=!1;C.matchMedia&&(t=C.matchMedia("(prefers-reduced-motion: reduce)").matches),e.missingIconAbstract=function(){var e=[],n={fill:"currentColor"},r={attributeType:"XML",repeatCount:"indefinite",dur:"2s"};e.push({tag:"path",attributes:a(a({},n),{},{d:"M156.5,447.7l-12.6,29.5c-18.7-9.5-35.9-21.2-51.5-34.9l22.7-22.7C127.6,430.5,141.5,440,156.5,447.7z M40.6,272H8.5 c1.4,21.2,5.4,41.7,11.7,61.1L50,321.2C45.1,305.5,41.8,289,40.6,272z M40.6,240c1.4-18.8,5.2-37,11.1-54.1l-29.5-12.6 C14.7,194.3,10,216.7,8.5,240H40.6z M64.3,156.5c7.8-14.9,17.2-28.8,28.1-41.5L69.7,92.3c-13.7,15.6-25.5,32.8-34.9,51.5 L64.3,156.5z M397,419.6c-13.9,12-29.4,22.3-46.1,30.4l11.9,29.8c20.7-9.9,39.8-22.6,56.9-37.6L397,419.6z M115,92.4 c13.9-12,29.4-22.3,46.1-30.4l-11.9-29.8c-20.7,9.9-39.8,22.6-56.8,37.6L115,92.4z M447.7,355.5c-7.8,14.9-17.2,28.8-28.1,41.5 l22.7,22.7c13.7-15.6,25.5-32.9,34.9-51.5L447.7,355.5z M471.4,272c-1.4,18.8-5.2,37-11.1,54.1l29.5,12.6 c7.5-21.1,12.2-43.5,13.6-66.8H471.4z M321.2,462c-15.7,5-32.2,8.2-49.2,9.4v32.1c21.2-1.4,41.7-5.4,61.1-11.7L321.2,462z M240,471.4c-18.8-1.4-37-5.2-54.1-11.1l-12.6,29.5c21.1,7.5,43.5,12.2,66.8,13.6V471.4z M462,190.8c5,15.7,8.2,32.2,9.4,49.2h32.1 c-1.4-21.2-5.4-41.7-11.7-61.1L462,190.8z M92.4,397c-12-13.9-22.3-29.4-30.4-46.1l-29.8,11.9c9.9,20.7,22.6,39.8,37.6,56.9 L92.4,397z M272,40.6c18.8,1.4,36.9,5.2,54.1,11.1l12.6-29.5C317.7,14.7,295.3,10,272,8.5V40.6z M190.8,50 c15.7-5,32.2-8.2,49.2-9.4V8.5c-21.2,1.4-41.7,5.4-61.1,11.7L190.8,50z M442.3,92.3L419.6,115c12,13.9,22.3,29.4,30.5,46.1 l29.8-11.9C470,128.5,457.3,109.4,442.3,92.3z M397,92.4l22.7-22.7c-15.6-13.7-32.8-25.5-51.5-34.9l-12.6,29.5 C370.4,72.1,384.4,81.5,397,92.4z"})});var o=a(a({},r),{},{attributeName:"opacity"}),i={tag:"circle",attributes:a(a({},n),{},{cx:"256",cy:"364",r:"28"}),children:[]};return 
t||i.children.push({tag:"animate",attributes:a(a({},r),{},{attributeName:"r",values:"28;14;28;28;14;28;"})},{tag:"animate",attributes:a(a({},o),{},{values:"1;0;1;1;0;1;"})}),e.push(i),e.push({tag:"path",attributes:a(a({},n),{},{opacity:"1",d:"M263.7,312h-16c-6.6,0-12-5.4-12-12c0-71,77.4-63.9,77.4-107.8c0-20-17.8-40.2-57.4-40.2c-29.1,0-44.3,9.6-59.2,28.7 c-3.9,5-11.1,6-16.2,2.4l-13.1-9.2c-5.6-3.9-6.9-11.8-2.6-17.2c21.2-27.2,46.4-44.7,91.2-44.7c52.3,0,97.4,29.8,97.4,80.2 c0,67.6-77.4,63.5-77.4,107.8C275.7,306.6,270.3,312,263.7,312z"}),children:t?[]:[{tag:"animate",attributes:a(a({},o),{},{values:"1;0;0;0;0;1;"})}]}),t||e.push({tag:"path",attributes:a(a({},n),{},{opacity:"0",d:"M232.5,134.5l7,168c0.3,6.4,5.6,11.5,12,11.5h9c6.4,0,11.7-5.1,12-11.5l7-168c0.3-6.8-5.2-12.5-12-12.5h-23 C237.7,122,232.2,127.7,232.5,134.5z"}),children:[{tag:"animate",attributes:a(a({},o),{},{values:"0;0;1;1;0;0;"})}]}),{tag:"g",attributes:{class:"missing"},children:e}}}};!function(e,t){var n=t.mixoutsTo;st=e,lt={},Object.keys(ut).forEach((function(e){-1===ct.indexOf(e)&&delete ut[e]})),st.forEach((function(e){var t=e.mixout?e.mixout():{};if(Object.keys(t).forEach((function(e){"function"===typeof t[e]&&(n[e]=t[e]),"object"===o(t[e])&&Object.keys(t[e]).forEach((function(r){n[e]||(n[e]={}),n[e][r]=t[e][r]}))})),e.hooks){var r=e.hooks();Object.keys(r).forEach((function(e){lt[e]||(lt[e]=[]),lt[e].push(r[e])}))}e.provides&&e.provides(ut)}))}([Ee,$t,Jt,en,tn,{hooks:function(){return{mutationObserverCallbacks:function(e){return e.pseudoElementsCallback=ln,e}}},provides:function(e){e.pseudoElements2svg=function(e){var t=e.node,n=void 0===t?E:t;ce.searchPseudoElements&&ln(n)}}},{mixout:function(){return{dom:{unwatch:function(){Yt(),un=!0}}}},hooks:function(){return{bootstrap:function(){zt(ft("mutationObserverCallbacks",{}))},noAuto:function(){Ht&&Ht.disconnect()},watch:function(e){var t=e.observeMutationsRoot;un?Bt():zt(ft("mutationObserverCallbacks",{observeMutationsRoot:t}))}}}},fn,hn,vn,{hooks:function(){return{parseNodeAttributes:function(e,t){var n=t.getAttribute("data-fa-symbol"),r=null!==n&&(""===n||n);return e.symbol=r,e}}}}],{mixoutsTo:gt});var mn=gt.parse,gn=gt.icon,bn=n(45697),yn=n.n(bn),wn=n(67294);function xn(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function kn(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}function Sn(e){return function(e){if(Array.isArray(e))return _n(e)}(e)||function(e){if("undefined"!==typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(!e)return;if("string"===typeof e)return _n(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);"Object"===n&&e.constructor&&(n=e.constructor.name);if("Map"===n||"Set"===n)return Array.from(e);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return _n(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function _n(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n0||!Array.isArray(t)&&t?Cn({},e,t):{}}var An=wn.forwardRef((function(e,t){var 
n=e.icon,r=e.mask,a=e.symbol,o=e.className,i=e.title,s=e.titleId,l=e.maskId,u=Tn(n),c=Nn("classes",[].concat(Sn(function(e){var t,n=e.beat,r=e.fade,a=e.beatFade,o=e.bounce,i=e.shake,s=e.flash,l=e.spin,u=e.spinPulse,c=e.spinReverse,f=e.pulse,p=e.fixedWidth,d=e.inverse,h=e.border,v=e.listItem,m=e.flip,g=e.size,b=e.rotation,y=e.pull,w=(Cn(t={"fa-beat":n,"fa-fade":r,"fa-beat-fade":a,"fa-bounce":o,"fa-shake":i,"fa-flash":s,"fa-spin":l,"fa-spin-reverse":c,"fa-spin-pulse":u,"fa-pulse":f,"fa-fw":p,"fa-inverse":d,"fa-border":h,"fa-li":v,"fa-flip":!0===m,"fa-flip-horizontal":"horizontal"===m||"both"===m,"fa-flip-vertical":"vertical"===m||"both"===m},"fa-".concat(g),"undefined"!==typeof g&&null!==g),Cn(t,"fa-rotate-".concat(b),"undefined"!==typeof b&&null!==b&&0!==b),Cn(t,"fa-pull-".concat(y),"undefined"!==typeof y&&null!==y),Cn(t,"fa-swap-opacity",e.swapOpacity),t);return Object.keys(w).map((function(e){return w[e]?e:null})).filter((function(e){return e}))}(e)),Sn(o.split(" ")))),f=Nn("transform","string"===typeof e.transform?mn.transform(e.transform):e.transform),p=Nn("mask",Tn(r)),d=gn(u,kn(kn(kn(kn({},c),f),p),{},{symbol:a,title:i,titleId:s,maskId:l}));if(!d)return function(){var e;!Pn&&console&&"function"===typeof console.error&&(e=console).error.apply(e,arguments)}("Could not find icon",u),null;var h=d.abstract,v={ref:t};return Object.keys(e).forEach((function(t){An.defaultProps.hasOwnProperty(t)||(v[t]=e[t])})),Zn(h[0],v)}));An.displayName="FontAwesomeIcon",An.propTypes={beat:yn().bool,border:yn().bool,beatFade:yn().bool,bounce:yn().bool,className:yn().string,fade:yn().bool,flash:yn().bool,mask:yn().oneOfType([yn().object,yn().array,yn().string]),maskId:yn().string,fixedWidth:yn().bool,inverse:yn().bool,flip:yn().oneOf([!0,!1,"horizontal","vertical","both"]),icon:yn().oneOfType([yn().object,yn().array,yn().string]),listItem:yn().bool,pull:yn().oneOf(["right","left"]),pulse:yn().bool,rotation:yn().oneOf([0,90,180,270]),shake:yn().bool,size:yn().oneOf(["2xs","xs","sm","lg","xl","2xl","1x","2x","3x","4x","5x","6x","7x","8x","9x","10x"]),spin:yn().bool,spinPulse:yn().bool,spinReverse:yn().bool,symbol:yn().oneOfType([yn().bool,yn().string]),title:yn().string,titleId:yn().string,transform:yn().oneOfType([yn().string,yn().object]),swapOpacity:yn().bool},An.defaultProps={border:!1,className:"",mask:null,maskId:null,fixedWidth:!1,inverse:!1,flip:!1,icon:null,listItem:!1,pull:null,pulse:!1,rotation:null,size:null,spin:!1,spinPulse:!1,spinReverse:!1,beat:!1,fade:!1,beatFade:!1,bounce:!1,shake:!1,symbol:!1,title:"",titleId:null,transform:null,swapOpacity:!1};var Zn=function e(t,n){var r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};if("string"===typeof n)return n;var a=(n.children||[]).map((function(n){return e(t,n)})),o=Object.keys(n.attributes||{}).reduce((function(e,t){var r=n.attributes[t];switch(t){case"class":e.attrs.className=r,delete n.attributes.class;break;case"style":e.attrs.style=r.split(";").map((function(e){return e.trim()})).filter((function(e){return e})).reduce((function(e,t){var n,r=t.indexOf(":"),a=On(t.slice(0,r)),o=t.slice(r+1).trim();return a.startsWith("webkit")?e[(n=a,n.charAt(0).toUpperCase()+n.slice(1))]=o:e[a]=o,e}),{});break;default:0===t.indexOf("aria-")||0===t.indexOf("data-")?e.attrs[t.toLowerCase()]=r:e.attrs[On(t)]=r}return e}),{attrs:{}}),i=r.style,s=void 0===i?{}:i,l=En(r,Mn);return o.attrs.style=kn(kn({},o.attrs.style),s),t.apply(void 0,[n.tag,kn(kn({},o.attrs),l)].concat(Sn(a)))}.bind(null,wn.createElement)},70597:function(e,t,n){"use strict";var 
r,a=Object.assign||function(e){for(var t=1;t=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["fill","width","height","style"]);return i.default.createElement("svg",a({viewBox:"0 0 24 24",style:a({fill:n,width:o,height:l},c)},f),i.default.createElement("path",{d:"M21,7L9,19L3.5,13.5L4.91,12.09L9,16.17L19.59,5.59L21,7Z"}))}},43891:function(e,t,n){"use strict";var r,a=Object.assign||function(e){for(var t=1;t=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["fill","width","height","style"]);return i.default.createElement("svg",a({viewBox:"0 0 24 24",style:a({fill:n,width:o,height:l},c)},f),i.default.createElement("path",{d:"M12,18.17L8.83,15L7.42,16.41L12,21L16.59,16.41L15.17,15M12,5.83L15.17,9L16.58,7.59L12,3L7.41,7.59L8.83,9L12,5.83Z"}))}},94184:function(e,t){var n;!function(){"use strict";var r={}.hasOwnProperty;function a(){for(var e=[],t=0;ti&&(f=u(f,0,i)),e?p+f:f+p)}};e.exports={start:f(!1),end:f(!0)}},33161:function(e,t,n){"use strict";n(82109)({target:"Number",stat:!0},{isInteger:n(55988)})},66528:function(e,t,n){"use strict";var r=n(82109),a=n(76650).end;r({target:"String",proto:!0,forced:n(54986)},{padEnd:function(e){return a(this,e,arguments.length>1?arguments[1]:void 0)}})},83112:function(e,t,n){"use strict";var r=n(82109),a=n(76650).start;r({target:"String",proto:!0,forced:n(54986)},{padStart:function(e){return a(this,e,arguments.length>1?arguments[1]:void 0)}})},68757:function(e,t,n){"use strict";var r=n(82109),a=n(46916),o=n(1702),i=n(84488),s=n(60614),l=n(68554),u=n(47850),c=n(41340),f=n(58173),p=n(34706),d=n(10647),h=n(5112),v=n(31913),m=h("replace"),g=TypeError,b=o("".indexOf),y=o("".replace),w=o("".slice),x=Math.max,k=function(e,t,n){return n>e.length?-1:""===t?n:b(e,t,n)};r({target:"String",proto:!0},{replaceAll:function(e,t){var n,r,o,h,D,C,E,S,_,O=i(this),M=0,P=0,T="";if(!l(e)){if((n=u(e))&&(r=c(i(p(e))),!~b(r,"g")))throw new g("`.replaceAll` does not allow non-global regexes");if(o=f(e,m))return a(o,e,O,t);if(v&&n)return y(c(O),e,t)}for(h=c(O),D=c(e),(C=s(t))||(t=c(t)),E=D.length,S=x(1,E),M=k(h,D,0);-1!==M;)_=C?c(t(D,M,h)):d(D,h,M,[],void 0,t),T+=w(h,P,M)+_,P=M+E,M=k(h,D,M+S);return P=l.getDate()?l:(n.setFullYear(l.getFullYear(),l.getMonth(),s),n)}},8791:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return i}});var r=n(83946),a=n(11640),o=n(13882);function i(e,t){(0,o.Z)(2,arguments);var n=3*(0,r.Z)(t);return(0,a.default)(e,n)}},63500:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return i}});var r=n(83946),a=n(77349),o=n(13882);function i(e,t){(0,o.Z)(2,arguments);var n=7*(0,r.Z)(t);return(0,a.default)(e,n)}},21593:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return i}});var r=n(83946),a=n(11640),o=n(13882);function i(e,t){(0,o.Z)(2,arguments);var n=(0,r.Z)(t);return(0,a.default)(e,12*n)}},49474:function(e,t,n){"use strict";n.d(t,{Z:function(){return o}});var r=n(19013),a=n(13882);function o(e,t){(0,a.Z)(2,arguments);var n=(0,r.default)(e),o=(0,r.default)(t),i=n.getTime()-o.getTime();return i<0?-1:i>0?1:i}},36948:function(e,t,n){"use strict";n.d(t,{qk:function(){return o},vh:function(){return a},yJ:function(){return r}});Math.pow(10,8);var r=6e4,a=36e5,o=1e3},92300:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return s}});var r=n(24262),a=n(69119),o=n(13882),i=864e5;function s(e,t){(0,o.Z)(2,arguments);var n=(0,a.default)(e),s=(0,a.default)(t),l=n.getTime()-(0,r.Z)(n),u=s.getTime()-(0,r.Z)(s);return Math.round((l-u)/i)}},84129:function(e,t,n){"use 
strict";n.r(t),n.d(t,{default:function(){return o}});var r=n(19013),a=n(13882);function o(e,t){(0,a.Z)(2,arguments);var n=(0,r.default)(e),o=(0,r.default)(t);return 12*(n.getFullYear()-o.getFullYear())+(n.getMonth()-o.getMonth())}},91857:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return o}});var r=n(19013),a=n(13882);function o(e,t){(0,a.Z)(2,arguments);var n=(0,r.default)(e),o=(0,r.default)(t);return n.getFullYear()-o.getFullYear()}},67803:function(e,t,n){"use strict";n.d(t,{Z:function(){return s}});var r=n(19013),a=n(92300),o=n(13882);function i(e,t){var n=e.getFullYear()-t.getFullYear()||e.getMonth()-t.getMonth()||e.getDate()-t.getDate()||e.getHours()-t.getHours()||e.getMinutes()-t.getMinutes()||e.getSeconds()-t.getSeconds()||e.getMilliseconds()-t.getMilliseconds();return n<0?-1:n>0?1:n}function s(e,t){(0,o.Z)(2,arguments);var n=(0,r.default)(e),s=(0,r.default)(t),l=i(n,s),u=Math.abs((0,a.default)(n,s));n.setDate(n.getDate()-l*u);var c=l*(u-Number(i(n,s)===-l));return 0===c?0:c}},76972:function(e,t,n){"use strict";n.d(t,{Z:function(){return s}});var r=n(36948),a=n(59910),o=n(13882),i=n(93645);function s(e,t,n){(0,o.Z)(2,arguments);var s=(0,a.Z)(e,t)/r.vh;return(0,i.u)(null===n||void 0===n?void 0:n.roundingMethod)(s)}},59910:function(e,t,n){"use strict";n.d(t,{Z:function(){return o}});var r=n(19013),a=n(13882);function o(e,t){return(0,a.Z)(2,arguments),(0,r.default)(e).getTime()-(0,r.default)(t).getTime()}},69690:function(e,t,n){"use strict";n.d(t,{Z:function(){return s}});var r=n(36948),a=n(59910),o=n(13882),i=n(93645);function s(e,t,n){(0,o.Z)(2,arguments);var s=(0,a.Z)(e,t)/r.yJ;return(0,i.u)(null===n||void 0===n?void 0:n.roundingMethod)(s)}},50157:function(e,t,n){"use strict";n.d(t,{Z:function(){return u}});var r=n(19013),a=n(84129),o=n(49474),i=n(13882),s=n(83894),l=n(4135);function u(e,t){(0,i.Z)(2,arguments);var n,u=(0,r.default)(e),c=(0,r.default)(t),f=(0,o.Z)(u,c),p=Math.abs((0,a.default)(u,c));if(p<1)n=0;else{1===u.getMonth()&&u.getDate()>27&&u.setDate(30),u.setMonth(u.getMonth()-f*p);var d=(0,o.Z)(u,c)===-f;(function(e){(0,i.Z)(1,arguments);var t=(0,r.default)(e);return(0,s.default)(t).getTime()===(0,l.default)(t).getTime()})((0,r.default)(e))&&1===p&&1===(0,o.Z)(e,c)&&(d=!1),n=f*(p-Number(d))}return 0===n?0:n}},11699:function(e,t,n){"use strict";n.d(t,{Z:function(){return i}});var r=n(59910),a=n(13882),o=n(93645);function i(e,t,n){(0,a.Z)(2,arguments);var i=(0,r.Z)(e,t)/1e3;return(0,o.u)(null===n||void 0===n?void 0:n.roundingMethod)(i)}},5001:function(e,t,n){"use strict";n.d(t,{Z:function(){return s}});var r=n(19013),a=n(91857),o=n(49474),i=n(13882);function s(e,t){(0,i.Z)(2,arguments);var n=(0,r.default)(e),s=(0,r.default)(t),l=(0,o.Z)(n,s),u=Math.abs((0,a.default)(n,s));n.setFullYear(1584),s.setFullYear(1584);var c=(0,o.Z)(n,s)===-l,f=l*(u-Number(c));return 0===f?0:f}},83894:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return o}});var r=n(19013),a=n(13882);function o(e){(0,a.Z)(1,arguments);var t=(0,r.default)(e);return t.setHours(23,59,59,999),t}},4135:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return o}});var r=n(19013),a=n(13882);function o(e){(0,a.Z)(1,arguments);var t=(0,r.default)(e),n=t.getMonth();return t.setFullYear(t.getFullYear(),n+1,0),t.setHours(23,59,59,999),t}},67090:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return s}});var r=n(84314),a=n(19013),o=n(83946),i=n(13882);function s(e,t){var n,s,l,u,c,f,p,d;(0,i.Z)(1,arguments);var 
h=(0,r.j)(),v=(0,o.Z)(null!==(n=null!==(s=null!==(l=null!==(u=null===t||void 0===t?void 0:t.weekStartsOn)&&void 0!==u?u:null===t||void 0===t||null===(c=t.locale)||void 0===c||null===(f=c.options)||void 0===f?void 0:f.weekStartsOn)&&void 0!==l?l:h.weekStartsOn)&&void 0!==s?s:null===(p=h.locale)||void 0===p||null===(d=p.options)||void 0===d?void 0:d.weekStartsOn)&&void 0!==n?n:0);if(!(v>=0&&v<=6))throw new RangeError("weekStartsOn must be between 0 and 6 inclusively");var m=(0,a.default)(e),g=m.getDay(),b=6+(g0?(x=(0,i.default)(t),k=(0,i.default)(e)):(x=(0,i.default)(e),k=(0,i.default)(t));var C,E=String(null!==(g=null===n||void 0===n?void 0:n.roundingMethod)&&void 0!==g?g:"round");if("floor"===E)C=Math.floor;else if("ceil"===E)C=Math.ceil;else{if("round"!==E)throw new RangeError("roundingMethod must be 'floor', 'ceil' or 'round'");C=Math.round}var S,_=k.getTime()-x.getTime(),O=_/f,M=(_-((0,a.Z)(k)-(0,a.Z)(x)))/f,P=null===n||void 0===n?void 0:n.unit;if("second"===(S=P?String(P):O<1?"second":O<60?"minute":O=s.getTime()?n+1:t.getTime()>=u.getTime()?n:n-1}(e),n=new Date(0);return n.setFullYear(t,0,4),n.setHours(0,0,0,0),i(n)}var l=6048e5;function u(e){(0,o.Z)(1,arguments);var t=(0,r.default)(e),n=i(t).getTime()-s(t).getTime();return Math.round(n/l)+1}},39159:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return o}});var r=n(19013),a=n(13882);function o(e){return(0,a.Z)(1,arguments),(0,r.default)(e).getMinutes()}},78966:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return o}});var r=n(19013),a=n(13882);function o(e){return(0,a.Z)(1,arguments),(0,r.default)(e).getMonth()}},56605:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return o}});var r=n(19013),a=n(13882);function o(e){(0,a.Z)(1,arguments);var t=(0,r.default)(e);return Math.floor(t.getMonth()/3)+1}},77881:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return o}});var r=n(19013),a=n(13882);function o(e){return(0,a.Z)(1,arguments),(0,r.default)(e).getSeconds()}},28789:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return o}});var r=n(19013),a=n(13882);function o(e){return(0,a.Z)(1,arguments),(0,r.default)(e).getTime()}},99982:function(e,t,n){"use strict";n.d(t,{Z:function(){return o}});var r=n(28789),a=n(13882);function o(e){return(0,a.Z)(1,arguments),Math.floor((0,r.default)(e)/1e3)}},95570:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return o}});var r=n(19013),a=n(13882);function o(e){return(0,a.Z)(1,arguments),(0,r.default)(e).getFullYear()}},33926:function(e,t,n){"use strict";n.d(t,{Z:function(){return d}});var r=n(49474),a=n(29422),o=n(67803),i=n(76972),s=n(69690),l=n(50157),u=n(11699),c=n(5001),f=n(19013),p=n(13882);function d(e){(0,p.Z)(1,arguments);var t=(0,f.default)(e.start),n=(0,f.default)(e.end);if(isNaN(t.getTime()))throw new RangeError("Start Date is invalid");if(isNaN(n.getTime()))throw new RangeError("End Date is invalid");var d={};d.years=Math.abs((0,c.Z)(n,t));var h=(0,r.Z)(n,t),v=(0,a.Z)(t,{years:h*d.years});d.months=Math.abs((0,l.Z)(n,v));var m=(0,a.Z)(v,{months:h*d.months});d.days=Math.abs((0,o.Z)(n,m));var g=(0,a.Z)(m,{days:h*d.days});d.hours=Math.abs((0,i.Z)(n,g));var b=(0,a.Z)(g,{hours:h*d.hours});d.minutes=Math.abs((0,s.Z)(n,b));var y=(0,a.Z)(b,{minutes:h*d.minutes});return d.seconds=Math.abs((0,u.Z)(n,y)),d}},42699:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return o}});var r=n(19013),a=n(13882);function o(e,t){(0,a.Z)(2,arguments);var n=(0,r.default)(e),o=(0,r.default)(t);return 
n.getTime()>o.getTime()}},313:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return o}});var r=n(19013),a=n(13882);function o(e,t){(0,a.Z)(2,arguments);var n=(0,r.default)(e),o=(0,r.default)(t);return n.getTime()=o&&n<=i}},99890:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return i}});var r=n(71002),a=n(19013),o=n(13882);function i(e){var t,n;if((0,o.Z)(1,arguments),e&&"function"===typeof e.forEach)t=e;else{if("object"!==(0,r.Z)(e)||null===e)return new Date(NaN);t=Array.prototype.slice.call(e)}return t.forEach((function(e){var t=(0,a.default)(e);(void 0===n||nt||isNaN(t.getDate()))&&(n=t)})),n||new Date(NaN)}},76417:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return Ze}});var r=n(71002),a=n(40181);function o(e,t){var n="undefined"!==typeof Symbol&&e[Symbol.iterator]||e["@@iterator"];if(!n){if(Array.isArray(e)||(n=(0,a.Z)(e))||t&&e&&"number"===typeof e.length){n&&(e=n);var r=0,o=function(){};return{s:o,n:function(){return r>=e.length?{done:!0}:{done:!1,value:e[r++]}},e:function(e){throw e},f:o}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var i,s=!0,l=!1;return{s:function(){n=n.call(e)},n:function(){var e=n.next();return s=e.done,e},e:function(e){l=!0,i=e},f:function(){try{s||null==n.return||n.return()}finally{if(l)throw i}}}}var i=n(8958),s=n(91218),l=n(19013),u=n(52149),c=n(97621),f=n(24262),p=n(5267),d=n(83946),h=n(13882),v=n(97326),m=n(60136),g=n(29388),b=n(15671),y=n(43144),w=n(4942),x=function(){function e(){(0,b.Z)(this,e),(0,w.Z)(this,"priority",void 0),(0,w.Z)(this,"subPriority",0)}return(0,y.Z)(e,[{key:"validate",value:function(e,t){return!0}}]),e}(),k=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(e,r,a,o,i){var s;return(0,b.Z)(this,n),(s=t.call(this)).value=e,s.validateValue=r,s.setValue=a,s.priority=o,i&&(s.subPriority=i),s}return(0,y.Z)(n,[{key:"validate",value:function(e,t){return this.validateValue(e,this.value,t)}},{key:"set",value:function(e,t,n){return this.setValue(e,t,this.value,n)}}]),n}(x),D=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o0,a=r?t:1-t;if(a<=50)n=e||100;else{var o=a+50;n=e+100*Math.floor(o/100)-(e>=o%100?100:0)}return r?n:1-n}function B(e){return e%400===0||e%4===0&&e%100!==0}var H=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o0}},{key:"set",value:function(e,t,n){var r=e.getUTCFullYear();if(n.isTwoDigitYear){var a=Y(n.year,r);return e.setUTCFullYear(a,0,1),e.setUTCHours(0,0,0,0),e}var o="era"in t&&1!==t.era?1-n.year:n.year;return e.setUTCFullYear(o,0,1),e.setUTCHours(0,0,0,0),e}}]),n}(C),z=n(7651),U=n(59025),W=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o0}},{key:"set",value:function(e,t,n,r){var a=(0,z.Z)(e,r);if(n.isTwoDigitYear){var o=Y(n.year,a);return e.setUTCFullYear(o,0,r.firstWeekContainsDate),e.setUTCHours(0,0,0,0),(0,U.Z)(e,r)}var i="era"in t&&1!==t.era?1-n.year:n.year;return e.setUTCFullYear(i,0,r.firstWeekContainsDate),e.setUTCHours(0,0,0,0),(0,U.Z)(e,r)}}]),n}(C),q=n(66979),K=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=1&&t<=4}},{key:"set",value:function(e,t,n){return e.setUTCMonth(3*(n-1),1),e.setUTCHours(0,0,0,0),e}}]),n}(C),G=function(e){(0,m.Z)(n,e);var 
t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=1&&t<=4}},{key:"set",value:function(e,t,n){return e.setUTCMonth(3*(n-1),1),e.setUTCHours(0,0,0,0),e}}]),n}(C),X=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=0&&t<=11}},{key:"set",value:function(e,t,n){return e.setUTCMonth(n,1),e.setUTCHours(0,0,0,0),e}}]),n}(C),$=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=0&&t<=11}},{key:"set",value:function(e,t,n){return e.setUTCMonth(n,1),e.setUTCHours(0,0,0,0),e}}]),n}(C),J=n(23324);var ee=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=1&&t<=53}},{key:"set",value:function(e,t,n,r){return(0,U.Z)(function(e,t,n){(0,h.Z)(2,arguments);var r=(0,l.default)(e),a=(0,d.Z)(t),o=(0,J.Z)(r,n)-a;return r.setUTCDate(r.getUTCDate()-7*o),r}(e,n,r),r)}}]),n}(C),te=n(49702);var ne=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=1&&t<=53}},{key:"set",value:function(e,t,n){return(0,q.Z)(function(e,t){(0,h.Z)(2,arguments);var n=(0,l.default)(e),r=(0,d.Z)(t),a=(0,te.Z)(n)-r;return n.setUTCDate(n.getUTCDate()-7*a),n}(e,n))}}]),n}(C),re=[31,28,31,30,31,30,31,31,30,31,30,31],ae=[31,29,31,30,31,30,31,31,30,31,30,31],oe=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=1&&t<=ae[r]:t>=1&&t<=re[r]}},{key:"set",value:function(e,t,n){return e.setUTCDate(n),e.setUTCHours(0,0,0,0),e}}]),n}(C),ie=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=1&&t<=366:t>=1&&t<=365}},{key:"set",value:function(e,t,n){return e.setUTCMonth(0,n),e.setUTCHours(0,0,0,0),e}}]),n}(C),se=n(84314);function le(e,t,n){var r,a,o,i,s,u,c,f;(0,h.Z)(2,arguments);var p=(0,se.j)(),v=(0,d.Z)(null!==(r=null!==(a=null!==(o=null!==(i=null===n||void 0===n?void 0:n.weekStartsOn)&&void 0!==i?i:null===n||void 0===n||null===(s=n.locale)||void 0===s||null===(u=s.options)||void 0===u?void 0:u.weekStartsOn)&&void 0!==o?o:p.weekStartsOn)&&void 0!==a?a:null===(c=p.locale)||void 0===c||null===(f=c.options)||void 0===f?void 0:f.weekStartsOn)&&void 0!==r?r:0);if(!(v>=0&&v<=6))throw new RangeError("weekStartsOn must be between 0 and 6 inclusively");var m=(0,l.default)(e),g=(0,d.Z)(t),b=((g%7+7)%7=0&&t<=6}},{key:"set",value:function(e,t,n,r){return(e=le(e,n,r)).setUTCHours(0,0,0,0),e}}]),n}(C),ce=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=0&&t<=6}},{key:"set",value:function(e,t,n,r){return(e=le(e,n,r)).setUTCHours(0,0,0,0),e}}]),n}(C),fe=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=0&&t<=6}},{key:"set",value:function(e,t,n,r){return(e=le(e,n,r)).setUTCHours(0,0,0,0),e}}]),n}(C);var pe=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=1&&t<=7}},{key:"set",value:function(e,t,n){return e=function(e,t){(0,h.Z)(2,arguments);var n=(0,d.Z)(t);n%7===0&&(n-=7);var r=(0,l.default)(e),a=((n%7+7)%7<1?7:0)+n-r.getUTCDay();return r.setUTCDate(r.getUTCDate()+a),r}(e,n),e.setUTCHours(0,0,0,0),e}}]),n}(C),de=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var 
e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=1&&t<=12}},{key:"set",value:function(e,t,n){var r=e.getUTCHours()>=12;return r&&n<12?e.setUTCHours(n+12,0,0,0):r||12!==n?e.setUTCHours(n,0,0,0):e.setUTCHours(0,0,0,0),e}}]),n}(C),ge=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=0&&t<=23}},{key:"set",value:function(e,t,n){return e.setUTCHours(n,0,0,0),e}}]),n}(C),be=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=0&&t<=11}},{key:"set",value:function(e,t,n){return e.getUTCHours()>=12&&n<12?e.setUTCHours(n+12,0,0,0):e.setUTCHours(n,0,0,0),e}}]),n}(C),ye=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=1&&t<=24}},{key:"set",value:function(e,t,n){var r=n<=24?n%24:n;return e.setUTCHours(r,0,0,0),e}}]),n}(C),we=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=0&&t<=59}},{key:"set",value:function(e,t,n){return e.setUTCMinutes(n,0,0),e}}]),n}(C),xe=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=0&&t<=59}},{key:"set",value:function(e,t,n){return e.setUTCSeconds(n,0),e}}]),n}(C),ke=function(e){(0,m.Z)(n,e);var t=(0,g.Z)(n);function n(){var e;(0,b.Z)(this,n);for(var r=arguments.length,a=new Array(r),o=0;o=1&&F<=7))throw new RangeError("firstWeekContainsDate must be between 1 and 7 inclusively");var I=(0,d.Z)(null!==(S=null!==(_=null!==(O=null!==(M=null===a||void 0===a?void 0:a.weekStartsOn)&&void 0!==M?M:null===a||void 0===a||null===(P=a.locale)||void 0===P||null===(T=P.options)||void 0===T?void 0:T.weekStartsOn)&&void 0!==O?O:j.weekStartsOn)&&void 0!==_?_:null===(N=j.locale)||void 0===N||null===(A=N.options)||void 0===A?void 0:A.weekStartsOn)&&void 0!==S?S:0);if(!(I>=0&&I<=6))throw new RangeError("weekStartsOn must be between 0 and 6 inclusively");if(""===R)return""===Z?(0,l.default)(n):new Date(NaN);var Y,B={firstWeekContainsDate:F,weekStartsOn:I,locale:L},H=[new D],z=R.match(Me).map((function(e){var t=e[0];return t in c.Z?(0,c.Z[t])(e,L.formatLong):e})).join("").match(Oe),U=[],W=o(z);try{var q=function(){var t=Y.value;null!==a&&void 0!==a&&a.useAdditionalWeekYearTokens||!(0,p.Do)(t)||(0,p.qp)(t,R,e),null!==a&&void 0!==a&&a.useAdditionalDayOfYearTokens||!(0,p.Iu)(t)||(0,p.qp)(t,R,e);var n=t[0],r=_e[n];if(r){var o=r.incompatibleTokens;if(Array.isArray(o)){var i=U.find((function(e){return o.includes(e.token)||e.token===n}));if(i)throw new RangeError("The format string mustn't contain `".concat(i.fullToken,"` and `").concat(t,"` at the same time"))}else if("*"===r.incompatibleTokens&&U.length>0)throw new RangeError("The format string mustn't contain `".concat(t,"` and any other token at the same time"));U.push({token:n,fullToken:t});var s=r.run(Z,t,L.match,B);if(!s)return{v:new Date(NaN)};H.push(s.setter),Z=s.rest}else{if(n.match(Ae))throw new RangeError("Format string contains an unescaped latin alphabet character `"+n+"`");if("''"===t?t="'":"'"===n&&(t=t.match(Pe)[1].replace(Te,"'")),0!==Z.indexOf(t))return{v:new Date(NaN)};Z=Z.slice(t.length)}};for(W.s();!(Y=W.n()).done;){var K=q();if("object"===(0,r.Z)(K))return K.v}}catch(ne){W.e(ne)}finally{W.f()}if(Z.length>0&&Ne.test(Z))return new Date(NaN);var V=H.map((function(e){return e.priority})).sort((function(e,t){return t-e})).filter((function(e,t,n){return 
n.indexOf(e)===t})).map((function(e){return H.filter((function(t){return t.priority===e})).sort((function(e,t){return t.subPriority-e.subPriority}))})).map((function(e){return e[0]})),Q=(0,l.default)(n);if(isNaN(Q.getTime()))return new Date(NaN);var G,X=(0,s.Z)(Q,(0,f.Z)(Q)),$={},J=o(V);try{for(J.s();!(G=J.n()).done;){var ee=G.value;if(!ee.validate(X,B))return new Date(NaN);var te=ee.set(X,$,B);Array.isArray(te)?(X=te[0],(0,u.Z)($,te[1])):X=te}}catch(ne){J.e(ne)}finally{J.f()}return X}},23855:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return i}});var r=n(36948),a=n(13882),o=n(83946);function i(e,t){var n;(0,a.Z)(1,arguments);var i=(0,o.Z)(null!==(n=null===t||void 0===t?void 0:t.additionalDigits)&&void 0!==n?n:2);if(2!==i&&1!==i&&0!==i)throw new RangeError("additionalDigits must be 0, 1 or 2");if("string"!==typeof e&&"[object String]"!==Object.prototype.toString.call(e))return new Date(NaN);var v,m=function(e){var t,n={},r=e.split(s.dateTimeDelimiter);if(r.length>2)return n;/:/.test(r[0])?t=r[0]:(n.date=r[0],t=r[1],s.timeZoneDelimiter.test(n.date)&&(n.date=e.split(s.timeZoneDelimiter)[0],t=e.substr(n.date.length,e.length)));if(t){var a=s.timezone.exec(t);a?(n.time=t.replace(a[1],""),n.timezone=a[1]):n.time=t}return n}(e);if(m.date){var g=function(e,t){var n=new RegExp("^(?:(\\d{4}|[+-]\\d{"+(4+t)+"})|(\\d{2}|[+-]\\d{"+(2+t)+"})$)"),r=e.match(n);if(!r)return{year:NaN,restDateString:""};var a=r[1]?parseInt(r[1]):null,o=r[2]?parseInt(r[2]):null;return{year:null===o?a:100*o,restDateString:e.slice((r[1]||r[2]).length)}}(m.date,i);v=function(e,t){if(null===t)return new Date(NaN);var n=e.match(l);if(!n)return new Date(NaN);var r=!!n[4],a=f(n[1]),o=f(n[2])-1,i=f(n[3]),s=f(n[4]),u=f(n[5])-1;if(r)return function(e,t,n){return t>=1&&t<=53&&n>=0&&n<=6}(0,s,u)?function(e,t,n){var r=new Date(0);r.setUTCFullYear(e,0,4);var a=r.getUTCDay()||7,o=7*(t-1)+n+1-a;return r.setUTCDate(r.getUTCDate()+o),r}(t,s,u):new Date(NaN);var c=new Date(0);return function(e,t,n){return t>=0&&t<=11&&n>=1&&n<=(d[t]||(h(e)?29:28))}(t,o,i)&&function(e,t){return t>=1&&t<=(h(e)?366:365)}(t,a)?(c.setUTCFullYear(t,o,Math.max(a,i)),c):new Date(NaN)}(g.restDateString,g.year)}if(!v||isNaN(v.getTime()))return new Date(NaN);var b,y=v.getTime(),w=0;if(m.time&&(w=function(e){var t=e.match(u);if(!t)return NaN;var n=p(t[1]),a=p(t[2]),o=p(t[3]);if(!function(e,t,n){if(24===e)return 0===t&&0===n;return n>=0&&n<60&&t>=0&&t<60&&e>=0&&e<25}(n,a,o))return NaN;return n*r.vh+a*r.yJ+1e3*o}(m.time),isNaN(w)))return new Date(NaN);if(!m.timezone){var x=new Date(y+w),k=new Date(0);return k.setFullYear(x.getUTCFullYear(),x.getUTCMonth(),x.getUTCDate()),k.setHours(x.getUTCHours(),x.getUTCMinutes(),x.getUTCSeconds(),x.getUTCMilliseconds()),k}return b=function(e){if("Z"===e)return 0;var t=e.match(c);if(!t)return 0;var n="+"===t[1]?-1:1,a=parseInt(t[2]),o=t[3]&&parseInt(t[3])||0;if(!function(e,t){return t>=0&&t<=59}(0,o))return NaN;return n*(a*r.vh+o*r.yJ)}(m.timezone),isNaN(b)?new Date(NaN):new Date(y+w+b)}var s={dateTimeDelimiter:/[T ]/,timeZoneDelimiter:/[Z ]/i,timezone:/([Z+-].*)$/},l=/^-?(?:(\d{3})|(\d{2})(?:-?(\d{2}))?|W(\d{2})(?:-?(\d{1}))?|)$/,u=/^(\d{2}(?:[.,]\d*)?)(?::?(\d{2}(?:[.,]\d*)?))?(?::?(\d{2}(?:[.,]\d*)?))?$/,c=/^([+-])(\d{2})(?::?(\d{2}))?$/;function f(e){return e?parseInt(e):1}function p(e){return e&&parseFloat(e.replace(",","."))||0}var d=[31,null,31,30,31,30,31,31,30,31,30,31];function h(e){return e%400===0||e%4===0&&e%100!==0}},92311:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return l}});var 
[Minified, machine-generated dashboard bundle JavaScript (vendored third-party modules: date manipulation helpers, QR-code generation, color-picker and color-conversion utilities, react-datepicker components); single-line generated content not reproduced here.]
Yt(e,"EEEEEE",t)}(e,t)})),Dt(Ot(r),"decreaseYear",(function(){r.setState((function(e){var t=e.date;return{date:_e.default(t,r.props.showYearPicker?r.props.yearItemNumber:1)}}),(function(){return r.handleYearChange(r.state.date)}))})),Dt(Ot(r),"clearSelectingDate",(function(){r.setState({selectingDate:null})})),Dt(Ot(r),"renderPreviousButton",(function(){if(!r.props.renderCustomHeader){var e;switch(!0){case r.props.showMonthYearPicker:e=yn(r.state.date,r.props);break;case r.props.showYearPicker:e=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.minDate,r=t.yearItemNumber,a=void 0===r?jt:r,o=_n(Wt(_e.default(e,a)),a).endPeriod,i=n&&je.default(n);return i&&i>o||!1}(r.state.date,r.props);break;default:e=gn(r.state.date,r.props)}if((r.props.forceShowMonthNavigation||r.props.showDisabledMonthNavigation||!e)&&!r.props.showTimeSelectOnly){var t=["react-datepicker__navigation","react-datepicker__navigation--previous"],n=r.decreaseMonth;(r.props.showMonthYearPicker||r.props.showQuarterYearPicker||r.props.showYearPicker)&&(n=r.decreaseYear),e&&r.props.showDisabledMonthNavigation&&(t.push("react-datepicker__navigation--previous--disabled"),n=null);var a=r.props.showMonthYearPicker||r.props.showQuarterYearPicker||r.props.showYearPicker,o=r.props,i=o.previousMonthButtonLabel,s=o.previousYearButtonLabel,l=r.props,u=l.previousMonthAriaLabel,c=void 0===u?"string"==typeof i?i:"Previous Month":u,f=l.previousYearAriaLabel,p=void 0===f?"string"==typeof s?s:"Previous Year":f;return fe.default.createElement("button",{type:"button",className:t.join(" "),onClick:n,onKeyDown:r.props.handleOnKeyDown,"aria-label":a?p:c},fe.default.createElement("span",{className:["react-datepicker__navigation-icon","react-datepicker__navigation-icon--previous"].join(" ")},a?r.props.previousYearButtonLabel:r.props.previousMonthButtonLabel))}}})),Dt(Ot(r),"increaseYear",(function(){r.setState((function(e){var t=e.date;return{date:ke.default(t,r.props.showYearPicker?r.props.yearItemNumber:1)}}),(function(){return r.handleYearChange(r.state.date)}))})),Dt(Ot(r),"renderNextButton",(function(){if(!r.props.renderCustomHeader){var e;switch(!0){case r.props.showMonthYearPicker:e=wn(r.state.date,r.props);break;case r.props.showYearPicker:e=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.maxDate,r=t.yearItemNumber,a=void 0===r?jt:r,o=_n(ke.default(e,a),a).startPeriod,i=n&&je.default(n);return i&&i0&&void 0!==arguments[0]?arguments[0]:r.state.date,t=["react-datepicker__current-month"];return r.props.showYearDropdown&&t.push("react-datepicker__current-month--hasYearDropdown"),r.props.showMonthDropdown&&t.push("react-datepicker__current-month--hasMonthDropdown"),r.props.showMonthYearDropdown&&t.push("react-datepicker__current-month--hasMonthYearDropdown"),fe.default.createElement("div",{className:t.join(" ")},Yt(e,r.props.dateFormat,r.props.locale))})),Dt(Ot(r),"renderYearDropdown",(function(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0];if(r.props.showYearDropdown&&!e)return fe.default.createElement(Nn,{adjustDateOnChange:r.props.adjustDateOnChange,date:r.state.date,onSelect:r.props.onSelect,setOpen:r.props.setOpen,dropdownMode:r.props.dropdownMode,onChange:r.changeYear,minDate:r.props.minDate,maxDate:r.props.maxDate,year:je.default(r.state.date),scrollableYearDropdown:r.props.scrollableYearDropdown,yearDropdownItemNumber:r.props.yearDropdownItemNumber})})),Dt(Ot(r),"renderMonthDropdown",(function(){var e=arguments.length>0&&void 
0!==arguments[0]&&arguments[0];if(r.props.showMonthDropdown&&!e)return fe.default.createElement(Rn,{dropdownMode:r.props.dropdownMode,locale:r.props.locale,onChange:r.changeMonth,month:Ze.default(r.state.date),useShortMonthInDropdown:r.props.useShortMonthInDropdown})})),Dt(Ot(r),"renderMonthYearDropdown",(function(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0];if(r.props.showMonthYearDropdown&&!e)return fe.default.createElement(Yn,{dropdownMode:r.props.dropdownMode,locale:r.props.locale,dateFormat:r.props.dateFormat,onChange:r.changeMonthYear,minDate:r.props.minDate,maxDate:r.props.maxDate,date:r.state.date,scrollableMonthYearDropdown:r.props.scrollableMonthYearDropdown})})),Dt(Ot(r),"handleTodayButtonClick",(function(e){r.props.onSelect(Kt(),e),r.props.setPreSelection&&r.props.setPreSelection(Kt())})),Dt(Ot(r),"renderTodayButton",(function(){if(r.props.todayButton&&!r.props.showTimeSelectOnly)return fe.default.createElement("div",{className:"react-datepicker__today-button",onClick:function(e){return r.handleTodayButtonClick(e)}},r.props.todayButton)})),Dt(Ot(r),"renderDefaultHeader",(function(e){var t=e.monthDate,n=e.i;return fe.default.createElement("div",{className:"react-datepicker__header ".concat(r.props.showTimeSelect?"react-datepicker__header--has-time-select":"")},r.renderCurrentMonth(t),fe.default.createElement("div",{className:"react-datepicker__header__dropdown react-datepicker__header__dropdown--".concat(r.props.dropdownMode),onFocus:r.handleDropdownFocus},r.renderMonthDropdown(0!==n),r.renderMonthYearDropdown(0!==n),r.renderYearDropdown(0!==n)),fe.default.createElement("div",{className:"react-datepicker__day-names"},r.header(t)))})),Dt(Ot(r),"renderCustomHeader",(function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.monthDate,n=e.i;if(r.props.showTimeSelect&&!r.state.monthContainer||r.props.showTimeSelectOnly)return null;var a=gn(r.state.date,r.props),o=bn(r.state.date,r.props),i=yn(r.state.date,r.props),s=wn(r.state.date,r.props),l=!r.props.showMonthYearPicker&&!r.props.showQuarterYearPicker&&!r.props.showYearPicker;return fe.default.createElement("div",{className:"react-datepicker__header react-datepicker__header--custom",onFocus:r.props.onDropdownFocus},r.props.renderCustomHeader(bt(bt({},r.state),{},{customHeaderCount:n,monthDate:t,changeMonth:r.changeMonth,changeYear:r.changeYear,decreaseMonth:r.decreaseMonth,increaseMonth:r.increaseMonth,decreaseYear:r.decreaseYear,increaseYear:r.increaseYear,prevMonthButtonDisabled:a,nextMonthButtonDisabled:o,prevYearButtonDisabled:i,nextYearButtonDisabled:s})),l&&fe.default.createElement("div",{className:"react-datepicker__day-names"},r.header(t)))})),Dt(Ot(r),"renderYearHeader",(function(){var e=r.state.date,t=r.props,n=t.showYearPicker,a=_n(e,t.yearItemNumber),o=a.startPeriod,i=a.endPeriod;return fe.default.createElement("div",{className:"react-datepicker__header react-datepicker-year-header"},n?"".concat(o," - ").concat(i):je.default(e))})),Dt(Ot(r),"renderHeader",(function(e){switch(!0){case void 0!==r.props.renderCustomHeader:return r.renderCustomHeader(e);case r.props.showMonthYearPicker||r.props.showQuarterYearPicker||r.props.showYearPicker:return r.renderYearHeader(e);default:return r.renderDefaultHeader(e)}})),Dt(Ot(r),"renderMonths",(function(){var e;if(!r.props.showTimeSelectOnly&&!r.props.showYearPicker){for(var t=[],n=r.props.showPreviousMonths?r.props.monthsShown-1:0,a=Ee.default(r.state.date,n),o=null!==(e=r.props.monthSelectedIn)&&void 
0!==e?e:n,i=0;i0;t.push(fe.default.createElement("div",{key:u,ref:function(e){r.monthContainer=e},className:"react-datepicker__month-container"},r.renderHeader({monthDate:l,i:i}),fe.default.createElement(Qn,{chooseDayAriaLabelPrefix:r.props.chooseDayAriaLabelPrefix,disabledDayAriaLabelPrefix:r.props.disabledDayAriaLabelPrefix,weekAriaLabelPrefix:r.props.weekAriaLabelPrefix,ariaLabelPrefix:r.props.monthAriaLabelPrefix,onChange:r.changeMonthYear,day:l,dayClassName:r.props.dayClassName,calendarStartDay:r.props.calendarStartDay,monthClassName:r.props.monthClassName,onDayClick:r.handleDayClick,handleOnKeyDown:r.props.handleOnDayKeyDown,onDayMouseEnter:r.handleDayMouseEnter,onMouseLeave:r.handleMonthMouseLeave,onWeekSelect:r.props.onWeekSelect,orderInDisplay:i,formatWeekNumber:r.props.formatWeekNumber,locale:r.props.locale,minDate:r.props.minDate,maxDate:r.props.maxDate,excludeDates:r.props.excludeDates,excludeDateIntervals:r.props.excludeDateIntervals,highlightDates:r.props.highlightDates,holidays:r.props.holidays,selectingDate:r.state.selectingDate,includeDates:r.props.includeDates,includeDateIntervals:r.props.includeDateIntervals,inline:r.props.inline,shouldFocusDayInline:r.props.shouldFocusDayInline,fixedHeight:r.props.fixedHeight,filterDate:r.props.filterDate,preSelection:r.props.preSelection,setPreSelection:r.props.setPreSelection,selected:r.props.selected,selectsStart:r.props.selectsStart,selectsEnd:r.props.selectsEnd,selectsRange:r.props.selectsRange,selectsDisabledDaysInRange:r.props.selectsDisabledDaysInRange,showWeekNumbers:r.props.showWeekNumbers,startDate:r.props.startDate,endDate:r.props.endDate,peekNextMonth:r.props.peekNextMonth,setOpen:r.props.setOpen,shouldCloseOnSelect:r.props.shouldCloseOnSelect,renderDayContents:r.props.renderDayContents,renderMonthContent:r.props.renderMonthContent,renderQuarterContent:r.props.renderQuarterContent,renderYearContent:r.props.renderYearContent,disabledKeyboardNavigation:r.props.disabledKeyboardNavigation,showMonthYearPicker:r.props.showMonthYearPicker,showFullMonthYearPicker:r.props.showFullMonthYearPicker,showTwoColumnMonthYearPicker:r.props.showTwoColumnMonthYearPicker,showFourColumnMonthYearPicker:r.props.showFourColumnMonthYearPicker,showYearPicker:r.props.showYearPicker,showQuarterYearPicker:r.props.showQuarterYearPicker,isInputFocused:r.props.isInputFocused,containerRef:r.containerRef,monthShowsDuplicateDaysEnd:c,monthShowsDuplicateDaysStart:f})))}return t}})),Dt(Ot(r),"renderYears",(function(){if(!r.props.showTimeSelectOnly)return r.props.showYearPicker?fe.default.createElement("div",{className:"react-datepicker__year--container"},r.renderHeader(),fe.default.createElement(Xn,Ct({onDayClick:r.handleDayClick,selectingDate:r.state.selectingDate,clearSelectingDate:r.clearSelectingDate,date:r.state.date},r.props,{onYearMouseEnter:r.handleYearMouseEnter,onYearMouseLeave:r.handleYearMouseLeave}))):void 0})),Dt(Ot(r),"renderTimeSection",(function(){if(r.props.showTimeSelect&&(r.state.monthContainer||r.props.showTimeSelectOnly))return 
fe.default.createElement(Gn,{selected:r.props.selected,openToDate:r.props.openToDate,onChange:r.props.onTimeChange,timeClassName:r.props.timeClassName,format:r.props.timeFormat,includeTimes:r.props.includeTimes,intervals:r.props.timeIntervals,minTime:r.props.minTime,maxTime:r.props.maxTime,excludeTimes:r.props.excludeTimes,filterTime:r.props.filterTime,timeCaption:r.props.timeCaption,todayButton:r.props.todayButton,showMonthDropdown:r.props.showMonthDropdown,showMonthYearDropdown:r.props.showMonthYearDropdown,showYearDropdown:r.props.showYearDropdown,withPortal:r.props.withPortal,monthRef:r.state.monthContainer,injectTimes:r.props.injectTimes,locale:r.props.locale,handleOnKeyDown:r.props.handleOnKeyDown,showTimeSelectOnly:r.props.showTimeSelectOnly})})),Dt(Ot(r),"renderInputTimeSection",(function(){var e=new Date(r.props.selected),t=It(e)&&Boolean(r.props.selected)?"".concat(Sn(e.getHours()),":").concat(Sn(e.getMinutes())):"";if(r.props.showTimeInput)return fe.default.createElement($n,{date:e,timeString:t,timeInputLabel:r.props.timeInputLabel,onChange:r.props.onTimeChange,customTimeInput:r.props.customTimeInput})})),Dt(Ot(r),"renderAriaLiveRegion",(function(){var e,t=_n(r.state.date,r.props.yearItemNumber),n=t.startPeriod,a=t.endPeriod;return e=r.props.showYearPicker?"".concat(n," - ").concat(a):r.props.showMonthYearPicker||r.props.showQuarterYearPicker?je.default(r.state.date):"".concat(nn(Ze.default(r.state.date),r.props.locale)," ").concat(je.default(r.state.date)),fe.default.createElement("span",{role:"alert","aria-live":"polite",className:"react-datepicker__aria-live"},r.state.isRenderAriaLiveMessage&&e)})),Dt(Ot(r),"renderChildren",(function(){if(r.props.children)return fe.default.createElement("div",{className:"react-datepicker__children-container"},r.props.children)})),r.containerRef=fe.default.createRef(),r.state={date:r.getDateInView(),selectingDate:null,monthContainer:null,isRenderAriaLiveMessage:!1},r}return kt(n,[{key:"componentDidMount",value:function(){var e=this;this.props.showTimeSelect&&(this.assignMonthContainer=void e.setState({monthContainer:e.monthContainer}))}},{key:"componentDidUpdate",value:function(e){var t=this;if(!this.props.preSelection||Xt(this.props.preSelection,e.preSelection)&&this.props.monthSelectedIn===e.monthSelectedIn)this.props.openToDate&&!Xt(this.props.openToDate,e.openToDate)&&this.setState({date:this.props.openToDate});else{var n=!Qt(this.state.date,this.props.preSelection);this.setState({date:this.props.preSelection},(function(){return n&&t.handleCustomMonthChange(t.state.date)}))}}},{key:"render",value:function(){var e=this.props.container||Jn;return fe.default.createElement("div",{ref:this.containerRef},fe.default.createElement(e,{className:pe.default("react-datepicker",this.props.className,{"react-datepicker--time-only":this.props.showTimeSelectOnly}),showPopperArrow:this.props.showPopperArrow,arrowProps:this.props.arrowProps},this.renderAriaLiveRegion(),this.renderPreviousButton(),this.renderNextButton(),this.renderMonths(),this.renderYears(),this.renderTodayButton(),this.renderTimeSection(),this.renderInputTimeSection(),this.renderChildren()))}}],[{key:"defaultProps",get:function(){return{onDropdownFocus:function(){},monthsShown:1,forceShowMonthNavigation:!1,timeCaption:"Time",previousYearButtonLabel:"Previous Year",nextYearButtonLabel:"Next Year",previousMonthButtonLabel:"Previous Month",nextMonthButtonLabel:"Next Month",customTimeInput:null,yearItemNumber:jt}}}]),n}(fe.default.Component),nr=function(e){var 
t=e.icon,n=e.className,r="react-datepicker__calendar-icon";return fe.default.isValidElement(t)?fe.default.cloneElement(t,{className:"".concat(t.props.className||""," ").concat(r," ").concat(n)}):"string"==typeof t?fe.default.createElement("i",{className:"".concat(r," ").concat(t," ").concat(n),"aria-hidden":"true"}):fe.default.createElement("svg",{className:"".concat(r," ").concat(n),xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 448 512"},fe.default.createElement("path",{d:"M96 32V64H48C21.5 64 0 85.5 0 112v48H448V112c0-26.5-21.5-48-48-48H352V32c0-17.7-14.3-32-32-32s-32 14.3-32 32V64H160V32c0-17.7-14.3-32-32-32S96 14.3 96 32zM448 192H0V464c0 26.5 21.5 48 48 48H400c26.5 0 48-21.5 48-48V192z"}))};nr.defaultProps={className:""};var rr=nr,ar=function(e){Et(n,e);var t=Mt(n);function n(e){var r;return wt(this,n),(r=t.call(this,e)).el=document.createElement("div"),r}return kt(n,[{key:"componentDidMount",value:function(){this.portalRoot=(this.props.portalHost||document).getElementById(this.props.portalId),this.portalRoot||(this.portalRoot=document.createElement("div"),this.portalRoot.setAttribute("id",this.props.portalId),(this.props.portalHost||document.body).appendChild(this.portalRoot)),this.portalRoot.appendChild(this.el)}},{key:"componentWillUnmount",value:function(){this.portalRoot.removeChild(this.el)}},{key:"render",value:function(){return vt.default.createPortal(this.props.children,this.el)}}]),n}(fe.default.Component),or=function(e){return!e.disabled&&-1!==e.tabIndex},ir=function(e){Et(n,e);var t=Mt(n);function n(e){var r;return wt(this,n),Dt(Ot(r=t.call(this,e)),"getTabChildren",(function(){return Array.prototype.slice.call(r.tabLoopRef.current.querySelectorAll("[tabindex], a, button, input, select, textarea"),1,-1).filter(or)})),Dt(Ot(r),"handleFocusStart",(function(){var e=r.getTabChildren();e&&e.length>1&&e[e.length-1].focus()})),Dt(Ot(r),"handleFocusEnd",(function(){var e=r.getTabChildren();e&&e.length>1&&e[0].focus()})),r.tabLoopRef=fe.default.createRef(),r}return kt(n,[{key:"render",value:function(){return this.props.enableTabLoop?fe.default.createElement("div",{className:"react-datepicker__tab-loop",ref:this.tabLoopRef},fe.default.createElement("div",{className:"react-datepicker__tab-loop__start",tabIndex:"0",onFocus:this.handleFocusStart}),this.props.children,fe.default.createElement("div",{className:"react-datepicker__tab-loop__end",tabIndex:"0",onFocus:this.handleFocusEnd})):this.props.children}}],[{key:"defaultProps",get:function(){return{enableTabLoop:!0}}}]),n}(fe.default.Component),sr=function(e){Et(n,e);var t=Mt(n);function n(){return wt(this,n),t.apply(this,arguments)}return kt(n,[{key:"render",value:function(){var e,t=this.props,n=t.className,r=t.wrapperClassName,a=t.hidePopper,o=t.popperComponent,i=t.popperModifiers,s=t.popperPlacement,l=t.popperProps,u=t.targetComponent,c=t.enableTabLoop,f=t.popperOnKeyDown,p=t.portalId,d=t.portalHost;if(!a){var h=pe.default("react-datepicker-popper",n);e=fe.default.createElement(le.Popper,Ct({modifiers:i,placement:s},l),(function(e){var t=e.ref,n=e.style,r=e.placement,a=e.arrowProps;return fe.default.createElement(ir,{enableTabLoop:c},fe.default.createElement("div",{ref:t,style:n,className:h,"data-placement":r,onKeyDown:f},fe.default.cloneElement(o,{arrowProps:a})))}))}this.props.popperContainer&&(e=fe.default.createElement(this.props.popperContainer,{},e)),p&&!a&&(e=fe.default.createElement(ar,{portalId:p,portalHost:d},e));var v=pe.default("react-datepicker-wrapper",r);return 
fe.default.createElement(le.Manager,{className:"react-datepicker-manager"},fe.default.createElement(le.Reference,null,(function(e){var t=e.ref;return fe.default.createElement("div",{ref:t,className:v},u)})),e)}}],[{key:"defaultProps",get:function(){return{hidePopper:!0,popperModifiers:[],popperProps:{},popperPlacement:"bottom-start"}}}]),n}(fe.default.Component),lr="react-datepicker-ignore-onclickoutside",ur=ht.default(tr),cr="Date input not valid.",fr=function(e){Et(n,e);var t=Mt(n);function n(e){var r;return wt(this,n),Dt(Ot(r=t.call(this,e)),"getPreSelection",(function(){return r.props.openToDate?r.props.openToDate:r.props.selectsEnd&&r.props.startDate?r.props.startDate:r.props.selectsStart&&r.props.endDate?r.props.endDate:Ft()})),Dt(Ot(r),"calcInitialState",(function(){var e,t,n=null===(e=r.props.holidays)||void 0===e?void 0:e.reduce((function(e,t){var n=new Date(t.date);return he.default(n)?[].concat(Pt(e),[bt(bt({},t),{},{date:n})]):e}),[]),a=r.getPreSelection(),o=xn(r.props),i=kn(r.props),s=o&&ut.default(a,Qe.default(o))?o:i&<.default(a,et.default(i))?i:a;return{open:r.props.startOpen||!1,preventFocus:!1,preSelection:null!==(t=r.props.selectsRange?r.props.startDate:r.props.selected)&&void 0!==t?t:s,highlightDates:Dn(r.props.highlightDates),holidays:Cn(n),focused:!1,shouldFocusDayInline:!1,isRenderAriaLiveMessage:!1}})),Dt(Ot(r),"clearPreventFocusTimeout",(function(){r.preventFocusTimeout&&clearTimeout(r.preventFocusTimeout)})),Dt(Ot(r),"setFocus",(function(){r.input&&r.input.focus&&r.input.focus({preventScroll:!0})})),Dt(Ot(r),"setBlur",(function(){r.input&&r.input.blur&&r.input.blur(),r.cancelFocusInput()})),Dt(Ot(r),"setOpen",(function(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];r.setState({open:e,preSelection:e&&r.state.open?r.state.preSelection:r.calcInitialState().preSelection,lastPreSelectChange:dr},(function(){e||r.setState((function(e){return{focused:!!t&&e.focused}}),(function(){!t&&r.setBlur(),r.setState({inputValue:null})}))}))})),Dt(Ot(r),"inputOk",(function(){return de.default(r.state.preSelection)})),Dt(Ot(r),"isCalendarOpen",(function(){return void 0===r.props.open?r.state.open&&!r.props.disabled&&!r.props.readOnly:r.props.open})),Dt(Ot(r),"handleFocus",(function(e){r.state.preventFocus||(r.props.onFocus(e),r.props.preventOpenOnFocus||r.props.readOnly||r.setOpen(!0)),r.setState({focused:!0})})),Dt(Ot(r),"sendFocusBackToInput",(function(){r.preventFocusTimeout&&r.clearPreventFocusTimeout(),r.setState({preventFocus:!0},(function(){r.preventFocusTimeout=setTimeout((function(){r.setFocus(),r.setState({preventFocus:!1})}))}))})),Dt(Ot(r),"cancelFocusInput",(function(){clearTimeout(r.inputFocusTimeout),r.inputFocusTimeout=null})),Dt(Ot(r),"deferFocusInput",(function(){r.cancelFocusInput(),r.inputFocusTimeout=setTimeout((function(){return r.setFocus()}),1)})),Dt(Ot(r),"handleDropdownFocus",(function(){r.cancelFocusInput()})),Dt(Ot(r),"handleBlur",(function(e){(!r.state.open||r.props.withPortal||r.props.showTimeInput)&&r.props.onBlur(e),r.setState({focused:!1})})),Dt(Ot(r),"handleCalendarClickOutside",(function(e){r.props.inline||r.setOpen(!1),r.props.onClickOutside(e),r.props.withPortal&&e.preventDefault()})),Dt(Ot(r),"handleChange",(function(){for(var e=arguments.length,t=new Array(e),n=0;n0&&(c=pt.default(o,i.slice(0,o.length),new Date)),It(c)||(c=new 
Date(o))),It(c)&&p?c:null));r.props.showTimeSelectOnly&&r.props.selected&&!Xt(d,r.props.selected)&&(d=null==d?mt.default(r.props.selected,{hours:Pe.default(r.props.selected),minutes:Me.default(r.props.selected),seconds:Oe.default(r.props.selected)}):mt.default(r.props.selected,{hours:Pe.default(d),minutes:Me.default(d),seconds:Oe.default(d)})),!d&&a.target.value||r.setSelected(d,a,!0)}})),Dt(Ot(r),"handleSelect",(function(e,t,n){if(r.props.shouldCloseOnSelect&&!r.props.showTimeSelect&&r.sendFocusBackToInput(),r.props.onChangeRaw&&r.props.onChangeRaw(t),r.setSelected(e,t,!1,n),r.props.showDateSelect&&r.setState({isRenderAriaLiveMessage:!0}),!r.props.shouldCloseOnSelect||r.props.showTimeSelect)r.setPreSelection(e);else if(!r.props.inline){r.props.selectsRange||r.setOpen(!1);var a=r.props,o=a.startDate,i=a.endDate;!o||i||ut.default(e,o)||r.setOpen(!1)}})),Dt(Ot(r),"setSelected",(function(e,t,n,a){var o=e;if(r.props.showYearPicker){if(null!==o&&fn(je.default(o),r.props))return}else if(r.props.showMonthYearPicker){if(null!==o&&sn(o,r.props))return}else if(null!==o&&an(o,r.props))return;var i=r.props,s=i.onChange,l=i.selectsRange,u=i.startDate,c=i.endDate;if(!$t(r.props.selected,o)||r.props.allowSameDay||l)if(null!==o&&(!r.props.selected||n&&(r.props.showTimeSelect||r.props.showTimeSelectOnly||r.props.showTimeInput)||(o=Ht(o,{hour:Pe.default(r.props.selected),minute:Me.default(r.props.selected),second:Oe.default(r.props.selected)})),r.props.inline||r.setState({preSelection:o}),r.props.focusSelectedMonth||r.setState({monthSelectedIn:a})),l){var f=u&&c;u||c?u&&!c&&(ut.default(o,u)?s([o,null],t):s([u,o],t)):s([o,null],t),f&&s([o,null],t)}else s(o,t);n||(r.props.onSelect(o,t),r.setState({inputValue:null}))})),Dt(Ot(r),"setPreSelection",(function(e){var t=void 0!==r.props.minDate,n=void 0!==r.props.maxDate,a=!0;if(e){var o=Qe.default(e);if(t&&n)a=Jt(e,r.props.minDate,r.props.maxDate);else if(t){var i=Qe.default(r.props.minDate);a=lt.default(e,i)||$t(o,i)}else if(n){var s=et.default(r.props.maxDate);a=ut.default(e,s)||$t(o,s)}}a&&r.setState({preSelection:e})})),Dt(Ot(r),"handleTimeChange",(function(e){var t=r.props.selected?r.props.selected:r.getPreSelection(),n=r.props.selected?e:Ht(t,{hour:Pe.default(e),minute:Me.default(e)});r.setState({preSelection:n}),r.props.onChange(n),r.props.shouldCloseOnSelect&&(r.sendFocusBackToInput(),r.setOpen(!1)),r.props.showTimeInput&&r.setOpen(!0),(r.props.showTimeSelectOnly||r.props.showTimeSelect)&&r.setState({isRenderAriaLiveMessage:!0}),r.setState({inputValue:null})})),Dt(Ot(r),"onInputClick",(function(){r.props.disabled||r.props.readOnly||r.setOpen(!0),r.props.onInputClick()})),Dt(Ot(r),"onInputKeyDown",(function(e){r.props.onKeyDown(e);var t=e.key;if(r.state.open||r.props.inline||r.props.preventOpenOnFocus){if(r.state.open){if("ArrowDown"===t||"ArrowUp"===t){e.preventDefault();var n=r.calendar.componentNode&&r.calendar.componentNode.querySelector('.react-datepicker__day[tabindex="0"]');return void(n&&n.focus({preventScroll:!0}))}var 
a=Ft(r.state.preSelection);"Enter"===t?(e.preventDefault(),r.inputOk()&&r.state.lastPreSelectChange===dr?(r.handleSelect(a,e),!r.props.shouldCloseOnSelect&&r.setPreSelection(a)):r.setOpen(!1)):"Escape"===t?(e.preventDefault(),r.sendFocusBackToInput(),r.setOpen(!1)):"Tab"===t&&r.setOpen(!1),r.inputOk()||r.props.onInputError({code:1,msg:cr})}}else"ArrowDown"!==t&&"ArrowUp"!==t&&"Enter"!==t||r.onInputClick()})),Dt(Ot(r),"onPortalKeyDown",(function(e){"Escape"===e.key&&(e.preventDefault(),r.setState({preventFocus:!0},(function(){r.setOpen(!1),setTimeout((function(){r.setFocus(),r.setState({preventFocus:!1})}))})))})),Dt(Ot(r),"onDayKeyDown",(function(e){r.props.onKeyDown(e);var t=e.key,n=Ft(r.state.preSelection);if("Enter"===t)e.preventDefault(),r.handleSelect(n,e),!r.props.shouldCloseOnSelect&&r.setPreSelection(n);else if("Escape"===t)e.preventDefault(),r.setOpen(!1),r.inputOk()||r.props.onInputError({code:1,msg:cr});else if(!r.props.disabledKeyboardNavigation){var a;switch(t){case"ArrowLeft":a=De.default(n,1);break;case"ArrowRight":a=be.default(n,1);break;case"ArrowUp":a=Ce.default(n,1);break;case"ArrowDown":a=ye.default(n,1);break;case"PageUp":a=Ee.default(n,1);break;case"PageDown":a=we.default(n,1);break;case"Home":a=_e.default(n,1);break;case"End":a=ke.default(n,1)}if(!a)return void(r.props.onInputError&&r.props.onInputError({code:1,msg:cr}));if(e.preventDefault(),r.setState({lastPreSelectChange:dr}),r.props.adjustDateOnChange&&r.setSelected(a),r.setPreSelection(a),r.props.inline){var o=Ze.default(n),i=Ze.default(a),s=je.default(n),l=je.default(a);o!==i||s!==l?r.setState({shouldFocusDayInline:!0}):r.setState({shouldFocusDayInline:!1})}}})),Dt(Ot(r),"onPopperKeyDown",(function(e){"Escape"===e.key&&(e.preventDefault(),r.sendFocusBackToInput())})),Dt(Ot(r),"onClearClick",(function(e){e&&e.preventDefault&&e.preventDefault(),r.sendFocusBackToInput(),r.props.selectsRange?r.props.onChange([null,null],e):r.props.onChange(null,e),r.setState({inputValue:null})})),Dt(Ot(r),"clear",(function(){r.onClearClick()})),Dt(Ot(r),"onScroll",(function(e){"boolean"==typeof r.props.closeOnScroll&&r.props.closeOnScroll?e.target!==document&&e.target!==document.documentElement&&e.target!==document.body||r.setOpen(!1):"function"==typeof r.props.closeOnScroll&&r.props.closeOnScroll(e)&&r.setOpen(!1)})),Dt(Ot(r),"renderCalendar",(function(){return 
r.props.inline||r.isCalendarOpen()?fe.default.createElement(ur,{ref:function(e){r.calendar=e},locale:r.props.locale,calendarStartDay:r.props.calendarStartDay,chooseDayAriaLabelPrefix:r.props.chooseDayAriaLabelPrefix,disabledDayAriaLabelPrefix:r.props.disabledDayAriaLabelPrefix,weekAriaLabelPrefix:r.props.weekAriaLabelPrefix,monthAriaLabelPrefix:r.props.monthAriaLabelPrefix,adjustDateOnChange:r.props.adjustDateOnChange,setOpen:r.setOpen,shouldCloseOnSelect:r.props.shouldCloseOnSelect,dateFormat:r.props.dateFormatCalendar,useWeekdaysShort:r.props.useWeekdaysShort,formatWeekDay:r.props.formatWeekDay,dropdownMode:r.props.dropdownMode,selected:r.props.selected,preSelection:r.state.preSelection,onSelect:r.handleSelect,onWeekSelect:r.props.onWeekSelect,openToDate:r.props.openToDate,minDate:r.props.minDate,maxDate:r.props.maxDate,selectsStart:r.props.selectsStart,selectsEnd:r.props.selectsEnd,selectsRange:r.props.selectsRange,startDate:r.props.startDate,endDate:r.props.endDate,excludeDates:r.props.excludeDates,excludeDateIntervals:r.props.excludeDateIntervals,filterDate:r.props.filterDate,onClickOutside:r.handleCalendarClickOutside,formatWeekNumber:r.props.formatWeekNumber,highlightDates:r.state.highlightDates,holidays:r.state.holidays,includeDates:r.props.includeDates,includeDateIntervals:r.props.includeDateIntervals,includeTimes:r.props.includeTimes,injectTimes:r.props.injectTimes,inline:r.props.inline,shouldFocusDayInline:r.state.shouldFocusDayInline,peekNextMonth:r.props.peekNextMonth,showMonthDropdown:r.props.showMonthDropdown,showPreviousMonths:r.props.showPreviousMonths,useShortMonthInDropdown:r.props.useShortMonthInDropdown,showMonthYearDropdown:r.props.showMonthYearDropdown,showWeekNumbers:r.props.showWeekNumbers,showYearDropdown:r.props.showYearDropdown,withPortal:r.props.withPortal,forceShowMonthNavigation:r.props.forceShowMonthNavigation,showDisabledMonthNavigation:r.props.showDisabledMonthNavigation,scrollableYearDropdown:r.props.scrollableYearDropdown,scrollableMonthYearDropdown:r.props.scrollableMonthYearDropdown,todayButton:r.props.todayButton,weekLabel:r.props.weekLabel,outsideClickIgnoreClass:lr,fixedHeight:r.props.fixedHeight,monthsShown:r.props.monthsShown,monthSelectedIn:r.state.monthSelectedIn,onDropdownFocus:r.handleDropdownFocus,onMonthChange:r.props.onMonthChange,onYearChange:r.props.onYearChange,dayClassName:r.props.dayClassName,weekDayClassName:r.props.weekDayClassName,monthClassName:r.props.monthClassName,timeClassName:r.props.timeClassName,showDateSelect:r.props.showDateSelect,showTimeSelect:r.props.showTimeSelect,showTimeSelectOnly:r.props.showTimeSelectOnly,onTimeChange:r.handleTimeChange,timeFormat:r.props.timeFormat,timeIntervals:r.props.timeIntervals,minTime:r.props.minTime,maxTime:r.props.maxTime,excludeTimes:r.props.excludeTimes,filterTime:r.props.filterTime,timeCaption:r.props.timeCaption,className:r.props.calendarClassName,container:r.props.calendarContainer,yearItemNumber:r.props.yearItemNumber,yearDropdownItemNumber:r.props.yearDropdownItemNumber,previousMonthAriaLabel:r.props.previousMonthAriaLabel,previousMonthButtonLabel:r.props.previousMonthButtonLabel,nextMonthAriaLabel:r.props.nextMonthAriaLabel,nextMonthButtonLabel:r.props.nextMonthButtonLabel,previousYearAriaLabel:r.props.previousYearAriaLabel,previousYearButtonLabel:r.props.previousYearButtonLabel,nextYearAriaLabel:r.props.nextYearAriaLabel,nextYearButtonLabel:r.props.nextYearButtonLabel,timeInputLabel:r.props.timeInputLabel,disabledKeyboardNavigation:r.props.disabledKeyboardNavigation,renderCustomHe
ader:r.props.renderCustomHeader,popperProps:r.props.popperProps,renderDayContents:r.props.renderDayContents,renderMonthContent:r.props.renderMonthContent,renderQuarterContent:r.props.renderQuarterContent,renderYearContent:r.props.renderYearContent,onDayMouseEnter:r.props.onDayMouseEnter,onMonthMouseLeave:r.props.onMonthMouseLeave,onYearMouseEnter:r.props.onYearMouseEnter,onYearMouseLeave:r.props.onYearMouseLeave,selectsDisabledDaysInRange:r.props.selectsDisabledDaysInRange,showTimeInput:r.props.showTimeInput,showMonthYearPicker:r.props.showMonthYearPicker,showFullMonthYearPicker:r.props.showFullMonthYearPicker,showTwoColumnMonthYearPicker:r.props.showTwoColumnMonthYearPicker,showFourColumnMonthYearPicker:r.props.showFourColumnMonthYearPicker,showYearPicker:r.props.showYearPicker,showQuarterYearPicker:r.props.showQuarterYearPicker,showPopperArrow:r.props.showPopperArrow,excludeScrollbar:r.props.excludeScrollbar,handleOnKeyDown:r.props.onKeyDown,handleOnDayKeyDown:r.onDayKeyDown,isInputFocused:r.state.focused,customTimeInput:r.props.customTimeInput,setPreSelection:r.setPreSelection},r.props.children):null})),Dt(Ot(r),"renderAriaLiveRegion",(function(){var e,t=r.props,n=t.dateFormat,a=t.locale,o=r.props.showTimeInput||r.props.showTimeSelect?"PPPPp":"PPPP";return e=r.props.selectsRange?"Selected start date: ".concat(Bt(r.props.startDate,{dateFormat:o,locale:a}),". ").concat(r.props.endDate?"End date: "+Bt(r.props.endDate,{dateFormat:o,locale:a}):""):r.props.showTimeSelectOnly?"Selected time: ".concat(Bt(r.props.selected,{dateFormat:n,locale:a})):r.props.showYearPicker?"Selected year: ".concat(Bt(r.props.selected,{dateFormat:"yyyy",locale:a})):r.props.showMonthYearPicker?"Selected month: ".concat(Bt(r.props.selected,{dateFormat:"MMMM yyyy",locale:a})):r.props.showQuarterYearPicker?"Selected quarter: ".concat(Bt(r.props.selected,{dateFormat:"yyyy, QQQ",locale:a})):"Selected date: ".concat(Bt(r.props.selected,{dateFormat:o,locale:a})),fe.default.createElement("span",{role:"alert","aria-live":"polite",className:"react-datepicker__aria-live"},e)})),Dt(Ot(r),"renderDateInput",(function(){var e,t=pe.default(r.props.className,Dt({},lr,r.state.open)),n=r.props.customInput||fe.default.createElement("input",{type:"text"}),a=r.props.customInputRef||"ref",o="string"==typeof r.props.value?r.props.value:"string"==typeof r.state.inputValue?r.state.inputValue:r.props.selectsRange?function(e,t,n){if(!e)return"";var r=Bt(e,n),a=t?Bt(t,n):"";return"".concat(r," - ").concat(a)}(r.props.startDate,r.props.endDate,r.props):Bt(r.props.selected,r.props);return fe.default.cloneElement(n,(Dt(e={},a,(function(e){r.input=e})),Dt(e,"value",o),Dt(e,"onBlur",r.handleBlur),Dt(e,"onChange",r.handleChange),Dt(e,"onClick",r.onInputClick),Dt(e,"onFocus",r.handleFocus),Dt(e,"onKeyDown",r.onInputKeyDown),Dt(e,"id",r.props.id),Dt(e,"name",r.props.name),Dt(e,"form",r.props.form),Dt(e,"autoFocus",r.props.autoFocus),Dt(e,"placeholder",r.props.placeholderText),Dt(e,"disabled",r.props.disabled),Dt(e,"autoComplete",r.props.autoComplete),Dt(e,"className",pe.default(n.props.className,t)),Dt(e,"title",r.props.title),Dt(e,"readOnly",r.props.readOnly),Dt(e,"required",r.props.required),Dt(e,"tabIndex",r.props.tabIndex),Dt(e,"aria-describedby",r.props.ariaDescribedBy),Dt(e,"aria-invalid",r.props.ariaInvalid),Dt(e,"aria-labelledby",r.props.ariaLabelledBy),Dt(e,"aria-required",r.props.ariaRequired),e))})),Dt(Ot(r),"renderClearButton",(function(){var 
e=r.props,t=e.isClearable,n=e.selected,a=e.startDate,o=e.endDate,i=e.clearButtonTitle,s=e.clearButtonClassName,l=void 0===s?"":s,u=e.ariaLabelClose,c=void 0===u?"Close":u;return!t||null==n&&null==a&&null==o?null:fe.default.createElement("button",{type:"button",className:"react-datepicker__close-icon ".concat(l).trim(),"aria-label":c,onClick:r.onClearClick,title:i,tabIndex:-1})})),r.state=r.calcInitialState(),r.preventFocusTimeout=null,r}return kt(n,[{key:"componentDidMount",value:function(){window.addEventListener("scroll",this.onScroll,!0)}},{key:"componentDidUpdate",value:function(e,t){var n,r;e.inline&&(n=e.selected,r=this.props.selected,n&&r?Ze.default(n)!==Ze.default(r)||je.default(n)!==je.default(r):n!==r)&&this.setPreSelection(this.props.selected),void 0!==this.state.monthSelectedIn&&e.monthsShown!==this.props.monthsShown&&this.setState({monthSelectedIn:0}),e.highlightDates!==this.props.highlightDates&&this.setState({highlightDates:Dn(this.props.highlightDates)}),t.focused||$t(e.selected,this.props.selected)||this.setState({inputValue:null}),t.open!==this.state.open&&(!1===t.open&&!0===this.state.open&&this.props.onCalendarOpen(),!0===t.open&&!1===this.state.open&&this.props.onCalendarClose())}},{key:"componentWillUnmount",value:function(){this.clearPreventFocusTimeout(),window.removeEventListener("scroll",this.onScroll,!0)}},{key:"renderInputContainer",value:function(){var e=this.props,t=e.showIcon,n=e.icon,r=e.calendarIconClassname;return fe.default.createElement("div",{className:"react-datepicker__input-container".concat(t?" react-datepicker__view-calendar-icon":"")},t&&fe.default.createElement(rr,{icon:n,className:r}),this.state.isRenderAriaLiveMessage&&this.renderAriaLiveRegion(),this.renderDateInput(),this.renderClearButton())}},{key:"render",value:function(){var e=this.renderCalendar();if(this.props.inline)return e;if(this.props.withPortal){var t=this.state.open?fe.default.createElement(ir,{enableTabLoop:this.props.enableTabLoop},fe.default.createElement("div",{className:"react-datepicker__portal",tabIndex:-1,onKeyDown:this.onPortalKeyDown},e)):null;return this.state.open&&this.props.portalId&&(t=fe.default.createElement(ar,{portalId:this.props.portalId,portalHost:this.props.portalHost},t)),fe.default.createElement("div",null,this.renderInputContainer(),t)}return fe.default.createElement(sr,{className:this.props.popperClassName,wrapperClassName:this.props.wrapperClassName,hidePopper:!this.isCalendarOpen(),portalId:this.props.portalId,portalHost:this.props.portalHost,popperModifiers:this.props.popperModifiers,targetComponent:this.renderInputContainer(),popperContainer:this.props.popperContainer,popperComponent:e,popperPlacement:this.props.popperPlacement,popperProps:this.props.popperProps,popperOnKeyDown:this.onPopperKeyDown,enableTabLoop:this.props.enableTabLoop})}}],[{key:"defaultProps",get:function(){return{allowSameDay:!1,dateFormat:"MM/dd/yyyy",dateFormatCalendar:"LLLL 
yyyy",onChange:function(){},disabled:!1,disabledKeyboardNavigation:!1,dropdownMode:"scroll",onFocus:function(){},onBlur:function(){},onKeyDown:function(){},onInputClick:function(){},onSelect:function(){},onClickOutside:function(){},onMonthChange:function(){},onCalendarOpen:function(){},onCalendarClose:function(){},preventOpenOnFocus:!1,onYearChange:function(){},onInputError:function(){},monthsShown:1,readOnly:!1,withPortal:!1,selectsDisabledDaysInRange:!1,shouldCloseOnSelect:!0,showTimeSelect:!1,showTimeInput:!1,showPreviousMonths:!1,showMonthYearPicker:!1,showFullMonthYearPicker:!1,showTwoColumnMonthYearPicker:!1,showFourColumnMonthYearPicker:!1,showYearPicker:!1,showQuarterYearPicker:!1,strictParsing:!1,timeIntervals:30,timeCaption:"Time",previousMonthAriaLabel:"Previous Month",previousMonthButtonLabel:"Previous Month",nextMonthAriaLabel:"Next Month",nextMonthButtonLabel:"Next Month",previousYearAriaLabel:"Previous Year",previousYearButtonLabel:"Previous Year",nextYearAriaLabel:"Next Year",nextYearButtonLabel:"Next Year",timeInputLabel:"Time",enableTabLoop:!0,yearItemNumber:jt,focusSelectedMonth:!1,showPopperArrow:!0,excludeScrollbar:!0,customTimeInput:null,calendarStartDay:void 0}}}]),n}(fe.default.Component),pr="input",dr="navigate";e.CalendarContainer=Jn,e.default=fr,e.getDefaultLocale=en,e.registerLocale=function(e,t){var n="undefined"!=typeof window?window:globalThis;n.__localeData__||(n.__localeData__={}),n.__localeData__[e]=t},e.setDefaultLocale=function(e){("undefined"!=typeof window?window:globalThis).__localeId__=e},Object.defineProperty(e,"__esModule",{value:!0})}(t,n(67294),n(45697),n(94184),n(71381),n(12274),n(49546),n(58545),n(78343),n(77349),n(63500),n(11640),n(8791),n(21593),n(7069),n(77982),n(54559),n(58793),n(59319),n(77881),n(39159),n(85817),n(20466),n(55855),n(9827),n(78966),n(56605),n(95570),n(28789),n(39880),n(4543),n(37042),n(62225),n(11503),n(44749),n(37950),n(99890),n(92300),n(84129),n(91857),n(69119),n(584),n(43703),n(94431),n(38148),n(83894),n(67090),n(4135),n(10876),n(96843),n(3151),n(49160),n(60792),n(86117),n(42699),n(313),n(24257),n(19013),n(76417),n(23855),n(58949),n(73935),n(12053),n(92311))},69590:function(e){var t="undefined"!==typeof Element,n="function"===typeof Map,r="function"===typeof Set,a="function"===typeof ArrayBuffer&&!!ArrayBuffer.isView;function o(e,i){if(e===i)return!0;if(e&&i&&"object"==typeof e&&"object"==typeof i){if(e.constructor!==i.constructor)return!1;var s,l,u,c;if(Array.isArray(e)){if((s=e.length)!=i.length)return!1;for(l=s;0!==l--;)if(!o(e[l],i[l]))return!1;return!0}if(n&&e instanceof Map&&i instanceof Map){if(e.size!==i.size)return!1;for(c=e.entries();!(l=c.next()).done;)if(!i.has(l.value[0]))return!1;for(c=e.entries();!(l=c.next()).done;)if(!o(l.value[1],i.get(l.value[0])))return!1;return!0}if(r&&e instanceof Set&&i instanceof Set){if(e.size!==i.size)return!1;for(c=e.entries();!(l=c.next()).done;)if(!i.has(l.value[0]))return!1;return!0}if(a&&ArrayBuffer.isView(e)&&ArrayBuffer.isView(i)){if((s=e.length)!=i.length)return!1;for(l=s;0!==l--;)if(e[l]!==i[l])return!1;return!0}if(e.constructor===RegExp)return e.source===i.source&&e.flags===i.flags;if(e.valueOf!==Object.prototype.valueOf)return e.valueOf()===i.valueOf();if(e.toString!==Object.prototype.toString)return e.toString()===i.toString();if((s=(u=Object.keys(e)).length)!==Object.keys(i).length)return!1;for(l=s;0!==l--;)if(!Object.prototype.hasOwnProperty.call(i,u[l]))return!1;if(t&&e instanceof 
Element)return!1;for(l=s;0!==l--;)if(("_owner"!==u[l]&&"__v"!==u[l]&&"__o"!==u[l]||!e.$$typeof)&&!o(e[u[l]],i[u[l]]))return!1;return!0}return e!==e&&i!==i}e.exports=function(e,t){try{return o(e,t)}catch(n){if((n.message||"").match(/stack|recursion/i))return console.warn("react-fast-compare cannot handle circular refs"),!1;throw n}}},58949:function(e,t,n){"use strict";n.r(t),n.d(t,{IGNORE_CLASS_NAME:function(){return h}});var r=n(67294),a=n(73935);function o(e,t){return o=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e},o(e,t)}function i(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}function s(e,t,n){return e===t||(e.correspondingElement?e.correspondingElement.classList.contains(n):e.classList.contains(n))}var l,u,c=(void 0===l&&(l=0),function(){return++l}),f={},p={},d=["touchstart","touchmove"],h="ignore-react-onclickoutside";function v(e,t){var n={};return-1!==d.indexOf(t)&&u&&(n.passive=!e.props.preventDefault),n}t.default=function(e,t){var n,l,d=e.displayName||e.name||"Component";return l=n=function(n){var l,h;function m(e){var r;return(r=n.call(this,e)||this).__outsideClickHandler=function(e){if("function"!==typeof r.__clickOutsideHandlerProp){var t=r.getInstance();if("function"!==typeof t.props.handleClickOutside){if("function"!==typeof t.handleClickOutside)throw new Error("WrappedComponent: "+d+" lacks a handleClickOutside(event) function for processing outside click events.");t.handleClickOutside(e)}else t.props.handleClickOutside(e)}else r.__clickOutsideHandlerProp(e)},r.__getComponentNode=function(){var e=r.getInstance();return t&&"function"===typeof t.setClickOutsideRef?t.setClickOutsideRef()(e):"function"===typeof e.setClickOutsideRef?e.setClickOutsideRef():(0,a.findDOMNode)(e)},r.enableOnClickOutside=function(){if("undefined"!==typeof document&&!p[r._uid]){"undefined"===typeof u&&(u=function(){if("undefined"!==typeof window&&"function"===typeof window.addEventListener){var e=!1,t=Object.defineProperty({},"passive",{get:function(){e=!0}}),n=function(){};return window.addEventListener("testPassiveEventSupport",n,t),window.removeEventListener("testPassiveEventSupport",n,t),e}}()),p[r._uid]=!0;var e=r.props.eventTypes;e.forEach||(e=[e]),f[r._uid]=function(e){var t;null!==r.componentNode&&(r.props.preventDefault&&e.preventDefault(),r.props.stopPropagation&&e.stopPropagation(),r.props.excludeScrollbar&&(t=e,document.documentElement.clientWidth<=t.clientX||document.documentElement.clientHeight<=t.clientY)||function(e,t,n){if(e===t)return!0;for(;e.parentNode||e.host;){if(e.parentNode&&s(e,t,n))return!0;e=e.parentNode||e.host}return e}(e.composed&&e.composedPath&&e.composedPath().shift()||e.target,r.componentNode,r.props.outsideClickIgnoreClass)===document&&r.__outsideClickHandler(e))},e.forEach((function(e){document.addEventListener(e,f[r._uid],v(i(r),e))}))}},r.disableOnClickOutside=function(){delete p[r._uid];var e=f[r._uid];if(e&&"undefined"!==typeof document){var t=r.props.eventTypes;t.forEach||(t=[t]),t.forEach((function(t){return document.removeEventListener(t,e,v(i(r),t))})),delete f[r._uid]}},r.getRef=function(e){return r.instanceRef=e},r._uid=c(),r}h=n,(l=m).prototype=Object.create(h.prototype),l.prototype.constructor=l,o(l,h);var g=m.prototype;return g.getInstance=function(){if(e.prototype&&!e.prototype.isReactComponent)return this;var t=this.instanceRef;return t.getInstance?t.getInstance():t},g.componentDidMount=function(){if("undefined"!==typeof document&&document.createElement){var 
e=this.getInstance();if(t&&"function"===typeof t.handleClickOutside&&(this.__clickOutsideHandlerProp=t.handleClickOutside(e),"function"!==typeof this.__clickOutsideHandlerProp))throw new Error("WrappedComponent: "+d+" lacks a function for processing outside click events specified by the handleClickOutside config option.");this.componentNode=this.__getComponentNode(),this.props.disableOnClickOutside||this.enableOnClickOutside()}},g.componentDidUpdate=function(){this.componentNode=this.__getComponentNode()},g.componentWillUnmount=function(){this.disableOnClickOutside()},g.render=function(){var t=this.props;t.excludeScrollbar;var n=function(e,t){if(null==e)return{};var n,r,a={},o=Object.keys(e);for(r=0;r=0||(a[n]=e[n]);return a}(t,["excludeScrollbar"]);return e.prototype&&e.prototype.isReactComponent?n.ref=this.getRef:n.wrappedRef=this.getRef,n.disableOnClickOutside=this.disableOnClickOutside,n.enableOnClickOutside=this.enableOnClickOutside,(0,r.createElement)(e,n)},m}(r.Component),n.displayName="OnClickOutside("+d+")",n.defaultProps={eventTypes:["mousedown","touchstart"],excludeScrollbar:t&&t.excludeScrollbar||!1,outsideClickIgnoreClass:h,preventDefault:!1,stopPropagation:!1},n.getClass=function(){return e.getClass?e.getClass():e},l}},12053:function(e,t,n){"use strict";n.r(t),n.d(t,{Manager:function(){return i},Popper:function(){return Re},Reference:function(){return Fe},usePopper:function(){return Te}});var r=n(67294),a=r.createContext(),o=r.createContext();function i(e){var t=e.children,n=r.useState(null),i=n[0],s=n[1],l=r.useRef(!1);r.useEffect((function(){return function(){l.current=!0}}),[]);var u=r.useCallback((function(e){l.current||s(e)}),[]);return r.createElement(a.Provider,{value:i},r.createElement(o.Provider,{value:u},t))}var s=function(e){return Array.isArray(e)?e[0]:e},l=function(e){if("function"===typeof e){for(var t=arguments.length,n=new Array(t>1?t-1:0),r=1;r0&&y(r.width)/e.offsetWidth||1,o=e.offsetHeight>0&&y(r.height)/e.offsetHeight||1);var i=(h(e)?d(e):window).visualViewport,s=!x()&&n,l=(r.left+(s&&i?i.offsetLeft:0))/a,u=(r.top+(s&&i?i.offsetTop:0))/o,c=r.width/a,f=r.height/o;return{width:c,height:f,top:u,right:l+c,bottom:u+f,left:l,x:l,y:u}}function D(e){var t=d(e);return{scrollLeft:t.pageXOffset,scrollTop:t.pageYOffset}}function C(e){return e?(e.nodeName||"").toLowerCase():null}function E(e){return((h(e)?e.ownerDocument:e.document)||window.document).documentElement}function S(e){return k(E(e)).left+D(e).scrollLeft}function _(e){return d(e).getComputedStyle(e)}function O(e){var t=_(e),n=t.overflow,r=t.overflowX,a=t.overflowY;return/auto|scroll|overlay|hidden/.test(n+a+r)}function M(e,t,n){void 0===n&&(n=!1);var r=v(t),a=v(t)&&function(e){var t=e.getBoundingClientRect(),n=y(t.width)/e.offsetWidth||1,r=y(t.height)/e.offsetHeight||1;return 1!==n||1!==r}(t),o=E(t),i=k(e,a,n),s={scrollLeft:0,scrollTop:0},l={x:0,y:0};return(r||!r&&!n)&&(("body"!==C(t)||O(o))&&(s=function(e){return e!==d(e)&&v(e)?{scrollLeft:(t=e).scrollLeft,scrollTop:t.scrollTop}:D(e);var t}(t)),v(t)?((l=k(t,!0)).x+=t.clientLeft,l.y+=t.clientTop):o&&(l.x=S(o))),{x:i.left+s.scrollLeft-l.x,y:i.top+s.scrollTop-l.y,width:i.width,height:i.height}}function P(e){var t=k(e),n=e.offsetWidth,r=e.offsetHeight;return Math.abs(t.width-n)<=1&&(n=t.width),Math.abs(t.height-r)<=1&&(r=t.height),{x:e.offsetLeft,y:e.offsetTop,width:n,height:r}}function T(e){return"html"===C(e)?e:e.assignedSlot||e.parentNode||(m(e)?e.host:null)||E(e)}function 
N(e){return["html","body","#document"].indexOf(C(e))>=0?e.ownerDocument.body:v(e)&&O(e)?e:N(T(e))}function A(e,t){var n;void 0===t&&(t=[]);var r=N(e),a=r===(null==(n=e.ownerDocument)?void 0:n.body),o=d(r),i=a?[o].concat(o.visualViewport||[],O(r)?r:[]):r,s=t.concat(i);return a?s:s.concat(A(T(i)))}function Z(e){return["table","td","th"].indexOf(C(e))>=0}function R(e){return v(e)&&"fixed"!==_(e).position?e.offsetParent:null}function j(e){for(var t=d(e),n=R(e);n&&Z(n)&&"static"===_(n).position;)n=R(n);return n&&("html"===C(n)||"body"===C(n)&&"static"===_(n).position)?t:n||function(e){var t=/firefox/i.test(w());if(/Trident/i.test(w())&&v(e)&&"fixed"===_(e).position)return null;var n=T(e);for(m(n)&&(n=n.host);v(n)&&["html","body"].indexOf(C(n))<0;){var r=_(n);if("none"!==r.transform||"none"!==r.perspective||"paint"===r.contain||-1!==["transform","perspective"].indexOf(r.willChange)||t&&"filter"===r.willChange||t&&r.filter&&"none"!==r.filter)return n;n=n.parentNode}return null}(e)||t}var L="top",F="bottom",I="right",Y="left",B="auto",H=[L,F,I,Y],z="start",U="end",W="clippingParents",q="viewport",K="popper",V="reference",Q=H.reduce((function(e,t){return e.concat([t+"-"+z,t+"-"+U])}),[]),G=[].concat(H,[B]).reduce((function(e,t){return e.concat([t,t+"-"+z,t+"-"+U])}),[]),X=["beforeRead","read","afterRead","beforeMain","main","afterMain","beforeWrite","write","afterWrite"];function $(e){var t=new Map,n=new Set,r=[];function a(e){n.add(e.name),[].concat(e.requires||[],e.requiresIfExists||[]).forEach((function(e){if(!n.has(e)){var r=t.get(e);r&&a(r)}})),r.push(e)}return e.forEach((function(e){t.set(e.name,e)})),e.forEach((function(e){n.has(e.name)||a(e)})),r}function J(e){var t;return function(){return t||(t=new Promise((function(n){Promise.resolve().then((function(){t=void 0,n(e())}))}))),t}}var ee={placement:"bottom",modifiers:[],strategy:"absolute"};function te(){for(var e=arguments.length,t=new Array(e),n=0;n=0?"x":"y"}function se(e){var t,n=e.reference,r=e.element,a=e.placement,o=a?ae(a):null,i=a?oe(a):null,s=n.x+n.width/2-r.width/2,l=n.y+n.height/2-r.height/2;switch(o){case L:t={x:s,y:n.y-r.height};break;case F:t={x:s,y:n.y+n.height};break;case I:t={x:n.x+n.width,y:l};break;case Y:t={x:n.x-r.width,y:l};break;default:t={x:n.x,y:n.y}}var u=o?ie(o):null;if(null!=u){var c="y"===u?"height":"width";switch(i){case z:t[u]=t[u]-(n[c]/2-r[c]/2);break;case U:t[u]=t[u]+(n[c]/2-r[c]/2)}}return t}var le={top:"auto",right:"auto",bottom:"auto",left:"auto"};function ue(e){var t,n=e.popper,r=e.popperRect,a=e.placement,o=e.variation,i=e.offsets,s=e.position,l=e.gpuAcceleration,u=e.adaptive,c=e.roundOffsets,f=e.isFixed,p=i.x,h=void 0===p?0:p,v=i.y,m=void 0===v?0:v,g="function"===typeof c?c({x:h,y:m}):{x:h,y:m};h=g.x,m=g.y;var b=i.hasOwnProperty("x"),w=i.hasOwnProperty("y"),x=Y,k=L,D=window;if(u){var C=j(n),S="clientHeight",O="clientWidth";if(C===d(n)&&"static"!==_(C=E(n)).position&&"absolute"===s&&(S="scrollHeight",O="scrollWidth"),a===L||(a===Y||a===I)&&o===U)k=F,m-=(f&&C===D&&D.visualViewport?D.visualViewport.height:C[S])-r.height,m*=l?1:-1;if(a===Y||(a===L||a===F)&&o===U)x=I,h-=(f&&C===D&&D.visualViewport?D.visualViewport.width:C[O])-r.width,h*=l?1:-1}var M,P=Object.assign({position:s},u&&le),T=!0===c?function(e,t){var n=e.x,r=e.y,a=t.devicePixelRatio||1;return{x:y(n*a)/a||0,y:y(r*a)/a||0}}({x:h,y:m},d(n)):{x:h,y:m};return h=T.x,m=T.y,l?Object.assign({},P,((M={})[k]=w?"0":"",M[x]=b?"0":"",M.transform=(D.devicePixelRatio||1)<=1?"translate("+h+"px, "+m+"px)":"translate3d("+h+"px, "+m+"px, 
0)",M)):Object.assign({},P,((t={})[k]=w?m+"px":"",t[x]=b?h+"px":"",t.transform="",t))}var ce={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(e){var t=e.state,n=e.options,r=e.name,a=n.offset,o=void 0===a?[0,0]:a,i=G.reduce((function(e,n){return e[n]=function(e,t,n){var r=ae(e),a=[Y,L].indexOf(r)>=0?-1:1,o="function"===typeof n?n(Object.assign({},t,{placement:e})):n,i=o[0],s=o[1];return i=i||0,s=(s||0)*a,[Y,I].indexOf(r)>=0?{x:s,y:i}:{x:i,y:s}}(n,t.rects,o),e}),{}),s=i[t.placement],l=s.x,u=s.y;null!=t.modifiersData.popperOffsets&&(t.modifiersData.popperOffsets.x+=l,t.modifiersData.popperOffsets.y+=u),t.modifiersData[r]=i}},fe={left:"right",right:"left",bottom:"top",top:"bottom"};function pe(e){return e.replace(/left|right|bottom|top/g,(function(e){return fe[e]}))}var de={start:"end",end:"start"};function he(e){return e.replace(/start|end/g,(function(e){return de[e]}))}function ve(e,t){var n=t.getRootNode&&t.getRootNode();if(e.contains(t))return!0;if(n&&m(n)){var r=t;do{if(r&&e.isSameNode(r))return!0;r=r.parentNode||r.host}while(r)}return!1}function me(e){return Object.assign({},e,{left:e.x,top:e.y,right:e.x+e.width,bottom:e.y+e.height})}function ge(e,t,n){return t===q?me(function(e,t){var n=d(e),r=E(e),a=n.visualViewport,o=r.clientWidth,i=r.clientHeight,s=0,l=0;if(a){o=a.width,i=a.height;var u=x();(u||!u&&"fixed"===t)&&(s=a.offsetLeft,l=a.offsetTop)}return{width:o,height:i,x:s+S(e),y:l}}(e,n)):h(t)?function(e,t){var n=k(e,!1,"fixed"===t);return n.top=n.top+e.clientTop,n.left=n.left+e.clientLeft,n.bottom=n.top+e.clientHeight,n.right=n.left+e.clientWidth,n.width=e.clientWidth,n.height=e.clientHeight,n.x=n.left,n.y=n.top,n}(t,n):me(function(e){var t,n=E(e),r=D(e),a=null==(t=e.ownerDocument)?void 0:t.body,o=g(n.scrollWidth,n.clientWidth,a?a.scrollWidth:0,a?a.clientWidth:0),i=g(n.scrollHeight,n.clientHeight,a?a.scrollHeight:0,a?a.clientHeight:0),s=-r.scrollLeft+S(e),l=-r.scrollTop;return"rtl"===_(a||n).direction&&(s+=g(n.clientWidth,a?a.clientWidth:0)-o),{width:o,height:i,x:s,y:l}}(E(e)))}function be(e,t,n,r){var a="clippingParents"===t?function(e){var t=A(T(e)),n=["absolute","fixed"].indexOf(_(e).position)>=0&&v(e)?j(e):e;return h(n)?t.filter((function(e){return h(e)&&ve(e,n)&&"body"!==C(e)})):[]}(e):[].concat(t),o=[].concat(a,[n]),i=o[0],s=o.reduce((function(t,n){var a=ge(e,n,r);return t.top=g(a.top,t.top),t.right=b(a.right,t.right),t.bottom=b(a.bottom,t.bottom),t.left=g(a.left,t.left),t}),ge(e,i,r));return s.width=s.right-s.left,s.height=s.bottom-s.top,s.x=s.left,s.y=s.top,s}function ye(e){return Object.assign({},{top:0,right:0,bottom:0,left:0},e)}function we(e,t){return t.reduce((function(t,n){return t[n]=e,t}),{})}function xe(e,t){void 0===t&&(t={});var n=t,r=n.placement,a=void 0===r?e.placement:r,o=n.strategy,i=void 0===o?e.strategy:o,s=n.boundary,l=void 0===s?W:s,u=n.rootBoundary,c=void 0===u?q:u,f=n.elementContext,p=void 0===f?K:f,d=n.altBoundary,v=void 0!==d&&d,m=n.padding,g=void 0===m?0:m,b=ye("number"!==typeof g?g:we(g,H)),y=p===K?V:K,w=e.rects.popper,x=e.elements[v?y:p],D=be(h(x)?x:x.contextElement||E(e.elements.popper),l,c,i),C=k(e.elements.reference),S=se({reference:C,element:w,strategy:"absolute",placement:a}),_=me(Object.assign({},w,S)),O=p===K?_:C,M={top:D.top-O.top+b.top,bottom:O.bottom-D.bottom+b.bottom,left:D.left-O.left+b.left,right:O.right-D.right+b.right},P=e.modifiersData.offset;if(p===K&&P){var T=P[a];Object.keys(M).forEach((function(e){var t=[I,F].indexOf(e)>=0?1:-1,n=[L,F].indexOf(e)>=0?"y":"x";M[e]+=T[n]*t}))}return M}function 
ke(e,t,n){return g(e,b(t,n))}var De={name:"preventOverflow",enabled:!0,phase:"main",fn:function(e){var t=e.state,n=e.options,r=e.name,a=n.mainAxis,o=void 0===a||a,i=n.altAxis,s=void 0!==i&&i,l=n.boundary,u=n.rootBoundary,c=n.altBoundary,f=n.padding,p=n.tether,d=void 0===p||p,h=n.tetherOffset,v=void 0===h?0:h,m=xe(t,{boundary:l,rootBoundary:u,padding:f,altBoundary:c}),y=ae(t.placement),w=oe(t.placement),x=!w,k=ie(y),D="x"===k?"y":"x",C=t.modifiersData.popperOffsets,E=t.rects.reference,S=t.rects.popper,_="function"===typeof v?v(Object.assign({},t.rects,{placement:t.placement})):v,O="number"===typeof _?{mainAxis:_,altAxis:_}:Object.assign({mainAxis:0,altAxis:0},_),M=t.modifiersData.offset?t.modifiersData.offset[t.placement]:null,T={x:0,y:0};if(C){if(o){var N,A="y"===k?L:Y,Z="y"===k?F:I,R="y"===k?"height":"width",B=C[k],H=B+m[A],U=B-m[Z],W=d?-S[R]/2:0,q=w===z?E[R]:S[R],K=w===z?-S[R]:-E[R],V=t.elements.arrow,Q=d&&V?P(V):{width:0,height:0},G=t.modifiersData["arrow#persistent"]?t.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},X=G[A],$=G[Z],J=ke(0,E[R],Q[R]),ee=x?E[R]/2-W-J-X-O.mainAxis:q-J-X-O.mainAxis,te=x?-E[R]/2+W+J+$+O.mainAxis:K+J+$+O.mainAxis,ne=t.elements.arrow&&j(t.elements.arrow),re=ne?"y"===k?ne.clientTop||0:ne.clientLeft||0:0,se=null!=(N=null==M?void 0:M[k])?N:0,le=B+te-se,ue=ke(d?b(H,B+ee-se-re):H,B,d?g(U,le):U);C[k]=ue,T[k]=ue-B}if(s){var ce,fe="x"===k?L:Y,pe="x"===k?F:I,de=C[D],he="y"===D?"height":"width",ve=de+m[fe],me=de-m[pe],ge=-1!==[L,Y].indexOf(y),be=null!=(ce=null==M?void 0:M[D])?ce:0,ye=ge?ve:de-E[he]-S[he]-be+O.altAxis,we=ge?de+E[he]+S[he]-be-O.altAxis:me,De=d&&ge?function(e,t,n){var r=ke(e,t,n);return r>n?n:r}(ye,de,we):ke(d?ye:ve,de,d?we:me);C[D]=De,T[D]=De-de}t.modifiersData[r]=T}},requiresIfExists:["offset"]};var Ce={name:"arrow",enabled:!0,phase:"main",fn:function(e){var t,n=e.state,r=e.name,a=e.options,o=n.elements.arrow,i=n.modifiersData.popperOffsets,s=ae(n.placement),l=ie(s),u=[Y,I].indexOf(s)>=0?"height":"width";if(o&&i){var c=function(e,t){return ye("number"!==typeof(e="function"===typeof e?e(Object.assign({},t.rects,{placement:t.placement})):e)?e:we(e,H))}(a.padding,n),f=P(o),p="y"===l?L:Y,d="y"===l?F:I,h=n.rects.reference[u]+n.rects.reference[l]-i[l]-n.rects.popper[u],v=i[l]-n.rects.reference[l],m=j(o),g=m?"y"===l?m.clientHeight||0:m.clientWidth||0:0,b=h/2-v/2,y=c[p],w=g-f[u]-c[d],x=g/2-f[u]/2+b,k=ke(y,x,w),D=l;n.modifiersData[r]=((t={})[D]=k,t.centerOffset=k-x,t)}},effect:function(e){var t=e.state,n=e.options.element,r=void 0===n?"[data-popper-arrow]":n;null!=r&&("string"!==typeof r||(r=t.elements.popper.querySelector(r)))&&ve(t.elements.popper,r)&&(t.elements.arrow=r)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function Ee(e,t,n){return void 0===n&&(n={x:0,y:0}),{top:e.top-t.height-n.y,right:e.right-t.width+n.x,bottom:e.bottom-t.height+n.y,left:e.left-t.width-n.x}}function Se(e){return[L,I,F,Y].some((function(t){return e[t]>=0}))}var _e=ne({defaultModifiers:[{name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(e){var t=e.state,n=e.instance,r=e.options,a=r.scroll,o=void 0===a||a,i=r.resize,s=void 0===i||i,l=d(t.elements.popper),u=[].concat(t.scrollParents.reference,t.scrollParents.popper);return 
o&&u.forEach((function(e){e.addEventListener("scroll",n.update,re)})),s&&l.addEventListener("resize",n.update,re),function(){o&&u.forEach((function(e){e.removeEventListener("scroll",n.update,re)})),s&&l.removeEventListener("resize",n.update,re)}},data:{}},{name:"popperOffsets",enabled:!0,phase:"read",fn:function(e){var t=e.state,n=e.name;t.modifiersData[n]=se({reference:t.rects.reference,element:t.rects.popper,strategy:"absolute",placement:t.placement})},data:{}},{name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(e){var t=e.state,n=e.options,r=n.gpuAcceleration,a=void 0===r||r,o=n.adaptive,i=void 0===o||o,s=n.roundOffsets,l=void 0===s||s,u={placement:ae(t.placement),variation:oe(t.placement),popper:t.elements.popper,popperRect:t.rects.popper,gpuAcceleration:a,isFixed:"fixed"===t.options.strategy};null!=t.modifiersData.popperOffsets&&(t.styles.popper=Object.assign({},t.styles.popper,ue(Object.assign({},u,{offsets:t.modifiersData.popperOffsets,position:t.options.strategy,adaptive:i,roundOffsets:l})))),null!=t.modifiersData.arrow&&(t.styles.arrow=Object.assign({},t.styles.arrow,ue(Object.assign({},u,{offsets:t.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),t.attributes.popper=Object.assign({},t.attributes.popper,{"data-popper-placement":t.placement})},data:{}},{name:"applyStyles",enabled:!0,phase:"write",fn:function(e){var t=e.state;Object.keys(t.elements).forEach((function(e){var n=t.styles[e]||{},r=t.attributes[e]||{},a=t.elements[e];v(a)&&C(a)&&(Object.assign(a.style,n),Object.keys(r).forEach((function(e){var t=r[e];!1===t?a.removeAttribute(e):a.setAttribute(e,!0===t?"":t)})))}))},effect:function(e){var t=e.state,n={popper:{position:t.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(t.elements.popper.style,n.popper),t.styles=n,t.elements.arrow&&Object.assign(t.elements.arrow.style,n.arrow),function(){Object.keys(t.elements).forEach((function(e){var r=t.elements[e],a=t.attributes[e]||{},o=Object.keys(t.styles.hasOwnProperty(e)?t.styles[e]:n[e]).reduce((function(e,t){return e[t]="",e}),{});v(r)&&C(r)&&(Object.assign(r.style,o),Object.keys(a).forEach((function(e){r.removeAttribute(e)})))}))}},requires:["computeStyles"]},ce,{name:"flip",enabled:!0,phase:"main",fn:function(e){var t=e.state,n=e.options,r=e.name;if(!t.modifiersData[r]._skip){for(var a=n.mainAxis,o=void 0===a||a,i=n.altAxis,s=void 0===i||i,l=n.fallbackPlacements,u=n.padding,c=n.boundary,f=n.rootBoundary,p=n.altBoundary,d=n.flipVariations,h=void 0===d||d,v=n.allowedAutoPlacements,m=t.options.placement,g=ae(m),b=l||(g===m||!h?[pe(m)]:function(e){if(ae(e)===B)return[];var t=pe(e);return[he(e),t,he(t)]}(m)),y=[m].concat(b).reduce((function(e,n){return e.concat(ae(n)===B?function(e,t){void 0===t&&(t={});var n=t,r=n.placement,a=n.boundary,o=n.rootBoundary,i=n.padding,s=n.flipVariations,l=n.allowedAutoPlacements,u=void 0===l?G:l,c=oe(r),f=c?s?Q:Q.filter((function(e){return oe(e)===c})):H,p=f.filter((function(e){return u.indexOf(e)>=0}));0===p.length&&(p=f);var d=p.reduce((function(t,n){return t[n]=xe(e,{placement:n,boundary:a,rootBoundary:o,padding:i})[ae(n)],t}),{});return Object.keys(d).sort((function(e,t){return d[e]-d[t]}))}(t,{placement:n,boundary:c,rootBoundary:f,padding:u,flipVariations:h,allowedAutoPlacements:v}):n)}),[]),w=t.rects.reference,x=t.rects.popper,k=new Map,D=!0,C=y[0],E=0;E=0,P=M?"width":"height",T=xe(t,{placement:S,boundary:c,rootBoundary:f,altBoundary:p,padding:u}),N=M?O?I:Y:O?F:L;w[P]>x[P]&&(N=pe(N));var 
A=pe(N),Z=[];if(o&&Z.push(T[_]<=0),s&&Z.push(T[N]<=0,T[A]<=0),Z.every((function(e){return e}))){C=S,D=!1;break}k.set(S,Z)}if(D)for(var R=function(e){var t=y.find((function(t){var n=k.get(t);if(n)return n.slice(0,e).every((function(e){return e}))}));if(t)return C=t,"break"},j=h?3:1;j>0;j--){if("break"===R(j))break}t.placement!==C&&(t.modifiersData[r]._skip=!0,t.placement=C,t.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}},De,Ce,{name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(e){var t=e.state,n=e.name,r=t.rects.reference,a=t.rects.popper,o=t.modifiersData.preventOverflow,i=xe(t,{elementContext:"reference"}),s=xe(t,{altBoundary:!0}),l=Ee(i,r),u=Ee(s,a,o),c=Se(l),f=Se(u);t.modifiersData[n]={referenceClippingOffsets:l,popperEscapeOffsets:u,isReferenceHidden:c,hasPopperEscaped:f},t.attributes.popper=Object.assign({},t.attributes.popper,{"data-popper-reference-hidden":c,"data-popper-escaped":f})}}]}),Oe=n(69590),Me=n.n(Oe),Pe=[],Te=function(e,t,n){void 0===n&&(n={});var a=r.useRef(null),o={onFirstUpdate:n.onFirstUpdate,placement:n.placement||"bottom",strategy:n.strategy||"absolute",modifiers:n.modifiers||Pe},i=r.useState({styles:{popper:{position:o.strategy,left:"0",top:"0"},arrow:{position:"absolute"}},attributes:{}}),s=i[0],l=i[1],u=r.useMemo((function(){return{name:"updateState",enabled:!0,phase:"write",fn:function(e){var t=e.state,n=Object.keys(t.elements);p.flushSync((function(){l({styles:c(n.map((function(e){return[e,t.styles[e]||{}]}))),attributes:c(n.map((function(e){return[e,t.attributes[e]]})))})}))},requires:["computeStyles"]}}),[]),d=r.useMemo((function(){var e={onFirstUpdate:o.onFirstUpdate,placement:o.placement,strategy:o.strategy,modifiers:[].concat(o.modifiers,[u,{name:"applyStyles",enabled:!1}])};return Me()(a.current,e)?a.current||e:(a.current=e,e)}),[o.onFirstUpdate,o.placement,o.strategy,o.modifiers,u]),h=r.useRef();return f((function(){h.current&&h.current.setOptions(d)}),[d]),f((function(){if(null!=e&&null!=t){var r=(n.createPopper||_e)(e,t,d);return h.current=r,function(){r.destroy(),h.current=null}}}),[e,t,n.createPopper]),{state:h.current?h.current.state:null,styles:s.styles,attributes:s.attributes,update:h.current?h.current.update:null,forceUpdate:h.current?h.current.forceUpdate:null}},Ne=function(){},Ae=function(){return Promise.resolve(null)},Ze=[];function Re(e){var t=e.placement,n=void 0===t?"bottom":t,o=e.strategy,i=void 0===o?"absolute":o,l=e.modifiers,c=void 0===l?Ze:l,f=e.referenceElement,p=e.onFirstUpdate,d=e.innerRef,h=e.children,v=r.useContext(a),m=r.useState(null),g=m[0],b=m[1],y=r.useState(null),w=y[0],x=y[1];r.useEffect((function(){u(d,g)}),[d,g]);var k=r.useMemo((function(){return{placement:n,strategy:i,onFirstUpdate:p,modifiers:[].concat(c,[{name:"arrow",enabled:null!=w,options:{element:w}}])}}),[n,i,p,c,w]),D=Te(f||v,g,k),C=D.state,E=D.styles,S=D.forceUpdate,_=D.update,O=r.useMemo((function(){return{ref:b,style:E.popper,placement:C?C.placement:n,hasPopperEscaped:C&&C.modifiersData.hide?C.modifiersData.hide.hasPopperEscaped:null,isReferenceHidden:C&&C.modifiersData.hide?C.modifiersData.hide.isReferenceHidden:null,arrowProps:{style:E.arrow,ref:x},forceUpdate:S||Ne,update:_||Ae}}),[b,x,n,C,E,_,S]);return s(h)(O)}var je=n(42473),Le=n.n(je);function Fe(e){var t=e.children,n=e.innerRef,a=r.useContext(o),i=r.useCallback((function(e){u(n,e),l(a,e)}),[n,a]);return r.useEffect((function(){return function(){return u(n,null)}}),[]),r.useEffect((function(){Le()(Boolean(a),"`Reference` should not be used outside of a 
`Manager` component.")}),[a]),s(t)({ref:i})}},88632:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=Object.assign||function(e){for(var t=1;t=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["bgColor","bgD","fgD","fgColor","size","title","viewBoxSize"]);return i.default.createElement("svg",r({},f,{height:l,ref:t,viewBox:"0 0 "+c+" "+c,width:l}),u?i.default.createElement("title",null,u):null,i.default.createElement("path",{d:a,fill:n}),i.default.createElement("path",{d:o,fill:s}))}));c.displayName="QRCodeSvg",c.propTypes=l,c.defaultProps=u,t.default=c},1653:function(e,t,n){"use strict";var r=Object.assign||function(e){for(var t=1;t=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["bgColor","fgColor","level","size","value"]),d=new a.default(-1,o.default[s]);d.addData(f),d.make();var h=d.modules;return l.default.createElement(u.default,r({},p,{bgColor:n,bgD:h.map((function(e,t){return e.map((function(e,n){return e?"":"M "+n+" "+t+" l 1 0 0 1 -1 0 Z"})).join(" ")})).join(" "),fgColor:i,fgD:h.map((function(e,t){return e.map((function(e,n){return e?"M "+n+" "+t+" l 1 0 0 1 -1 0 Z":""})).join(" ")})).join(" "),ref:t,size:c,viewBoxSize:h.length}))}));p.displayName="QRCode",p.propTypes=f,p.defaultProps={bgColor:"#FFFFFF",fgColor:"#000000",level:"L",size:256},t.ZP=p},24754:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.autoprefix=void 0;var r,a=n(2525),o=(r=a)&&r.__esModule?r:{default:r},i=Object.assign||function(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:"span";return function(n){function r(){var n,o,l;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,r);for(var u=arguments.length,c=Array(u),f=0;f1&&void 0!==arguments[1]?arguments[1]:"span";return function(n){function r(){var n,o,l;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,r);for(var u=arguments.length,c=Array(u),f=0;f0&&void 0!==arguments[0]?arguments[0]:[],n=[];return(0,i.default)(t,(function(t){Array.isArray(t)?e(t).map((function(e){return n.push(e)})):(0,o.default)(t)?(0,a.default)(t,(function(e,t){!0===e&&n.push(t),n.push(t+"-"+e)})):(0,r.default)(t)&&n.push(t)})),n};t.default=l},79941:function(e,t,n){"use strict";t.tz=void 0;var r=u(n(14147)),a=u(n(18556)),o=u(n(24754)),i=u(n(91765)),s=u(n(36002)),l=u(n(57742));function u(e){return e&&e.__esModule?e:{default:e}}i.default,t.tz=i.default,s.default,l.default;var c=function(e){for(var t=arguments.length,n=Array(t>1?t-1:0),i=1;i1&&void 0!==arguments[1])||arguments[1];n[e]=t};return 0===e&&r("first-child"),e===t-1&&r("last-child"),(0===e||e%2===0)&&r("even"),1===Math.abs(e%2)&&r("odd"),r("nth-child",e),n}},18556:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.mergeClasses=void 0;var r=i(n(2525)),a=i(n(50361)),o=Object.assign||function(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:[],n=e.default&&(0,a.default)(e.default)||{};return t.map((function(t){var a=e[t];return a&&(0,r.default)(a,(function(e,t){n[t]||(n[t]={}),n[t]=o({},n[t],a[t])})),t})),n};t.default=s},42473:function(e){"use strict";var t=function(){};e.exports=t}}]); \ No newline at end of file diff --git a/web/gui/v2/6613.b8903cda67bd33100ce4.chunk.js.LICENSE.txt b/web/gui/v2/8459.add89d7bb0434b110cd3.chunk.js.LICENSE.txt similarity index 100% rename from web/gui/v2/6613.b8903cda67bd33100ce4.chunk.js.LICENSE.txt rename to 
web/gui/v2/8459.add89d7bb0434b110cd3.chunk.js.LICENSE.txt diff --git a/web/gui/v2/8663.c6d53b400dd7461702e6.chunk.js b/web/gui/v2/8663.c6d53b400dd7461702e6.chunk.js new file mode 100644 index 00000000000000..4a8a9d428dd3ec --- /dev/null +++ b/web/gui/v2/8663.c6d53b400dd7461702e6.chunk.js @@ -0,0 +1 @@ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="1ed24772-ca97-4754-aca2-cd9973693fb0",e._sentryDebugIdIdentifier="sentry-dbid-1ed24772-ca97-4754-aca2-cd9973693fb0")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[8663],{36925:function(e,t,n){n.d(t,{vV:function(){return r}});n(74916),n(77601);var a=/^(([^<>()\[\]\\.,;:\s@"]+(\.[^<>()\[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$/,r=function(e){return!!e&&a.test(e)}},86414:function(e,t,n){n.d(t,{m:function(){return d}});var a=n(93433),r=n(29439),o=(n(26699),n(32023),n(92222),n(21249),n(57640),n(9924),n(57327),n(41539),n(88449),n(2490),n(59849),n(74916),n(15306),n(67294)),i=n(93416),l=n(36925),c=(0,n(71893).default)(i.Select).withConfig({displayName:"styled__StyledSelect",componentId:"sc-bkkrx-0"})(["width:100%;"]),s=(n(66992),n(70189),n(78783),n(88921),n(96248),n(13599),n(11477),n(64362),n(15389),n(90401),n(45164),n(91238),n(54837),n(87485),n(56767),n(76651),n(61437),n(35285),n(39865),n(33948),n(51532),n(10072),n(23042),n(99137),n(71957),n(96306),n(103),n(74592),n(58276),n(35082),n(12813),n(18222),n(38563),n(50336),n(7512),function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"name";return e.length?"string"===typeof e[0]?(0,a.Z)(new Set(e)):(0,a.Z)(new Map(e.map((function(e){return[e[t],e]}))).values()):[]}),u=[],d=function(e){var t=e.invitations,n=e.setInvitations,d=(0,o.useState)(u),f=(0,r.Z)(d,2),m=f[0],p=f[1],v=(0,o.useState)(""),b=(0,r.Z)(v,2),g=b[0],h=b[1],y=(0,o.useState)(""),w=(0,r.Z)(y,2),E=w[0],x=w[1],k=function(){return x("")},Z=(0,o.useCallback)((function(e){h(e.toLowerCase())}),[h]),C=o.useCallback((function(e){var t=e.emails,a=void 0===t?u:t,r=e.invitations,o=void 0===r?u:r;if(e.isEmailValid){var i=s(a),l=s(o);return p(i),n(l),h(""),void k()}x("Invalid Email")}),[n]),S=(0,o.useCallback)((function(e){return(0,l.vV)(e)&&!m.includes(e)}),[m]),I=(0,o.useCallback)((function(e){C({isEmailValid:!0,emails:e.map((function(e){return e.value})),invitations:e.map((function(e){return{email:e.value,name:e.value.split("@")[0]}}))})}),[p]),O=(0,o.useCallback)((function(e){var n=e.clipboardData.getData("Text").toLowerCase().replace(/ /g,",").replace(/,,/g,",").split(",").filter((function(e){return S(e)}))||u;C({emails:[].concat((0,a.Z)(m),(0,a.Z)(n)),invitations:[].concat((0,a.Z)(t),(0,a.Z)(n.map((function(e){return{email:e,name:e.split("@")[0]}})))),isEmailValid:n.length>0}),e.preventDefault()}),[m,t,S,C]),j=(0,o.useCallback)((function(e){if(g)switch(k(),e.key){case"Enter":case"Tab":case",":case" ":C({emails:[].concat((0,a.Z)(m),[g]),invitations:[].concat((0,a.Z)(t),[{email:g,name:g.split("@")[0]}]),isEmailValid:S(g)}),e.preventDefault()}}),[m,g,t,S,C]);return 
o.createElement(i.Flex,{justifyContent:"space-between",column:!0,onPaste:O},o.createElement(c,{components:{DropdownIndicator:null},inputValue:g,isClearable:!0,isMulti:!0,menuIsOpen:!1,onBlur:function(){g&&C({emails:[].concat((0,a.Z)(m),[g]),invitations:[].concat((0,a.Z)(t),[{email:g,name:g.split("@")[0]}]),isEmailValid:S(g)})},onChange:I,onInputChange:Z,onKeyDown:j,onClear:function(){p(u),n(u)},placeholder:"Enter an email and hit enter",value:m.map((function(e){return{label:t=e,value:t};var t}))}),E&&o.createElement(i.Text,{color:"error"},E))}},18663:function(e,t,n){n.d(t,{l:function(){return U}});var a=n(87462),r=n(15861),o=n(29439),i=n(64687),l=n.n(i),c=(n(21249),n(57640),n(9924),n(57327),n(41539),n(88449),n(2490),n(59849),n(92222),n(67294)),s=n(93416),u=n(92432),d=n(74855),f=n(52631),m=n(90025),p=n(91008),v=n(86414),b=n(74059),g=n(95383),h=n(35838),y=n(53338),w=n(16645),E=n(7693),x=n(92903),k=n(87181),Z=function(e){var t=e.id,n=e.handleDelete,a=(0,h.D)(t,"email");return c.createElement(s.Flex,{justifyContent:"between",alignItems:"center"},c.createElement(s.Flex,{gap:4},c.createElement(s.Icon,{color:"text",name:"check"}),c.createElement(s.Text,null,a)),c.createElement(s.Button,{flavour:"borderless",icon:"trashcan",onClick:function(){return n({email:a})}}))},C=n(93017),S=n(36925),I=n(33335),O=n(8048),j=n(46475),_=n(39979),D=n(54005),R=n(63346),T=function(e){var t=e.email;return(0,S.vV)(t)},P=(0,_.Z)(s.Button),B={header:"Invitations",text:"Invitations successfully sent!"},U=function(e){var t=e.onClose,n=e.isSubmodal,i=void 0!==n&&n,S=(0,b.OS)(),_=S.id,U=S.name,F=S.slug,N=(0,g.yE)(),V=(0,c.useState)(N),L=(0,o.Z)(V,2),M=L[0],q=L[1],A=(0,c.useState)([]),Q=(0,o.Z)(A,2),Y=Q[0],G=Q[1],J=(0,c.useState)([]),z=(0,o.Z)(J,2),H=z[0],K=z[1],$=(0,c.useState)(),W=(0,o.Z)($,2),X=W[0],ee=W[1],te=(0,D.Z)(),ne=te.sendLog,ae=te.isReady,re=(0,h.E)(_),oe=(0,o.Z)(re,4),ie=oe[0],le=oe[1],ce=oe[2],se=oe[3],ue=(0,C.Z)(),de=(0,o.Z)(ue,2)[1],fe=(0,c.useCallback)((function(e){var n=e||B,a=n.header,r=n.text,o=(0,d.sc)({header:a,text:r,success:!0}),i=H.filter(T).map((function(e){return e.email})).join(",");(0,u.L)("invite","invite-sent","".concat(X,"::").concat(i,"::").concat(M.join(",")),"","","invite-sent"),f.Z.success(o,{context:"manageInvitations"}),t(),ae&&ne({isSuccess:!0},!0)}),[ne,ae]),me=function(){var e=(0,r.Z)(l().mark((function e(){var t,n,a;return l().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:t=H.filter(T),n=t.map((function(e){return{email:e.email,name:e.name,role:X,roomIDs:M}})),a="".concat(window.location.origin,"/spaces/").concat(F,"/join-space"),ce(n,a,{onSuccess:fe,onError:function(e){de(e),ne({isFailure:!0,error:e.message},!0)}});case 4:case"end":return e.stop()}}),e)})));return function(){return e.apply(this,arguments)}}(),pe=(0,I.QB)(),ve=function(e){return function(t){var n=t.email;e&&le(e),K(H.filter((function(e){return e.email!==n}))),G(Y.filter((function(e){return e.email!==n})))}},be=(0,c.useCallback)((function(){q([])}),[q]),ge="member"===X;return c.createElement(w.u6,{onClose:t,closeOnClickOutside:!1},c.createElement(R.ZP,{feature:"ManageInvitationsModal"},c.createElement(E.x,{onClose:t,isSubmodal:i,title:"Invite Users"}),c.createElement(x.B,null,"Invite users to\xa0",U),c.createElement(w.TZ,null,c.createElement(k.qQ,null,"Send invitations to your team"),c.createElement(k.ne,null,"TIP: You can send more invitations at once, separate each with a 
comma."),c.createElement(v.m,{invitations:H,setInvitations:K}),c.createElement("br",null),c.createElement(k.qQ,null,"Rooms"),c.createElement(s.Flex,{alignItems:"center",justifyContent:"between",margin:[1,0,2]},c.createElement(s.TextSmall,null,"Choose one or more rooms you'd like to invite users to."),!!M.length&&c.createElement(s.Button,{onClick:be,padding:[0],flavour:"borderless","data-ga":"rooms-clear",label:"Clear",small:!0},"Clear")),c.createElement(s.Box,{"data-testid":"invite-selectRoom"},c.createElement(m.Z,(0,a.Z)({selectedValue:M,onChange:q},ge?{formatOptions:function(e){return{isDisabled:e.name===y.TY}},filterValues:function(e){return e.label===y.TY}}:{}))),c.createElement("br",null),c.createElement(k.qQ,null,"Role"),c.createElement(k.ne,null,"Choose a role for invited user."," ",c.createElement(p.Z,{href:j.R,target:"_blank",rel:"noopener noreferrer",Component:s.TextSmall},"Learn more")),c.createElement(s.Box,{"data-testid":"invite-selectRole"},c.createElement(O.Z,{availableRoles:pe,dataGA:"invite-to-space",dataTestId:"invite-selectRole",onChange:function(e){ee(e.target.value)},value:X})),c.createElement(k.p7,null,c.createElement(P,{label:"Send",onClick:me,disabled:0===H.length||!X,flavour:"hollow",isLoading:se,"data-ga":"manage-invitations-modal::click-send::modal-footer"})),c.createElement(s.H5,{margin:[2,0,0]},"Invitations awaiting response"),c.createElement(s.Flex,{column:!0},ie.length>0?ie.map((function(e){return c.createElement(Z,{key:e,handleDelete:ve(e),id:e})})):c.createElement(k.xB,null,c.createElement("br",null),c.createElement(k.qQ,null,"You haven't invited any users yet."))))))}},87181:function(e,t,n){n.d(t,{ne:function(){return c},p7:function(){return i},qQ:function(){return o},xB:function(){return l}});var a=n(71893),r=n(93416),o=(0,a.default)(r.H5).withConfig({displayName:"styled__StyledH5",componentId:"sc-1kusjmi-0"})(["display:flex;align-items:center;"]),i=a.default.div.withConfig({displayName:"styled__FormRow",componentId:"sc-1kusjmi-1"})(["width:100%;display:flex;flex-flow:row no-wrap;justify-content:flex-end;margin-top:",";"],(0,r.getSizeBy)(2)),l=a.default.div.withConfig({displayName:"styled__StyledUserInvitationEmptyListItem",componentId:"sc-1kusjmi-2"})(["display:flex;flex-flow:column nowrap;align-items:center;"]),c=(0,a.default)(r.TextSmall).withConfig({displayName:"styled__StyledSecondaryText",componentId:"sc-1kusjmi-3"})(["margin:2px 0 8px;"])},46475:function(e,t,n){n.d(t,{R:function(){return a}});var a="https://learn.netdata.cloud/docs/nightly/concepts/role-based-access-model"},8048:function(e,t,n){n(2707),n(21249),n(57640),n(9924),n(26699),n(32023),n(9653),n(92222);var a=n(67294),r=n(93416),o=n(71893),i=n(64637),l=n(87917),c=n(74059),s=n(91008),u=n(79655),d=n(33335),f=n(99826),m={admin:"Users with this role can control Spaces, Rooms, Nodes, Users and Billing. They can also access any Room in the Space.",member:"Users with this role can create Rooms and invite other Members. They can only see the Rooms they belong to and all Nodes in the All Nodes room",manager:"Users with this role can manage Rooms and Users. They can access any Room in the Space.",troubleshooter:"Users with this role can use Netdata to troubleshoot, not manage entities. 
They can access any Room in the Space.",observer:"Users with this role can only view data in specific Rooms.",billing:"Users with this role can handle billing options and invoices."},p=(0,o.default)(r.Flex).withConfig({displayName:"rolePicker__PlanBadge",componentId:"sc-ypuqww-0"})(["pointer-events:auto;"]);t.Z=function(e){var t=e.availableRoles,n=e.dataGA,o=e.dataTestId,v=e.onChange,b=e.value,g=(0,c.OS)("plan"),h=(0,a.useMemo)((function(){return(0,l.xJ)(g).map((function(e){return{isChecked:e===b,isEnabled:t.includes(e),role:e}})).sort((function(e,t){return Number(t.isEnabled)-Number(e.isEnabled)}))}),[t,l.xJ,g,b]),y=(0,d.gI)("billing:ReadAll"),w=(0,f.Z)().url;return a.createElement(r.Flex,{column:!0,gap:2,"data-testid":"".concat(o,"-roleOptions")},h.map((function(e){var t=e.isChecked,l=e.isEnabled,c=e.role,d=l?void 0:"medium",f="troubleshooter"===c?"pro":"business";return a.createElement(r.RadioButton,{key:c,checked:t,"data-ga":"".concat(n,"::select-role-").concat(c,"::global-view"),"data-testid":"".concat(o,"-").concat(c,"Option"),disabled:!l,onChange:v,value:c,alignItems:"start"},a.createElement(r.Flex,{column:!0},a.createElement(r.Flex,{gap:2,alignItems:"center"},a.createElement(r.Text,{opacity:d},(0,i.fm)(c)),!l&&a.createElement(p,{background:"sideBarMini",border:{side:"all",color:"border"},cursor:"initial",padding:[1],round:!0},a.createElement(s.Z,{align:"bottom",as:u.rU,boxProps:{as:r.Flex},color:"text",Component:r.TextMicro,content:"Upgrade your plan in order to use this role","data-ga":"".concat(n,"::click-plan-badge-").concat(f,"::global-view"),disabled:!y,hoverColor:"textFocus",showToolTip:!0,strong:!0,to:w},"Upgrade now!"))),a.createElement(r.TextSmall,{color:"textLite",opacity:d},m[c])))})))}},35838:function(e,t,n){n.d(t,{D:function(){return D},E:function(){return _}});var a=n(15861),r=n(29439),o=n(4942),i=n(93433),l=n(64687),c=n.n(l),s=(n(66992),n(41539),n(70189),n(78783),n(88921),n(96248),n(13599),n(11477),n(64362),n(15389),n(90401),n(45164),n(91238),n(54837),n(87485),n(56767),n(76651),n(61437),n(35285),n(39865),n(33948),n(92222),n(21249),n(57640),n(9924),n(15581),n(2490),n(34514),n(54747),n(57327),n(88449),n(59849),n(47941),n(82526),n(38880),n(49337),n(33321),n(69070),n(67294)),u=n(4480),d=(0,u.xu)({key:"invitation",default:{id:"",email:""}}),f=(0,u.xu)({key:"invitationIds",default:[]}),m=n(45987),p=n(26398),v=["role"];function b(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}var g=function(e){return e.map((function(e){return{id:e.id,email:e.email}}))},h=function(e){return p.Z.get("/api/v2/spaces/".concat(e,"/invitations"),{transform:g})},y={member:1,admin:2,manager:3,troubleshooter:4,observer:5,billing:6},w=function(e){return e.map((function(e){var t=e.role,n=(0,m.Z)(e,v);if(void 0===y[t])throw new Error("role not found");return function(e){for(var t=1;t1};function S(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function I(e){for(var t=1;t0?(Z=(0,l.default)(t),x=(0,l.default)(e)):(Z=(0,l.default)(e),x=(0,l.default)(t));var I,S=(0,m.Z)(x,Z),M=((0,D.Z)(x)-(0,D.Z)(Z))/1e3,k=Math.round((S-M)/60);if(k<2)return null!==n&&void 
0!==n&&n.includeSeconds?S<5?i.formatDistance("lessThanXSeconds",5,E):S<10?i.formatDistance("lessThanXSeconds",10,E):S<20?i.formatDistance("lessThanXSeconds",20,E):S<40?i.formatDistance("halfAMinute",0,E):S<60?i.formatDistance("lessThanXMinutes",1,E):i.formatDistance("xMinutes",1,E):0===k?i.formatDistance("lessThanXMinutes",1,E):i.formatDistance("xMinutes",k,E);if(k<45)return i.formatDistance("xMinutes",k,E);if(k<90)return i.formatDistance("aboutXHours",1,E);if(k30)){var r=localStorage.getItem("dismissLicenceWarning");r&&(0,i.default)((0,o.default)(r))&&!function(e){return(0,u.Z)(1,arguments),(0,l.default)(e).getTime()0||(d(!0),a<0&&w(!0))}}}}),[e]),c?h?r.createElement(S.Layer,{full:!0,backdropProps:{backdropBlur:"3px"}},r.createElement(M.Z,{testId:"onprem-banner",width:"100%",background:"errorBackground",position:"absolute",top:"0",tooltipProps:{align:"top"},zIndex:20},r.createElement(S.Flex,{justifyContent:"center",alignItems:"center",width:"100%",gap:2},r.createElement(S.Text,null,"Your Netdata Enterprise On-prem License has expired on ",(0,x.default)(p,"PPPP"),". Please contact your admin /"," ",r.createElement(k.Z,{"data-testid":"renew",href:"mailto:billing@netdata.cloud",as:"a",cursor:"pointer",textDecoration:"underline",color:"main"},"billing@netdata.cloud")," ","to renew your license.")))):r.createElement(M.Z,{testId:"onprem-banner",width:"100%",background:"warningBackground",position:"absolute",top:"0",onClose:function(){d(),localStorage.setItem("dismissLicenceWarning",(0,s.Z)(new Date,{days:1}).toISOString())},tooltipProps:{align:"top"},zIndex:20},r.createElement(S.Flex,{justifyContent:"center",alignItems:"center",width:"100%",gap:2},r.createElement(S.Text,null,"Your Netdata Enterprise On-prem License will expire in ",Z(p),". Please contact your admin /"," ",r.createElement(k.Z,{"data-testid":"renew",href:"mailto:billing@netdata.cloud",as:"a",cursor:"pointer",textDecoration:"underline",color:"main"},"billing@netdata.cloud")," ","to renew your license."))):null}}}]); \ No newline at end of file diff --git a/web/gui/v2/8977.321a7a13b267546a6e7c.chunk.js b/web/gui/v2/8977.321a7a13b267546a6e7c.chunk.js new file mode 100644 index 00000000000000..5a26dd0510e60c --- /dev/null +++ b/web/gui/v2/8977.321a7a13b267546a6e7c.chunk.js @@ -0,0 +1 @@ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="58e3915b-2995-41ea-91ae-ea72a0b7b46d",e._sentryDebugIdIdentifier="sentry-dbid-58e3915b-2995-41ea-91ae-ea72a0b7b46d")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[8977],{98977:function(e,n,f){f.r(n);f(41539),f(88674),f(17727);var t=f(67294),u=f(1043),d=f(78266),l=f(54005);n.default=function(){var e=(0,l.Z)(),n=e.sendLog,f=e.isReady,o=(0,u.T4)(),a=(0,t.useCallback)((function(){n({feature:"SignUpThankYou"}).finally((function(){o()}))}),[o,f]);return(0,t.useEffect)((function(){var e=!0,n=null;return f?a():n=setTimeout((function(){e&&a()}),1e3),function(){e=!1,n&&(clearTimeout(n),n=null)}}),[f]),t.createElement(d.Z,null)}}}]); \ No newline at end of file diff --git a/web/gui/v2/90.c0f1d633c6e943af5628.chunk.js b/web/gui/v2/90.c0f1d633c6e943af5628.chunk.js new file mode 100644 index 00000000000000..a972a266f385b3 --- 
/dev/null +++ b/web/gui/v2/90.c0f1d633c6e943af5628.chunk.js @@ -0,0 +1 @@ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="23da8a4f-59e8-47e8-801f-ce9233a00cba",e._sentryDebugIdIdentifier="sentry-dbid-23da8a4f-59e8-47e8-801f-ce9233a00cba")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[90],{16978:function(e,t,n){var a=n(87462),r=n(45987),o=n(67294),c=n(54576),l=n(70486),i=n(86954),s=n(20581),u=n(16374),d=n(1981),m=n(28138),g=n(18892),f=n(63076),p=n(65855),h=["width","height"],v=(0,o.forwardRef)((function(e,t){var n=e.width,c=e.height,l=(0,r.Z)(e,h);return o.createElement(i.ChartWrapper,{width:n,height:c},o.createElement(p.x1,(0,a.Z)({hasHeader:!1,hasFilters:!1,hasFooter:!1,width:n,height:c},l,{ref:t})))})),E=(0,l.default)(v,{tile:!0}),y={dygraph:p.ZP,easypiechart:m.ZP,gauge:u.ZP,number:g.ZP,groupBoxes:s.ZP,d3pie:d.default,bars:f.ZP},w=function(e){var t=(0,c.useChart)(),n=(0,o.useMemo)((function(){if(!t)return null;var e=t.getAttributes(),n=e.chartLibrary;return e.sparkline?E:y[n]}),[t]);return n?o.createElement(n,(0,a.Z)({},e,{chart:t})):null};t.Z=(0,c.withChartProvider)((0,o.memo)(w))},82210:function(e,t,n){var a=n(67294),r=n(71893),o=n(93416),c=n(5710),l=n(37503),i=(0,r.default)(o.Flex).withConfig({displayName:"restrictedContentMessage__StyledBox",componentId:"sc-wbsw81-0"})(["transform:translate(-50%,-50%);"]),s={node:"This node is locked so you can't see the Single Node dashboard.",alert:"This node is locked so you can't see the full alert details. 
",dashboard:"This dashboard is locked so you can't see it."};t.Z=function(e){var t=e.flavour,n=void 0===t?"node":t;return a.createElement(i,{column:!0,width:"100%",gap:2,alignItems:"center",position:"absolute",top:"50%",left:"50%",padding:[4],round:2},a.createElement(o.TextBigger,null,s[n]||""),a.createElement(l.Z,null,a.createElement(o.TextBig,{color:"primary"},"Upgrade for no limitations!")),a.createElement(o.TextBig,null,"or"),a.createElement(c.Z,null,a.createElement(o.TextBig,{color:"primary"},"Change your active node selection to unlock it.")))}},54244:function(e,t,n){var a=n(67294),r=n(93416);t.Z=function(e){var t=e.flavour,n=e.icon,o=e.children;return a.createElement(r.Pill,{icon:n,textProps:{textTransform:"capitalize"},flavour:t},o)}},50709:function(e,t,n){n.d(t,{Z:function(){return d}});var a=n(29439),r=n(67294),o=n(93416),c=n(12008),l=(n(92222),n(74916),n(15306),n(26398)),i=n(91008),s=n(37518),u=n(12899),d=function(e){var t=e.alertId,n=(0,c.E5)(t),d=n.name,m=function(e){var t=(0,r.useState)(!0),n=(0,a.Z)(t,2),o=n[0],c=n[1],i="".concat("https://community.netdata.cloud/t","/").concat(null===e||void 0===e?void 0:e.replace(/[._]/g,"-"));return(0,r.useEffect)((function(){e&&l.Z.get(i).then((function(){return c(!1)})).catch((function(){return c(!0)}))}),[e]),[i,o]}(void 0===d?"unknown alert":d),g=(0,a.Z)(m,2),f=g[0],p=g[1],h=(0,s.UL)(),v=(0,u.R3)(),E=(0,a.Z)(v,2)[1],y=(0,r.useMemo)((function(){return{alignSelf:"start",onClick:function(){return E({roomId:h,alert:n})}}}),[f,p,E]);return r.createElement(i.Z,y,"Learn more about this alert",!p&&r.createElement(o.Icon,{name:"arrow_left",rotate:2,size:"small",margin:[0,0,-.75,1],color:"success"}))}},80854:function(e,t,n){var a=n(87462),r=n(45987),o=n(67294),c=n(93416),l=["iconName","iconSize","children"];t.Z=function(e){var t=e.iconName,n=e.iconSize,i=e.children,s=(0,r.Z)(e,l);return o.createElement(c.Flex,(0,a.Z)({gap:2,alignItems:"center"},s),o.createElement(c.Icon,(0,a.Z)({name:t,color:"textLite"},n&&{height:n,width:n})),o.createElement(c.Text,{strong:!0},i))}},29876:function(e,t,n){n.d(t,{Z:function(){return g}});var a=n(87462),r=n(45987),o=n(67294),c=n(93416),l=n(12008),i=(n(92222),n(89405)),s=n(50358),u=n(39840),d=function(e){var t=e.label,n=e.testid,a=e.status,r=e.value,l=e.when,d=e.units,m=(0,i.rA)(),g=m.localeTimeString,f=m.localeDateString,p=(0,o.useMemo)((function(){var e=new Date(1e3*l);return e&&"".concat(f(e,{long:!1})," ").concat(g(e,{secs:!0}))}),[l,f,g]),h=(0,u.pK)(r,d);return o.createElement(c.Flex,{gap:2,alignItems:"center"},o.createElement(c.Flex,{width:"100px"},o.createElement(c.TextSmall,null,t)),o.createElement(c.Flex,{gap:2,alignItems:"center"},o.createElement(c.TextSmall,{color:"text","data-testid":"".concat(n,"-dateTime")},p),o.createElement(s.Z,{loaded:!0,status:a,valueWithUnit:h,"data-testid":"".concat(n,"-value")})))},m=["alertId"],g=function(e){var t=e.alertId,n=(0,r.Z)(e,m),i=(0,l.E5)(t),s=i.fullyLoaded,u=void 0!==s&&s,g=i.units,f=i.lastStatusChangeValue,p=i.lastStatusChange,h=i.status,v=i.lastUpdated,E=i.value;i.prevStatus,i.prevValue,i.prevDuration;return u?o.createElement(c.Flex,(0,a.Z)({gap:2,column:!0,"data-testid":"alertValues"},n),v&&o.createElement(d,{label:"Latest",status:h,testid:"alertValues-latest",value:E,when:v,units:g}),p&&o.createElement(d,{label:"Triggered",status:h,testid:"alertValues-triggered",value:f,when:p,units:g})):null}},92253:function(e,t,n){var 
a=n(4942),r=(n(92222),n(47941),n(82526),n(57327),n(41539),n(88449),n(2490),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(67294)),o=n(16978),c=n(50308),l=n.n(c),i=n(93416),s=n(91128),u=n(78266),d=n(74059),m=n(37518),g=n(9058),f=n(22648),p=n(39840);function h(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function v(e){for(var t=1;t1?"of the sum of dimensions":"of all values of dimension";l="".concat(s," ").concat(i.join(", "))}var u=t?", with options ".concat(t):"";return"".concat(a," ").concat(l," of chart ").concat(c,", starting ").concat(p(r+o)," and up to ").concat(p(o)).concat(u)}({lookupOptions:s,lookupDimensions:l,lookupMethod:i,lookupAfter:r,lookupBefore:c,instance:n}):""}),[r,n])},v=n(80854),E=function(e){var t=e.heading,n=e.value,o=e.testId;return n&&n.length?a.createElement(r.Flex,{column:!0,gap:3},t&&a.createElement(r.H6,{textTransform:"uppercase",color:"textDescription"},t),a.createElement(c.ZP,{"data-testid":o},n)):null},y=(0,a.memo)(E),w=n(25935),x=n(87462),b=function(e){var t=e.iconName,n=e.category,o=e.iconSize,c=void 0===o?"16px":o,l=e.value,i=e.testid;return l?a.createElement(r.Flex,{alignItems:"center",gap:1},a.createElement(r.Flex,{alignItems:"center",gap:1,basis:"100px"},a.createElement(r.Icon,(0,x.Z)({name:t,color:"textLite"},c&&{height:c,width:c})),a.createElement(r.TextSmall,{whiteSpace:"nowrap",color:"textDescription"},n,":")),a.createElement(r.TextSmall,{strong:!0,"data-testid":i},l)):null},S=function(e){var t=e.id,n=e.testid,c=(0,o.E5)(t),l=c.calculation,s=c.warning,u=c.critical,m=c.updateEvery,g=h(t),f=(0,a.useMemo)((function(){return(0,w.ZP)(l)}),[l]);return a.createElement(r.Flex,{column:!0,gap:4},a.createElement(v.Z,{iconName:"code"},"Configuration"),a.createElement(y,{heading:"DB lookup",value:g,testId:"".concat(n,"-lookup")}),a.createElement(y,{heading:"Calculation",value:f,testId:"".concat(n,"-calculation")}),a.createElement(r.H6,{textTransform:"uppercase",color:"textDescription"},"Severity Conditions"),a.createElement(r.Flex,{column:!0,gap:2},a.createElement(d,{title:"Warning when",value:s,testId:"".concat(n,"-warningWhen"),status:"warning"},a.createElement(i,{critical:!1})),a.createElement(d,{title:"Critical when",value:u,testId:"".concat(n,"-criticalWhen"),status:"critical"},a.createElement(i,{critical:!0}))),a.createElement(r.H6,{textTransform:"uppercase",color:"textDescription"},"Check"),a.createElement(b,{iconName:"clock_hollow",category:"Check every",value:"".concat(m," seconds"),testid:"".concat(n,"-checkEvery")}))},k=(0,a.memo)(S),Z=n(97945),I=n(80239),C=n(45987),T=n(54244),N=n(63860),F=n(50358),_=["isHighlighted","lastStatusChangeValue","lastStatusChange","units","status","nodeName"],D=function(e){var t=e.isHighlighted,n=e.lastStatusChangeValue,o=e.lastStatusChange,c=e.units,l=e.status,i=e.nodeName,s=(0,C.Z)(e,_);return a.createElement(r.Flex,(0,x.Z)({background:t&&"modalTabsBackground",border:{side:"all",color:"borderSecondary"},color:"textDescription",justifyContent:"between",padding:[4],round:!0},s),a.createElement(r.Flex,{column:!0,justifyContent:"between",gap:2},i&&a.createElement(r.Text,{strong:!0},i),a.createElement(N.Z,{rawTime:o,secs:!0,"data-testid":"nodeItem-lastStatusChange"})),a.createElement(r.Flex,{height:5,gap:2,alignItems:"center",alignSelf:"start"},a.createElement(u.Z,{content:"Triggered 
value",align:"bottom"},a.createElement(r.Box,null,a.createElement(F.Z,{loaded:!0,status:l,units:c,value:n,"data-testid":"nodeItem-alertValue"}))),a.createElement(T.Z,{flavour:l,icon:"alarm_bell","data-testid":"alertView-alertPill-value"},l)))},P=function(e){var t=e.children;return a.createElement(r.Flex,{gap:2,column:!0},t)},O=function(e){var t=e.id,n=e.nodeName,c=e.testid,l=void 0===c?"alertDetailsModal":c,i=(0,o.E5)(t),u=i.class,d=i.instanceName,m=i.component,g=i.family,f=i.nodeId,p=i.type,h=i.lastStatusChangeValue,E=i.lastStatusChange,y=i.units,w=i.status,x=i.notificationType,S=(0,Z.iy)(f,"name");return a.createElement(r.Flex,{column:!0,gap:4},a.createElement(v.Z,{iconName:"information",margin:[0,0,0,-.5]},"Alert Info"),a.createElement(P,null,a.createElement(b,{iconName:"nodes_hollow",category:"Node",value:n||S,testid:"".concat(l,"-hostname")}),a.createElement(b,{iconName:"charts_view",category:"Instance",value:d,testid:"".concat(l,"-chartId")}),a.createElement(b,{iconName:"data_retention",category:"Type",value:p,testid:"".concat(l,"-type")}),a.createElement(b,{iconName:"last_week",category:"Hostname"}),a.createElement(b,{iconName:"metrics",category:"Component",value:m,testid:"".concat(l,"-component")}),a.createElement(b,{iconName:"applications_hollow",category:"Family",value:g,testid:"".concat(l,"-family")}),a.createElement(b,{iconName:"networkingStack",category:"Class",value:u,testid:"".concat(l,"-class")}),a.createElement(b,{iconName:"incident_manager",category:"Event ID",testid:"".concat(l,"-eventId")})),a.createElement(k,{id:t,testid:l}),a.createElement(v.Z,{iconName:"nodes_hollow",iconSize:"20px"},"Instance Values - Node Instances"),a.createElement(r.Flex,{column:!0,gap:2},"agent"!==x&&a.createElement(r.Text,{color:"textDescription"},"A node may be claimed through multiple instances on the cloud (streaming through parent) and Netdata aggregates the alerts from all the instances and displays a single Active alert based on the highest severity, latest change."),a.createElement(D,{lastStatusChangeValue:h,lastStatusChange:E,units:y,status:w,nodeName:S,isHighlighted:!0})),a.createElement(v.Z,{iconName:"gear",iconSize:"20px"},"Edit Alert"),a.createElement(r.Text,{"data-testid":"".concat(l,"-edit-info")},"If you wish to edit and configure this alert, please:"),a.createElement(s,{href:I.UV,target:"_blank",padding:"0 4px 0 0","data-testid":"".concat(l,"-editAlertConfig-link"),"data-ga":"alert-modal::click-edit::alerts-view"},"Visit the documentation",a.createElement(r.Icon,{name:"arrow_left",rotate:2,size:"small",margin:[0,0,-.75,1],color:"success"})))}},50358:function(e,t,n){var a=n(87462),r=n(45987),o=(n(92222),n(67294)),c=n(93416),l=n(45771),i=["loaded","status","units","value","valueWithUnit"],s={critical:{background:"errorSemi",border:{side:"all",color:"error"},color:"error"},warning:{background:"warningSemi",border:{side:"all",color:"warning"},color:"warning"},cleared:{background:"successSemi",border:{side:"all",color:"success"},color:"success"},default:{background:"generic",border:{side:"all",color:"border"},color:"text"}};t.Z=function(e){var t=e.loaded,n=e.status,u=e.units,d=e.value,m=e.valueWithUnit,g=(0,r.Z)(e,i),f=s[n]||s.default,p=f.background,h=f.border,v=f.color,E=(0,o.useMemo)((function(){if(m)return m;if(void 0===d)return"-";var e=(0,l.J)(d);return"".concat(e," ").concat(u||"-")}),[d,m,u]);return 
o.createElement(c.Flex,(0,a.Z)({background:p,border:h,padding:[.5,2],round:9,width:{min:10},justifyContent:"center"},g),o.createElement(c.TextSmall,{color:v,strong:!0,whiteSpace:"nowrap",truncate:!0},t?E:"-"))}},12899:function(e,t,n){n.d(t,{R3:function(){return d},Nu:function(){return m},UD:function(){return u}});var a=n(4480),r=n(26398),o=n(74059),c=n(37518),l=(0,a.cn)({key:"assistantAlert",default:null}),i=(0,a.cn)({key:"assistantAlertsAtom",default:[]}),s=(0,a.CG)({key:"assistantSelector",get:function(e){var t=e.space,n=e.room,a=e.alarm,o=e.node,c=e.chart;return function(){return function(e){var t=e.space,n=e.room,a=e.alarm,o=e.node,c=e.chart;return r.Z.post("/api/v1/troubleshoot",{space:t,room:n,alarm:a,node:o,chart:c})}({space:t,room:n,alarm:a,node:o||"dummy-node-id",chart:c||"dummy-chart-id"})}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),u=function(e){var t,n=e.alarm,r=e.node,l=e.chart,i=(0,o.th)(),u=(0,c.UL)(),d=(0,a.$P)(s({space:i,room:u,alarm:n,node:r,chart:l}));return{loaded:"loading"!==d.state,value:null===(t=d.contents)||void 0===t?void 0:t.data,hasError:"hasError"===d.state}},d=function(){return(0,a.FV)(l)},m=function(){return(0,a.FV)(i)}}}]); \ No newline at end of file diff --git a/web/gui/v2/9020.618bce1feb9efd7ead50.chunk.js b/web/gui/v2/9020.618bce1feb9efd7ead50.chunk.js new file mode 100644 index 00000000000000..cb41e0bb1401f7 --- /dev/null +++ b/web/gui/v2/9020.618bce1feb9efd7ead50.chunk.js @@ -0,0 +1 @@ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="6318f37b-c085-40d8-9cf9-ff420fe82250",e._sentryDebugIdIdentifier="sentry-dbid-6318f37b-c085-40d8-9cf9-ff420fe82250")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[9020],{9020:function(e,t,n){n.r(t),n.d(t,{default:function(){return de}});n(21249),n(57640),n(9924);var a=n(67294),l=n(93416),o=n(91008),r=n(25185),s=n(87462),i=n(29439),c=n(45987),u=(n(69720),n(50308)),d=n.n(u),m=n(82351),f=n(35539),p=["scope","setScope","isDisabled"],b=function(e){var t=e.scope,n=e.setScope,o=void 0===n?d():n,r=e.isDisabled,u=(0,c.Z)(e,p);return a.createElement(l.Flex,(0,s.Z)({gap:3},u),Object.entries(f.k0).map((function(e){var n=(0,i.Z)(e,2),s=n[0],c=n[1],u=c.text,d=c.tooltip;return a.createElement(l.RadioButton,{key:s,checked:s===t,onChange:function(){return o(s)},disabled:r},a.createElement(m.Z,{content:d,align:"bottom"},a.createElement(l.TextSmall,{color:"textDescription"},u)))})))},g=n(36412),h=(n(38862),n(46667)),v=n(4942),E=function(e){var t=e.selectedKey,n=e.selectedValue,o=e.onAddHostLabel,r=void 0===o?d():o,s=e.onRemoveHostLabel,c=void 0===s?d():s,u=e.isDefault,f=void 0!==u&&u,p=e.showPlaceholder,b=void 0===p?d():p,h=e.isDisabled,E=(0,a.useState)(t||""),C=(0,i.Z)(E,2),D=C[0],x=C[1],y=(0,a.useState)(n||""),Z=(0,i.Z)(y,2),S=Z[0],O=Z[1],k=function(){D&&S&&(r((0,v.Z)({},D,S)),b())};return a.createElement(l.Flex,{gap:2},a.createElement(g.Z,{component:"input",onChange:x,onBlur:k,placeholder:"Host key",value:D,disabled:h||!f}),a.createElement(g.Z,{component:"input",onChange:O,onBlur:k,placeholder:"Host value",value:S,disabled:h||!f||!D}),f?a.createElement(m.Z,{content:"Save label 
pair",align:"bottom"},a.createElement(l.Button,{flavour:"borderless",disabled:!D||!S},a.createElement(l.Icon,{name:"check",color:"primary",size:"small"}))):a.createElement(m.Z,{content:"Remove label pair",align:"bottom"},a.createElement(l.Button,{flavour:"borderless",onClick:function(){return c(t)}},a.createElement(l.Icon,{name:"x",color:"primary",size:"small"}))))},C=function(e){var t=e.hostLabels,n=e.onAddHostLabel,l=e.onRemoveHostLabel;return Object.entries(t).map((function(e){var t=(0,i.Z)(e,2),o=t[0],r=t[1];return a.createElement(E,{key:o,onAddHostLabel:n,selectedKey:o,selectedValue:r,onRemoveHostLabel:l})}))},D=function(e){var t=e.hostLabels,n=e.onAddHostLabel,o=void 0===n?d():n,r=e.onRemoveHostLabel,s=void 0===r?d():r,c=e.isEdit,u=e.isDisabled,m=!!Object.entries(t||{}).length,f=(0,h.Z)(!1),p=(0,i.Z)(f,3),b=p[0],g=p[2];return a.createElement(l.Flex,{column:!0,gap:1},a.createElement(l.TextSmall,{color:"textLite"},"Host labels"),m&&a.createElement(C,{hostLabels:t,onAddHostLabel:o,onRemoveHostLabel:s}),(!m||b)&&a.createElement(E,{key:JSON.stringify(t),onAddHostLabel:o,isDefault:!0,showPlaceholder:g,isDisabled:u}),c&&!b&&m&&a.createElement(l.Flex,{justifyContent:"end"},a.createElement(l.Button,{flavour:"borderless",onClick:g},"Add host label")))},x=n(95665);function y(){var e=new Date,t=e.getFullYear(),n=e.getMonth(),a=e.getDate(),l=new Date(0);return l.setFullYear(t,n,a+1),l.setHours(0,0,0,0),l}var Z=n(69119),S=n(29422),O=n(4822),k=Object.entries(f.lI).map((function(e){var t=(0,i.Z)(e,2),n=t[0];return{label:t[1],value:n}})),w=y(),L=(0,Z.default)((0,S.Z)(w,{years:1})),A=function(e){var t=e.duration,n=e.setDuration,o=e.setDate,r=e.endDate,s=e.isDisabled,i=(0,O.by)("offset");return a.createElement(l.Flex,{gap:2},a.createElement(g.Z,{component:"select",title:"Duration",onChange:n,options:k,placeholder:"Select duration",value:t,isDisabled:s}),"custom"===t.value&&a.createElement(l.Flex,{alignSelf:"end"},a.createElement(x.Z,{isSinglePicker:!0,values:{singleDate:r},minDate:w,maxDate:L,utc:i,onChange:o,isPlaying:!1,accessorProps:f.mD,padding:[4,0],width:"auto",accessorTooltipContent:"Select end date"})))};function R(){return(0,Z.default)(Date.now())}var H=R(),I=y(),j=function(e){var t=e.start,n=e.end,o=e.onChange,r=e.isDisabled,s=e.isEdit,c=(0,a.useState)(s?"schedule":f.JG),u=(0,i.Z)(c,2),d=u[0],p=u[1],b=(0,a.useState)(f.yu),g=(0,i.Z)(b,2),h=g[0],v=g[1],E=(0,a.useState)(0),C=(0,i.Z)(E,2),D=C[0],k=C[1],w=(0,O.by)("offset"),L=(0,a.useState)(t?new Date(t):s?null:H),j=(0,i.Z)(L,2),P=j[0],F=j[1],_=(0,a.useState)(n?new Date(n):s?null:I),N=(0,i.Z)(_,2),U=N[0],B=N[1],M=(0,a.useState)((0,Z.default)((0,S.Z)(P,{days:1}))),T=(0,i.Z)(M,2),z=T[0],K=T[1];(0,a.useEffect)((function(){var e=(0,S.Z)(P,{days:1});K(e),!s&&P>=U&&B(e)}),[P]),(0,a.useEffect)((function(){o({start:P,end:U,scheduleOption:d,duration:h})}),[P,U,d,h]);var V=y(),Q=(0,Z.default)((0,S.Z)(V,{years:1}));return a.createElement(l.Flex,{column:!0,gap:3},!s&&a.createElement(l.Flex,{gap:3},Object.entries(f.Zg).map((function(e){var t=(0,i.Z)(e,2),n=t[0],o=t[1],s=o.text,c=o.tooltip;return a.createElement(l.RadioButton,{key:n,checked:n===d,onChange:function(){return p(n)},disabled:r},a.createElement(m.Z,{content:c,align:"bottom"},a.createElement(l.TextSmall,{color:"textDescription"},s)))}))),"now"===d?a.createElement(A,{duration:h,setDuration:v,setDate:function(e){F(Date.now()),B(e)},endDate:U,isDisabled:r}):a.createElement(l.Flex,{gap:2},a.createElement(l.Flex,{column:!0,gap:2,flex:{grow:1,shrink:1},basis:0},a.createElement(l.TextSmall,null,"Start 
date"),a.createElement(x.Z,{isSinglePicker:!0,values:{singleDate:new Date(P)},minDate:R(),maxDate:Q,utc:w,onChange:function(e){F(e),k((function(e){return e+1}))},isPlaying:!1,accessorProps:f.mD,padding:[4,0],width:"auto",accessorTooltipContent:"Select start date"})),a.createElement(l.Flex,{column:!0,gap:2,flex:{grow:1,shrink:1},basis:0},a.createElement(l.TextSmall,null,"End date"),a.createElement(x.Z,{key:D,isSinglePicker:!0,values:{singleDate:U?new Date(U):U},minDate:z,maxDate:Q,utc:w,onChange:B,isPlaying:!1,accessorProps:f.mD,padding:[4,0],width:"auto",accessorTooltipContent:"Select end date"}))))},P=function(e){var t=e.content,n=void 0===t?"Loading alerts...":t;return a.createElement(l.Flex,{height:45,alignItems:"center",justifyContent:"center"},a.createElement(l.Text,null,n))},F=n(27308),_=function(e){return a.createElement(l.Flex,(0,s.Z)({gap:2,alignItems:"center"},e),a.createElement(l.Icon,{size:"small",color:"warning",name:"warning_triangle"}),a.createElement(l.Text,null,"This feature is only available to paid plans"),a.createElement(F.Z,null))},N=(n(82526),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(57327),n(41539),n(88449),n(2490),n(59849),n(47941),n(69826),n(31672),n(59461),n(26699),n(32023),n(88674),n(17727),n(89250)),U=n(13477),B=n(33335),M=n(74059),T=n(96929),z=n(3689),K=n(93742),V=n(25403),Q=n(4480),Y=n(48450),G=(0,Q.CG)({key:"spaceAlertMetas",get:function(e){return function(){return(0,Y.uk)(e)}}}),J=n(62447),W=n(45396),$=n(93017),q=["id","name"],X=["rooms","nodes","hostLabels","startsAt","lastsUntil"],ee=["accountId","scope","rooms","nodes","hostLabels","startsAt","lastsUntil","scheduleOption","duration"];function te(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function ne(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:[],n=function(e){return e.length<=1?e:e[e.length-1].value==f.sr.value?[f.sr]:e.filter((function(e){return!!e.value}))}(e),a=n.map((function(e){return e.value||E})),l=t.length0&&n(d)?(h=o(d),y=c(e,t,d,h,y,l-1)-1):(a(y+1),e[y]=d),y++),g++;return y};e.exports=c},62617:function(e,t,r){"use strict";var n=r(46916),o=r(60614),a=r(19670),i=r(24942),c=r(71246),s=r(58173),u=r(5112),l=r(28091),f=u("asyncIterator");e.exports=function(e){var t,r=a(e),u=!0,p=s(r,f);return o(p)||(p=c(r),u=!1),void 0!==p?t=n(p,r):(t=r,u=!0),a(t),i(u?t:new l(i(t)))}},72897:function(e,t,r){"use strict";var n=r(46916),o=r(19670),a=r(24942),i=r(71246);e.exports=function(e,t){t&&"string"===typeof e||o(e);var r=i(e);return a(o(void 0!==r?n(r,e):e))}},20403:function(e){"use strict";var t=Math.log,r=Math.LOG10E;e.exports=Math.log10||function(e){return t(e)*r}},34553:function(e,t,r){"use strict";var n=r(82109),o=r(42092).findIndex,a=r(51223),i="findIndex",c=!0;i in[]&&Array(1)[i]((function(){c=!1})),n({target:"Array",proto:!0,forced:c},{findIndex:function(e){return o(this,e,arguments.length>1?arguments[1]:void 0)}}),a(i)},86535:function(e,t,r){"use strict";var n=r(82109),o=r(6790),a=r(19662),i=r(47908),c=r(26244),s=r(65417);n({target:"Array",proto:!0},{flatMap:function(e){var t,r=i(this),n=c(r);return a(e),(t=s(r,0)).length=o(t,r,r,n,0,1,e,arguments.length>1?arguments[1]:void 0),t}})},99244:function(e,t,r){"use strict";r(51223)("flatMap")},40658:function(e,t,r){"use strict";r(82109)({target:"Math",stat:!0},{log10:r(20403)})},74326:function(e,t,r){"use strict";var 
n=r(82109),o=r(46916),a=r(19662),i=r(19670),c=r(70111),s=r(24942),u=r(45348),l=r(76178),f=r(62617),p=r(21753),d=r(31913),h=u((function(e){var t=this,r=t.iterator,n=t.mapper;return new e((function(a,s){var u=function(e){t.done=!0,s(e)},d=function(e){p(r,u,e,u)},h=function(){try{e.resolve(i(o(t.next,r))).then((function(r){try{if(i(r).done)t.done=!0,a(l(void 0,!0));else{var o=r.value;try{var s=n(o,t.counter++),p=function(e){try{t.inner=f(e),y()}catch(r){d(r)}};c(s)?e.resolve(s).then(p,d):p(s)}catch(h){d(h)}}}catch(g){u(g)}}),u)}catch(s){u(s)}},y=function(){var r=t.inner;if(r)try{e.resolve(i(o(r.next,r.iterator))).then((function(e){try{i(e).done?(t.inner=null,h()):a(l(e.value,!1))}catch(r){d(r)}}),d)}catch(n){d(n)}else h()};y()}))}));n({target:"AsyncIterator",proto:!0,real:!0,forced:d},{flatMap:function(e){return i(this),a(e),new h(s(this),{mapper:e,inner:null})}})},82499:function(e,t,r){"use strict";var n=r(82109),o=r(46916),a=r(19662),i=r(19670),c=r(24942),s=r(72897),u=r(54956),l=r(99212),f=r(31913),p=u((function(){for(var e,t,r=this.iterator,n=this.mapper;;){if(t=this.inner)try{if(!(e=i(o(t.next,t.iterator))).done)return e.value;this.inner=null}catch(a){l(r,"throw",a)}if(e=i(o(this.next,r)),this.done=!!e.done)return;try{this.inner=s(n(e.value,this.counter++),!1)}catch(a){l(r,"throw",a)}}}));n({target:"Iterator",proto:!0,real:!0,forced:f},{flatMap:function(e){return i(this),a(e),new p(c(this),{mapper:e,inner:null})}})},72307:function(e,t,r){e=r.nmd(e);var n="__lodash_hash_undefined__",o=1,a=2,i=9007199254740991,c="[object Arguments]",s="[object Array]",u="[object AsyncFunction]",l="[object Boolean]",f="[object Date]",p="[object Error]",d="[object Function]",h="[object GeneratorFunction]",y="[object Map]",g="[object Number]",v="[object Null]",m="[object Object]",b="[object Promise]",w="[object Proxy]",O="[object RegExp]",D="[object Set]",S="[object String]",P="[object Symbol]",x="[object Undefined]",j="[object WeakMap]",M="[object ArrayBuffer]",z="[object DataView]",C=/^\[object .+?Constructor\]$/,_=/^(?:0|[1-9]\d*)$/,H={};H["[object Float32Array]"]=H["[object Float64Array]"]=H["[object Int8Array]"]=H["[object Int16Array]"]=H["[object Int32Array]"]=H["[object Uint8Array]"]=H["[object Uint8ClampedArray]"]=H["[object Uint16Array]"]=H["[object Uint32Array]"]=!0,H[c]=H[s]=H[M]=H[l]=H[z]=H[f]=H[p]=H[d]=H[y]=H[g]=H[m]=H[O]=H[D]=H[S]=H[j]=!1;var R="object"==typeof r.g&&r.g&&r.g.Object===Object&&r.g,k="object"==typeof self&&self&&self.Object===Object&&self,E=R||k||Function("return this")(),N=t&&!t.nodeType&&t,T=N&&e&&!e.nodeType&&e,L=T&&T.exports===N,A=L&&R.process,V=function(){try{return A&&A.binding&&A.binding("util")}catch(e){}}(),W=V&&V.isTypedArray;function F(e,t){for(var r=-1,n=null==e?0:e.length;++ru))return!1;var f=c.get(e);if(f&&c.get(t))return f==t;var p=-1,d=!0,h=r&a?new je:void 0;for(c.set(e,t),c.set(t,e);++p-1},Pe.prototype.set=function(e,t){var r=this.__data__,n=Ce(r,e);return n<0?(++this.size,r.push([e,t])):r[n][1]=t,this},xe.prototype.clear=function(){this.size=0,this.__data__={hash:new Se,map:new(fe||Pe),string:new Se}},xe.prototype.delete=function(e){var t=Le(this,e).delete(e);return this.size-=t?1:0,t},xe.prototype.get=function(e){return Le(this,e).get(e)},xe.prototype.has=function(e){return Le(this,e).has(e)},xe.prototype.set=function(e,t){var r=Le(this,e),n=r.size;return r.set(e,t),this.size+=r.size==n?0:1,this},je.prototype.add=je.prototype.push=function(e){return this.__data__.set(e,n),this},je.prototype.has=function(e){return 
this.__data__.has(e)},Me.prototype.clear=function(){this.__data__=new Pe,this.size=0},Me.prototype.delete=function(e){var t=this.__data__,r=t.delete(e);return this.size=t.size,r},Me.prototype.get=function(e){return this.__data__.get(e)},Me.prototype.has=function(e){return this.__data__.has(e)},Me.prototype.set=function(e,t){var r=this.__data__;if(r instanceof Pe){var n=r.__data__;if(!fe||n.length<199)return n.push([e,t]),this.size=++r.size,this;r=this.__data__=new xe(n)}return r.set(e,t),this.size=r.size,this};var Ve=ce?function(e){return null==e?[]:(e=Object(e),function(e,t){for(var r=-1,n=null==e?0:e.length,o=0,a=[];++r-1&&e%1==0&&e-1&&e%1==0&&e<=i}function $e(e){var t=typeof e;return null!=e&&("object"==t||"function"==t)}function Ke(e){return null!=e&&"object"==typeof e}var Ze=W?function(e){return function(t){return e(t)}}(W):function(e){return Ke(e)&&qe(e.length)&&!!H[_e(e)]};function Je(e){return null!=(t=e)&&qe(t.length)&&!Ge(t)?ze(e):Ee(e);var t}e.exports=function(e,t){return Re(e,t)}},41609:function(e,t,r){var n=r(280),o=r(64160),a=r(35694),i=r(1469),c=r(98612),s=r(44144),u=r(25726),l=r(36719),f=Object.prototype.hasOwnProperty;e.exports=function(e){if(null==e)return!0;if(c(e)&&(i(e)||"string"==typeof e||"function"==typeof e.splice||s(e)||l(e)||a(e)))return!e.length;var t=o(e);if("[object Map]"==t||"[object Set]"==t)return!e.size;if(u(e))return!n(e).length;for(var r in e)if(f.call(e,r))return!1;return!0}},48564:function(){},79424:function(){},75966:function(e,t,r){"use strict";function n(e){return n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},n(e)}Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var o=p(r(67294)),a=p(r(45697)),i=r(10434),c=r(1706),s=r(67493),u=r(7373),l=r(92886),f=p(r(86010));function p(e){return e&&e.__esModule?e:{default:e}}function d(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function h(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:this.props;return{cols:e.cols,containerPadding:e.containerPadding,containerWidth:e.containerWidth,margin:e.margin,maxRows:e.maxRows,rowHeight:e.rowHeight}}},{key:"createStyle",value:function(e){var t,r=this.props,n=r.usePercentages,o=r.containerWidth;return r.useCSSTransforms?t=(0,s.setTransform)(e):(t=(0,s.setTopLeft)(e),n&&(t.left=(0,s.perc)(e.left/o),t.width=(0,s.perc)(e.width/o))),t}},{key:"mixinDraggable",value:function(e,t){return o.default.createElement(i.DraggableCore,{disabled:!t,onStart:this.onDragStart,onDrag:this.onDrag,onStop:this.onDragStop,handle:this.props.handle,cancel:".react-resizable-handle"+(this.props.cancel?","+this.props.cancel:""),scale:this.props.transformScale,nodeRef:this.elementRef},e)}},{key:"mixinResizable",value:function(e,t,r){var n=this.props,a=n.cols,i=n.x,s=n.minW,l=n.minH,f=n.maxW,p=n.maxH,d=n.transformScale,h=n.resizeHandles,y=n.resizeHandle,g=this.getPositionParams(),v=(0,u.calcGridItemPosition)(g,0,0,a-i,0).width,m=(0,u.calcGridItemPosition)(g,0,0,s,l),b=(0,u.calcGridItemPosition)(g,0,0,f,p),w=[m.width,m.height],O=[Math.min(b.width,v),Math.min(b.height,1/0)];return o.default.createElement(c.Resizable,{draggableOpts:{disabled:!r},className:r?void 
0:"react-resizable-hide",width:t.width,height:t.height,minConstraints:w,maxConstraints:O,onResizeStop:this.onResizeStop,onResizeStart:this.onResizeStart,onResize:this.onResize,transformScale:d,resizeHandles:h,handle:y},e)}},{key:"onResizeHandler",value:function(e,t,r){var n=t.node,o=t.size,a=this.props[r];if(a){var i=this.props,c=i.cols,s=i.x,l=i.y,f=i.i,p=i.maxH,d=i.minH,h=this.props,y=h.minW,g=h.maxW,v=(0,u.calcWH)(this.getPositionParams(),o.width,o.height,s,l),m=v.w,b=v.h;y=Math.max(y,1),g=Math.min(g,c-s),m=(0,u.clamp)(m,y,g),b=(0,u.clamp)(b,d,p),this.setState({resizing:"onResizeStop"===r?null:o}),a.call(this,f,m,b,{e:e,node:n,size:o})}}},{key:"render",value:function(){var e=this.props,t=e.x,r=e.y,n=e.w,a=e.h,i=e.isDraggable,c=e.isResizable,s=e.droppingPosition,l=e.useCSSTransforms,p=(0,u.calcGridItemPosition)(this.getPositionParams(),t,r,n,a,this.state),d=o.default.Children.only(this.props.children),y=o.default.cloneElement(d,{ref:this.elementRef,className:(0,f.default)("react-grid-item",d.props.className,this.props.className,{static:this.props.static,resizing:Boolean(this.state.resizing),"react-draggable":i,"react-draggable-dragging":Boolean(this.state.dragging),dropping:Boolean(s),cssTransforms:l}),style:h(h(h({},this.props.style),d.props.style),this.createStyle(p))});return y=this.mixinResizable(y,p,c),y=this.mixinDraggable(y,i)}}],r&&y(t.prototype,r),n&&y(t,n),Object.defineProperty(t,"prototype",{writable:!1}),l}(o.default.Component);t.default=O,w(O,"propTypes",{children:a.default.element,cols:a.default.number.isRequired,containerWidth:a.default.number.isRequired,rowHeight:a.default.number.isRequired,margin:a.default.array.isRequired,maxRows:a.default.number.isRequired,containerPadding:a.default.array.isRequired,x:a.default.number.isRequired,y:a.default.number.isRequired,w:a.default.number.isRequired,h:a.default.number.isRequired,minW:function(e,t){var r=e[t];return"number"!==typeof r?new Error("minWidth not Number"):r>e.w||r>e.maxW?new Error("minWidth larger than item width/maxWidth"):void 0},maxW:function(e,t){var r=e[t];return"number"!==typeof r?new Error("maxWidth not Number"):re.h||r>e.maxH?new Error("minHeight larger than item height/maxHeight"):void 0},maxH:function(e,t){var r=e[t];return"number"!==typeof r?new Error("maxHeight not Number"):re.length)&&(t=e.length);for(var r=0,n=new Array(t);r0){var a=1/0,i=1/0;o.forEach((function(t){t.x>e.x&&(a=Math.min(a,t.x)),t.y>e.y&&(i=Math.min(i,t.y))})),Number.isFinite(a)&&(e.w=a-e.x),Number.isFinite(i)&&(e.h=i-e.y)}}return t||(e.w=r,e.h=n),e})),m=g(v,2),b=m[0],w=m[1];if(w){var O={w:w.w,h:w.h,x:w.x,y:w.y,static:!0,i:t};e.props.onResize(b,l,w,O,a,i),e.setState({layout:h?b:(0,c.compact)(b,(0,c.compactType)(e.props),p),activeDrag:O})}})),P(D(e),"onResizeStop",(function(t,r,n,o){var a=o.e,i=o.node,s=e.state,u=s.layout,l=s.oldResizeItem,f=e.props,p=f.cols,d=f.allowOverlap,h=(0,c.getLayoutItem)(u,t);e.props.onResizeStop(u,l,h,null,a,i);var y=d?u:(0,c.compact)(u,(0,c.compactType)(e.props),p),g=e.state.oldLayout;e.setState({activeDrag:null,layout:y,oldResizeItem:null,oldLayout:null}),e.onLayoutMaybeChanged(y,g)})),P(D(e),"onDragOver",(function(t){var r;if(t.preventDefault(),t.stopPropagation(),j&&(null===(r=t.nativeEvent.target)||void 0===r||!r.classList.contains(x)))return!1;var n=e.props,a=n.droppingItem,i=n.onDropDragOver,c=n.margin,u=n.cols,l=n.rowHeight,f=n.maxRows,p=n.width,h=n.containerPadding,g=n.transformScale,v=null===i||void 0===i?void 0:i(t);if(!1===v)return e.state.droppingDOMNode&&e.removeDroppingPlaceholder(),!1;var 
m=y(y({},a),v),b=e.state.layout,w=t.nativeEvent,O=w.layerX,D=w.layerY,S={left:O/g,top:D/g,e:t};if(e.state.droppingDOMNode){if(e.state.droppingPosition){var P=e.state.droppingPosition,M=P.left,z=P.top;(M!=O||z!=D)&&e.setState({droppingPosition:S})}}else{var C={cols:u,margin:c,maxRows:f,rowHeight:l,containerWidth:p,containerPadding:h||c},_=(0,s.calcXY)(C,D,O,m.w,m.h);e.setState({droppingDOMNode:o.createElement("div",{key:m.i}),droppingPosition:S,layout:[].concat(d(b),[y(y({},m),{},{x:_.x,y:_.y,static:!1,isDraggable:!0})])})}})),P(D(e),"removeDroppingPlaceholder",(function(){var t=e.props,r=t.droppingItem,n=t.cols,o=e.state.layout,a=(0,c.compact)(o.filter((function(e){return e.i!==r.i})),(0,c.compactType)(e.props),n);e.setState({layout:a,droppingDOMNode:null,activeDrag:null,droppingPosition:void 0})})),P(D(e),"onDragLeave",(function(t){t.preventDefault(),t.stopPropagation(),e.dragEnterCounter--,0===e.dragEnterCounter&&e.removeDroppingPlaceholder()})),P(D(e),"onDragEnter",(function(t){t.preventDefault(),t.stopPropagation(),e.dragEnterCounter++})),P(D(e),"onDrop",(function(t){t.preventDefault(),t.stopPropagation();var r=e.props.droppingItem,n=e.state.layout,o=n.find((function(e){return e.i===r.i}));e.dragEnterCounter=0,e.removeDroppingPlaceholder(),e.props.onDrop(n,o,t)})),e}return t=f,n=[{key:"getDerivedStateFromProps",value:function(e,t){var r;return t.activeDrag?null:((0,a.default)(e.layout,t.propsLayout)&&e.compactType===t.compactType?(0,c.childrenEqual)(e.children,t.children)||(r=t.layout):r=e.layout,r?{layout:(0,c.synchronizeLayoutWithChildren)(r,e.children,e.cols,(0,c.compactType)(e),e.allowOverlap),compactType:e.compactType,children:e.children,propsLayout:e.layout}:null)}}],(r=[{key:"componentDidMount",value:function(){this.setState({mounted:!0}),this.onLayoutMaybeChanged(this.state.layout,this.props.layout)}},{key:"shouldComponentUpdate",value:function(e,t){return this.props.children!==e.children||!(0,c.fastRGLPropsEqual)(this.props,e,a.default)||this.state.activeDrag!==t.activeDrag||this.state.mounted!==t.mounted||this.state.droppingPosition!==t.droppingPosition}},{key:"componentDidUpdate",value:function(e,t){if(!this.state.activeDrag){var r=this.state.layout,n=t.layout;this.onLayoutMaybeChanged(r,n)}}},{key:"containerHeight",value:function(){if(this.props.autoSize){var e=(0,c.bottom)(this.state.layout),t=this.props.containerPadding?this.props.containerPadding[1]:this.props.margin[1];return e*this.props.rowHeight+(e-1)*this.props.margin[1]+2*t+"px"}}},{key:"onLayoutMaybeChanged",value:function(e,t){t||(t=this.state.layout),(0,a.default)(t,e)||this.props.onLayoutChange(e)}},{key:"placeholder",value:function(){var e=this.state.activeDrag;if(!e)return null;var t=this.props,r=t.width,n=t.cols,a=t.margin,i=t.containerPadding,c=t.rowHeight,s=t.maxRows,l=t.useCSSTransforms,f=t.transformScale;return o.createElement(u.default,{w:e.w,h:e.h,x:e.x,y:e.y,i:e.i,className:"react-grid-placeholder",containerWidth:r,cols:n,margin:a,containerPadding:i||a,maxRows:s,rowHeight:c,isDraggable:!1,isResizable:!1,isBounded:!1,useCSSTransforms:l,transformScale:f},o.createElement("div",null))}},{key:"processGridItem",value:function(e,t){if(e&&e.key){var r=(0,c.getLayoutItem)(this.state.layout,String(e.key));if(!r)return null;var 
n=this.props,a=n.width,i=n.cols,s=n.margin,l=n.containerPadding,f=n.rowHeight,p=n.maxRows,d=n.isDraggable,h=n.isResizable,y=n.isBounded,g=n.useCSSTransforms,v=n.transformScale,m=n.draggableCancel,b=n.draggableHandle,w=n.resizeHandles,O=n.resizeHandle,D=this.state,S=D.mounted,P=D.droppingPosition,x="boolean"===typeof r.isDraggable?r.isDraggable:!r.static&&d,j="boolean"===typeof r.isResizable?r.isResizable:!r.static&&h,M=r.resizeHandles||w,z=x&&y&&!1!==r.isBounded;return o.createElement(u.default,{containerWidth:a,cols:i,margin:s,containerPadding:l||s,maxRows:p,rowHeight:f,cancel:m,handle:b,onDragStop:this.onDragStop,onDragStart:this.onDragStart,onDrag:this.onDrag,onResizeStart:this.onResizeStart,onResize:this.onResize,onResizeStop:this.onResizeStop,isDraggable:x,isResizable:j,isBounded:z,useCSSTransforms:g&&S,usePercentages:!S,transformScale:v,w:r.w,h:r.h,x:r.x,y:r.y,i:r.i,minH:r.minH,minW:r.minW,maxH:r.maxH,maxW:r.maxW,static:r.static,droppingPosition:t?P:void 0,resizeHandles:M,resizeHandle:O},e)}}},{key:"render",value:function(){var e=this,t=this.props,r=t.className,n=t.style,a=t.isDroppable,s=t.innerRef,u=(0,i.default)(x,r),l=y({height:this.containerHeight()},n);return o.createElement("div",{ref:s,className:u,style:l,onDrop:a?this.onDrop:c.noop,onDragLeave:a?this.onDragLeave:c.noop,onDragEnter:a?this.onDragEnter:c.noop,onDragOver:a?this.onDragOver:c.noop},o.Children.map(this.props.children,(function(t){return e.processGridItem(t)})),a&&this.state.droppingDOMNode&&this.processGridItem(this.state.droppingDOMNode,!0),this.placeholder())}}])&&b(t.prototype,r),n&&b(t,n),Object.defineProperty(t,"prototype",{writable:!1}),f}(o.Component);t.default=M,P(M,"displayName","ReactGridLayout"),P(M,"propTypes",l.default),P(M,"defaultProps",{autoSize:!0,cols:12,className:"",style:{},draggableHandle:"",draggableCancel:"",containerPadding:null,rowHeight:150,maxRows:1/0,layout:[],margin:[10,10],isBounded:!1,isDraggable:!0,isResizable:!0,allowOverlap:!1,isDroppable:!1,useCSSTransforms:!0,transformScale:1,verticalCompact:!0,compactType:"vertical",preventCollision:!1,droppingItem:{i:"__dropping-elem__",h:1,w:1},resizeHandles:["se"],onLayoutChange:c.noop,onDragStart:c.noop,onDrag:c.noop,onDragStop:c.noop,onResizeStart:c.noop,onResize:c.noop,onResizeStop:c.noop,onDrop:c.noop,onDropDragOver:c.noop})},92886:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.resizeHandleType=t.resizeHandleAxesType=t.default=void 0;var n=a(r(45697)),o=a(r(67294));function a(e){return e&&e.__esModule?e:{default:e}}var i=n.default.arrayOf(n.default.oneOf(["s","w","e","n","sw","nw","se","ne"]));t.resizeHandleAxesType=i;var c=n.default.oneOfType([n.default.node,n.default.func]);t.resizeHandleType=c;var s={className:n.default.string,style:n.default.object,width:n.default.number,autoSize:n.default.bool,cols:n.default.number,draggableCancel:n.default.string,draggableHandle:n.default.string,verticalCompact:function(e){e.verticalCompact,0},compactType:n.default.oneOf(["vertical","horizontal"]),layout:function(e){var t=e.layout;void 
0!==t&&r(67493).validateLayout(t,"layout")},margin:n.default.arrayOf(n.default.number),containerPadding:n.default.arrayOf(n.default.number),rowHeight:n.default.number,maxRows:n.default.number,isBounded:n.default.bool,isDraggable:n.default.bool,isResizable:n.default.bool,allowOverlap:n.default.bool,preventCollision:n.default.bool,useCSSTransforms:n.default.bool,transformScale:n.default.number,isDroppable:n.default.bool,resizeHandles:i,resizeHandle:c,onLayoutChange:n.default.func,onDragStart:n.default.func,onDrag:n.default.func,onDragStop:n.default.func,onResizeStart:n.default.func,onResize:n.default.func,onResizeStop:n.default.func,onDrop:n.default.func,droppingItem:n.default.shape({i:n.default.string.isRequired,w:n.default.number.isRequired,h:n.default.number.isRequired}),children:function(e,t){var r=e[t],n={};o.default.Children.forEach(r,(function(e){if(null!=(null===e||void 0===e?void 0:e.key)){if(n[e.key])throw new Error('Duplicate child key "'+e.key+'" found! This will cause problems in ReactGridLayout.');n[e.key]=!0}}))},innerRef:n.default.any};t.default=s},65966:function(e,t,r){"use strict";function n(e){return n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},n(e)}t.default=void 0;var o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!==n(e)&&"function"!==typeof e)return{default:e};var r=p(t);if(r&&r.has(e))return r.get(e);var o={},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var i in e)if("default"!==i&&Object.prototype.hasOwnProperty.call(e,i)){var c=a?Object.getOwnPropertyDescriptor(e,i):null;c&&(c.get||c.set)?Object.defineProperty(o,i,c):o[i]=e[i]}o.default=e,r&&r.set(e,o);return o}(r(67294)),a=f(r(45697)),i=f(r(72307)),c=r(67493),s=r(5651),u=f(r(49580)),l=["breakpoint","breakpoints","cols","layouts","margin","containerPadding","onBreakpointChange","onLayoutChange","onWidthChange"];function f(e){return e&&e.__esModule?e:{default:e}}function p(e){if("function"!==typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(p=function(e){return e?r:t})(e)}function d(){return d=Object.assign||function(e){for(var t=1;t=0||(o[r]=e[r]);return o}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(o[r]=e[r])}return o}function y(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function g(e){for(var t=1;t=0||(o[r]=e[r]);return o}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(o[r]=e[r])}return o}(t,c);return r&&!this.mounted?o.createElement("div",{className:(0,i.default)(this.props.className,g),style:this.props.style,ref:this.elementRef}):o.createElement(e,l({innerRef:this.elementRef},n,this.state))}}]),a}(o.Component),y(t,"defaultProps",{measureBeforeMount:!1}),y(t,"propTypes",{measureBeforeMount:a.default.bool}),t};var o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!==n(e)&&"function"!==typeof e)return{default:e};var r=u(t);if(r&&r.has(e))return r.get(e);var o={},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var i in e)if("default"!==i&&Object.prototype.hasOwnProperty.call(e,i)){var 
c=a?Object.getOwnPropertyDescriptor(e,i):null;c&&(c.get||c.set)?Object.defineProperty(o,i,c):o[i]=e[i]}o.default=e,r&&r.set(e,o);return o}(r(67294)),a=s(r(45697)),i=s(r(86010)),c=["measureBeforeMount"];function s(e){return e&&e.__esModule?e:{default:e}}function u(e){if("function"!==typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(u=function(e){return e?r:t})(e)}function l(){return l=Object.assign||function(e){for(var t=1;te[c]&&(n=c)}return n},t.getColsFromBreakpoint=function(e,t){if(!t[e])throw new Error("ResponsiveReactGridLayout: `cols` entry for breakpoint "+e+" is missing!");return t[e]},t.sortBreakpoints=o;var n=r(67493);function o(e){return Object.keys(e).sort((function(t,r){return e[t]-e[r]}))}},67493:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.bottom=f,t.childrenEqual=function(e,t){return(0,n.default)(o.default.Children.map(e,(function(e){return null===e||void 0===e?void 0:e.key})),o.default.Children.map(t,(function(e){return null===e||void 0===e?void 0:e.key})))},t.cloneLayout=p,t.cloneLayoutItem=h,t.collides=g,t.compact=v,t.compactItem=w,t.compactType=function(e){var t=e||{},r=t.verticalCompact,n=t.compactType;return!1===r?null:n},t.correctBounds=O,t.fastPositionEqual=function(e,t){return e.left===t.left&&e.top===t.top&&e.width===t.width&&e.height===t.height},t.fastRGLPropsEqual=void 0,t.getAllCollisions=P,t.getFirstCollision=S,t.getLayoutItem=D,t.getStatics=x,t.modifyLayout=d,t.moveElement=j,t.moveElementAwayFromCollision=M,t.noop=void 0,t.perc=function(e){return 100*e+"%"},t.setTopLeft=function(e){var t=e.top,r=e.left,n=e.width,o=e.height;return{top:"".concat(t,"px"),left:"".concat(r,"px"),width:"".concat(n,"px"),height:"".concat(o,"px"),position:"absolute"}},t.setTransform=function(e){var t=e.top,r=e.left,n=e.width,o=e.height,a="translate(".concat(r,"px,").concat(t,"px)");return{transform:a,WebkitTransform:a,MozTransform:a,msTransform:a,OTransform:a,width:"".concat(n,"px"),height:"".concat(o,"px"),position:"absolute"}},t.sortLayoutItems=z,t.sortLayoutItemsByColRow=_,t.sortLayoutItemsByRowCol=C,t.synchronizeLayoutWithChildren=function(e,t,r,n,a){e=e||[];var i=[];o.default.Children.forEach(t,(function(t){if(null!=(null===t||void 0===t?void 0:t.key)){var r=D(e,String(t.key));if(r)i.push(h(r));else{!u&&t.props._grid&&console.warn("`_grid` properties on children have been deprecated as of React 15.2. 
Please use `data-grid` or add your properties directly to the `layout`.");var n=t.props["data-grid"]||t.props._grid;n?(u||H([n],"ReactGridLayout.children"),i.push(h(c(c({},n),{},{i:t.key})))):i.push(h({w:1,h:1,x:0,y:f(i),i:String(t.key)}))}}}));var s=O(i,{cols:r});return a?s:v(s,n,r)},t.validateLayout=H,t.withLayoutItem=function(e,t,r){var n=D(e,t);return n?(n=r(h(n)),[e=d(e,n),n]):[e,null]};var n=a(r(72307)),o=a(r(67294));function a(e){return e&&e.__esModule?e:{default:e}}function i(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function c(e){for(var t=1;tr&&(r=t);return r}function p(e){for(var t=Array(e.length),r=0,n=e.length;r=t.x+t.w)&&(!(e.y+e.h<=t.y)&&!(e.y>=t.y+t.h))))}function v(e,t,r){for(var n=x(e),o=z(e,t),a=Array(e.length),i=0,c=o.length;it.y+t.h)break;g(t,i)&&b(e,i,r+t[o],n)}}t[n]=r}function w(e,t,r,n,o){var a,i="horizontal"===r;if("vertical"===r)for(t.y=Math.min(f(e),t.y);t.y>0&&!S(e,t);)t.y--;else if(i)for(;t.x>0&&!S(e,t);)t.x--;for(;a=S(e,t);)i?b(o,t,a.x+a.w,"x"):b(o,t,a.y+a.h,"y"),i&&t.x+t.w>n&&(t.x=n-t.w,t.y++);return t.y=Math.max(t.y,0),t.x=Math.max(t.x,0),t}function O(e,t){for(var r=x(e),n=0,o=e.length;nt.cols&&(a.x=t.cols-a.w),a.x<0&&(a.x=0,a.w=t.cols),a.static)for(;S(r,a);)a.y++;else r.push(a)}return e}function D(e,t){for(var r=0,n=e.length;r=n:"horizontal"===i&&"number"===typeof r&&u>=r)&&(f=f.reverse());var d=P(f,t),h=d.length>0;if(h&&s)return p(e);if(h&&a)return R("Collision prevented on ".concat(t.i,", reverting.")),t.x=u,t.y=l,t.moved=!1,e;for(var y=0,g=d.length;yt.y||e.y===t.y&&e.x>t.x?1:e.y===t.y&&e.x===t.x?0:-1}))}function _(e){return e.slice(0).sort((function(e,t){return e.x>t.x||e.x===t.x&&e.y>t.y?1:-1}))}function H(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"Layout",r=["x","y","w","h"];if(!Array.isArray(e))throw new Error(t+" must be an array!");for(var n=0,o=e.length;n=0||(o[r]=e[r]);return o}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(o[r]=e[r])}return o}function m(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function b(e){for(var t=1;te.length)&&(t=e.length);for(var r=0,n=new Array(t);r, without drag handlers. This will make this component effectively undraggable. 
Please attach `onDrag` or `onStop` handlers so you can adjust the `position` of this element."),t}return t=l,n=[{key:"getDerivedStateFromProps",value:function(e,t){var r=e.position,n=t.prevPropsPosition;return!r||n&&r.x===n.x&&r.y===n.y?null:((0,p.default)("Draggable: getDerivedStateFromProps %j",{position:r,prevPropsPosition:n}),{x:r.x,y:r.y,prevPropsPosition:b({},r)})}}],(r=[{key:"componentDidMount",value:function(){"undefined"!==typeof window.SVGElement&&this.findDOMNode()instanceof window.SVGElement&&this.setState({isElementSVG:!0})}},{key:"componentWillUnmount",value:function(){this.setState({dragging:!1})}},{key:"findDOMNode",value:function(){var e,t,r;return null!==(e=null===(t=this.props)||void 0===t||null===(r=t.nodeRef)||void 0===r?void 0:r.current)&&void 0!==e?e:i.default.findDOMNode(this)}},{key:"render",value:function(){var e,t=this.props,r=(t.axis,t.bounds,t.children),n=t.defaultPosition,a=t.defaultClassName,i=t.defaultClassNameDragging,l=t.defaultClassNameDragged,p=t.position,h=t.positionOffset,y=(t.scale,v(t,d)),m={},w=null,O=!Boolean(p)||this.state.dragging,D=p||n,S={x:(0,u.canDragX)(this)&&O?this.state.x:D.x,y:(0,u.canDragY)(this)&&O?this.state.y:D.y};this.state.isElementSVG?w=(0,s.createSVGTransform)(S,h):m=(0,s.createCSSTransform)(S,h);var P=(0,c.default)(r.props.className||"",a,(M(e={},i,this.state.dragging),M(e,l,this.state.dragged),e));return o.createElement(f.default,g({},y,{onStart:this.onDragStart,onDrag:this.onDrag,onStop:this.onDragStop}),o.cloneElement(o.Children.only(r),{className:P,style:b(b({},r.props.style),m),transform:w}))}}])&&D(t.prototype,r),n&&D(t,n),Object.defineProperty(t,"prototype",{writable:!1}),l}(o.Component);t.default=z,M(z,"displayName","Draggable"),M(z,"propTypes",b(b({},f.default.propTypes),{},{axis:a.default.oneOf(["both","x","y","none"]),bounds:a.default.oneOfType([a.default.shape({left:a.default.number,right:a.default.number,top:a.default.number,bottom:a.default.number}),a.default.string,a.default.oneOf([!1])]),defaultClassName:a.default.string,defaultClassNameDragging:a.default.string,defaultClassNameDragged:a.default.string,defaultPosition:a.default.shape({x:a.default.number,y:a.default.number}),positionOffset:a.default.shape({x:a.default.oneOfType([a.default.number,a.default.string]),y:a.default.oneOfType([a.default.number,a.default.string])}),position:a.default.shape({x:a.default.number,y:a.default.number}),className:l.dontSetMe,style:l.dontSetMe,transform:l.dontSetMe})),M(z,"defaultProps",b(b({},f.default.defaultProps),{},{axis:"both",bounds:!1,defaultClassName:"react-draggable",defaultClassNameDragging:"react-draggable-dragging",defaultClassNameDragged:"react-draggable-dragged",defaultPosition:{x:0,y:0},scale:1}))},66436:function(e,t,r){"use strict";function n(e){return n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},n(e)}Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!==n(e)&&"function"!==typeof e)return{default:e};var r=p(t);if(r&&r.has(e))return r.get(e);var o={},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var i in e)if("default"!==i&&Object.prototype.hasOwnProperty.call(e,i)){var c=a?Object.getOwnPropertyDescriptor(e,i):null;c&&(c.get||c.set)?Object.defineProperty(o,i,c):o[i]=e[i]}o.default=e,r&&r.set(e,o);return 
o}(r(67294)),a=f(r(45697)),i=f(r(73935)),c=r(73655),s=r(83524),u=r(80365),l=f(r(39103));function f(e){return e&&e.__esModule?e:{default:e}}function p(e){if("function"!==typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(p=function(e){return e?r:t})(e)}function d(e,t){return function(e){if(Array.isArray(e))return e}(e)||function(e,t){var r=null==e?null:"undefined"!==typeof Symbol&&e[Symbol.iterator]||e["@@iterator"];if(null==r)return;var n,o,a=[],i=!0,c=!1;try{for(r=r.call(e);!(i=(n=r.next()).done)&&(a.push(n.value),!t||a.length!==t);i=!0);}catch(s){c=!0,o=s}finally{try{i||null==r.return||r.return()}finally{if(c)throw o}}return a}(e,t)||function(e,t){if(!e)return;if("string"===typeof e)return h(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);"Object"===r&&e.constructor&&(r=e.constructor.name);if("Map"===r||"Set"===r)return Array.from(e);if("Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r))return h(e,t)}(e,t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function h(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,n=new Array(t);r not mounted on DragStart!");var n=r.ownerDocument;if(!(e.props.disabled||!(t.target instanceof n.defaultView.Node)||e.props.handle&&!(0,c.matchesSelectorAndParentsTo)(t.target,e.props.handle,r)||e.props.cancel&&(0,c.matchesSelectorAndParentsTo)(t.target,e.props.cancel,r))){"touchstart"===t.type&&t.preventDefault();var o=(0,c.getTouchIdentifier)(t);e.setState({touchIdentifier:o});var a=(0,s.getControlPosition)(t,o,m(e));if(null!=a){var i=a.x,u=a.y,f=(0,s.createCoreData)(m(e),i,u);(0,l.default)("DraggableCore: handleDragStart: %j",f),(0,l.default)("calling",e.props.onStart),!1!==e.props.onStart(t,f)&&!1!==e.mounted&&(e.props.enableUserSelectHack&&(0,c.addUserSelectStyles)(n),e.setState({dragging:!0,lastX:i,lastY:u}),(0,c.addEvent)(n,S.move,e.handleDrag),(0,c.addEvent)(n,S.stop,e.handleDragStop))}}})),w(m(e),"handleDrag",(function(t){var r=(0,s.getControlPosition)(t,e.state.touchIdentifier,m(e));if(null!=r){var n=r.x,o=r.y;if(Array.isArray(e.props.grid)){var a=n-e.state.lastX,i=o-e.state.lastY,c=d((0,s.snapToGrid)(e.props.grid,a,i),2);if(a=c[0],i=c[1],!a&&!i)return;n=e.state.lastX+a,o=e.state.lastY+i}var u=(0,s.createCoreData)(m(e),n,o);if((0,l.default)("DraggableCore: handleDrag: %j",u),!1!==e.props.onDrag(t,u)&&!1!==e.mounted)e.setState({lastX:n,lastY:o});else try{e.handleDragStop(new MouseEvent("mouseup"))}catch(p){var f=document.createEvent("MouseEvents");f.initMouseEvent("mouseup",!0,!0,window,0,0,0,0,0,!1,!1,!1,!1,0,null),e.handleDragStop(f)}}})),w(m(e),"handleDragStop",(function(t){if(e.state.dragging){var r=(0,s.getControlPosition)(t,e.state.touchIdentifier,m(e));if(null!=r){var n=r.x,o=r.y;if(Array.isArray(e.props.grid)){var a=n-e.state.lastX||0,i=o-e.state.lastY||0,u=d((0,s.snapToGrid)(e.props.grid,a,i),2);a=u[0],i=u[1],n=e.state.lastX+a,o=e.state.lastY+i}var f=(0,s.createCoreData)(m(e),n,o);if(!1===e.props.onStop(t,f)||!1===e.mounted)return!1;var p=e.findDOMNode();p&&e.props.enableUserSelectHack&&(0,c.removeUserSelectStyles)(p.ownerDocument),(0,l.default)("DraggableCore: handleDragStop: %j",f),e.setState({dragging:!1,lastX:NaN,lastY:NaN}),p&&((0,l.default)("DraggableCore: Removing handlers"),(0,c.removeEvent)(p.ownerDocument,S.move,e.handleDrag),(0,c.removeEvent)(p.ownerDocument,S.stop,e.handleDragStop))}}})),w(m(e),"onMouseDown",(function(t){return 
S=D,e.handleDragStart(t)})),w(m(e),"onMouseUp",(function(t){return S=D,e.handleDragStop(t)})),w(m(e),"onTouchStart",(function(t){return S=O,e.handleDragStart(t)})),w(m(e),"onTouchEnd",(function(t){return S=O,e.handleDragStop(t)})),e}return t=u,(r=[{key:"componentDidMount",value:function(){this.mounted=!0;var e=this.findDOMNode();e&&(0,c.addEvent)(e,O.start,this.onTouchStart,{passive:!1})}},{key:"componentWillUnmount",value:function(){this.mounted=!1;var e=this.findDOMNode();if(e){var t=e.ownerDocument;(0,c.removeEvent)(t,D.move,this.handleDrag),(0,c.removeEvent)(t,O.move,this.handleDrag),(0,c.removeEvent)(t,D.stop,this.handleDragStop),(0,c.removeEvent)(t,O.stop,this.handleDragStop),(0,c.removeEvent)(e,O.start,this.onTouchStart,{passive:!1}),this.props.enableUserSelectHack&&(0,c.removeUserSelectStyles)(t)}}},{key:"findDOMNode",value:function(){var e,t,r;return null!==(e=this.props)&&void 0!==e&&e.nodeRef?null===(t=this.props)||void 0===t||null===(r=t.nodeRef)||void 0===r?void 0:r.current:i.default.findDOMNode(this)}},{key:"render",value:function(){return o.cloneElement(o.Children.only(this.props.children),{onMouseDown:this.onMouseDown,onMouseUp:this.onMouseUp,onTouchEnd:this.onTouchEnd})}}])&&y(t.prototype,r),n&&y(t,n),Object.defineProperty(t,"prototype",{writable:!1}),u}(o.Component);t.default=P,w(P,"displayName","DraggableCore"),w(P,"propTypes",{allowAnyClick:a.default.bool,disabled:a.default.bool,enableUserSelectHack:a.default.bool,offsetParent:function(e,t){if(e[t]&&1!==e[t].nodeType)throw new Error("Draggable's offsetParent must be a DOM Node.")},grid:a.default.arrayOf(a.default.number),handle:a.default.string,cancel:a.default.string,nodeRef:a.default.object,onStart:a.default.func,onDrag:a.default.func,onStop:a.default.func,onMouseDown:a.default.func,scale:a.default.number,className:u.dontSetMe,style:u.dontSetMe,transform:u.dontSetMe}),w(P,"defaultProps",{allowAnyClick:!1,disabled:!1,enableUserSelectHack:!0,onStart:function(){},onDrag:function(){},onStop:function(){},onMouseDown:function(){},scale:1})},10434:function(e,t,r){"use strict";var n=r(48917),o=n.default,a=n.DraggableCore;e.exports=o,e.exports.default=o,e.exports.DraggableCore=a},73655:function(e,t,r){"use strict";function n(e){return n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},n(e)}Object.defineProperty(t,"__esModule",{value:!0}),t.addClassName=d,t.addEvent=function(e,t,r,n){if(!e)return;var o=s({capture:!0},n);e.addEventListener?e.addEventListener(t,r,o):e.attachEvent?e.attachEvent("on"+t,r):e["on"+t]=r},t.addUserSelectStyles=function(e){if(!e)return;var t=e.getElementById("react-draggable-style-el");t||((t=e.createElement("style")).type="text/css",t.id="react-draggable-style-el",t.innerHTML=".react-draggable-transparent-selection *::-moz-selection {all: inherit;}\n",t.innerHTML+=".react-draggable-transparent-selection *::selection {all: inherit;}\n",e.getElementsByTagName("head")[0].appendChild(t));e.body&&d(e.body,"react-draggable-transparent-selection")},t.createCSSTransform=function(e,t){var r=p(e,t,"px");return u({},(0,a.browserPrefixToKey)("transform",a.default),r)},t.createSVGTransform=function(e,t){return p(e,t,"")},t.getTouch=function(e,t){return e.targetTouches&&(0,o.findInArray)(e.targetTouches,(function(e){return t===e.identifier}))||e.changedTouches&&(0,o.findInArray)(e.changedTouches,(function(e){return 
t===e.identifier}))},t.getTouchIdentifier=function(e){if(e.targetTouches&&e.targetTouches[0])return e.targetTouches[0].identifier;if(e.changedTouches&&e.changedTouches[0])return e.changedTouches[0].identifier},t.getTranslation=p,t.innerHeight=function(e){var t=e.clientHeight,r=e.ownerDocument.defaultView.getComputedStyle(e);return t-=(0,o.int)(r.paddingTop),t-=(0,o.int)(r.paddingBottom)},t.innerWidth=function(e){var t=e.clientWidth,r=e.ownerDocument.defaultView.getComputedStyle(e);return t-=(0,o.int)(r.paddingLeft),t-=(0,o.int)(r.paddingRight)},t.matchesSelector=f,t.matchesSelectorAndParentsTo=function(e,t,r){var n=e;do{if(f(n,t))return!0;if(n===r)return!1;n=n.parentNode}while(n);return!1},t.offsetXYFromParent=function(e,t,r){var n=t===t.ownerDocument.body?{left:0,top:0}:t.getBoundingClientRect(),o=(e.clientX+t.scrollLeft-n.left)/r,a=(e.clientY+t.scrollTop-n.top)/r;return{x:o,y:a}},t.outerHeight=function(e){var t=e.clientHeight,r=e.ownerDocument.defaultView.getComputedStyle(e);return t+=(0,o.int)(r.borderTopWidth),t+=(0,o.int)(r.borderBottomWidth)},t.outerWidth=function(e){var t=e.clientWidth,r=e.ownerDocument.defaultView.getComputedStyle(e);return t+=(0,o.int)(r.borderLeftWidth),t+=(0,o.int)(r.borderRightWidth)},t.removeClassName=h,t.removeEvent=function(e,t,r,n){if(!e)return;var o=s({capture:!0},n);e.removeEventListener?e.removeEventListener(t,r,o):e.detachEvent?e.detachEvent("on"+t,r):e["on"+t]=null},t.removeUserSelectStyles=function(e){if(!e)return;try{if(e.body&&h(e.body,"react-draggable-transparent-selection"),e.selection)e.selection.empty();else{var t=(e.defaultView||window).getSelection();t&&"Caret"!==t.type&&t.removeAllRanges()}}catch(r){}};var o=r(80365),a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!==n(e)&&"function"!==typeof e)return{default:e};var r=i(t);if(r&&r.has(e))return r.get(e);var o={},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var c in e)if("default"!==c&&Object.prototype.hasOwnProperty.call(e,c)){var s=a?Object.getOwnPropertyDescriptor(e,c):null;s&&(s.get||s.set)?Object.defineProperty(o,c,s):o[c]=e[c]}o.default=e,r&&r.set(e,o);return o}(r(61e3));function i(e){if("function"!==typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(i=function(e){return e?r:t})(e)}function c(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function s(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:"transform";if("undefined"===typeof window)return"";var a=null===(e=window.document)||void 0===e||null===(t=e.documentElement)||void 0===t?void 0:t.style;if(!a)return"";if(n in a)return"";for(var i=0;i: Unmounted during event!");return t}},80365:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.dontSetMe=function(e,t,r){if(e[t])return new Error("Invalid prop ".concat(t," passed to ").concat(r," - do not set this, set it on the child."))},t.findInArray=function(e,t){for(var r=0,n=e.length;rr?r:e=0||(o[r]=e[r]);return o}function d(e){var r=e.renderLabel,n=e.labelProps,o=r(n);if("string"===typeof o||"number"===typeof o){n.dataEntry,n.dataIndex;var a=p(n,["dataEntry","dataIndex"]);return t.createElement("text",Object.assign({dominantBaseline:"central"},a),o)}return t.isValidElement(o)?o:null}function h(e){var t=1e14;return Math.round((e+Number.EPSILON)*t)/t}function y(e){var t=e.labelPosition,r=e.lineWidth,n=h(e.labelHorizontalShift);return 
0===n?"middle":t>100?n>0?"start":"end":t<100-r?n>0?"end":"start":"middle"}function g(e,t){return e.map((function(e,r){var n,c=null!=(n=s(t.segmentsShift,r))?n:0,u=o(t.radius,t.labelPosition)+c,l=i(a(e.startAngle,e.degrees),u),f=l.dx,p=l.dy;return{x:t.center[0],y:t.center[1],dx:f,dy:p,textAnchor:y({labelPosition:t.labelPosition,lineWidth:t.lineWidth,labelHorizontalShift:f}),dataEntry:e,dataIndex:r,style:s(t.labelStyle,r)}}))}function v(e,r){var n=r.label;if(n)return g(e,r).map((function(e,r){return t.createElement(d,{key:"label-"+(e.dataEntry.key||r),renderLabel:n,labelProps:e})}))}var m=function(e,t,r,n,o){var a=o-n;if(0===a)return[];var i=r*Math.cos(n)+e,c=r*Math.sin(n)+t,s=r*Math.cos(o)+e,u=r*Math.sin(o)+t;return[["M",i,c],["A",r,r,0,Math.abs(a)<=Math.PI?"0":"1",a<0?"0":"1",s,u]]};function b(e,t,o,a,i){var c=n(a,-359.999,359.999);return m(e,t,i,r(o),r(o+c)).map((function(e){return e.join(" ")})).join(" ")}function w(e){var n,s,u=e.cx,l=e.cy,f=e.lengthAngle,d=e.lineWidth,h=e.radius,y=e.shift,g=void 0===y?0:y,v=e.reveal,m=e.rounded,w=e.startAngle,O=e.title,D=p(e,["cx","cy","lengthAngle","lineWidth","radius","shift","reveal","rounded","startAngle","title"]),S=h-d/2,P=i(a(w,f),g),x=b(u+P.dx,l+P.dy,w,f,S);if(c(v)){var j=r(S)*f;s=(n=Math.abs(j))-o(n,v)}return t.createElement("path",Object.assign({d:x,fill:"none",strokeWidth:d,strokeDasharray:n,strokeDashoffset:s,strokeLinecap:m?"round":void 0},D),O&&t.createElement("title",null,O))}function O(e,t,r){var n="stroke-dashoffset "+e+"ms "+t;return r&&r.transition&&(n=n+","+r.transition),{transition:n}}function D(e){return e.animate&&!c(e.reveal)?100:e.reveal}function S(e,t){return e&&function(r){e(r,t)}}function P(e,r,n){var a=null!=n?n:D(r),i=r.radius,c=r.center,u=c[0],l=c[1],f=o(i,r.lineWidth),p=e.map((function(e,n){var o=s(r.segmentsStyle,n);return t.createElement(w,{cx:u,cy:l,key:e.key||n,lengthAngle:e.degrees,lineWidth:f,radius:i,rounded:r.rounded,reveal:a,shift:s(r.segmentsShift,n),startAngle:e.startAngle,title:e.title,style:Object.assign({},o,r.animate&&O(r.animationDuration,r.animationEasing,o)),stroke:e.color,tabIndex:r.segmentsTabIndex,onBlur:S(r.onBlur,n),onClick:S(r.onClick,n),onFocus:S(r.onFocus,n),onKeyDown:S(r.onKeyDown,n),onMouseOver:S(r.onMouseOver,n),onMouseOut:S(r.onMouseOut,n)})}));return r.background&&p.unshift(t.createElement(w,{cx:u,cy:l,key:"bg",lengthAngle:r.lengthAngle,lineWidth:f,radius:i,rounded:r.rounded,startAngle:r.startAngle,stroke:r.background})),p}var x={animationDuration:500,animationEasing:"ease-out",center:[50,50],data:[],labelPosition:50,lengthAngle:360,lineWidth:100,paddingAngle:0,radius:50,startAngle:0,viewBoxSize:[100,100]};function j(e){var r=u(e,x),n=t.useState(r.animate?0:null),o=n[0],a=n[1];t.useEffect((function(){r.animate&&a(null)}),[]);var i=f(r);return t.createElement("svg",{viewBox:"0 0 "+r.viewBoxSize[0]+" "+r.viewBoxSize[1],width:"100%",height:"100%",className:r.className,style:r.style},P(i,r,o),v(i,r),r.children)}e.PieChart=j,e.pieChartDefaultProps=x}(t,r(67294))},22827:function(e,t,r){"use strict";t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!==typeof e&&"function"!==typeof e)return{default:e};var r=s(t);if(r&&r.has(e))return r.get(e);var n={},o=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&Object.prototype.hasOwnProperty.call(e,a)){var i=o?Object.getOwnPropertyDescriptor(e,a):null;i&&(i.get||i.set)?Object.defineProperty(n,a,i):n[a]=e[a]}n.default=e,r&&r.set(e,n);return 
n}(r(67294)),o=r(26422),a=r(59069),i=r(448),c=["children","className","draggableOpts","width","height","handle","handleSize","lockAspectRatio","axis","minConstraints","maxConstraints","onResize","onResizeStop","onResizeStart","resizeHandles","transformScale"];function s(e){if("function"!==typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(s=function(e){return e?r:t})(e)}function u(){return u=Object.assign?Object.assign.bind():function(e){for(var t=1;tMath.abs(s*i)?t=e/i:e=t*i}var u=e,l=t,f=this.slack||[0,0],p=f[0],d=f[1];return e+=p,t+=d,n&&(e=Math.max(n[0],e),t=Math.max(n[1],t)),o&&(e=Math.min(o[0],e),t=Math.min(o[1],t)),this.slack=[p+(u-e),d+(l-t)],[e,t]},s.resizeHandler=function(e,t){var r=this;return function(n,o){var a=o.node,i=o.deltaX,c=o.deltaY;"onResizeStart"===e&&r.resetData();var s=("both"===r.props.axis||"x"===r.props.axis)&&"n"!==t&&"s"!==t,u=("both"===r.props.axis||"y"===r.props.axis)&&"e"!==t&&"w"!==t;if(s||u){var l=t[0],f=t[t.length-1],p=a.getBoundingClientRect();if(null!=r.lastHandleRect){if("w"===f)i+=p.left-r.lastHandleRect.left;if("n"===l)c+=p.top-r.lastHandleRect.top}r.lastHandleRect=p,"w"===f&&(i=-i),"n"===l&&(c=-c);var d=r.props.width+(s?i/r.props.transformScale:0),h=r.props.height+(u?c/r.props.transformScale:0),y=r.runConstraints(d,h);d=y[0],h=y[1];var g=d!==r.props.width||h!==r.props.height,v="function"===typeof r.props[e]?r.props[e]:null;v&&!("onResize"===e&&!g)&&(null==n.persist||n.persist(),v(n,{node:a,size:{width:d,height:h},handle:t})),"onResizeStop"===e&&r.resetData()}}},s.renderResizeHandle=function(e,t){var r=this.props.handle;if(!r)return n.createElement("span",{className:"react-resizable-handle react-resizable-handle-"+e,ref:t});if("function"===typeof r)return r(e,t);var o=f({ref:t},"string"===typeof r.type?{}:{handleAxis:e});return n.cloneElement(r,o)},s.render=function(){var e=this,t=this.props,r=t.children,i=t.className,s=t.draggableOpts,l=(t.width,t.height,t.handle,t.handleSize,t.lockAspectRatio,t.axis,t.minConstraints,t.maxConstraints,t.onResize,t.onResizeStop,t.onResizeStart,t.resizeHandles),p=(t.transformScale,function(e,t){if(null==e)return{};var r,n,o={},a=Object.keys(e);for(n=0;n=0||(o[r]=e[r]);return o}(t,c));return(0,a.cloneElement)(r,f(f({},p),{},{className:(i?i+" ":"")+"react-resizable",children:[].concat(r.props.children,l.map((function(t){var r,a=null!=(r=e.handleRefs[t])?r:e.handleRefs[t]=n.createRef();return n.createElement(o.DraggableCore,u({},s,{nodeRef:a,key:"resizableHandle-"+t,onStop:e.resizeHandler("onResizeStop",t),onStart:e.resizeHandler("onResizeStart",t),onDrag:e.resizeHandler("onResize",t)}),e.renderResizeHandle(t,a))})))}))},i}(n.Component);t.default=h,h.propTypes=i.resizableProps,h.defaultProps={axis:"both",handleSize:[20,20],lockAspectRatio:!1,minConstraints:[20,20],maxConstraints:[1/0,1/0],resizeHandles:["se"],transformScale:1}},8735:function(e,t,r){"use strict";t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!==typeof e&&"function"!==typeof e)return{default:e};var r=u(t);if(r&&r.has(e))return r.get(e);var n={},o=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&Object.prototype.hasOwnProperty.call(e,a)){var i=o?Object.getOwnPropertyDescriptor(e,a):null;i&&(i.get||i.set)?Object.defineProperty(n,a,i):n[a]=e[a]}n.default=e,r&&r.set(e,n);return 
n}(r(67294)),o=s(r(45697)),a=s(r(22827)),i=r(448),c=["handle","handleSize","onResize","onResizeStart","onResizeStop","draggableOpts","minConstraints","maxConstraints","lockAspectRatio","axis","width","height","resizeHandles","style","transformScale"];function s(e){return e&&e.__esModule?e:{default:e}}function u(e){if("function"!==typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(u=function(e){return e?r:t})(e)}function l(){return l=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[r]=e[r]);return o}(e,c);return n.createElement(a.default,{axis:h,draggableOpts:s,handle:t,handleSize:r,height:this.state.height,lockAspectRatio:d,maxConstraints:f,minConstraints:u,onResizeStart:o,onResize:this.onResize,onResizeStop:i,resizeHandles:y,transformScale:v,width:this.state.width},n.createElement("div",l({},m,{style:p(p({},g),{},{width:this.state.width+"px",height:this.state.height+"px"})})))},o}(n.Component);t.default=y,y.propTypes=p(p({},i.resizableProps),{},{children:o.default.element})},448:function(e,t,r){"use strict";t.__esModule=!0,t.resizableProps=void 0;var n,o=(n=r(45697))&&n.__esModule?n:{default:n};r(26422);var a={axis:o.default.oneOf(["both","x","y","none"]),className:o.default.string,children:o.default.element.isRequired,draggableOpts:o.default.shape({allowAnyClick:o.default.bool,cancel:o.default.string,children:o.default.node,disabled:o.default.bool,enableUserSelectHack:o.default.bool,offsetParent:o.default.node,grid:o.default.arrayOf(o.default.number),handle:o.default.string,nodeRef:o.default.object,onStart:o.default.func,onDrag:o.default.func,onStop:o.default.func,onMouseDown:o.default.func,scale:o.default.number}),height:function(){for(var e=arguments.length,t=new Array(e),r=0;r=0||(o[r]=e[r]);return o}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(o[r]=e[r])}return o}function m(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function b(e){for(var t=1;te.length)&&(t=e.length);for(var r=0,n=new Array(t);r, without drag handlers. This will make this component effectively undraggable. 
Please attach `onDrag` or `onStop` handlers so you can adjust the `position` of this element."),t}return t=l,n=[{key:"getDerivedStateFromProps",value:function(e,t){var r=e.position,n=t.prevPropsPosition;return!r||n&&r.x===n.x&&r.y===n.y?null:((0,p.default)("Draggable: getDerivedStateFromProps %j",{position:r,prevPropsPosition:n}),{x:r.x,y:r.y,prevPropsPosition:b({},r)})}}],(r=[{key:"componentDidMount",value:function(){"undefined"!==typeof window.SVGElement&&this.findDOMNode()instanceof window.SVGElement&&this.setState({isElementSVG:!0})}},{key:"componentWillUnmount",value:function(){this.setState({dragging:!1})}},{key:"findDOMNode",value:function(){var e,t,r;return null!==(e=null===(t=this.props)||void 0===t||null===(r=t.nodeRef)||void 0===r?void 0:r.current)&&void 0!==e?e:i.default.findDOMNode(this)}},{key:"render",value:function(){var e,t=this.props,r=(t.axis,t.bounds,t.children),n=t.defaultPosition,a=t.defaultClassName,i=t.defaultClassNameDragging,l=t.defaultClassNameDragged,p=t.position,h=t.positionOffset,y=(t.scale,v(t,d)),m={},w=null,O=!Boolean(p)||this.state.dragging,D=p||n,S={x:(0,u.canDragX)(this)&&O?this.state.x:D.x,y:(0,u.canDragY)(this)&&O?this.state.y:D.y};this.state.isElementSVG?w=(0,s.createSVGTransform)(S,h):m=(0,s.createCSSTransform)(S,h);var P=(0,c.default)(r.props.className||"",a,(M(e={},i,this.state.dragging),M(e,l,this.state.dragged),e));return o.createElement(f.default,g({},y,{onStart:this.onDragStart,onDrag:this.onDrag,onStop:this.onDragStop}),o.cloneElement(o.Children.only(r),{className:P,style:b(b({},r.props.style),m),transform:w}))}}])&&D(t.prototype,r),n&&D(t,n),Object.defineProperty(t,"prototype",{writable:!1}),l}(o.Component);t.default=z,M(z,"displayName","Draggable"),M(z,"propTypes",b(b({},f.default.propTypes),{},{axis:a.default.oneOf(["both","x","y","none"]),bounds:a.default.oneOfType([a.default.shape({left:a.default.number,right:a.default.number,top:a.default.number,bottom:a.default.number}),a.default.string,a.default.oneOf([!1])]),defaultClassName:a.default.string,defaultClassNameDragging:a.default.string,defaultClassNameDragged:a.default.string,defaultPosition:a.default.shape({x:a.default.number,y:a.default.number}),positionOffset:a.default.shape({x:a.default.oneOfType([a.default.number,a.default.string]),y:a.default.oneOfType([a.default.number,a.default.string])}),position:a.default.shape({x:a.default.number,y:a.default.number}),className:l.dontSetMe,style:l.dontSetMe,transform:l.dontSetMe})),M(z,"defaultProps",b(b({},f.default.defaultProps),{},{axis:"both",bounds:!1,defaultClassName:"react-draggable",defaultClassNameDragging:"react-draggable-dragging",defaultClassNameDragged:"react-draggable-dragged",defaultPosition:{x:0,y:0},scale:1}))},49285:function(e,t,r){"use strict";function n(e){return n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},n(e)}Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!==n(e)&&"function"!==typeof e)return{default:e};var r=p(t);if(r&&r.has(e))return r.get(e);var o={},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var i in e)if("default"!==i&&Object.prototype.hasOwnProperty.call(e,i)){var c=a?Object.getOwnPropertyDescriptor(e,i):null;c&&(c.get||c.set)?Object.defineProperty(o,i,c):o[i]=e[i]}o.default=e,r&&r.set(e,o);return 
o}(r(67294)),a=f(r(45697)),i=f(r(73935)),c=r(75323),s=r(63669),u=r(16723),l=f(r(5042));function f(e){return e&&e.__esModule?e:{default:e}}function p(e){if("function"!==typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(p=function(e){return e?r:t})(e)}function d(e,t){return function(e){if(Array.isArray(e))return e}(e)||function(e,t){var r=null==e?null:"undefined"!==typeof Symbol&&e[Symbol.iterator]||e["@@iterator"];if(null==r)return;var n,o,a=[],i=!0,c=!1;try{for(r=r.call(e);!(i=(n=r.next()).done)&&(a.push(n.value),!t||a.length!==t);i=!0);}catch(s){c=!0,o=s}finally{try{i||null==r.return||r.return()}finally{if(c)throw o}}return a}(e,t)||function(e,t){if(!e)return;if("string"===typeof e)return h(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);"Object"===r&&e.constructor&&(r=e.constructor.name);if("Map"===r||"Set"===r)return Array.from(e);if("Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r))return h(e,t)}(e,t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function h(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,n=new Array(t);r not mounted on DragStart!");var n=r.ownerDocument;if(!(e.props.disabled||!(t.target instanceof n.defaultView.Node)||e.props.handle&&!(0,c.matchesSelectorAndParentsTo)(t.target,e.props.handle,r)||e.props.cancel&&(0,c.matchesSelectorAndParentsTo)(t.target,e.props.cancel,r))){"touchstart"===t.type&&t.preventDefault();var o=(0,c.getTouchIdentifier)(t);e.setState({touchIdentifier:o});var a=(0,s.getControlPosition)(t,o,m(e));if(null!=a){var i=a.x,u=a.y,f=(0,s.createCoreData)(m(e),i,u);(0,l.default)("DraggableCore: handleDragStart: %j",f),(0,l.default)("calling",e.props.onStart),!1!==e.props.onStart(t,f)&&!1!==e.mounted&&(e.props.enableUserSelectHack&&(0,c.addUserSelectStyles)(n),e.setState({dragging:!0,lastX:i,lastY:u}),(0,c.addEvent)(n,S.move,e.handleDrag),(0,c.addEvent)(n,S.stop,e.handleDragStop))}}})),w(m(e),"handleDrag",(function(t){var r=(0,s.getControlPosition)(t,e.state.touchIdentifier,m(e));if(null!=r){var n=r.x,o=r.y;if(Array.isArray(e.props.grid)){var a=n-e.state.lastX,i=o-e.state.lastY,c=d((0,s.snapToGrid)(e.props.grid,a,i),2);if(a=c[0],i=c[1],!a&&!i)return;n=e.state.lastX+a,o=e.state.lastY+i}var u=(0,s.createCoreData)(m(e),n,o);if((0,l.default)("DraggableCore: handleDrag: %j",u),!1!==e.props.onDrag(t,u)&&!1!==e.mounted)e.setState({lastX:n,lastY:o});else try{e.handleDragStop(new MouseEvent("mouseup"))}catch(p){var f=document.createEvent("MouseEvents");f.initMouseEvent("mouseup",!0,!0,window,0,0,0,0,0,!1,!1,!1,!1,0,null),e.handleDragStop(f)}}})),w(m(e),"handleDragStop",(function(t){if(e.state.dragging){var r=(0,s.getControlPosition)(t,e.state.touchIdentifier,m(e));if(null!=r){var n=r.x,o=r.y;if(Array.isArray(e.props.grid)){var a=n-e.state.lastX||0,i=o-e.state.lastY||0,u=d((0,s.snapToGrid)(e.props.grid,a,i),2);a=u[0],i=u[1],n=e.state.lastX+a,o=e.state.lastY+i}var f=(0,s.createCoreData)(m(e),n,o);if(!1===e.props.onStop(t,f)||!1===e.mounted)return!1;var p=e.findDOMNode();p&&e.props.enableUserSelectHack&&(0,c.removeUserSelectStyles)(p.ownerDocument),(0,l.default)("DraggableCore: handleDragStop: %j",f),e.setState({dragging:!1,lastX:NaN,lastY:NaN}),p&&((0,l.default)("DraggableCore: Removing handlers"),(0,c.removeEvent)(p.ownerDocument,S.move,e.handleDrag),(0,c.removeEvent)(p.ownerDocument,S.stop,e.handleDragStop))}}})),w(m(e),"onMouseDown",(function(t){return 
S=D,e.handleDragStart(t)})),w(m(e),"onMouseUp",(function(t){return S=D,e.handleDragStop(t)})),w(m(e),"onTouchStart",(function(t){return S=O,e.handleDragStart(t)})),w(m(e),"onTouchEnd",(function(t){return S=O,e.handleDragStop(t)})),e}return t=u,(r=[{key:"componentDidMount",value:function(){this.mounted=!0;var e=this.findDOMNode();e&&(0,c.addEvent)(e,O.start,this.onTouchStart,{passive:!1})}},{key:"componentWillUnmount",value:function(){this.mounted=!1;var e=this.findDOMNode();if(e){var t=e.ownerDocument;(0,c.removeEvent)(t,D.move,this.handleDrag),(0,c.removeEvent)(t,O.move,this.handleDrag),(0,c.removeEvent)(t,D.stop,this.handleDragStop),(0,c.removeEvent)(t,O.stop,this.handleDragStop),(0,c.removeEvent)(e,O.start,this.onTouchStart,{passive:!1}),this.props.enableUserSelectHack&&(0,c.removeUserSelectStyles)(t)}}},{key:"findDOMNode",value:function(){var e,t,r;return null!==(e=this.props)&&void 0!==e&&e.nodeRef?null===(t=this.props)||void 0===t||null===(r=t.nodeRef)||void 0===r?void 0:r.current:i.default.findDOMNode(this)}},{key:"render",value:function(){return o.cloneElement(o.Children.only(this.props.children),{onMouseDown:this.onMouseDown,onMouseUp:this.onMouseUp,onTouchEnd:this.onTouchEnd})}}])&&y(t.prototype,r),n&&y(t,n),Object.defineProperty(t,"prototype",{writable:!1}),u}(o.Component);t.default=P,w(P,"displayName","DraggableCore"),w(P,"propTypes",{allowAnyClick:a.default.bool,disabled:a.default.bool,enableUserSelectHack:a.default.bool,offsetParent:function(e,t){if(e[t]&&1!==e[t].nodeType)throw new Error("Draggable's offsetParent must be a DOM Node.")},grid:a.default.arrayOf(a.default.number),handle:a.default.string,cancel:a.default.string,nodeRef:a.default.object,onStart:a.default.func,onDrag:a.default.func,onStop:a.default.func,onMouseDown:a.default.func,scale:a.default.number,className:u.dontSetMe,style:u.dontSetMe,transform:u.dontSetMe}),w(P,"defaultProps",{allowAnyClick:!1,disabled:!1,enableUserSelectHack:!0,onStart:function(){},onDrag:function(){},onStop:function(){},onMouseDown:function(){},scale:1})},26422:function(e,t,r){"use strict";var n=r(18385),o=n.default,a=n.DraggableCore;e.exports=o,e.exports.default=o,e.exports.DraggableCore=a},75323:function(e,t,r){"use strict";function n(e){return n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},n(e)}Object.defineProperty(t,"__esModule",{value:!0}),t.addClassName=d,t.addEvent=function(e,t,r,n){if(!e)return;var o=s({capture:!0},n);e.addEventListener?e.addEventListener(t,r,o):e.attachEvent?e.attachEvent("on"+t,r):e["on"+t]=r},t.addUserSelectStyles=function(e){if(!e)return;var t=e.getElementById("react-draggable-style-el");t||((t=e.createElement("style")).type="text/css",t.id="react-draggable-style-el",t.innerHTML=".react-draggable-transparent-selection *::-moz-selection {all: inherit;}\n",t.innerHTML+=".react-draggable-transparent-selection *::selection {all: inherit;}\n",e.getElementsByTagName("head")[0].appendChild(t));e.body&&d(e.body,"react-draggable-transparent-selection")},t.createCSSTransform=function(e,t){var r=p(e,t,"px");return u({},(0,a.browserPrefixToKey)("transform",a.default),r)},t.createSVGTransform=function(e,t){return p(e,t,"")},t.getTouch=function(e,t){return e.targetTouches&&(0,o.findInArray)(e.targetTouches,(function(e){return t===e.identifier}))||e.changedTouches&&(0,o.findInArray)(e.changedTouches,(function(e){return 
t===e.identifier}))},t.getTouchIdentifier=function(e){if(e.targetTouches&&e.targetTouches[0])return e.targetTouches[0].identifier;if(e.changedTouches&&e.changedTouches[0])return e.changedTouches[0].identifier},t.getTranslation=p,t.innerHeight=function(e){var t=e.clientHeight,r=e.ownerDocument.defaultView.getComputedStyle(e);return t-=(0,o.int)(r.paddingTop),t-=(0,o.int)(r.paddingBottom)},t.innerWidth=function(e){var t=e.clientWidth,r=e.ownerDocument.defaultView.getComputedStyle(e);return t-=(0,o.int)(r.paddingLeft),t-=(0,o.int)(r.paddingRight)},t.matchesSelector=f,t.matchesSelectorAndParentsTo=function(e,t,r){var n=e;do{if(f(n,t))return!0;if(n===r)return!1;n=n.parentNode}while(n);return!1},t.offsetXYFromParent=function(e,t,r){var n=t===t.ownerDocument.body?{left:0,top:0}:t.getBoundingClientRect(),o=(e.clientX+t.scrollLeft-n.left)/r,a=(e.clientY+t.scrollTop-n.top)/r;return{x:o,y:a}},t.outerHeight=function(e){var t=e.clientHeight,r=e.ownerDocument.defaultView.getComputedStyle(e);return t+=(0,o.int)(r.borderTopWidth),t+=(0,o.int)(r.borderBottomWidth)},t.outerWidth=function(e){var t=e.clientWidth,r=e.ownerDocument.defaultView.getComputedStyle(e);return t+=(0,o.int)(r.borderLeftWidth),t+=(0,o.int)(r.borderRightWidth)},t.removeClassName=h,t.removeEvent=function(e,t,r,n){if(!e)return;var o=s({capture:!0},n);e.removeEventListener?e.removeEventListener(t,r,o):e.detachEvent?e.detachEvent("on"+t,r):e["on"+t]=null},t.removeUserSelectStyles=function(e){if(!e)return;try{if(e.body&&h(e.body,"react-draggable-transparent-selection"),e.selection)e.selection.empty();else{var t=(e.defaultView||window).getSelection();t&&"Caret"!==t.type&&t.removeAllRanges()}}catch(r){}};var o=r(16723),a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!==n(e)&&"function"!==typeof e)return{default:e};var r=i(t);if(r&&r.has(e))return r.get(e);var o={},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var c in e)if("default"!==c&&Object.prototype.hasOwnProperty.call(e,c)){var s=a?Object.getOwnPropertyDescriptor(e,c):null;s&&(s.get||s.set)?Object.defineProperty(o,c,s):o[c]=e[c]}o.default=e,r&&r.set(e,o);return o}(r(21227));function i(e){if("function"!==typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(i=function(e){return e?r:t})(e)}function c(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function s(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:"transform";if("undefined"===typeof window)return"";var a=null===(e=window.document)||void 0===e||null===(t=e.documentElement)||void 0===t?void 0:t.style;if(!a)return"";if(n in a)return"";for(var i=0;i: Unmounted during event!");return t}},16723:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.dontSetMe=function(e,t,r){if(e[t])return new Error("Invalid prop ".concat(t," passed to ").concat(r," - do not set this, set it on the child."))},t.findInArray=function(e,t){for(var r=0,n=e.length;r'});i().add(c);t.default=c},97730:function(e,t,r){"use strict";var n=r(87854),o=r.n(n),a=r(95348),i=r.n(a),c=new(o())({id:"dashboard_add_chart",use:"dashboard_add_chart-usage",viewBox:"0 0 24 24",content:''});i().add(c);t.Z=c}}]); \ No newline at end of file diff --git a/web/gui/v2/9360.de29630b4dcacbeb5ecd.chunk.js b/web/gui/v2/9360.de29630b4dcacbeb5ecd.chunk.js new file mode 100644 index 00000000000000..fa733a2e767a9c --- /dev/null +++ 
b/web/gui/v2/9360.de29630b4dcacbeb5ecd.chunk.js @@ -0,0 +1 @@ +!function(){try{var n="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},e=(new Error).stack;e&&(n._sentryDebugIds=n._sentryDebugIds||{},n._sentryDebugIds[e]="3e534b36-1ffd-45c0-b5d3-e513b248c35c",n._sentryDebugIdIdentifier="sentry-dbid-3e534b36-1ffd-45c0-b5d3-e513b248c35c")}catch(n){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[9360],{79360:function(n,e,t){t.r(e),t.d(e,{default:function(){return E}});var r=t(67294),o=t(89250),a=(t(47042),t(91038),t(77601),t(82526),t(41817),t(32165),t(29439)),c=t(15861),i=t(64687),u=t.n(i),l=(t(66992),t(41539),t(78783),t(33948),t(41637),t(74916),t(64765),t(88674),t(60285),t(39714),t(21249),t(57640),t(9924),t(64211),t(2490),t(41874),t(23157),t(57327),t(88449),t(59849),t(9170),t(34668),t(92222),t(93416)),s=t(13477),f=t(28234),d=t(18761);function p(n,e){var t="undefined"!==typeof Symbol&&n[Symbol.iterator]||n["@@iterator"];if(!t){if(Array.isArray(n)||(t=function(n,e){if(!n)return;if("string"===typeof n)return v(n,e);var t=Object.prototype.toString.call(n).slice(8,-1);"Object"===t&&n.constructor&&(t=n.constructor.name);if("Map"===t||"Set"===t)return Array.from(n);if("Arguments"===t||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(t))return v(n,e)}(n))||e&&n&&"number"===typeof n.length){t&&(n=t);var r=0,o=function(){};return{s:o,n:function(){return r>=n.length?{done:!0}:{done:!1,value:n[r++]}},e:function(n){throw n},f:o}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var a,c=!0,i=!1;return{s:function(){t=t.call(n)},n:function(){var n=t.next();return c=n.done,n},e:function(n){i=!0,a=n},f:function(){try{c||null==t.return||t.return()}finally{if(i)throw a}}}}function v(n,e){(null==e||e>n.length)&&(e=n.length);for(var t=0,r=new Array(e);t
    Getting you back to Netdata..."),(a=new Error((0,f.r)(null===o||void 0===o?void 0:o.errorMsgKey)||"Redirect error")).name="".concat((null===o||void 0===o?void 0:o.errorMsgKey)||"Server error"),a.stack="".concat((null===o||void 0===o?void 0:o.errorCode)||"Redirect error code"),a;case 15:case"end":return n.stop()}}),n,null,[[0,7]])})));return function(e,t){return n.apply(this,arguments)}}(),y=function(n){return fetch(n,{redirect:"follow"}).then((function(){return n}))},b=function(n){var e,t="top;nowelcome=1",r=p(g.entries());try{for(r.s();!(e=r.n()).done;){var o=(0,a.Z)(e.value,2),c=o[0],i=o[1];"agentID"!==(c=decodeURIComponent(c))&&(t+=";".concat(encodeURIComponent(c)),""!==(i=decodeURIComponent(i))&&(t+="=".concat(encodeURIComponent(i))))}}catch(l){r.e(l)}finally{r.f()}var u=new URL(n);return"/"===u.pathname&&(u.pathname=""),u.hash=t,u.toString()},w=function(){var n=(0,s.jr)(),e=(0,o.TH)().search;return(0,r.useEffect)((function(){n&&function(n,e){if(g.has("agentID")){var t=decodeURIComponent(g.get("agentID")||"unknown_agent_id");h(t,n).catch((function(n){if("ErrVisitedNodeNotFound"!==(null===n||void 0===n?void 0:n.name))throw document.body.innerHTML="
    ".concat(null===n||void 0===n?void 0:n.message,"
    "),n;m()})).then((function(n){var t,r=null===n||void 0===n||null===(t=n.urls)||void 0===t?void 0:t.map(b),o="https:"===location.protocol&&r.some((function(n){return!n.startsWith("https:")})),a=o?r.filter((function(n){return n.startsWith("https:")})):r;if(0!==a.length)return Promise.any(a.map(y)).then((function(){location.assign("url".concat(e))}),(function(){o?location.protocol="http:":document.body.innerHTML="\n
    \n ")}));o?location.protocol="http:":m()}))}else document.body.innerHTML='
    missing "agentID" query string parameter
    '}(n,e||"")}),[n,e]),r.createElement(l.H4,null,"Redirecting...")},E=function(){return r.createElement(o.Z5,null,r.createElement(o.AW,{path:"/redirects/alerts",element:r.createElement(w,null)}),r.createElement(o.AW,{path:"/redirects/alarms",element:r.createElement(w,null)})," ")}},11060:function(n,e,t){var r=t(1702),o=Error,a=r("".replace),c=String(new o("zxcasd").stack),i=/\n\s*at [^:]*:[^\n]*/,u=i.test(c);n.exports=function(n,e){if(u&&"string"==typeof n&&!o.prepareStackTrace)for(;e--;)n=a(n,i,"");return n}},5392:function(n,e,t){var r=t(68880),o=t(11060),a=t(22914),c=Error.captureStackTrace;n.exports=function(n,e,t,i){a&&(c?c(n,e):r(n,"stack",o(t,i)))}},22914:function(n,e,t){var r=t(47293),o=t(79114);n.exports=!r((function(){var n=new Error("a");return!("stack"in n)||(Object.defineProperty(n,"stack",o(1,7)),7!==n.stack)}))},58340:function(n,e,t){var r=t(70111),o=t(68880);n.exports=function(n,e){r(e)&&"cause"in e&&o(n,"cause",e.cause)}},56277:function(n,e,t){var r=t(41340);n.exports=function(n,e){return void 0===n?arguments.length<2?"":e:r(n)}},56967:function(n,e,t){var r=t(82109),o=t(47976),a=t(79518),c=t(27674),i=t(99920),u=t(70030),l=t(68880),s=t(79114),f=t(58340),d=t(5392),p=t(20408),v=t(56277),g=t(5112)("toStringTag"),m=Error,h=[].push,y=function(n,e){var t,r=o(b,this);c?t=c(new m,r?a(this):b):(t=r?this:u(b),l(t,g,"Error")),void 0!==e&&l(t,"message",v(e)),d(t,y,t.stack,1),arguments.length>2&&f(t,arguments[2]);var i=[];return p(n,h,{that:i}),l(t,"errors",i),t};c?c(y,m):i(y,m,{name:!0});var b=y.prototype=u(m.prototype,{constructor:s(1,y),message:s(1,""),name:s(1,"AggregateError")});r({global:!0,constructor:!0,arity:2},{AggregateError:y})},9170:function(n,e,t){t(56967)},34668:function(n,e,t){var r=t(82109),o=t(46916),a=t(19662),c=t(35005),i=t(78523),u=t(12534),l=t(20408),s=t(80612),f="No one promise resolved";r({target:"Promise",stat:!0,forced:s},{any:function(n){var e=this,t=c("AggregateError"),r=i.f(e),s=r.resolve,d=r.reject,p=u((function(){var r=a(e.resolve),c=[],i=0,u=1,p=!1;l(n,(function(n){var a=i++,l=!1;u++,o(r,e,n).then((function(n){l||p||(p=!0,s(n))}),(function(n){l||p||(l=!0,c[a]=n,--u||d(new t(c,f)))}))})),--u||d(new t(c,f))}));return p.error&&d(p.value),r.promise}})},41817:function(n,e,t){var r=t(82109),o=t(19781),a=t(17854),c=t(1702),i=t(92597),u=t(60614),l=t(47976),s=t(41340),f=t(47045),d=t(99920),p=a.Symbol,v=p&&p.prototype;if(o&&u(p)&&(!("description"in v)||void 0!==p().description)){var g={},m=function(){var n=arguments.length<1||void 0===arguments[0]?void 0:s(arguments[0]),e=l(v,this)?new p(n):void 0===n?p():p(n);return""===n&&(g[e]=!0),e};d(m,p),m.prototype=v,v.constructor=m;var h="Symbol(description detection)"===String(p("description detection")),y=c(v.valueOf),b=c(v.toString),w=/^Symbol\((.*)\)[^)]+$/,E=c("".replace),S=c("".slice);f(v,"description",{configurable:!0,get:function(){var n=y(this);if(i(g,n))return"";var e=b(n),t=h?S(e,7,-1):E(e,w,"$1");return""===t?void 0:t}}),r({global:!0,constructor:!0,forced:!0},{Symbol:m})}},32165:function(n,e,t){t(26800)("iterator")}}]); \ No newline at end of file diff --git a/web/gui/v2/9510.dec77b81a86e7cd2ff86.chunk.js b/web/gui/v2/9510.dec77b81a86e7cd2ff86.chunk.js new file mode 100644 index 00000000000000..4a38f5b4dcd472 --- /dev/null +++ b/web/gui/v2/9510.dec77b81a86e7cd2ff86.chunk.js @@ -0,0 +1 @@ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new 
Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="0f6bf0c2-938a-4c34-bcab-fbd5aa4d8fc4",e._sentryDebugIdIdentifier="sentry-dbid-0f6bf0c2-938a-4c34-bcab-fbd5aa4d8fc4")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[9510],{89510:function(e,t,o){o.r(t);var i=o(71893),n=o(93416),r=(0,i.css)(['html,body,div,span,applet,object,iframe,h1,h2,h3,h4,h5,h6,p,blockquote,pre,a,abbr,acronym,address,big,cite,code,del,dfn,em,img,ins,kbd,q,s,samp,small,strike,strong,sub,sup,tt,var,b,u,i,center,dl,dt,dd,ol,ul,li,fieldset,form,label,legend,table,caption,tbody,tfoot,thead,tr,th,td,article,aside,canvas,details,embed,figure,figcaption,footer,header,hgroup,menu,nav,output,ruby,section,summary,time,mark,audio,video{margin:0;padding:0;border:0;font:inherit;font-size:100%;vertical-align:inherit;}body{font-size:14px;line-height:1.5;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Ubuntu,"Helvetica Neue",sans-serif;min-height:100vh;scroll-behavior:smooth;text-rendering:optimizespeed;scrollbar-gutter:stable both-edges;}article,aside,details,figcaption,figure,footer,header,hgroup,menu,nav,section{display:block;}body{-webkit-tap-highlight-color:transparent;-webkit-overflow-scrolling:touch;}body > iframe:not([src$="statuspage.io/embed/frame"]){display:none;}ol,ul{list-style:none;}blockquote,q{quotes:none;}blockquote:before,blockquote:after,q:before,q:after{content:"";content:none;}table{border-collapse:collapse;border-spacing:0;th,tr{vertical-align:middle;}th{font-weight:bold;}}*{box-sizing:inherit;-webkit-font-smoothing:antialiased;font-weight:inherit;text-rendering:optimizelegibility;-webkit-appearance:none;border-width:0px;border-style:initial;border-color:initial;border-image:initial;margin:0px;outline:0px;padding:0px;text-decoration:none;}*,*::before,*::after{box-sizing:border-box;}input[type="button" i],input[type="submit" i],input[type="reset" i],input[type="file" i]::-webkit-file-upload-button,button{border-color:transparent;border-style:none;border-width:0;padding:0;}a{color:',";&:hover{text-decoration:underline;color:",";}&:visited{color:",";}}::-webkit-scrollbar{height:8px;width:8px;}",""],(function(e){return e.theme.colors.link}),(function(e){return e.theme.colors.linkHover}),(function(e){return e.theme.colors.link}),(function(e){return e.isScreenSmall&&"\n ::-webkit-scrollbar {\n width: 0px !important;\n height: 0px !important;\n background: transparent !important; /* make scrollbar transparent */\n }\n "}));t.default=(0,i.createGlobalStyle)([""," ",""],r,n.webkitVisibleScrollbar)}}]); \ No newline at end of file diff --git a/web/gui/v2/9513.68ac17c54e2a98d13112.js b/web/gui/v2/9513.68ac17c54e2a98d13112.js new file mode 100644 index 00000000000000..f0fb9c7a0bd8b7 --- /dev/null +++ b/web/gui/v2/9513.68ac17c54e2a98d13112.js @@ -0,0 +1,2 @@ +/*! 
For license information please see 9513.68ac17c54e2a98d13112.js.LICENSE.txt */ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="752c0db3-9dd1-4e6d-bf9a-b86952cf7b1f",e._sentryDebugIdIdentifier="sentry-dbid-752c0db3-9dd1-4e6d-bf9a-b86952cf7b1f")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[9513],{66557:function(e,t,n){t.__esModule=!0,t.StyledIcon=void 0;var r=c(n(71893)),o=n(52612),a=c(n(38249)),i=c(n(15986)),l=c(n(61430));function c(e){return e&&e.__esModule?e:{default:e}}var s={small:"14px",medium:"16px",large:"24px"};t.StyledIcon=r.default.svg.withConfig({displayName:"styled__StyledIcon",componentId:"sc-1wxz4b1-0"})(["height:",";width:",";opacity:",";pointer-events:",";"," "," "," "," "," ",""],(function(e){var t=e.size;return e.height||s[t]}),(function(e){var t=e.size;return e.width||s[t]}),(function(e){return e.disabled?.4:1}),(function(e){return e.disabled?"none":"unset"}),(function(e){var t=e.rotate;return!isNaN(t)&&"transform: rotate("+90*t+"deg);"}),(function(e){var t=e.theme,n=e.color;return n&&"fill: "+(0,o.getColor)(n)({theme:t})+";"}),(function(e){var t=e.theme,n=e.hoverColor;return n&&"&:hover { fill: "+(0,o.getColor)(n)({theme:t})+"; }"}),a.default,i.default,l.default)},93416:function(e,t,n){n.r(t),n.d(t,{AlertMasterCard:function(){return aZ},Animation:function(){return ub},Box:function(){return sg},Button:function(){return ig},ButtonGroup:function(){return Vf},Checkbox:function(){return $f},Collapsible:function(){return bb},ConfirmationDialog:function(){return IZ},DarkTheme:function(){return k},DefaultTheme:function(){return b},Documentation:function(){return wy},DraggableTabs:function(){return dx},Drop:function(){return Lg},DropContainer:function(){return mf},Flex:function(){return rg},GlobalStyles:function(){return Q_},H0:function(){return Qg},H1:function(){return Yg},H2:function(){return Kg},H3:function(){return Xg},H4:function(){return $g},H5:function(){return Jg},H6:function(){return ef},Icon:function(){return Xv},IconButton:function(){return Mf},IconComponents:function(){return o},InputRange:function(){return mp},Intersection:function(){return Zx},Layer:function(){return ib},List:function(){return Wg},ListItem:function(){return qg},MasterCard:function(){return vZ},Menu:function(){return Iw},MenuButton:function(){return gw},MenuDropdown:function(){return Mw},MenuDropdownItem:function(){return Vw},MenuItemContainer:function(){return Sw},Modal:function(){return HZ},ModalBody:function(){return _Z},ModalButton:function(){return MZ},ModalCloseButton:function(){return kZ},ModalContent:function(){return BZ},ModalFooter:function(){return CZ},ModalHeader:function(){return OZ},MultiRangeInput:function(){return yp},NavigationTab:function(){return Ty},NavigationTabs:function(){return Ey},News:function(){return ky},Pill:function(){return Gx},Popover:function(){return dw},PortalSidebar:function(){return Nf},ProgressBar:function(){return Qx},RadioButton:function(){return Np},SearchInput:function(){return Zw},Select:function(){return VB},Sidebar:function(){return If},Tab:function(){return Jp},TabSeparator:function(){return Ry},Table:function(){return q_},Tabs:function(){return 
rw},Text:function(){return af},TextBig:function(){return lf},TextBigger:function(){return cf},TextFemto:function(){return tf},TextHuge:function(){return sf},TextInput:function(){return up},TextMicro:function(){return rf},TextNano:function(){return nf},TextSmall:function(){return of},Toggle:function(){return Lp},Tooltip:function(){return Zf},alignSelf:function(){return $},breakpoints:function(){return Uf},controlFocused:function(){return Op},controlReset:function(){return _p},cursor:function(){return Wv},devices:function(){return Gf},getColor:function(){return A},getOrElse:function(){return P},getRgbColor:function(){return D},getSizeBy:function(){return F},getSizeUnit:function(){return L},iconsList:function(){return Uv},makeBig:function(){return Xm},makeBigger:function(){return $m},makeBox:function(){return cg},makeFemto:function(){return Wm},makeFlex:function(){return ng},makeH0:function(){return Im},makeH1:function(){return zm},makeH2:function(){return Tm},makeH3:function(){return Rm},makeH4:function(){return Nm},makeH5:function(){return Gm},makeH6:function(){return Um},makeHuge:function(){return Jm},makeMicro:function(){return Qm},makeNano:function(){return qm},makeSmall:function(){return Ym},makeText:function(){return Km},makeTypography:function(){return Fm},margin:function(){return R},opacity:function(){return Jv},padding:function(){return N},position:function(){return ne},propOrElse:function(){return V},round:function(){return K},textTransform:function(){return ee},useCheckboxesList:function(){return Jf},useFocusedState:function(){return Zp},useInputValue:function(){return Bp},useIntersection:function(){return wx},useNavigationArrows:function(){return Wy},useTouchedState:function(){return xp},webkitVisibleScrollbar:function(){return Cp},zIndex:function(){return em}});var r={};n.r(r),n.d(r,{GUTTER_HEIGHT:function(){return g},SIZE_SUB_UNIT:function(){return v},SIZE_UNIT:function(){return m}});var o={};n.r(o),n.d(o,{LoaderIcon:function(){return Ze}});var a=n(67294),i=n(71893),l=n(84967),c={transparent:{full:"rgba(255,255,255,0.0)",semi:"rgba(255, 255, 255, 0.5)",popover:"rgba(18, 36, 50, 
0.9)"},green:{poker:"#2f5446",chateau:"#42B861",netdata:"#00AB44",deyork:"#68C47D",vista:"#96D4A2",fringyFlower:"#BFE5C6",frostee:"#E5F5E8",limeGreen:"#48E499",green10:"#001107",green20:"#00220E",green30:"#003314",green40:"#00441B",green50:"#005622",green60:"#006729",green70:"#00783",green80:"#008936",green90:"#009A3D",green100:"#00AB44",green110:"#00CD51",green120:"#00EF5F",green130:"#12FF70",green140:"#34FF84",green150:"#56FF99",green160:"#77FFAD",green170:"#99FFC2",green180:"#BBFFD6",green190:"#DDFFEB",green195:"#EEFFF5",green196:"#F1FFF7",green197:"#F5FFF9",green198:"#F8FFFB",green199:"#FCFFFD",green200:"#09AB49",green300:"#13A94F",green400:"#1DA754",green500:"#29A45A",green600:"#35A060",green700:"#439B66",green800:"#51966C",green900:"#608F73",green1000:"#6F8879"},red:{pomegranate:"#FF4136",carnation:"#F95251",apricot:"#ED7374",wewak:"#F59B9B",pastelpink:"#FFCED3",lavender:"#FFEBEF",red10:"#160205",red20:"#2C0409",red30:"#42070E",red40:"#580913",red50:"#6E0B18",red60:"#830D1C",red70:"#990F21",red80:"#AF1226",red90:"#C5142A",red100:"#DB162F",red110:"#E9233C",red120:"#EB3B52",red130:"#EE5467",red140:"#F06C7D",red150:"#F38593",red160:"#F59DA8",red170:"#F8B6BE",red180:"#FACED4",red190:"#FDE7E9",red200:"#D22037",red300:"#CA2A3E",red400:"#C13546",red500:"#B83F4E",red600:"#AF4956",red700:"#A6545F",red800:"#9D5F67",red900:"#936A6F",red1000:"#8A7577"},yellow:{amber:"#FFC300",sunglow:"#FFCC26",seaBuckthorn:"#F9A825",mustard:"#FFD74F",salomie:"#FFE182",buttermilk:"#FFEDB3",ginfizz:"#FFF8E1",yellow10:"#201300",yellow20:"#402600",yellow30:"#603900",yellow40:"#804B00",yellow50:"#A05E00",yellow60:"#BF7100",yellow70:"#DF8400",yellow80:"#FF9700",yellow90:"#FFA420",yellow100:"#FFB140",yellow110:"#FFB953",yellow120:"#FFC166",yellow130:"#FFC879",yellow140:"#FFD08C",yellow150:"#FFD8A0",yellow160:"#FFE0B3",yellow170:"#FFE8C6",yellow180:"#FFEFD9",yellow190:"#FFF7EC",yellow200:"#F5AD44",yellow300:"#EBA848",yellow400:"#E0A44D",yellow500:"#D49F52",yellow600:"#C79A58",yellow700:"#BA955F",yellow800:"#AD9066",yellow900:"#9E8B6E",yellow1000:"#908577"},neutral:{white:"#FFFFFF",black:"#000000",limedSpruce:"#35414A",regentgrey:"#8F9EAA",blackhaze:"#F7F8F8",brightGrey:"#E9ECEC",chineseWhite:"#DEE3E3",iron:"#CFD5DA",porcelain:"#ECEEEF",bluebayoux:"#536775",shark:"#1C1E22",tuna:"#383B40",outerSpace:"#2B3136",ratsbane:"#3E4551",arsenic:"#353B45",gunmetal:"#282C34",darkGunmetal:"#21252B",eerieBlack:"#181c20",grey05:"#040505",grey10:"#080A0A",grey15:"#0C0F0F",grey20:"#101313",grey25:"#151818",grey30:"#191D1D",grey35:"#1D2222",grey40:"#212727",grey45:"#252C2C",grey50:"#293030",grey55:"#2D3535",grey60:"#313A3A",grey65:"#353F3F",grey70:"#394444",grey75:"#3D4949",grey80:"#424E4E",grey85:"#465252",grey90:"#4A5757",grey95:"#4E5C5C",grey100:"#526161",grey105:"#5A6A6A",grey110:"#617373",grey115:"#697C7C",grey120:"#708585",grey125:"#788D8D",grey130:"#819595",grey135:"#8A9C9C",grey140:"#93A4A4",grey145:"#9CACAC",grey150:"#A5B3B3",grey155:"#AEBBBB",grey160:"#B7C2C2",grey165:"#C0CACA",grey170:"#C9D2D2",grey175:"#D2D9D9",grey180:"#DBE1E1",grey185:"#E4E8E8",grey190:"#EDF0F0",grey195:"#F6F7F7"},purple:{mauve:"#DB94F4",mauveDark:"#CB66EF",mauveFocus:"#EBC2F9",daisy:"#563D7C",lilac:"#B596F8",lilacLite:"#C6AEFA",lilacFocus:"#824EF3"},blue:{aquamarine:"#19C89E",indigo:"#5790FF",cyan:"#00BAE2"},shadows:{dropdownLight:"rgba(9, 30, 66, 0.15)",dropdownDark:"rgba(0, 0, 0, 0.4)"}};function s(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return 
Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function u(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n>16&255)+", "+(a>>8&255)+", "+(255&a)+", "+t+")"}},F=function(e){return void 0===e&&(e=1),function(t){return isNaN(e)?e:(L(t)||0)*e+"px"}},I=function(e,t){return void 0===e&&(e="border"),void 0===t&&(t="disabled"),function(n){var r=n.theme,o=n.success,a=n.error,i=n.disabled;return o?A(["success"])({theme:r}):a?A(["error"])({theme:r}):i?A([t])({theme:r}):A([e])({theme:r})}},z=function(e,t){return"number"===typeof t?0===(n=e.constants.SIZE_SUB_UNIT*t)?"0":n+"px":"auto";var n},T=function(e,t){return t.map((function(t){return z(e,t)})).join(" ")},R=function(e){var t=e.theme,n=e.margin;return n?Array.isArray(n)&&n.length>=1&&n.length<=4?"margin: "+T(t,n)+";":(console.error("Please provide an array (max 4 elements) for `margin` style helper."),""):""},N=function(e){var t=e.theme,n=e.padding;return n?Array.isArray(n)&&n.length>=1&&n.length<=4?"padding: "+T(t,n)+";":(console.error("Please provide an array (max 4 elements) for `padding` style helper."),""):""},G=function(e,t){return!0===t?e+"px":"number"===typeof t?e*t+"px":"string"===typeof t?t:""},U=function(e,t){return"border-top-left-radius: "+G(e,t)+";"},W=function(e,t){return"border-top-right-radius: "+G(e,t)+";"},q=function(e,t){return"border-bottom-left-radius: "+G(e,t)+";"},Q=function(e,t){return"border-bottom-right-radius: "+G(e,t)+";"},Y={top:function(e,t){return"\n "+U(e,t)+"\n "+W(e,t)+"\n "},left:function(e,t){return"\n "+U(e,t)+"\n "+q(e,t)+"\n "},bottom:function(e,t){return"\n "+q(e,t)+"\n "+Q(e,t)+"\n "},right:function(e,t){return"\n "+W(e,t)+"\n "+Q(e,t)+"\n "},"top-left":U,"top-right":W,"bottom-left":q,"bottom-right":Q},K=function(e){var t=e.theme.constants.SIZE_SUB_UNIT,n=e.round;if(!n)return"";var r=G(t,n);if(r)return"border-radius: "+r+";";var o=n.side,a=n.size,i=void 0===a?1:a;return o in Y?""+Y[o](t,i):""},X={end:"flex-end",start:"flex-start",center:"center",stretch:"stretch"},$=function(e){var t=e.alignSelf;return t in X&&"align-self: "+X[t]+";"},J={none:"none",capitalize:"capitalize",uppercase:"uppercase",lowercase:"lowercase",firstLetter:"firstLetter",fullWidth:"full-width"},ee=function(e){var t=(void 0===e?{}:e).textTransform,n=void 0===t?J.none:t;return n===J.firstLetter?"text-transform: lowercase;\n &::first-letter {\n text-transform: uppercase;\n }\n":n in J?"text-transform: "+J[n]+";":"text-transform: "+J.none+";"},te={static:"static",absolute:"absolute",fixed:"fixed",relative:"relative",sticky:"sticky",initial:"initial",inherit:"inherit"},ne=function(e){var t=e.position;return t in te?"position: "+t+";":""},re=n(75980),oe="default",ae="hollow",ie="borderless",le=["groupFirst","groupLast","groupMiddle"];function ce(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function se(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,le);return se({padding:o.padding||o.tiny?[.5,1]:o.small?[1,3]:[2],colors:be(o),round:t?{side:"left"}:n?{side:"right"}:!r},function(e){return 
e.themeType?se(se({},e),{},{theme:de[e.themeType]}):se(se({},e),{},{theme:e.theme})}(o))})).withConfig({displayName:"styled__StyledButton",componentId:"sc-14wh25s-0"})(["&&{display:flex;justify-content:center;align-items:center;position:relative;",";font-weight:",";font-size:",";white-space:nowrap;word-break:keep-all;cursor:pointer;opacity:",";pointer-events:",";"," "," transition:all 150ms;background-color:",";color:",";border-width:1px;border-style:solid;border-color:",";"," box-sizing:border-box;"," "," text-decoration:none;& > span{",";margin-left:",";}&:hover{border-color:",";background-color:",";color:",";text-decoration:none;.button-icon{fill:",";}}&:active{","}"," &:focus{outline:none;}.button-icon{height:",";width:",";fill:",";}.ntd-spinner{fill:none;stroke-width:17px;stroke-dasharray:100;stroke-dashoffset:100;animation:ntd-draw 1s linear infinite;stroke:",";width:24px;}.path{stroke:",";}@keyframes ntd-draw{to{stroke-dashoffset:0;}}}"],$,(function(e){return e.strong?700:500}),(function(e){var t=e.small;return e.tiny?"10px":t?"12px":"14px"}),(function(e){return e.disabled?.4:1}),(function(e){return e.disabled?"none":"auto"}),R,N,(function(e){return e.colors.bg(e)}),(function(e){return e.colors.color(e)}),(function(e){return e.colors.border(e)}),K,ne,re.position,ee,(function(e){return e.hasIcon?"4px":"0px"}),(function(e){return e.colors.borderHover(e)}),(function(e){return e.colors.bgHover(e)}),(function(e){return e.colors.colorHover(e)}),(function(e){return e.colors.colorHover(e)}),he,(function(e){return e.active&&"\n "+he+"\n "}),F(2),F(2),(function(e){return e.colors.color(e)}),(function(e){return e.colors.color(e)}),(function(e){return e.colors.color(e)})),xe=i.default.svg.withConfig({displayName:"loader__StyledSvg",componentId:"sc-fxhmqg-0"})(["fill:none;stroke-width:17px;stroke-dasharray:100;stroke-dashoffset:100;animation:ntd-draw 1s linear infinite;stroke:",";width:24px;.path{stroke:",";}@keyframes ntd-draw{to{stroke-dashoffset:0;}}"],A("bright"),A("bright")),Ze=function(e){var t=e.className;return a.createElement(xe,{className:t,viewBox:"0 0 21 17",version:"1.1",xmlns:"http://www.w3.org/2000/svg"},a.createElement("g",{className:"path",stroke:"none",strokeWidth:"1",fill:"none",fillRule:"evenodd"},a.createElement("path",{d:"M2,1 C8.25086152,1 11.9367136,1 13.0575562,1 C14.73882,1 19.6834591,2 19.9614325,7.72050108 C20.239406,13.4410022 15.7459591,15.1224845 13.6463763,15.1224845 C12.2466545,15.1224845 10.0279195,15.1224845 6.9901715,15.1224845 L2,1 Z",id:"Path-2",strokeWidth:"2"})))},Be=n(87854),Oe=n.n(Be),_e=n(95348),Ce=n.n(_e),Me=new(Oe())({id:"add_node",use:"add_node-usage",viewBox:"0 0 18 18",content:''}),ke=(Ce().add(Me),Me),He=new(Oe())({id:"add_user",use:"add_user-usage",viewBox:"0 0 15 16",content:''}),je=(Ce().add(He),He),Ee=new(Oe())({id:"aggregation_avg",use:"aggregation_avg-usage",viewBox:"0 0 16 12",content:''}),Se=(Ce().add(Ee),Ee),Ve=new(Oe())({id:"aggregation_max",use:"aggregation_max-usage",viewBox:"0 0 15 16",content:''}),Pe=(Ce().add(Ve),Ve),Le=new(Oe())({id:"aggregation_med",use:"aggregation_med-usage",viewBox:"0 0 14 14",content:''}),Ae=(Ce().add(Le),Le),De=new(Oe())({id:"aggregation_min",use:"aggregation_min-usage",viewBox:"0 0 15 16",content:''}),Fe=(Ce().add(De),De),Ie=new(Oe())({id:"aggregation_sum",use:"aggregation_sum-usage",viewBox:"0 0 12 14",content:''}),ze=(Ce().add(Ie),Ie),Te=new(Oe())({id:"aggregation_sum_abs",use:"aggregation_sum_abs-usage",viewBox:"0 0 14 
14",content:''}),Re=(Ce().add(Te),Te),Ne=new(Oe())({id:"alarm",use:"alarm-usage",viewBox:"0 0 18 21",content:''}),Ge=(Ce().add(Ne),Ne),Ue=new(Oe())({id:"alarm_c",use:"alarm_c-usage",viewBox:"0 0 24 24",content:''}),We=(Ce().add(Ue),Ue),qe=new(Oe())({id:"alarm_cw",use:"alarm_cw-usage",viewBox:"0 0 24 24",content:''}),Qe=(Ce().add(qe),qe),Ye=new(Oe())({id:"alarmFilled",use:"alarmFilled-usage",viewBox:"0 0 18 18",content:''}),Ke=(Ce().add(Ye),Ye),Xe=new(Oe())({id:"alarm_w",use:"alarm_w-usage",viewBox:"0 0 24 24",content:''}),$e=(Ce().add(Xe),Xe),Je=new(Oe())({id:"alarm_bell",use:"alarm_bell-usage",viewBox:"0 0 12 14",content:''}),et=(Ce().add(Je),Je),tt=new(Oe())({id:"alarms_new",use:"alarms_new-usage",viewBox:"0 0 22 20",content:''}),nt=(Ce().add(tt),tt),rt=new(Oe())({id:"alarm_off",use:"alarm_off-usage",viewBox:"0 0 15 16",content:''}),ot=(Ce().add(rt),rt),at=new(Oe())({id:"anomalies_brain",use:"anomalies_brain-usage",viewBox:"0 0 18 18",content:''}),it=(Ce().add(at),at),lt=new(Oe())({id:"anomalies_lens",use:"anomalies_lens-usage",viewBox:"0 0 18 18",content:''}),ct=(Ce().add(lt),lt),st=new(Oe())({id:"anomaly_badge",use:"anomaly_badge-usage",viewBox:"0 0 15 16",content:''}),ut=(Ce().add(st),st),dt=new(Oe())({id:"applications_hollow",use:"applications_hollow-usage",viewBox:"0 0 18 18",content:''}),ht=(Ce().add(dt),dt),vt=new(Oe())({id:"around_clock",use:"around_clock-usage",viewBox:"0 0 16 16",content:''}),mt=(Ce().add(vt),vt),gt=new(Oe())({id:"arrow_down",use:"arrow_down-usage",viewBox:"0 0 16 16",content:''}),ft=(Ce().add(gt),gt),pt=new(Oe())({id:"arrow_w_line_left",use:"arrow_w_line_left-usage",viewBox:"0 0 24 24",content:''}),wt=(Ce().add(pt),pt),bt=new(Oe())({id:"arrow_w_line_right",use:"arrow_w_line_right-usage",viewBox:"0 0 24 24",content:''}),yt=(Ce().add(bt),bt),xt=new(Oe())({id:"arrow_left",use:"arrow_left-usage",viewBox:"0 0 24 24",content:''}),Zt=(Ce().add(xt),xt),Bt=new(Oe())({id:"arrow-s_down",use:"arrow-s_down-usage",viewBox:"0 0 8 9",content:''}),Ot=(Ce().add(Bt),Bt),_t=new(Oe())({id:"arrow-s_left",use:"arrow-s_left-usage",viewBox:"0 0 8 9",content:''}),Ct=(Ce().add(_t),_t),Mt=new(Oe())({id:"arrows_vertical",use:"arrows_vertical-usage",viewBox:"0 0 6 10",content:''}),kt=(Ce().add(Mt),Mt),Ht=new(Oe())({id:"bookmark",use:"bookmark-usage",viewBox:"0 0 12 14",content:''}),jt=(Ce().add(Ht),Ht),Et=new(Oe())({id:"bullet_one",use:"bullet_one-usage",viewBox:"0 0 10 10",content:''}),St=(Ce().add(Et),Et),Vt=new(Oe())({id:"bullet_three",use:"bullet_three-usage",viewBox:"0 0 10 10",content:''}),Pt=(Ce().add(Vt),Vt),Lt=new(Oe())({id:"bullet_two",use:"bullet_two-usage",viewBox:"0 0 10 10",content:''}),At=(Ce().add(Lt),Lt),Dt=new(Oe())({id:"calendar_full",use:"calendar_full-usage",viewBox:"0 0 18 18",content:''}),Ft=(Ce().add(Dt),Dt),It=new(Oe())({id:"calendar_full_press",use:"calendar_full_press-usage",viewBox:"0 0 18 18",content:''}),zt=(Ce().add(It),It),Tt=new(Oe())({id:"chart_added",use:"chart_added-usage",viewBox:"0 0 17 17",content:''}),Rt=(Ce().add(Tt),Tt),Nt=new(Oe())({id:"charts",use:"charts-usage",viewBox:"0 0 20 20",content:''}),Gt=(Ce().add(Nt),Nt),Ut=new(Oe())({id:"check",use:"check-usage",viewBox:"0 0 24 24",content:''}),Wt=(Ce().add(Ut),Ut),qt=new(Oe())({id:"checkmark_partial_s",use:"checkmark_partial_s-usage",viewBox:"0 0 16 16",content:''}),Qt=(Ce().add(qt),qt),Yt=new(Oe())({id:"checkmark_s",use:"checkmark_s-usage",viewBox:"0 0 16 16",content:''}),Kt=(Ce().add(Yt),Yt),Xt=new(Oe())({id:"checkmark",use:"checkmark-usage",viewBox:"0 0 16 
16",content:''}),$t=(Ce().add(Xt),Xt),Jt=new(Oe())({id:"chevron_double",use:"chevron_double-usage",viewBox:"0 0 6 10",content:''}),en=(Ce().add(Jt),Jt),tn=new(Oe())({id:"chevron_down",use:"chevron_down-usage",viewBox:"0 0 12 12",content:''}),nn=(Ce().add(tn),tn),rn=new(Oe())({id:"chevron_down_thin",use:"chevron_down_thin-usage",viewBox:"0 0 16 24",content:''}),on=(Ce().add(rn),rn),an=new(Oe())({id:"chevron_expand",use:"chevron_expand-usage",viewBox:"0 0 8 6",content:''}),ln=(Ce().add(an),an),cn=new(Oe())({id:"chevron_left",use:"chevron_left-usage",viewBox:"0 0 18 18",content:''}),sn=(Ce().add(cn),cn),un=new(Oe())({id:"chevron_left_start",use:"chevron_left_start-usage",viewBox:"0 0 18 18",content:''}),dn=(Ce().add(un),un),hn=new(Oe())({id:"chevron_left_small",use:"chevron_left_small-usage",viewBox:"0 0 5 6",content:''}),vn=(Ce().add(hn),hn),mn=new(Oe())({id:"chevron_right",use:"chevron_right-usage",viewBox:"0 0 18 18",content:''}),gn=(Ce().add(mn),mn),fn=new(Oe())({id:"chevron_right_s",use:"chevron_right_s-usage",viewBox:"0 0 5 6",content:''}),pn=(Ce().add(fn),fn),wn=new(Oe())({id:"chevron_right_end",use:"chevron_right_end-usage",viewBox:"0 0 18 18",content:''}),bn=(Ce().add(wn),wn),yn=new(Oe())({id:"chevron_right_small",use:"chevron_right_small-usage",viewBox:"0 0 5 6",content:''}),xn=(Ce().add(yn),yn),Zn=new(Oe())({id:"chevron_up_thin",use:"chevron_up_thin-usage",viewBox:"0 0 16 24",content:''}),Bn=(Ce().add(Zn),Zn),On=new(Oe())({id:"class_error",use:"class_error-usage",viewBox:"0 0 21 22",content:''}),_n=(Ce().add(On),On),Cn=new(Oe())({id:"class_latency",use:"class_latency-usage",viewBox:"0 0 21 20",content:''}),Mn=(Ce().add(Cn),Cn),kn=new(Oe())({id:"class_utilization",use:"class_utilization-usage",viewBox:"0 0 25 19",content:''}),Hn=(Ce().add(kn),kn),jn=new(Oe())({id:"class_workload",use:"class_workload-usage",viewBox:"0 0 22 21",content:''}),En=(Ce().add(jn),jn),Sn=new(Oe())({id:"clock_hollow",use:"clock_hollow-usage",viewBox:"0 0 24 24",content:''}),Vn=(Ce().add(Sn),Sn),Pn=new(Oe())({id:"clock_5_min",use:"clock_5_min-usage",viewBox:"0 0 18 18",content:''}),Ln=(Ce().add(Pn),Pn),An=new(Oe())({id:"clock_5_min_press",use:"clock_5_min_press-usage",viewBox:"0 0 18 18",content:''}),Dn=(Ce().add(An),An),Fn=new(Oe())({id:"close_circle",use:"close_circle-usage",viewBox:"0 0 10 10",content:''}),In=(Ce().add(Fn),Fn),zn=new(Oe())({id:"cluster",use:"cluster-usage",viewBox:"0 0 22 22",content:''}),Tn=(Ce().add(zn),zn),Rn=new(Oe())({id:"cluster_spaces",use:"cluster_spaces-usage",viewBox:"0 0 22 22",content:''}),Nn=(Ce().add(Rn),Rn),Gn=new(Oe())({id:"code",use:"code-usage",viewBox:"0 0 16 16",content:''}),Un=(Ce().add(Gn),Gn),Wn=new(Oe())({id:"collapse",use:"collapse-usage",viewBox:"0 0 16 2",content:''}),qn=(Ce().add(Wn),Wn),Qn=new(Oe())({id:"collect",use:"collect-usage",viewBox:"0 0 24 24",content:''}),Yn=(Ce().add(Qn),Qn),Kn=new(Oe())({id:"community",use:"community-usage",viewBox:"0 0 18 18",content:''}),Xn=(Ce().add(Kn),Kn),$n=new(Oe())({id:"connection_to_cloud",use:"connection_to_cloud-usage",viewBox:"0 0 18 18",content:''}),Jn=(Ce().add($n),$n),er=new(Oe())({id:"connectivity_status_live",use:"connectivity_status_live-usage",viewBox:"0 0 18 18",content:''}),tr=(Ce().add(er),er),nr=new(Oe())({id:"connectivity_status_offline",use:"connectivity_status_offline-usage",viewBox:"0 0 18 18",content:''}),rr=(Ce().add(nr),nr),or=new(Oe())({id:"connectivity_status_stale",use:"connectivity_status_stale-usage",viewBox:"0 0 18 
18",content:''}),ar=(Ce().add(or),or),ir=new(Oe())({id:"container",use:"container-usage",viewBox:"0 0 22 22",content:''}),lr=(Ce().add(ir),ir),cr=new(Oe())({id:"controller_kind",use:"controller_kind-usage",viewBox:"0 0 22 22",content:''}),sr=(Ce().add(cr),cr),ur=new(Oe())({id:"controller_name",use:"controller_name-usage",viewBox:"0 0 22 22",content:''}),dr=(Ce().add(ur),ur),hr=new(Oe())({id:"copy",use:"copy-usage",viewBox:"0 0 14 14",content:''}),vr=(Ce().add(hr),hr),mr=new(Oe())({id:"correlation",use:"correlation-usage",viewBox:"0 0 28 28",content:''}),gr=(Ce().add(mr),mr),fr=new(Oe())({id:"correlation_inv",use:"correlation_inv-usage",viewBox:"0 0 24 24",content:''}),pr=(Ce().add(fr),fr),wr=new(Oe())({id:"cpu",use:"cpu-usage",viewBox:"0 0 18 18",content:''}),br=(Ce().add(wr),wr),yr=new(Oe())({id:"cross_s",use:"cross_s-usage",viewBox:"0 0 16 16",content:''}),xr=(Ce().add(yr),yr),Zr=new(Oe())({id:"data_retention",use:"data_retention-usage",viewBox:"0 0 18 18",content:''}),Br=(Ce().add(Zr),Zr),Or=new(Oe())({id:"database",use:"database-usage",viewBox:"0 0 24 24",content:''}),_r=(Ce().add(Or),Or),Cr=new(Oe())({id:"dashboard",use:"dashboard-usage",viewBox:"0 0 22 18",content:''}),Mr=(Ce().add(Cr),Cr),kr=new(Oe())({id:"dashboard_add",use:"dashboard_add-usage",viewBox:"0 0 16 16",content:''}),Hr=(Ce().add(kr),kr),jr=new(Oe())({id:"dashboards",use:"dashboards-usage",viewBox:"0 0 16 10",content:''}),Er=(Ce().add(jr),jr),Sr=new(Oe())({id:"disk",use:"disk-usage",viewBox:"0 0 18 18",content:''}),Vr=(Ce().add(Sr),Sr),Pr=new(Oe())({id:"documentation",use:"documentation-usage",viewBox:"0 0 24 24",content:''}),Lr=(Ce().add(Pr),Pr),Ar=new(Oe())({id:"dot",use:"dot-usage",viewBox:"0 0 10 10",content:''}),Dr=(Ce().add(Ar),Ar),Fr=new(Oe())({id:"dots_2x3",use:"dots_2x3-usage",viewBox:"0 0 6 10",content:''}),Ir=(Ce().add(Fr),Fr),zr=new(Oe())({id:"download",use:"download-usage",viewBox:"0 0 20 20",content:''}),Tr=(Ce().add(zr),zr),Rr=new(Oe())({id:"drag_horizontal",use:"drag_horizontal-usage",viewBox:"0 0 24 24",content:''}),Nr=(Ce().add(Rr),Rr),Gr=new(Oe())({id:"edit",use:"edit-usage",viewBox:"0 0 16 16",content:''}),Ur=(Ce().add(Gr),Gr),Wr=new(Oe())({id:"error",use:"error-usage",viewBox:"0 0 24 24",content:''}),qr=(Ce().add(Wr),Wr),Qr=new(Oe())({id:"exclamation",use:"exclamation-usage",viewBox:"0 0 24 24",content:''}),Yr=(Ce().add(Qr),Qr),Kr=new(Oe())({id:"expand",use:"expand-usage",viewBox:"0 0 24 24",content:''}),Xr=(Ce().add(Kr),Kr),$r=new(Oe())({id:"favorites",use:"favorites-usage",viewBox:"0 0 18 18",content:''}),Jr=(Ce().add($r),$r),eo=new(Oe())({id:"feed",use:"feed-usage",viewBox:"0 0 18 18",content:''}),to=(Ce().add(eo),eo),no=new(Oe())({id:"filter",use:"filter-usage",viewBox:"0 0 24 24",content:''}),ro=(Ce().add(no),no),oo=new(Oe())({id:"filterList",use:"filterList-usage",viewBox:"0 0 18 18",content:''}),ao=(Ce().add(oo),oo),io=new(Oe())({id:"force_play",use:"force_play-usage",viewBox:"0 0 18 18",content:''}),lo=(Ce().add(io),io),co=new(Oe())({id:"force_play_outline",use:"force_play_outline-usage",viewBox:"0 0 18 18",content:''}),so=(Ce().add(co),co),uo=new(Oe())({id:"functions",use:"functions-usage",viewBox:"0 0 18 18",content:''}),ho=(Ce().add(uo),uo),vo=new(Oe())({id:"full_screen",use:"full_screen-usage",viewBox:"0 0 18 18",content:''}),mo=(Ce().add(vo),vo),go=new(Oe())({id:"gear",use:"gear-usage",viewBox:"0 0 20 20",content:''}),fo=(Ce().add(go),go),po=new(Oe())({id:"github",use:"github-usage",viewBox:"0 0 24 
24",content:''}),wo=(Ce().add(po),po),bo=new(Oe())({id:"go_to_node",use:"go_to_node-usage",viewBox:"0 0 18 18",content:''}),yo=(Ce().add(bo),bo),xo=new(Oe())({id:"google",use:"google-usage",viewBox:"0 0 24 24",content:''}),Zo=(Ce().add(xo),xo),Bo=new(Oe())({id:"group_by",use:"group_by-usage",viewBox:"0 0 18 18",content:''}),Oo=(Ce().add(Bo),Bo),_o=new(Oe())({id:"hamburger",use:"hamburger-usage",viewBox:"0 0 24 24",content:''}),Co=(Ce().add(_o),_o),Mo=new(Oe())({id:"help",use:"help-usage",viewBox:"0 0 20 21",content:''}),ko=(Ce().add(Mo),Mo),Ho=new(Oe())({id:"hide",use:"hide-usage",viewBox:"0 0 18 18",content:''}),jo=(Ce().add(Ho),Ho),Eo=new(Oe())({id:"highlight_area",use:"highlight_area-usage",viewBox:"0 0 16 16",content:''}),So=(Ce().add(Eo),Eo),Vo=new(Oe())({id:"holder",use:"holder-usage",viewBox:"0 0 24 24",content:''}),Po=(Ce().add(Vo),Vo),Lo=new(Oe())({id:"importExport",use:"importExport-usage",viewBox:"0 0 16 16",content:''}),Ao=(Ce().add(Lo),Lo),Do=new(Oe())({id:"incident_manager",use:"incident_manager-usage",viewBox:"0 0 18 18",content:''}),Fo=(Ce().add(Do),Do),Io=new(Oe())({id:"information",use:"information-usage",viewBox:"0 0 18 18",content:''}),zo=(Ce().add(Io),Io),To=new(Oe())({id:"information_press",use:"information_press-usage",viewBox:"0 0 18 18",content:''}),Ro=(Ce().add(To),To),No=new(Oe())({id:"insights",use:"insights-usage",viewBox:"0 0 18 18",content:''}),Go=(Ce().add(No),No),Uo=new(Oe())({id:"aws_sns",use:"aws_sns-usage",viewBox:"0 0 24 24",content:''}),Wo=(Ce().add(Uo),Uo),qo=new(Oe())({id:"aws_sns_colored",use:"aws_sns_colored-usage",viewBox:"0 0 24 24",content:''}),Qo=(Ce().add(qo),qo),Yo=new(Oe())({id:"discord",use:"discord-usage",viewBox:"0 0 24 24",content:''}),Ko=(Ce().add(Yo),Yo),Xo=new(Oe())({id:"discord_colored",use:"discord_colored-usage",viewBox:"0 0 24 24",content:''}),$o=(Ce().add(Xo),Xo),Jo=new(Oe())({id:"email",use:"email-usage",viewBox:"0 0 24 24",content:''}),ea=(Ce().add(Jo),Jo),ta=new(Oe())({id:"email_colored",use:"email_colored-usage",viewBox:"0 0 24 24",content:''}),na=(Ce().add(ta),ta),ra=new(Oe())({id:"mattermost",use:"mattermost-usage",viewBox:"0 0 700 700",content:''}),oa=(Ce().add(ra),ra),aa=new(Oe())({id:"mattermost_colored",use:"mattermost_colored-usage",viewBox:"0 0 700 700",content:''}),ia=(Ce().add(aa),aa),la=new(Oe())({id:"mobile_app_colored",use:"mobile_app_colored-usage",viewBox:"0 0 25 24",content:''}),ca=(Ce().add(la),la),sa=new(Oe())({id:"opsgenie",use:"opsgenie-usage",viewBox:"0 0 16 16",content:''}),ua=(Ce().add(sa),sa),da=new(Oe())({id:"opsgenie_colored",use:"opsgenie_colored-usage",viewBox:"0 0 16 16",content:''}),ha=(Ce().add(da),da),va=new(Oe())({id:"pagerduty",use:"pagerduty-usage",viewBox:"0 0 24 24",content:''}),ma=(Ce().add(va),va),ga=new(Oe())({id:"pagerduty_colored",use:"pagerduty_colored-usage",viewBox:"0 0 24 24",content:''}),fa=(Ce().add(ga),ga),pa=new(Oe())({id:"rocketChat",use:"rocketChat-usage",viewBox:"0 0 16 16",content:''}),wa=(Ce().add(pa),pa),ba=new(Oe())({id:"rocketChat_colored",use:"rocketChat_colored-usage",viewBox:"0 0 16 16",content:''}),ya=(Ce().add(ba),ba),xa=new(Oe())({id:"integrations",use:"integrations-usage",viewBox:"0 0 16 16",content:''}),Za=(Ce().add(xa),xa),Ba=new(Oe())({id:"slack",use:"slack-usage",viewBox:"0 0 24 24",content:''}),Oa=(Ce().add(Ba),Ba),_a=new(Oe())({id:"slack_colored",use:"slack_colored-usage",viewBox:"0 0 24 24",content:''}),Ca=(Ce().add(_a),_a),Ma=new(Oe())({id:"telegram",use:"telegram-usage",viewBox:"0 0 24 
24",content:''}),ka=(Ce().add(Ma),Ma),Ha=new(Oe())({id:"telegram_colored",use:"telegram_colored-usage",viewBox:"0 0 24 24",content:''}),ja=(Ce().add(Ha),Ha),Ea=new(Oe())({id:"webhook",use:"webhook-usage",viewBox:"0 0 24 24",content:''}),Sa=(Ce().add(Ea),Ea),Va=new(Oe())({id:"webhook_colored",use:"webhook_colored-usage",viewBox:"0 0 24 24",content:''}),Pa=(Ce().add(Va),Va),La=new(Oe())({id:"ipNetworking",use:"ipNetworking-usage",viewBox:"0 0 16 16",content:''}),Aa=(Ce().add(La),La),Da=new(Oe())({id:"ipNetworkingPress",use:"ipNetworkingPress-usage",viewBox:"0 0 16 16",content:''}),Fa=(Ce().add(Da),Da),Ia=new(Oe())({id:"last_week",use:"last_week-usage",viewBox:"0 0 18 18",content:''}),za=(Ce().add(Ia),Ia),Ta=new(Oe())({id:"line_chart",use:"line_chart-usage",viewBox:"0 0 15 15",content:''}),Ra=(Ce().add(Ta),Ta),Na=new(Oe())({id:"logo_s",use:"logo_s-usage",viewBox:"0 0 14 13",content:''}),Ga=(Ce().add(Na),Na),Ua=new(Oe())({id:"loading",use:"loading-usage",viewBox:"0 0 24 24",content:''}),Wa=(Ce().add(Ua),Ua),qa=new(Oe())({id:"magnify",use:"magnify-usage",viewBox:"0 0 24 24",content:''}),Qa=(Ce().add(qa),qa),Ya=new(Oe())({id:"metrics",use:"metrics-usage",viewBox:"0 0 24 24",content:''}),Ka=(Ce().add(Ya),Ya),Xa=new(Oe())({id:"metrics_explorer",use:"metrics_explorer-usage",viewBox:"0 0 18 18",content:''}),$a=(Ce().add(Xa),Xa),Ja=new(Oe())({id:"minimize_s",use:"minimize_s-usage",viewBox:"0 0 18 18",content:''}),ei=(Ce().add(Ja),Ja),ti=new(Oe())({id:"mobile_push_notifications",use:"mobile_push_notifications-usage",viewBox:"0 0 24 24",content:''}),ni=(Ce().add(ti),ti),ri=new(Oe())({id:"mobile_push_notifications_hollow",use:"mobile_push_notifications_hollow-usage",viewBox:"0 0 24 24",content:''}),oi=(Ce().add(ri),ri),ai=new(Oe())({id:"monitoring",use:"monitoring-usage",viewBox:"0 0 20 20",content:''}),ii=(Ce().add(ai),ai),li=new(Oe())({id:"more",use:"more-usage",viewBox:"0 0 18 4",content:''}),ci=(Ce().add(li),li),si=new(Oe())({id:"nav_left",use:"nav_left-usage",viewBox:"0 0 8 10",content:''}),ui=(Ce().add(si),si),di=new(Oe())({id:"nav_right",use:"nav_right-usage",viewBox:"0 0 8 10",content:''}),hi=(Ce().add(di),di),vi=new(Oe())({id:"nav_arrow_goto",use:"nav_arrow_goto-usage",viewBox:"0 0 10 10",content:''}),mi=(Ce().add(vi),vi),gi=new(Oe())({id:"nav_dots",use:"nav_dots-usage",viewBox:"0 0 24 24",content:''}),fi=(Ce().add(gi),gi),pi=new(Oe())({id:"netdata",use:"netdata-usage",viewBox:"0 0 24 24",content:''}),wi=(Ce().add(pi),pi),bi=new(Oe())({id:"netdataAssistant",use:"netdataAssistant-usage",viewBox:"0 0 16 16",content:''}),yi=(Ce().add(bi),bi),xi=new(Oe())({id:"netdata-press",use:"netdata-press-usage",viewBox:"0 0 18 18",content:''}),Zi=(Ce().add(xi),xi),Bi=new(Oe())({id:"node",use:"node-usage",viewBox:"0 0 24 24",content:''}),Oi=(Ce().add(Bi),Bi),_i=new(Oe())({id:"node_child",use:"node_child-usage",viewBox:"0 0 18 18",content:''}),Ci=(Ce().add(_i),_i),Mi=new(Oe())({id:"node_default_l",use:"node_default_l-usage",viewBox:"0 0 40 40",content:''}),ki=(Ce().add(Mi),Mi),Hi=new(Oe())({id:"node_hollow",use:"node_hollow-usage",viewBox:"0 0 22 12",content:''}),ji=(Ce().add(Hi),Hi),Ei=new(Oe())({id:"node_import_export",use:"node_import_export-usage",viewBox:"0 0 24 24",content:''}),Si=(Ce().add(Ei),Ei),Vi=new(Oe())({id:"node_notification_l",use:"node_notification_l-usage",viewBox:"0 0 40 40",content:''}),Pi=(Ce().add(Vi),Vi),Li=new(Oe())({id:"node_parent",use:"node_parent-usage",viewBox:"0 0 18 18",content:''}),Ai=(Ce().add(Li),Li),Di=new(Oe())({id:"node_selected_l",use:"node_selected_l-usage",viewBox:"0 0 40 
40",content:''}),Fi=(Ce().add(Di),Di),Ii=new(Oe())({id:"nodes",use:"nodes-usage",viewBox:"0 0 16 16",content:''}),zi=(Ce().add(Ii),Ii),Ti=new(Oe())({id:"nodes_hollow",use:"nodes_hollow-usage",viewBox:"0 0 18 18",content:''}),Ri=(Ce().add(Ti),Ti),Ni=new(Oe())({id:"none_selected",use:"none_selected-usage",viewBox:"0 0 16 16",content:''}),Gi=(Ce().add(Ni),Ni),Ui=new(Oe())({id:"notification_shortcut_enabled",use:"notification_shortcut_enabled-usage",viewBox:"0 0 24 24",content:''}),Wi=(Ce().add(Ui),Ui),qi=new(Oe())({id:"notification_shortcut_disabled",use:"notification_shortcut_disabled-usage",viewBox:"0 0 24 24",content:''}),Qi=(Ce().add(qi),qi),Yi=new(Oe())({id:"os",use:"os-usage",viewBox:"0 0 18 18",content:''}),Ki=(Ce().add(Yi),Yi),Xi=new(Oe())({id:"alpine_linux",use:"alpine_linux-usage",viewBox:"0 0 18 18",content:''}),$i=(Ce().add(Xi),Xi),Ji=new(Oe())({id:"amazon_linux",use:"amazon_linux-usage",viewBox:"0 0 18 18",content:''}),el=(Ce().add(Ji),Ji),tl=new(Oe())({id:"arch_linux",use:"arch_linux-usage",viewBox:"0 0 18 18",content:''}),nl=(Ce().add(tl),tl),rl=new(Oe())({id:"celarOS",use:"celarOS-usage",viewBox:"0 0 18 18",content:''}),ol=(Ce().add(rl),rl),al=new(Oe())({id:"centos",use:"centos-usage",viewBox:"0 0 18 18",content:''}),il=(Ce().add(al),al),ll=new(Oe())({id:"centos_colored",use:"centos_colored-usage",viewBox:"0 0 18 18",content:''}),cl=(Ce().add(ll),ll),sl=new(Oe())({id:"coreOS",use:"coreOS-usage",viewBox:"0 0 18 18",content:''}),ul=(Ce().add(sl),sl),dl=new(Oe())({id:"debian",use:"debian-usage",viewBox:"0 0 18 18",content:''}),hl=(Ce().add(dl),dl),vl=new(Oe())({id:"debian_colored",use:"debian_colored-usage",viewBox:"0 0 18 18",content:''}),ml=(Ce().add(vl),vl),gl=new(Oe())({id:"fedora",use:"fedora-usage",viewBox:"0 0 18 18",content:''}),fl=(Ce().add(gl),gl),pl=new(Oe())({id:"freeBSD",use:"freeBSD-usage",viewBox:"0 0 18 18",content:''}),wl=(Ce().add(pl),pl),bl=new(Oe())({id:"gentoo",use:"gentoo-usage",viewBox:"0 0 18 18",content:''}),yl=(Ce().add(bl),bl),xl=new(Oe())({id:"linux",use:"linux-usage",viewBox:"0 0 18 18",content:''}),Zl=(Ce().add(xl),xl),Bl=new(Oe())({id:"linux_colored",use:"linux_colored-usage",viewBox:"0 0 18 18",content:''}),Ol=(Ce().add(Bl),Bl),_l=new(Oe())({id:"linux_manjaro",use:"linux_manjaro-usage",viewBox:"0 0 14 14",content:''}),Cl=(Ce().add(_l),_l),Ml=new(Oe())({id:"macOSX",use:"macOSX-usage",viewBox:"0 0 18 18",content:''}),kl=(Ce().add(Ml),Ml),Hl=new(Oe())({id:"oracle",use:"oracle-usage",viewBox:"0 0 18 18",content:''}),jl=(Ce().add(Hl),Hl),El=new(Oe())({id:"oracle_colored",use:"oracle_colored-usage",viewBox:"0 0 18 18",content:''}),Sl=(Ce().add(El),El),Vl=new(Oe())({id:"os_press",use:"os_press-usage",viewBox:"0 0 18 18",content:''}),Pl=(Ce().add(Vl),Vl),Ll=new(Oe())({id:"raspbian",use:"raspbian-usage",viewBox:"0 0 18 18",content:''}),Al=(Ce().add(Ll),Ll),Dl=new(Oe())({id:"red_hat",use:"red_hat-usage",viewBox:"0 0 18 18",content:''}),Fl=(Ce().add(Dl),Dl),Il=new(Oe())({id:"suse_linux",use:"suse_linux-usage",viewBox:"0 0 18 18",content:''}),zl=(Ce().add(Il),Il),Tl=new(Oe())({id:"ubuntu",use:"ubuntu-usage",viewBox:"0 0 18 18",content:''}),Rl=(Ce().add(Tl),Tl),Nl=new(Oe())({id:"ubuntu_colored",use:"ubuntu_colored-usage",viewBox:"0 0 18 18",content:''}),Gl=(Ce().add(Nl),Nl),Ul=new(Oe())({id:"notification",use:"notification-usage",viewBox:"0 0 40 24",content:''}),Wl=(Ce().add(Ul),Ul),ql=new(Oe())({id:"padlock",use:"padlock-usage",viewBox:"0 0 18 18",content:''}),Ql=(Ce().add(ql),ql),Yl=new(Oe())({id:"pan_tool",use:"pan_tool-usage",viewBox:"0 0 24 
24",content:''}),Kl=(Ce().add(Yl),Yl),Xl=new(Oe())({id:"pause_outline",use:"pause_outline-usage",viewBox:"0 0 18 18",content:''}),$l=(Ce().add(Xl),Xl),Jl=new(Oe())({id:"pause_solid",use:"pause_solid-usage",viewBox:"0 0 24 24",content:''}),ec=(Ce().add(Jl),Jl),tc=new(Oe())({id:"pencil_outline",use:"pencil_outline-usage",viewBox:"0 0 14 14",content:''}),nc=(Ce().add(tc),tc),rc=new(Oe())({id:"pencil_solid",use:"pencil_solid-usage",viewBox:"0 0 19 19",content:''}),oc=(Ce().add(rc),rc),ac=new(Oe())({id:"pie_chart_skeleton",use:"pie_chart_skeleton-usage",viewBox:"0 0 100 100",content:''}),ic=(Ce().add(ac),ac),lc=new(Oe())({id:"pin_element",use:"pin_element-usage",viewBox:"0 0 14 14",content:''}),cc=(Ce().add(lc),lc),sc=new(Oe())({id:"play_outline",use:"play_outline-usage",viewBox:"0 0 18 18",content:''}),uc=(Ce().add(sc),sc),dc=new(Oe())({id:"play_solid",use:"play_solid-usage",viewBox:"0 0 24 24",content:''}),hc=(Ce().add(dc),dc),vc=new(Oe())({id:"plus",use:"plus-usage",viewBox:"0 0 24 24",content:''}),mc=(Ce().add(vc),vc),gc=new(Oe())({id:"plus_mini_s",use:"plus_mini_s-usage",viewBox:"0 0 24 24",content:''}),fc=(Ce().add(gc),gc),pc=new(Oe())({id:"pod",use:"pod-usage",viewBox:"0 0 22 22",content:''}),wc=(Ce().add(pc),pc),bc=new(Oe())({id:"pricing",use:"pricing-usage",viewBox:"0 0 16 16",content:''}),yc=(Ce().add(bc),bc),xc=new(Oe())({id:"print",use:"print-usage",viewBox:"0 0 21 20",content:''}),Zc=(Ce().add(xc),xc),Bc=new(Oe())({id:"privacy",use:"privacy-usage",viewBox:"0 0 16 16",content:''}),Oc=(Ce().add(Bc),Bc),_c=new(Oe())({id:"push_notifications",use:"push_notifications-usage",viewBox:"0 0 22 22",content:''}),Cc=(Ce().add(_c),_c),Mc=new(Oe())({id:"qr_code",use:"qr_code-usage",viewBox:"0 0 18 18",content:''}),kc=(Ce().add(Mc),Mc),Hc=new(Oe())({id:"question",use:"question-usage",viewBox:"0 0 20 20",content:''}),jc=(Ce().add(Hc),Hc),Ec=new(Oe())({id:"questionFilled",use:"questionFilled-usage",viewBox:"0 0 24 24",content:''}),Sc=(Ce().add(Ec),Ec),Vc=new(Oe())({id:"ram",use:"ram-usage",viewBox:"0 0 18 18",content:''}),Pc=(Ce().add(Vc),Vc),Lc=new(Oe())({id:"rearrange",use:"rearrange-usage",viewBox:"0 0 8 14",content:''}),Ac=(Ce().add(Lc),Lc),Dc=new(Oe())({id:"reduce_size",use:"reduce_size-usage",viewBox:"0 0 18 18",content:''}),Fc=(Ce().add(Dc),Dc),Ic=new(Oe())({id:"refresh",use:"refresh-usage",viewBox:"0 0 18 19",content:''}),zc=(Ce().add(Ic),Ic),Tc=new(Oe())({id:"reload",use:"reload-usage",viewBox:"0 0 24 24",content:''}),Rc=(Ce().add(Tc),Tc),Nc=new(Oe())({id:"remove_node",use:"remove_node-usage",viewBox:"0 0 18 18",content:''}),Gc=(Ce().add(Nc),Nc),Uc=new(Oe())({id:"resize_handler",use:"resize_handler-usage",viewBox:"0 0 16 16",content:''}),Wc=(Ce().add(Uc),Uc),qc=new(Oe())({id:"rocket",use:"rocket-usage",viewBox:"0 0 18 18",content:''}),Qc=(Ce().add(qc),qc),Yc=new(Oe())({id:"room",use:"room-usage",viewBox:"0 0 24 24",content:''}),Kc=(Ce().add(Yc),Yc),Xc=new(Oe())({id:"room_home",use:"room_home-usage",viewBox:"0 0 14 12",content:''}),$c=(Ce().add(Xc),Xc),Jc=new(Oe())({id:"room_new",use:"room_new-usage",viewBox:"0 0 20 20",content:''}),es=(Ce().add(Jc),Jc),ts=new(Oe())({id:"room_overview",use:"room_overview-usage",viewBox:"0 0 24 25",content:''}),ns=(Ce().add(ts),ts),rs=new(Oe())({id:"sad",use:"sad-usage",viewBox:"0 0 24 24",content:''}),os=(Ce().add(rs),rs),as=new(Oe())({id:"save",use:"save-usage",viewBox:"0 0 14 14",content:''}),is=(Ce().add(as),as),ls=new(Oe())({id:"search",use:"search-usage",viewBox:"0 0 18 
18",content:''}),cs=(Ce().add(ls),ls),ss=new(Oe())({id:"search_s",use:"search_s-usage",viewBox:"0 0 14 14",content:''}),us=(Ce().add(ss),ss),ds=new(Oe())({id:"search_press",use:"search_press-usage",viewBox:"0 0 18 18",content:''}),hs=(Ce().add(ds),ds),vs=new(Oe())({id:"apache",use:"apache-usage",viewBox:"0 0 18 18",content:''}),ms=(Ce().add(vs),vs),gs=new(Oe())({id:"asterisk",use:"asterisk-usage",viewBox:"0 0 16 16",content:''}),fs=(Ce().add(gs),gs),ps=new(Oe())({id:"apache_tomcat",use:"apache_tomcat-usage",viewBox:"0 0 18 18",content:''}),ws=(Ce().add(ps),ps),bs=new(Oe())({id:"beanstalk",use:"beanstalk-usage",viewBox:"0 0 18 18",content:''}),ys=(Ce().add(bs),bs),xs=new(Oe())({id:"bind",use:"bind-usage",viewBox:"0 0 16 16",content:''}),Zs=(Ce().add(xs),xs),Bs=new(Oe())({id:"containerTech",use:"containerTech-usage",viewBox:"0 0 16 16",content:''}),Os=(Ce().add(Bs),Bs),_s=new(Oe())({id:"coreDNS",use:"coreDNS-usage",viewBox:"0 0 16 16",content:''}),Cs=(Ce().add(_s),_s),Ms=new(Oe())({id:"couchDB",use:"couchDB-usage",viewBox:"0 0 18 18",content:''}),ks=(Ce().add(Ms),Ms),Hs=new(Oe())({id:"database",use:"database-usage",viewBox:"0 0 18 18",content:''}),js=(Ce().add(Hs),Hs),Es=new(Oe())({id:"dns",use:"dns-usage",viewBox:"0 0 16 16",content:''}),Ss=(Ce().add(Es),Es),Vs=new(Oe())({id:"dnsmasq",use:"dnsmasq-usage",viewBox:"0 0 16 16",content:''}),Ps=(Ce().add(Vs),Vs),Ls=new(Oe())({id:"docker_hub_press",use:"docker_hub_press-usage",viewBox:"0 0 18 18",content:''}),As=(Ce().add(Ls),Ls),Ds=new(Oe())({id:"docker_hub",use:"docker_hub-usage",viewBox:"0 0 18 18",content:''}),Fs=(Ce().add(Ds),Ds),Is=new(Oe())({id:"dotnet",use:"dotnet-usage",viewBox:"0 0 16 16",content:''}),zs=(Ce().add(Is),Is),Ts=new(Oe())({id:"eBPF",use:"eBPF-usage",viewBox:"0 0 18 18",content:''}),Rs=(Ce().add(Ts),Ts),Ns=new(Oe())({id:"elasticSearch",use:"elasticSearch-usage",viewBox:"0 0 18 18",content:''}),Gs=(Ce().add(Ns),Ns),Us=new(Oe())({id:"example",use:"example-usage",viewBox:"0 0 16 16",content:''}),Ws=(Ce().add(Us),new(Oe())({id:"freeNAS",use:"freeNAS-usage",viewBox:"0 0 18 18",content:''})),qs=(Ce().add(Ws),Ws),Qs=new(Oe())({id:"haProxy",use:"haProxy-usage",viewBox:"0 0 18 18",content:''}),Ys=(Ce().add(Qs),Qs),Ks=new(Oe())({id:"httpCheck",use:"httpCheck-usage",viewBox:"0 0 18 18",content:''}),Xs=(Ce().add(Ks),Ks),$s=new(Oe())({id:"iceCast",use:"iceCast-usage",viewBox:"0 0 18 18",content:''}),Js=(Ce().add($s),$s),eu=new(Oe())({id:"influxDB",use:"influxDB-usage",viewBox:"0 0 18 18",content:''}),tu=(Ce().add(eu),eu),nu=new(Oe())({id:"ipfs",use:"ipfs-usage",viewBox:"0 0 18 18",content:''}),ru=(Ce().add(nu),nu),ou=new(Oe())({id:"ipvs",use:"ipvs-usage",viewBox:"0 0 18 18",content:''}),au=(Ce().add(ou),ou),iu=new(Oe())({id:"kubernetes",use:"kubernetes-usage",viewBox:"0 0 18 18",content:''}),lu=(Ce().add(iu),iu),cu=new(Oe())({id:"lighthttpd",use:"lighthttpd-usage",viewBox:"0 0 18 18",content:''}),su=(Ce().add(cu),cu),uu=new(Oe())({id:"lighthttpd2",use:"lighthttpd2-usage",viewBox:"0 0 18 18",content:''}),du=(Ce().add(uu),uu),hu=new(Oe())({id:"liteSpeed",use:"liteSpeed-usage",viewBox:"0 0 18 18",content:''}),vu=(Ce().add(hu),hu),mu=new(Oe())({id:"lxc",use:"lxc-usage",viewBox:"0 0 18 18",content:''}),gu=(Ce().add(mu),mu),fu=new(Oe())({id:"mariaDB",use:"mariaDB-usage",viewBox:"0 0 18 18",content:''}),pu=(Ce().add(fu),fu),wu=new(Oe())({id:"memCached",use:"memCached-usage",viewBox:"0 0 18 18",content:''}),bu=(Ce().add(wu),wu),yu=new(Oe())({id:"mongoDB",use:"mongoDB-usage",viewBox:"0 0 18 
18",content:''}),xu=(Ce().add(yu),yu),Zu=new(Oe())({id:"mySQL",use:"mySQL-usage",viewBox:"0 0 18 18",content:''}),Bu=(Ce().add(Zu),Zu),Ou=new(Oe())({id:"mySQL_press",use:"mySQL_press-usage",viewBox:"0 0 18 18",content:''}),_u=(Ce().add(Ou),Ou),Cu=new(Oe())({id:"nginx",use:"nginx-usage",viewBox:"0 0 18 18",content:''}),Mu=(Ce().add(Cu),Cu),ku=new(Oe())({id:"nginx_local",use:"nginx_local-usage",viewBox:"0 0 18 18",content:''}),Hu=(Ce().add(ku),ku),ju=new(Oe())({id:"nginx_plus",use:"nginx_plus-usage",viewBox:"0 0 18 18",content:''}),Eu=(Ce().add(ju),ju),Su=new(Oe())({id:"ntpd",use:"ntpd-usage",viewBox:"0 0 18 18",content:''}),Vu=(Ce().add(Su),Su),Pu=new(Oe())({id:"nvidia",use:"nvidia-usage",viewBox:"0 0 18 18",content:''}),Lu=(Ce().add(Pu),Pu),Au=new(Oe())({id:"ntpd_press",use:"ntpd_press-usage",viewBox:"0 0 18 18",content:''}),Du=(Ce().add(Au),Au),Fu=new(Oe())({id:"openStack",use:"openStack-usage",viewBox:"0 0 18 18",content:''}),Iu=(Ce().add(Fu),Fu),zu=new(Oe())({id:"openWrt",use:"openWrt-usage",viewBox:"0 0 18 18",content:''}),Tu=(Ce().add(zu),zu),Ru=new(Oe())({id:"pan",use:"pan-usage",viewBox:"0 0 18 18",content:''}),Nu=(Ce().add(Ru),Ru),Gu=new(Oe())({id:"pandas",use:"pandas-usage",viewBox:"0 0 18 18",content:''}),Uu=(Ce().add(Gu),Gu),Wu=new(Oe())({id:"percona",use:"percona-usage",viewBox:"0 0 18 18",content:''}),qu=(Ce().add(Wu),Wu),Qu=new(Oe())({id:"pfSense",use:"pfSense-usage",viewBox:"0 0 18 18",content:''}),Yu=(Ce().add(Qu),Qu),Ku=new(Oe())({id:"php_fpm",use:"php_fpm-usage",viewBox:"0 0 18 18",content:''}),Xu=(Ce().add(Ku),Ku),$u=new(Oe())({id:"postgreSQL",use:"postgreSQL-usage",viewBox:"0 0 18 18",content:''}),Ju=(Ce().add($u),$u),ed=new(Oe())({id:"prometheus",use:"prometheus-usage",viewBox:"0 0 256 257",content:''}),td=(Ce().add(ed),ed),nd=new(Oe())({id:"proxySQL",use:"proxySQL-usage",viewBox:"0 0 18 18",content:''}),rd=(Ce().add(nd),nd),od=new(Oe())({id:"rabbitMQ",use:"rabbitMQ-usage",viewBox:"0 0 18 18",content:''}),ad=(Ce().add(od),od),id=new(Oe())({id:"random",use:"random-usage",viewBox:"0 0 16 16",content:''}),ld=(Ce().add(id),id),cd=new(Oe())({id:"redis",use:"redis-usage",viewBox:"0 0 18 18",content:''}),sd=(Ce().add(cd),cd),ud=new(Oe())({id:"rethinkDB",use:"rethinkDB-usage",viewBox:"0 0 18 18",content:''}),dd=(Ce().add(ud),ud),hd=new(Oe())({id:"retroShare",use:"retroShare-usage",viewBox:"0 0 18 18",content:''}),vd=(Ce().add(hd),hd),md=new(Oe())({id:"selected_area",use:"selected_area-usage",viewBox:"0 0 18 18",content:''}),gd=(Ce().add(md),md),fd=new(Oe())({id:"sendgrid",use:"sendgrid-usage",viewBox:"0 0 16 16",content:''}),pd=(Ce().add(fd),fd),wd=new(Oe())({id:"services",use:"services-usage",viewBox:"0 0 18 18",content:''}),bd=(Ce().add(wd),wd),yd=new(Oe())({id:"smartdlog",use:"smartdlog-usage",viewBox:"0 0 16 16",content:''}),xd=(Ce().add(yd),yd),Zd=new(Oe())({id:"solr",use:"solr-usage",viewBox:"0 0 18 18",content:''}),Bd=(Ce().add(Zd),Zd),Od=new(Oe())({id:"squid",use:"squid-usage",viewBox:"0 0 18 18",content:''}),_d=(Ce().add(Od),Od),Cd=new(Oe())({id:"summary_statistic",use:"summary_statistic-usage",viewBox:"0 0 18 18",content:''}),Md=(Ce().add(Cd),Cd),kd=new(Oe())({id:"systemd",use:"systemd-usage",viewBox:"0 0 16 16",content:''}),Hd=(Ce().add(kd),kd),jd=new(Oe())({id:"traefik",use:"traefik-usage",viewBox:"0 0 18 18",content:''}),Ed=(Ce().add(jd),jd),Sd=new(Oe())({id:"varnish",use:"varnish-usage",viewBox:"0 0 18 18",content:''}),Vd=(Ce().add(Sd),Sd),Pd=new(Oe())({id:"webLog",use:"webLog-usage",viewBox:"0 0 18 
18",content:''}),Ld=(Ce().add(Pd),Pd),Ad=new(Oe())({id:"webLog_nginx",use:"webLog_nginx-usage",viewBox:"0 0 18 18",content:''}),Dd=(Ce().add(Ad),Ad),Fd=new(Oe())({id:"x509_check",use:"x509_check-usage",viewBox:"0 0 18 18",content:''}),Id=(Ce().add(Fd),Fd),zd=new(Oe())({id:"xen",use:"xen-usage",viewBox:"0 0 18 18",content:''}),Td=(Ce().add(zd),zd),Rd=new(Oe())({id:"settings",use:"settings-usage",viewBox:"0 0 17 15",content:''}),Nd=(Ce().add(Rd),Rd),Gd=new(Oe())({id:"settings_h",use:"settings_h-usage",viewBox:"0 0 14 14",content:''}),Ud=(Ce().add(Gd),Gd),Wd=new(Oe())({id:"sign_in",use:"sign_in-usage",viewBox:"0 0 18 18",content:''}),qd=(Ce().add(Wd),Wd),Qd=new(Oe())({id:"sorting_vertical",use:"sorting_vertical-usage",viewBox:"0 0 19 18",content:''}),Yd=(Ce().add(Qd),Qd),Kd=new(Oe())({id:"sorting_asc",use:"sorting_asc-usage",viewBox:"0 0 8 9",content:''}),Xd=(Ce().add(Kd),Kd),$d=new(Oe())({id:"sort_ascending",use:"sort_ascending-usage",viewBox:"0 0 18 18",content:''}),Jd=(Ce().add($d),$d),eh=new(Oe())({id:"sorting_desc",use:"sorting_desc-usage",viewBox:"0 0 8 9",content:''}),th=(Ce().add(eh),eh),nh=new(Oe())({id:"sort_descending",use:"sort_descending-usage",viewBox:"0 0 18 18",content:''}),rh=(Ce().add(nh),nh),oh=new(Oe())({id:"sort_indicator",use:"sort_indicator-usage",viewBox:"0 0 18 18",content:''}),ah=(Ce().add(oh),oh),ih=new(Oe())({id:"space",use:"space-usage",viewBox:"0 0 24 24",content:''}),lh=(Ce().add(ih),ih),ch=new(Oe())({id:"space_new",use:"space_new-usage",viewBox:"0 0 20 20",content:''}),sh=(Ce().add(ch),ch),uh=new(Oe())({id:"spaces_v2",use:"spaces_v2-usage",viewBox:"0 0 24 24",content:''}),dh=(Ce().add(uh),uh),hh=new(Oe())({id:"switch_off",use:"switch_off-usage",viewBox:"0 0 14 15",content:''}),vh=(Ce().add(hh),hh),mh=new(Oe())({id:"system_overview",use:"system_overview-usage",viewBox:"0 0 32 32",content:''}),gh=(Ce().add(mh),mh),fh=new(Oe())({id:"system_overview_press",use:"system_overview_press-usage",viewBox:"0 0 16 16",content:''}),ph=(Ce().add(fh),fh),wh=new(Oe())({id:"text_add",use:"text_add-usage",viewBox:"0 0 16 16",content:''}),bh=(Ce().add(wh),wh),yh=new(Oe())({id:"thumb_down",use:"thumb_down-usage",viewBox:"0 0 24 24",content:''}),xh=(Ce().add(yh),yh),Zh=new(Oe())({id:"thumb_up",use:"thumb_up-usage",viewBox:"0 0 24 24",content:''}),Bh=(Ce().add(Zh),Zh),Oh=new(Oe())({id:"tiny_buttons",use:"tiny_buttons-usage",viewBox:"0 0 22 22",content:''}),_h=(Ce().add(Oh),Oh),Ch=new(Oe())({id:"training",use:"training-usage",viewBox:"0 0 16 16",content:''}),Mh=(Ce().add(Ch),Ch),kh=new(Oe())({id:"trashcan",use:"trashcan-usage",viewBox:"0 0 14 15",content:''}),Hh=(Ce().add(kh),kh),jh=new(Oe())({id:"triangle",use:"triangle-usage",viewBox:"0 0 24 24",content:''}),Eh=(Ce().add(jh),jh),Sh=new(Oe())({id:"triangle_down",use:"triangle_down-usage",viewBox:"0 0 10 5",content:''}),Vh=(Ce().add(Sh),Sh),Ph=new(Oe())({id:"unknownError",use:"unknownError-usage",viewBox:"0 0 16 16",content:''}),Lh=(Ce().add(Ph),Ph),Ah=new(Oe())({id:"universe",use:"universe-usage",viewBox:"0 0 18 18",content:''}),Dh=(Ce().add(Ah),Ah),Fh=new(Oe())({id:"unreachable",use:"unreachable-usage",viewBox:"0 0 12 14",content:''}),Ih=(Ce().add(Fh),Fh),zh=new(Oe())({id:"unreachableNode",use:"unreachableNode-usage",viewBox:"0 0 231 230",content:''}),Th=(Ce().add(zh),zh),Rh=new(Oe())({id:"update",use:"update-usage",viewBox:"0 0 20 20",content:''}),Nh=(Ce().add(Rh),Rh),Gh=new(Oe())({id:"update_pending",use:"update_pending-usage",viewBox:"0 0 20 
20",content:''}),Uh=(Ce().add(Gh),Gh),Wh=new(Oe())({id:"upload",use:"upload-usage",viewBox:"0 0 20 21",content:''}),qh=(Ce().add(Wh),Wh),Qh=new(Oe())({id:"user",use:"user-usage",viewBox:"0 0 16 18",content:''}),Yh=(Ce().add(Qh),Qh),Kh=new(Oe())({id:"user_press",use:"user_press-usage",viewBox:"0 0 16 16",content:''}),Xh=(Ce().add(Kh),Kh),$h=new(Oe())({id:"users",use:"users-usage",viewBox:"0 0 14 14",content:''}),Jh=(Ce().add($h),$h),ev=new(Oe())({id:"view_list",use:"view_list-usage",viewBox:"0 0 24 24",content:''}),tv=(Ce().add(ev),ev),nv=new(Oe())({id:"single_node_view",use:"single_node_view-usage",viewBox:"0 0 18 18",content:''}),rv=(Ce().add(nv),nv),ov=new(Oe())({id:"single_node_view_press",use:"single_node_view_press-usage",viewBox:"0 0 18 18",content:''}),av=(Ce().add(ov),ov),iv=new(Oe())({id:"virtualization",use:"virtualization-usage",viewBox:"0 0 16 16",content:''}),lv=(Ce().add(iv),iv),cv=new(Oe())({id:"warning",use:"warning-usage",viewBox:"0 0 24 24",content:''}),sv=(Ce().add(cv),cv),uv=new(Oe())({id:"warning_triangle",use:"warning_triangle-usage",viewBox:"0 0 12 10",content:''}),dv=(Ce().add(uv),uv),hv=new(Oe())({id:"warning_triangle_hollow",use:"warning_triangle_hollow-usage",viewBox:"0 0 24 24",content:''}),vv=(Ce().add(hv),hv),mv=new(Oe())({id:"weights_compare",use:"weights_compare-usage",viewBox:"0 0 14 12",content:''}),gv=(Ce().add(mv),mv),fv=new(Oe())({id:"weights_drill_down",use:"weights_drill_down-usage",viewBox:"0 0 14 14",content:''}),pv=(Ce().add(fv),fv),wv=new(Oe())({id:"x",use:"x-usage",viewBox:"0 0 24 24",content:''}),bv=(Ce().add(wv),wv),yv=new(Oe())({id:"firewall_solid",use:"firewall_solid-usage",viewBox:"0 0 24 24",content:''}),xv=(Ce().add(yv),yv),Zv=new(Oe())({id:"qualityOfService_solid",use:"qualityOfService_solid-usage",viewBox:"0 0 24 24",content:''}),Bv=(Ce().add(Zv),Zv),Ov=new(Oe())({id:"applications_solid",use:"applications_solid-usage",viewBox:"0 0 24 24",content:''}),_v=(Ce().add(Ov),Ov),Cv=new(Oe())({id:"networking_stack",use:"networking_stack-usage",viewBox:"0 0 18 18",content:''}),Mv=(Ce().add(Cv),Cv),kv=new(Oe())({id:"charts_view",use:"charts_view-usage",viewBox:"0 0 16 15",content:''}),Hv=(Ce().add(kv),kv),jv=new(Oe())({id:"nodes_update",use:"nodes_update-usage",viewBox:"0 0 40 40",content:''}),Ev=(Ce().add(jv),jv),Sv=new(Oe())({id:"zoom_in",use:"zoom_in-usage",viewBox:"0 0 24 24",content:''}),Vv=(Ce().add(Sv),Sv),Pv=new(Oe())({id:"zoom_out",use:"zoom_out-usage",viewBox:"0 0 24 24",content:''}),Lv=(Ce().add(Pv),Pv),Av=new(Oe())({id:"zoom_reset",use:"zoom_reset-usage",viewBox:"0 0 24 24",content:''}),Dv=(Ce().add(Av),Av),Fv=new(Oe())({id:"N",use:"N-usage",viewBox:"0 0 16 16",content:''}),Iv=(Ce().add(Fv),Fv),zv=new(Oe())({id:"I",use:"I-usage",viewBox:"0 0 16 16",content:''}),Tv=(Ce().add(zv),zv),Rv=new(Oe())({id:"D",use:"D-usage",viewBox:"0 0 16 16",content:''}),Nv=(Ce().add(Rv),Rv),Gv=new(Oe())({id:"L",use:"L-usage",viewBox:"0 0 16 
16",content:''}),Uv=(Ce().add(Gv),{add_node:ke,add_user:je,aggregation_avg:Se,aggregation_max:Pe,aggregation_med:Ae,aggregation_min:Fe,aggregation_sum:ze,aggregation_sum_abs:Re,alarm:Ge,alarmCritical:We,alarmCriticalWarning:Qe,alarmFilled:Ke,alarmWarning:$e,alarm_bell:et,alarms_new:nt,alarm_off:ot,anomaliesBrain:it,anomaliesLens:ct,anomalyBadge:ut,applications_hollow:ht,applicationsSolid:_v,around_clock:mt,arrow_down:ft,arrow_w_line_left:wt,arrow_w_line_right:yt,arrow_left:Zt,arrow_s_down:Ot,arrow_s_left:Ct,arrows_vertical:kt,bookmark:jt,bullet_one:St,bullet_three:Pt,bullet_two:At,calendarFull:Ft,calendarFullPress:zt,chart_added:Rt,charts:Gt,charts_view:Hv,check:Wt,checkmark_partial_s:Qt,checkmark_s:Kt,checkmark:$t,chevron_double:en,chevron_down:nn,chevron_down_thin:on,chevron_expand:ln,chevron_left:sn,chevron_left_small:vn,chevron_left_start:dn,chevron_right:gn,chevron_right_end:bn,chevron_right_s:pn,chevron_right_small:xn,chevron_up_thin:Bn,classError:_n,classLatency:Mn,classUtilization:Hn,classWorkload:En,clock_hollow:Vn,clock5Min:Ln,clock5MinPress:Dn,close_circle:In,cluster:Tn,cluster_spaces:Nn,code:Un,collapse:qn,collect:Yn,community:Xn,connection_to_cloud:Jn,connectivityStatusLive:tr,connectivityStatusOffline:rr,connectivityStatusStale:ar,container:lr,controller_kind:sr,controller_name:dr,copy:vr,correlation:gr,correlation_inv:pr,cpu:br,cross_s:xr,data_retention:Br,database:_r,dashboard:Mr,dashboard_add:Hr,dashboards:Er,disk:Vr,documentation:Lr,dot:Dr,dots_2x3:Ir,download:Tr,dragHorizontal:Nr,edit:Ur,error:qr,exclamation:Yr,expand:Xr,favorites:Jr,feed:to,filter:ro,filterList:ao,firewallSolid:xv,forcePlay:lo,forcePlayOutline:so,functions:ho,fullScreen:mo,gear:fo,github:wo,google:Zo,goToNode:yo,group_by:Oo,hamburger:Co,help:ko,hide:jo,highlightArea:So,holder:Po,importExport:Ao,incident_manager:Fo,information:zo,informationPress:Ro,insights:Go,integrationAWSSNS:Wo,integrationAWSSNSColored:Qo,integrationDiscord:Ko,integrationDiscordColored:$o,integrationEmail:ea,integrationEmailColored:na,integrationMattermost:oa,integrationMattermostColored:ia,integrationMobileAppColored:ca,integrationOpsgenie:ua,integrationOpsgenieColored:ha,integrationPagerduty:ma,integrationPagerdutyColored:fa,integrationRocketChat:wa,integrationRocketChatColored:ya,integrations:Za,integrationSlack:Oa,integrationSlackColored:Ca,integrationTelegram:ka,integrationTelegramColored:ja,integrationWebhook:Sa,integrationWebhookColored:Pa,ipNetworking:Aa,ipNetworkingPress:Fa,last_week:za,line_chart:Ra,logo_s:Ga,loading:Wa,magnify:Qa,metrics:Ka,metrics_explorer:$a,minimize_s:ei,mobilePushNotifications:ni,mobilePushNotificationsHollow:oi,monitoring:ii,more:ci,navLeft:ui,navRight:hi,nav_arrow_goto:mi,nav_dots:fi,networkingStack:Mv,netdata:wi,netdataAssistant:yi,netdataPress:Zi,node:Oi,node_child:Ci,node_default_l:ki,node_hollow:ji,node_import_export:Si,node_notification_l:Pi,node_parent:Ai,node_selected_l:Fi,nodes:zi,nodes_hollow:Ri,none_selected:Gi,nodes_update:Ev,notification:Wl,notification_shortcut_enabled:Wi,notification_shortcut_disabled:Qi,os:Ki,osAlpineLinux:$i,osAmazonLinux:el,osArchLinux:nl,osCelarOS:ol,osCentos:il,osCentosColored:cl,osCoreOS:ul,osDebian:hl,osDebianColored:ml,osFedora:fl,osFreeBSD:wl,osGentoo:yl,osLinux:Zl,osLinuxColored:Ol,osLinuxManjaro:Cl,osMacOSX:kl,osOracle:jl,osOracleColored:Sl,osPress:Pl,osRaspbian:Al,osRedHat:Fl,osSuseLinux:zl,osUbuntu:Rl,osUbuntuColored:Gl,padlock:Ql,panTool:Kl,pauseOutline:$l,pauseSolid:ec,pencilSolid:oc,pencilOutline:nc,pie_chart_skeleton:ic,pin_element:cc,playOutline:uc,play
Solid:hc,plus:mc,plus_mini_s:fc,pod:wc,pricing:yc,print:Zc,privacy:Oc,pushNotifications:Cc,qrCode:kc,ram:Pc,qualityOfServiceSolid:Bv,question:jc,questionFilled:Sc,rearrange:Ac,reduceSize:Fc,refresh:zc,reload:Rc,removeNode:Gc,resize_handler:Wc,rocket:Qc,room:Kc,room_home:$c,room_new:es,room_overview:ns,sad:os,save:is,search:cs,search_s:us,searchPress:hs,serviceAlarm:Ge,serviceApache:ms,serviceAsterisk:fs,serviceApacheTomcat:ws,serviceBeanstalk:ys,serviceBind:Zs,serviceContainer:Os,serviceCoreDns:Cs,serviceCouchDB:ks,serviceDatabase:js,serviceDNS:Ss,serviceDNSmasq:Ps,serviceDockerHubPress:As,serviceDockerHub:Fs,serviceDotNet:zs,serviceEBPF:Rs,serviceElasticSearch:Gs,serviceFreeNAS:qs,serviceHAProxy:Ys,serviceHTTPCheck:Xs,serviceIceCast:Js,serviceInfluxDB:tu,serviceIPFS:ru,serviceIPVS:au,serviceKubernetes:lu,serviceLighthttpd:su,serviceLighthttpd2:du,serviceLiteSpeed:vu,serviceLxc:gu,serviceMariaDB:pu,serviceMemCached:bu,serviceMongoDB:xu,serviceMySQL:Bu,serviceMySQLPress:_u,serviceNginx:Mu,serviceNginxLocal:Hu,serviceNginxPlus:Eu,serviceNtpd:Vu,serviceNvidia:Lu,serviceNtpdPress:Du,serviceOpenStack:Iu,serviceOpenWrt:Tu,servicePan:Nu,servicePandas:Uu,servicePercona:qu,servicePfSense:Yu,servicePhpFpm:Xu,servicePostgreSQL:Ju,servicePrometheus:td,serviceProxySQL:rd,serviceRabbitMQ:ad,serviceRandom:ld,serviceRedis:sd,serviceRethinkDB:dd,serviceRetroShare:vd,serviceSelectedArea:gd,serviceSendgrid:pd,services:bd,servicesSmartdlog:xd,serviceSolr:Bd,serviceSquid:_d,serviceSummaryStatistic:Md,serviceSystemd:Hd,serviceTraefik:Ed,serviceVarnish:Vd,serviceWebLog:Ld,serviceWebLogNginx:Dd,serviceX509Check:Id,serviceXen:Td,settings:Nd,settings_h:Ud,sign_in:qd,sorting_vertical:Yd,sort_ascending:Jd,sorting_asc:Xd,sort_descending:rh,sorting_desc:th,sort_indicator:ah,space:lh,space_new:sh,spaces_v2:dh,switch_off:vh,system_overview:gh,systemOverviewPress:ph,text_add:bh,thumb_down:xh,thumb_up:Bh,tiny_buttons:_h,training:Mh,trashcan:Hh,triangle:Eh,triangle_down:Vh,unknownError:Lh,universe:Dh,unreachable:Ih,unreachableNode:Th,update:Nh,update_pending:Uh,upload:qh,user:Yh,userPress:Xh,users:Jh,view_list:tv,viewSingleNode:rv,viewSingleNodePress:av,virtualization:lv,warning:sv,warning_triangle:dv,warning_triangle_hollow:vv,weights_compare:gv,weights_drill_down:pv,x:bv,zoomIn:Vv,zoomOut:Lv,zoomReset:Dv,N:Iv,I:Tv,D:Nv,L:Gv}),Wv=function(e){var t=e.cursor;return t?"cursor: "+t+";":""},qv={small:"14px",medium:"16px",large:"24px"},Qv=i.default.svg.withConfig({displayName:"styled__StyledIcon",componentId:"sc-1wxz4b1-0"})(["height:",";width:",";opacity:",";pointer-events:",";"," "," "," "," "," ",""],(function(e){var t=e.size;return e.height||qv[t]}),(function(e){var t=e.size;return e.width||qv[t]}),(function(e){return e.disabled?.4:1}),(function(e){return e.disabled?"none":"unset"}),(function(e){var t=e.rotate;return!isNaN(t)&&"transform: rotate("+90*t+"deg);"}),(function(e){var t=e.theme,n=e.color;return n&&"fill: "+A(n)({theme:t})+";"}),(function(e){var t=e.theme,n=e.hoverColor;return n&&"&:hover { fill: "+A(n)({theme:t})+"; }"}),R,$,Wv),Yv=["name","size"];function Kv(){return Kv=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,Yv),i=Uv[n];if(!i)return null;var l,c=r||((l=n).endsWith("_s")?"small":l.endsWith("_l")?"large":"medium");return a.createElement(Qv,Kv({viewBox:i.viewBox},o,{size:c,ref:t}),a.createElement("use",{xlinkHref:"#"+i.id}))})),$v={weak:.3,medium:.4,strong:.8},Jv=function(e){var t=e.opacity,n=t&&$v[t]||t;return n?"opacity: "+n+";":""},em=function(e){var 
t=e.zIndex;if(t&&"number"===typeof t)return"z-index: "+t+";"},tm=function(e){var t=e.theme.constants.SIZE_SUB_UNIT,n=e.height;if("object"===typeof n){var r=n.min,o=void 0===r?"":r,a=n.max,i=void 0===a?"":a;return"\n "+(o&&"min-height: "+("number"===typeof o?t*o+"px":o)+";")+"\n "+(i&&"max-height: "+("number"===typeof i?t*i+"px":i)+";")+"\n "}return n&&"height: "+("number"===typeof n?t*n+"px":n)+";"},nm=function(e){var t=e.theme.constants.SIZE_SUB_UNIT,n=e.width;if("object"===typeof n){var r=n.min,o=void 0===r?"":r,a=n.max,i=void 0===a?"":a,l=n.base,c=void 0===l?"":l;return"\n "+(o&&"min-width: "+("number"===typeof o?t*o+"px":o)+";")+"\n "+(i&&"max-width: "+("number"===typeof i?t*i+"px":i)+";")+"\n "+(c&&"width: "+("number"===typeof c?t*c+"px":c)+";")+"\n "}return n&&"width: "+("number"===typeof n?t*n+"px":n)+";"},rm=function(e){var t=e.overflow;if(!t)return"";if("string"===typeof t)return"overflow: "+t+";";var n=t.vertical,r=void 0===n?"":n,o=t.horizontal,a=void 0===o?"":o;return"\n "+(r&&"overflow-y: "+r+";")+"\n "+(a&&"overflow-x: "+a+";")+"\n "},om=function(e){var t=e.theme,n=e.background,r=e.backgroundOpacity;if(!n)return"";var o=r?D(n,r)({theme:t}):A(n)({theme:t});return o&&"background-color: "+o+";"},am=function(e){var t=e.flex,n=e.basis;if(void 0===t&&void 0===n)return"";if(n&&void 0===t)return"flex-basis: "+n+";";var r=function(e,t){return void 0===t&&(t="auto"),!0===e?"1 1 "+t:!1===e?"0 0 "+t:"grow"===e?"1 0 "+t:"shrink"===e?"0 1 "+t:"number"===typeof e?e+" 0 "+t:"object"!==typeof e?e:e.grow+" "+e.shrink+" "+t}(t,n);return r?"flex: "+r+";":""},im=function(e){var t=function(e){return!0===e?"wrap":!1===e?"nowrap":"reverse"===e?e:""}(e.flexWrap);return t&&"flex-wrap: "+t+";"},lm={start:"flex-start",center:"center",end:"flex-end",baseline:"baseline",stretch:"stretch"},cm=function(e){var t=e.alignItems;return t in lm?"align-items: "+lm[t]+";":""},sm={start:"flex-start",center:"center",end:"flex-end",between:"space-between",around:"space-around",stretch:"stretch"},um=function(e){var t=e.alignContent;return t in sm?"align-content: "+sm[t]+";":""},dm={start:"flex-start",center:"center",end:"flex-end",between:"space-between",around:"space-around",evenly:"space-evenly",stretch:"stretch"},hm=function(e){var t=e.justifyContent;return t in dm?"justify-content: "+dm[t]+";":""},vm=function(e){var t=e.theme.constants.SIZE_SUB_UNIT,n=e.gap,r=e.column,o=e.columnReverse,a=e.rowReverse;if("number"!==typeof n)return"";var i=function(e){var t=e.column,n=e.columnReverse,r=e.rowReverse;return t||n?"bottom":r?"left":"right"}({column:r,columnReverse:o,rowReverse:a});return"\n &> *:not(:last-child) {\n margin-"+i+": "+t*n+"px;\n }\n "},mm=function(e){var t=function(e,t,n){return e?"column":t?"column-reverse":n?"row-reverse":"row"}(e.column,e.columnReverse,e.rowReverse);return"flex-direction: "+t+";"};function gm(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function fm(e){for(var t=1;t=0||(o[n]=e[n]);return o}var km={boxShadow:xm,border:ym,background:om,color:function(e){var t=e.theme,n=e.color;return n?"color: "+A(n)({theme:t})+";":""},alignItems:cm},Hm=function(e){var t=e.theme,n=Mm(e,Zm),r="";for(var o in n)if(void 0===km[o]){r=r+(o+":")+n[o]+";"}else{var a=km[o];r+=a&&"function"===typeof a?a(_m({theme:t},n)):""}return r},jm={_before:"&::before",_after:"&::after",_hover:"&:hover, &[data-hover]",_active:"&:active, 
&[data-active]",_focus:"&:focus, &[data-focus]",_focusWithin:"&:focus-within",_visited:"&:visited",_empty:"&:empty",_even:"&:nth-of-type(even)",_odd:"&:nth-of-type(odd)",_disabled:"&[disabled], &[aria-disabled=true], &[data-disabled]",_checked:"&[aria-checked=true]",_mixed:"&[aria-checked=mixed]",_selected:"&[aria-selected=true], [data-selected] > &",_invalid:"&[aria-invalid=true]",_pressed:"&[aria-pressed=true]",_readOnly:"&[aria-readonly=true], &[readonly]",_first:"&:first-of-type",_last:"&:last-of-type",_expanded:"&[aria-expanded=true]",_grabbed:"&[aria-grabbed=true]",_notFirst:"&:not(:first-of-type)",_notLast:"&:not(:last-of-type)",_groupHover:"[role=group]:hover &",_autofill:"&:-webkit-autofill",_placeholder:"&::placeholder"},Em=function(e){var t=e.theme,n=Mm(e,Bm),r="";for(var o in n)if(o in jm){var a=o,i=n[a],l=Hm(_m({theme:t},i));r=r+"\n "+jm[a]+"{ \n "+l+" \n }"}return r.replace(/^(?=\n)$|^\s*|\s*$|\n\n+/gm,"")},Sm=new Set(["left","center","right"]),Vm=new Set(["underline","none","line-through"]),Pm=new Set(["normal","nowrap","pre-line"]),Lm=new Set(["normal","break-all","keep-all","break-word"]),Am=function(e){var t=e.theme,n=e.color;return"color: "+A(void 0===n?"text":n)({theme:t})+";"},Dm=(0,i.css)([""," "," "," "," "," "," "," "," "," "," "," "," "," "," ",""],Am,(function(e){var t=e.background,n=void 0===t?"text":t,r=e.code,o=e.color,a=void 0===o?"elementBackground":o,i=e.theme;return r&&"\n background-color: "+A(n)({theme:i})+";\n border-radius: 4px;\n color: "+A(a)({theme:i})+";\n padding: 0 6px;\n "}),$,(function(e){var t=e.textAlign;return Sm.has(t)&&"text-align: "+t+";"}),(function(e){var t=e.textDecoration;return Vm.has(t)&&"text-decoration: "+t+";"}),ee,(function(e){return e.truncate&&"\n white-space: nowrap;\n text-overflow: ellipsis;\n overflow: hidden;\n"}),(function(e){var t=e.whiteSpace;return Pm.has(t)&&"white-space: "+t+";"}),(function(e){var t=e.wordBreak;return Lm.has(t)&&"word-break: "+t+";"}),R,N,Jv,Wv,(function(e){var t=e.fontSize;return t&&"\n font-size: "+t+";\n line-height: "+t+";\n "}),(function(e){var t=e.lineHeight;return t&&"\n line-height: "+t+";\n "})),Fm=function(e,t){var n,r,o=t.fontSize,a=t.lineHeight,l=t.strong;return(0,i.default)(e).withConfig({displayName:"typography",componentId:"sc-1lwqv72-0"})(['font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Ubuntu,"Helvetica Neue",sans-serif;font-style:normal;'," "," "," ",""],(r=o,function(){return"font-size: "+r+";"}),function(e){return function(){return"line-height: "+e+";"}}(a),(n=l,function(e){var t=e.strong;return"font-weight: "+((void 0===t?n:t)?"bold":"normal")+";"}),Dm)},Im=function(e){return Fm(e,{fontSize:"26px",lineHeight:"32px",strong:!0})},zm=function(e){return Fm(e,{fontSize:"24px",lineHeight:"28px",strong:!0})},Tm=function(e){return Fm(e,{fontSize:"22px",lineHeight:"24px",strong:!0})},Rm=function(e){return Fm(e,{fontSize:"20px",lineHeight:"24px",strong:!0})},Nm=function(e){return Fm(e,{fontSize:"16px",lineHeight:"21px",strong:!0})},Gm=function(e){return Fm(e,{fontSize:"14px",lineHeight:"18px",strong:!0})},Um=function(e){return Fm(e,{fontSize:"12px",lineHeight:"14px",strong:!0})},Wm=function(e){return Fm(e,{fontSize:"7px",lineHeight:"8px"})},qm=function(e){return Fm(e,{fontSize:"8px",lineHeight:"10px"})},Qm=function(e){return Fm(e,{fontSize:"10px",lineHeight:"13px"})},Ym=function(e){return Fm(e,{fontSize:"11px",lineHeight:"14px"})},Km=function(e){return Fm(e,{fontSize:"12px",lineHeight:"16px"})},Xm=function(e){return Fm(e,{fontSize:"14px",lineHeight:"20px"})},$m=function(e){return 
Fm(e,{fontSize:"16px",lineHeight:"18px"})},Jm=function(e){return Fm(e,{fontSize:"24px",lineHeight:"32px"})},eg=n(37947),tg=function(e){return(0,eg.default)(e.sx)(e)},ng=function(e){return(0,i.default)(e).withConfig({displayName:"flex",componentId:"sc-1gtk8kg-0"})(["display:flex;"," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," ",""],am,mm,im,cm,um,hm,$,ne,R,N,vm,nm,tm,Am,om,Jv,ym,K,rm,em,Wv,xm,Em,re.position,tg)},rg=ng("div"),og=["label","icon","flavour","isLoading","loadingLabel","onClick","textTransform","iconColor","iconSize","iconWidth","iconHeight","children"];function ag(){return ag=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,og);return a.createElement(ye,ag({flavour:o,textTransform:u,hasIcon:!!r||i,onClick:i?void 0:c,ref:t,iconColor:d},p),i&&a.createElement(Ze,{className:"button-icon"}),r&&!i&&a.createElement(rg,{justifyContent:"center",alignItems:"center",width:"auto",height:"100%"},a.createElement(Xv,{size:h,className:"button-icon",title:r,name:r,width:v,height:m})),!!f&&a.createElement("span",null,i&&l||f))}));ig.defaultProps={onClick:function(){},icon:null};var lg=function(e){return(0,eg.default)(e.sx)(e)},cg=function(e){return(0,i.default)(e).withConfig({displayName:"box",componentId:"sc-12jmtj1-0"})(["box-sizing:border-box;"," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," ",""],um,$,ne,R,N,vm,nm,tm,om,Jv,ym,K,rm,em,Wv,xm,Em,re.position,lg)},sg=cg("div"),ug=n(73935),dg=function(){var e=(0,a.useMemo)((function(){var e=document.createElement("div");return document.body.append(e),e}),[]);return(0,a.useLayoutEffect)((function(){return function(){return document.body.removeChild(e)}}),[]),e},hg=function(e){return(0,a.useEffect)((function(){if(e){var t=function(t){27===t.keyCode&&e(t)};return document.addEventListener("keydown",t),function(){return document.removeEventListener("keydown",t)}}}),[e])},vg=function(e){var t=[];for(e=e.parentNode;e;)t.push(e),e=e.parentNode;return t},mg=function(e,t){return vg(t).some((function(t){return t===e}))},gg=function(e,t,n,r){(0,a.useEffect)((function(){if(t&&!r){var o=function(r){r.target===e.current||mg(e.current,r.target)||mg(n,r.target)||t(r)};return document.addEventListener("mousedown",o),function(){return document.removeEventListener("mousedown",o)}}}),[t])},fg=function(e,t){"function"===typeof e?e(t):e&&(e.current=t)},pg=function(e){var t=(0,a.useRef)(),n=(0,a.useCallback)((function(n){t.current=n,fg(e,n)}),[]);return[t,n]},wg=function e(t,n,r,o){void 0===o&&(o=!0);var a=function(e,t,n){return"left"===e.left?t.left:"right"===e.left?t.right:"right"===e.right?t.right-n.width:"left"===e.right?t.left-n.width:t.left+t.width/2-n.width/2}(t,n,r),i=Math.max(0,a);return a=Math.min(window.innerWidth-r.width,i),o||i===a?a:e(function(e){return"left"===e.left?{right:"right"}:"right"===e.left?{right:"left"}:"right"===e.right?{left:"left"}:"left"===e.right?{left:"right"}:void 0}(t),n,r)},bg=function e(t,n,r,o){void 0===o&&(o=!0);var a=function(e,t,n){if("top"===e.top)return t.top;if("bottom"===e.top)return t.bottom;if("bottom"===e.bottom)return t.bottom-n.height;if("top"===e.bottom){var r=t.top-n.height;return r<0&&t.bottom+n.height=0||(o[n]=e[n]);return o}(e,Zg))})).withConfig({displayName:"container__Container",componentId:"sc-l6u9ms-0"})(["left:-99999px;"," "," "," backface-visibility:hidden;perspective:1000;transform:translate3d(0,0,0);will-change:left,top,transform;"],(function(e){return 
e.animation&&Cg}),(function(e){return!e.hideShadow&&"box-shadow: 0 2px 6px rgba(0, 0, 0, 0.15);"}),(function(e){return!!e.noEvents&&"pointer-events: none;"})),kg=Mg,Hg=function(e){var t=e.backdropBlur;return t?"boolean"===typeof t?"backdrop-filter: blur(10px);":"number"===typeof t?"backdrop-filter: blur("+t+"px);":"backdrop-filter: blur("+t+");":""},jg=["backdrop","target","align","stretch","onClickOutside","onEsc","children","canHideTarget","keepHorizontal"];function Eg(){return Eg=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,jg),p=pg(t),w=p[0],b=p[1],y=xg(o,w,l,s,m,g);(0,a.useLayoutEffect)((function(){y()}),[y]),function(e,t){(0,a.useEffect)((function(){var n,r=function(){var n=vg(e).filter((function(e){return e.scrollHeight>e.clientHeight}));return n.forEach((function(e){return e.addEventListener("scroll",t,{capture:!1,passive:!0})})),function(){return n.forEach((function(e){return e.removeEventListener("scroll",t)}))}};n=r();var o=function(){n(),n=r(),t()};return window.addEventListener("resize",o),function(){n(),window.removeEventListener("resize",o)}}),[e,t])}(o,y),gg(w,u,o),hg(d);var x=dg();return ug.createPortal(r?a.createElement(a.Fragment,null,a.createElement(kg,Eg({ref:b,width:{max:"100%"},column:!0,"data-testid":"drop"},f),h),a.createElement(Sg,null)):a.createElement(kg,Eg({ref:b,width:{max:"100%"},column:!0,"data-testid":"drop"},f),h),x)})),Lg=Pg,Ag=function(e,t){var n=void 0===t?{}:t,r=n.on,o=n.off,i=n.toggle,l=(0,a.useState)(!!e),c=l[0],s=l[1];return[c,(0,a.useCallback)((function(e){return s((function(t){var n="boolean"===typeof e?e:!t;return i&&i(n),r&&n&&r(),o&&!n&&o(),n}))}),[i,r,o]),(0,a.useCallback)((function(){s(!0),r&&r()}),[r]),(0,a.useCallback)((function(){s(!1),o&&o()}),[o])]};function Dg(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function Fg(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,uf);return a.createElement(rg,{ref:t,column:"top"===r,columnReverse:"bottom"===r,rowReverse:"right"===r,margin:i},a.createElement(rg,df({background:c,padding:[1,2],round:!0,column:!0},s),"string"===typeof n?a.createElement(af,{color:"bright"},n):n),r&&a.createElement(Xv,{name:"triangle",alignSelf:"center",color:c,rotate:hf[r],height:"8px",width:"8px","data-testid":"drop-arrow"}))})),mf=vf,gf=["plain","open","align","dropProps","content","animation","disabled","zIndex","children","allowHoverOnTooltip"];function ff(){return ff=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,gf),p=Ng(f["aria-describedby"]),w=Ag(!1),b=w[0],y=w[2],x=w[3],Z=pg(t),B=Z[0],O=Z[1],_=zg(m,O,wf(wf({onMouseEnter:y,onMouseLeave:g?function(){return setTimeout((function(){C.current||x()}),300)}:x,onFocus:y,onBlur:x},b&&{"aria-describedby":p}),f)),C=(0,a.useRef)(!1);return(0,a.useLayoutEffect)((function(){B.current&&o&&y()}),[]),s?a.createElement(a.Fragment,null,_,b&&B.current&&!d&&a.createElement(Lg,ff({noEvents:!g,align:(null==c?void 0:c.align)||Gg[l],hideShadow:!0,id:p,onClickOutside:x,onMouseEnter:function(){return C.current=!0},onMouseLeave:function(){C.current=!1,x()},target:B.current},c,{animation:u,onEsc:x,zIndex:v}),n?yf(s):a.createElement(mf,{align:l},yf(s)))):m})),Zf=xf,Bf=["width","height","tooltip"];function Of(){return Of=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,Bf);return 
a.createElement(Zf,{plain:!0,animation:!0,content:c&&a.createElement(_f,{content:c})},a.createElement(sg,Of({as:ig,iconWidth:r,iconHeight:i,ref:t,flavour:"borderless",neutral:!0},s)))})),Mf=Cf,kf=["items","checked","onChange","children"];function Hf(){return Hf=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,kf);return a.createElement(rg,Hf({alignItems:"center"},i),null!=t&&t.length?a.createElement(Sf,{items:t,checked:n,onChange:r}):a.createElement(Ef,null,o))},Pf=i.default.div.withConfig({displayName:"styled__ComponentBox",componentId:"sc-gh7act-0"})(["display:flex;flex-direction:",";top:0;bottom:0;height:100vh;width:100vw;"],(function(e){return e.isRight?"row-reverse":"row"})),Lf=i.default.aside.withConfig({displayName:"styled__SidebarBox",componentId:"sc-gh7act-1"})(["overflow:hidden;top:0;bottom:0;height:100%;width:50%;"]),Af=i.default.aside.withConfig({displayName:"styled__DisabledOverlay",componentId:"sc-gh7act-2"})(["position:fixed;top:0;bottom:0;left:0;height:100vh;width:100vw;min-width:100vw;max-width:100vw;background-color:black;opacity:0.3;z-index:15;"]),Df=i.default.aside.withConfig({displayName:"styled__PortalSidebox",componentId:"sc-gh7act-3"})(["position:fixed;overflow:hidden;top:0;",":0;bottom:0;height:100vh;width:50vw;box-shadow:0px ","px 68px rgba(0,0,0,0.288);"],V(["side"],"left"),V(["shadowSide"],!0)?2:-2),Ff=i.default.div.withConfig({displayName:"styled__InfoBox",componentId:"sc-gh7act-4"})(["display:flex;width:50%;background-color:",";box-shadow:inset 0px ","px 68px rgba(0,0,0,0.288);"],A("primary"),V(["shadowSide"],!0)?2:-2),If=function(e){var t=e.info,n=e.children,r=e.className,o=e.right,i=void 0!==o&&o;return a.createElement(Pf,{isRight:i},a.createElement(Lf,{className:r,shadowSide:i},n),a.createElement(Ff,null,t))},zf=n(26519),Tf=["children","className","closeOnEsc","closeOnOverlayClick","onClose","right","Wrapper"];function Rf(){return Rf=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,Tf);(0,a.useEffect)((function(){var e=function(e){27===e.keyCode&&o&&s()};return document.addEventListener("keydown",e),function(){document.removeEventListener("keydown",e)}}),[o,s]);return a.createElement(zf.Z,null,a.createElement(Af,{onClick:function(){l&&s()}}),a.createElement(v,null,a.createElement(Df,Rf({className:n,shadowSide:d,side:d?"right":"left"},m),t)))},Gf={mobileSmall:"320px",mobile:"425px",tablet:"768px",laptop:"1024px",laptopLarge:"1200px",desktop:"1440px",desktopLarge:"2560px"},Uf={mobileSmall:"(min-width: "+Gf.mobileSmall+")",mobile:"(min-width: "+Gf.mobile+")",tablet:"(min-width: "+Gf.tablet+")",laptop:"(min-width: "+Gf.laptop+")",laptopLarge:"(min-width: "+Gf.laptopLarge+")",desktop:"(min-width: "+Gf.desktop+")",desktopLarge:"(min-width: "+Gf.desktopLarge+")"};function Wf(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function qf(e,t,n){return t=function(e){var t=function(e,t){if("object"!==typeof e||null===e)return e;var n=e[Symbol.toPrimitive];if(void 0!==n){var r=n.call(e,t||"default");if("object"!==typeof r)return r;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"===typeof t?t:String(t)}(t),t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}var 
Qf=i.default.input.attrs({type:"checkbox"}).withConfig({displayName:"styled__HiddenCheckboxInput",componentId:"sc-1qof7za-0"})(["border:0;clip:rect(0 0 0 0);clip-path:inset(50%);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;white-space:nowrap;width:1px;"]),Yf=(0,i.default)(rg).attrs((function(e){return function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,Kf);return a.createElement(rg,Xf({as:"label",position:"relative",alignItems:"center",gap:1,cursor:r?"auto":"pointer",rowReverse:"right"===d,"data-testid":"checkbox",disabled:r},v,{onClick:function(e){e.preventDefault(),r||null==h||h(!n,e)}}),c&&a.createElement(af,Xf({as:l,opacity:r?.4:1},s),c),a.createElement(sg,{width:"16px",height:"16px"},a.createElement(Qf,Xf({"data-testid":"checkbox-input",ref:t,disabled:r},i&&{"data-indeterminate":!0},{"data-checked":n})),a.createElement(Yf,{"data-testid":"styled-checkbox",disabled:r},(!!n||!!i)&&a.createElement(Xv,Xf({disabled:r,name:i?"checkmark_partial_s":"checkmark_s",width:"16px",height:"16px",color:"accent",hoverColor:"primary"},o)))))}));$f.defaultProps={Label:af,labelPosition:"right"};var Jf=function(e,t){var n=e.every((function(e){return e})),r=!n&&e.includes(!1)&&e.includes(!0),o=(0,a.useCallback)((function(){n?t.forEach((function(e){return e(!1)})):t.forEach((function(e){return e(!0)}))}),[n,t]);return[n,r,o]};function ep(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function tp(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,cp),Z=!0===t?"invalid":t;return a.createElement(rg,sp({gap:.5,column:!0,className:s},b,{as:"label"}),"string"===typeof m?a.createElement(ap,{size:w},m):m,a.createElement(rg,sp({position:"relative"},y),r&&a.createElement(rg,{position:"absolute",left:1,top:0,bottom:0,alignItems:"center"},r),a.createElement(op,sp({disabled:n,placeholder:v,onBlur:c,onFocus:l,name:i,"aria-label":i,hasIconLeft:!!r,hasIconRight:!!o,hasIndicator:!!d,type:"text",value:g,size:w,ref:f,error:t,hasValue:!!g},x)),(!!o||!!d)&&a.createElement(rg,{position:"absolute",right:1,top:0,bottom:0,alignItems:"center",gap:1},!!d&&a.createElement(rf,{color:"textLite"},d),!!o&&o)),"string"===typeof u?a.createElement(rf,{color:"textLite"},u):!!u&&u,"string"===typeof Z?a.createElement(rf,{color:"errorText"},Z):!!Z&&Z)},dp=i.default.input.attrs({type:"range"}).withConfig({displayName:"styled__InputRange",componentId:"sc-17kts71-0"})(["-webkit-appearance:none;height:2px;background-color:","40;background-image:linear-gradient(",",",");background-repeat:no-repeat;background-size:",";cursor:pointer;width:100%;&::-webkit-slider-thumb{-webkit-appearance:none;height:10px;width:10px;border-radius:50%;background:",";transition:all 0.3s ease-in-out;&:active{height:16px;width:16px;}}"],A("primary"),A("primary"),A("primary"),(function(e){var t=e.max;return 100*e.value/t+"% 100%"}),A("primary")),hp=["max","min","step","value"];function vp(){return vp=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,hp);return a.createElement(dp,vp({"data-testid":"rangeInput",max:r,min:i,step:c,type:"range",value:u,ref:t},d))})),gp=(0,i.default)(sg).withConfig({displayName:"styled__Slider",componentId:"sc-y2g216-0"})([""]),fp=(0,i.default)(sg).withConfig({displayName:"styled__SliderTrack",componentId:"sc-y2g216-1"})(["background-position:",";background-color:","40;background-image:linear-gradient( ",","," 
);background-repeat:no-repeat;background-size:",";height:2px;width:",";"],(function(e){var t=e.max,n=e.min,r=e.minValue;return e.width*((r-n)/(t-n))*100/100+"px 100%"}),(function(e){return e.theme.colors.primary}),(function(e){return e.theme.colors.primary}),(function(e){return e.theme.colors.primary}),(function(e){var t=e.max,n=e.maxValue,r=e.min;return 100*(n-e.minValue)/(t-r)+"% 100%"}),(function(e){return e.width+"px"||0})),pp=(0,i.default)(mp).withConfig({displayName:"styled__Range",componentId:"sc-y2g216-2"})(["pointer-events:none;position:absolute;height:0;outline:none;width:100%;&::-webkit-slider-thumb{pointer-events:all;}"]),wp=["initMax","initMin","max","min","onChange","onInput","step","TextComponent"];function bp(){return bp=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,wp),g=(0,a.useState)(t||o),f=g[0],p=g[1],w=(0,a.useState)(n||l),b=w[0],y=w[1],x=(0,a.useState)(0),Z=x[0],B=x[1],O=(0,a.useRef)(null),_=(0,a.useRef)(null);(0,a.useEffect)((function(){O.current&&B(O.current.getBoundingClientRect().width)}),[b]),(0,a.useEffect)((function(){p(t||o),y(n||l)}),[o,l]);return a.createElement(rg,{column:!0,gap:1,flex:!0},a.createElement(rg,{alignItems:"center","data-testid":"multiRangeInput",justifyContent:"center",position:"relative",width:"100%"},a.createElement(pp,bp({"data-testid":"minRangeInput",max:o,min:l,onChange:function(e){c&&c({max:f,min:e.target.value})},onInput:function(e){var t=Math.min(+e.target.value,f-d);y(t),e.target.value=t.toString(),s&&s({max:f,min:e.target.value})},position:"relative",ref:_,step:d,value:b,zIndex:3},m)),a.createElement(pp,bp({"data-testid":"maxRangeInput",max:o,min:l,onChange:function(e){c&&c({max:e.target.value,min:b})},onInput:function(e){var t=Math.max(+e.target.value,b+d);p(t),e.target.value=t.toString(),s&&s({max:e.target.value,min:b})},ref:O,step:d,value:f,zIndex:5},m)),a.createElement(gp,{"data-testid":"multiRange-slider"},a.createElement(fp,{"data-testid":"multiRange-sliderTrack",max:o,maxValue:f,min:l,minValue:b,width:Z}))),a.createElement(rg,{"data-testid":"multiRange-values",justifyContent:"between"},a.createElement(v,{"data-testid":"multiRange-minValue"},b),a.createElement(v,{"data-testid":"multiRange-maxValue"},f)))},xp=function(e){var t=e.onBlur,n=e.defaultState,r=void 0!==n&&n,o=(0,a.useState)(r),i=o[0],l=o[1],c=(0,a.useCallback)((function(e){i||l(!0),t&&t(e)}),[t,i]);return[i,c,l]},Zp=function(e){var t=e.defaultState,n=void 0!==t&&t,r=e.onBlur,o=e.onFocus,i=(0,a.useState)(n),l=i[0],c=i[1],s=(0,a.useCallback)((function(e){l||c(!0),o&&o(e)}),[o,l]),u=(0,a.useCallback)((function(e){c(!1),r&&r(e)}),[r]);return[l,s,u]},Bp=function(e){var t=e.value,n=void 0===t?"":t,r=e.onChange,o=e.maxChars,i=(0,a.useState)(n),l=i[0],c=i[1],s=(0,a.useState)(!1),u=s[0],d=s[1],h=(0,a.useCallback)((function(e){var t=e.target.value;if(o&&t.length>o)return e.preventDefault(),void e.stopPropagation();c(t),u||d(!0),r&&r(e)}),[u,o,r]),v=o?l.length+"/"+o:"",m=(0,a.useCallback)((function(e){void 0===e&&(e=""),c(e),d(!1)}),[]);return[l,h,v,u,{setIsDirty:d,setValue:c,resetValue:m}]},Op=(0,i.css)(["border-color:",";box-shadow:0 0 0 1px ",";"],I("controlFocused"),I("controlFocused")),_p="\n font-family: inherit;\n border: none;\n outline: none;\n padding: 0;\n margin: 
0;\n",Cp=(0,i.css)(["&::-webkit-scrollbar{width:",";}&::-webkit-scrollbar-track{border-radius:",";background-color:",";}&::-webkit-scrollbar-thumb{border-radius:",";background-color:",";}&::-webkit-scrollbar-thumb:hover{background-color:",";}&::-webkit-scrollbar-track-piece{background-color:",";}&::-webkit-scrollbar-corner{background-color:",";}"],F(1),F(.5),D("border",.1),F(1),D("border",.3),D("border",.5),D("border",.3),D("border",.3)),Mp=["right"];var kp=i.default.div.withConfig({displayName:"styled__ToggleContainer",componentId:"sc-dtjehx-0"})(["display:block;box-sizing:border-box;width:40px;height:20px;"]),Hp=i.default.input.attrs({type:"checkbox"}).withConfig({displayName:"styled__HiddenToggleInput",componentId:"sc-dtjehx-1"})(["display:none;"]),jp=i.default.div.withConfig({displayName:"styled__StyledToggle",componentId:"sc-dtjehx-2"})(["box-sizing:border-box;width:40px;height:20px;background:",";border:1px solid ",";border-radius:100px;transition:",";display:block;position:relative;-webkit-tap-highlight-color:transparent;flex-shrink:0;align-self:flex-start;cursor:pointer;pointer-events:",';&:after{display:block;position:absolute;content:"";width:16px;height:16px;border-radius:50%;left:5%;top:50%;transform:translateY(-50%);transition:',";opacity:",";background-color:",";}"," ",":focus + &{","}"],(function(e){return e.disabled?A("mainBackgroundDisabled"):A("mainBackground")}),A("border"),(function(e){return e.withTransition?"all 150ms":"unset"}),(function(e){return e.disabled?"none":"auto"}),(function(e){return e.withTransition?"left 0.2s ease":"unset"}),(function(e){return e.disabled?"0.4":"1"}),(function(e){var t=e.colored,n=e.checked;return A(t?n?"primary":"error":"controlFocused")}),(function(e){return e.checked&&"\n &:after {\n left: 55%;\n }\n "}),Hp,Op),Ep=i.default.label.withConfig({displayName:"styled__StyledLabel",componentId:"sc-dtjehx-3"})([""," "," position:relative;cursor:pointer;display:flex;flex-flow:row nowrap;align-items:center;"],R,$),Sp=i.default.span.withConfig({displayName:"styled__LabelText",componentId:"sc-dtjehx-4"})(["",""],(function(e){var t=e.right,n=function(e,t){if(null==e)return{};var n,r,o={},a=Object.keys(e);for(r=0;r=0||(o[n]=e[n]);return o}(e,Mp);return t?"margin-left: "+L(n)+"px;":"margin-right: "+L(n)+"px;"})),Vp=["checked","disabled","className","labelLeft","labelRight","Label","colored","margin","alignSelf","toggleProps"];function Pp(){return Pp=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,Vp);return a.createElement(Ep,{className:r,margin:s,alignSelf:u},o&&a.createElement(Sp,{as:l,left:!0},o),a.createElement(kp,null,a.createElement(Hp,Pp({disabled:n,checked:t},v)),a.createElement(jp,Pp({checked:t,disabled:n,colored:c,role:"switch"},h))),i&&a.createElement(Sp,{as:l,right:!0},i))};Lp.defaultProps={colored:!1,Label:af};var Ap=i.default.input.withConfig({displayName:"styled__Input",componentId:"sc-11vyiac-0"})(["&&{height:0;width:0;opacity:0;-moz-appearance:none;margin:0;border:none;}"]),Dp=i.default.label.withConfig({displayName:"styled__Container",componentId:"sc-11vyiac-1"})(["display:flex;flex-direction:row;align-items:center;"," "," "," ",""],cm,$,R,(function(e){var t=e.disabled;return"\n pointer-events: "+(t?"none":"auto")+";\n cursor: "+(t?"default":"pointer")+";\n"})),Fp=i.default.div.withConfig({displayName:"styled__IconContainer",componentId:"sc-11vyiac-2"})(["display:flex;align-items:center;justify-content:center;overflow:hidden;flex:0 0 auto;height:16px;width:16px;box-sizing:border-box;border:1px 
solid ",";border-radius:100%;background:",";",""],(function(e){return A(e.borderColor)(e)}),(function(e){return A(e.background)(e)}),R),Ip=(0,i.default)(Xv).withConfig({displayName:"styled__StyledIcon",componentId:"sc-11vyiac-3"})(["fill:",";height:8px;width:8px;"],(function(e){return A(e.color)(e)})),zp=["alignItems","alignSelf","checked","children","disabled","iconProps","label","margin"];function Tp(){return Tp=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,zp),h=l&&!o?"disabled":o&&l?"checkedDisabled":o?"checked":"default",v=a.useMemo((function(){return Rp[h]}),[h]),m="checkedDisabled"===h||"checked"===h;return a.createElement(Dp,{alignItems:n,alignSelf:r,disabled:l,margin:u},a.createElement(Ap,Tp({type:"radio",disabled:l,checked:o},d)),a.createElement(Fp,{background:v.containerColor,borderColor:v.borderColor,margin:i||s?[.5,2,0,0]:null},m&&a.createElement(Ip,Tp({name:"dot",color:v.dotColor},c))),i,s&&!i&&a.createElement(af,null,s))};function Gp(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function Up(e){for(var t=1;t span{font-weight:",";}"],(function(e){return e.small?"2px":"4px"}),(function(e){var t=e.active;return A(t?"accent":["transparent","full"])}),(function(e){var t=e.minWidth;return null!=t?t:F(10)}),(function(e){var t=e.maxWidth;return null!=t?t:F(26)}),(function(e){var t=e.small;return F(t?4:6)}),A("text"),(function(e){return e.active?"bold":"normal"}),(function(e){return e.disabled?.4:1}),(function(e){return e.disabled?"none":"auto"}),(function(e){return e.small?"2px":"4px"}),A("primary"),(function(e){return e.active?"bold":"normal"})),Kp=(0,i.default)(rg).withConfig({displayName:"styled__StyledTabMenu",componentId:"sc-1n790ui-3"})(["white-space:nowrap;color:",";padding:4px 8px;background:",";width:100%;border-radius:4px;cursor:",";justify-content:flex-start;&:hover{background:",";}"],A("text"),(function(e){var t=e.active;return A(t?"menuItemSelected":["transparent","full"])}),(function(e){return e.active?"default":"pointer"}),(function(e){var t=e.active;return A(t?"menuItemSelected":"menuItemHover")})),Xp=["index","isMenuItem","onChange"];function $p(){return $p=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,Xp),i=(0,a.useCallback)((function(){return r&&r(t||0)}),[t,r]),l=n?Kp:Yp;return a.createElement(l,$p({justifyContent:"center",alignItems:"center",onClick:o.disabled?void 0:i},o),o.label)};function ew(){return ew=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,tw),g=function(e,t){void 0===e&&(e=0);var n=(0,a.useState)(t?e:0),r=n[0],o=n[1],i=(0,a.useCallback)((function(e){t?t(e):o(e)}),[t]);return(0,a.useEffect)((function(){o(e||0)}),[e]),[r,i]}(o,r),f=g[0],p=g[1],w=function(e,t,n){return(0,a.useMemo)((function(){var r=[],o=[],i=null,l=-1,c=!1;return a.Children.forEach(e,(function(e,s){var u=(null==e?void 0:e.props)||{};l<0&&!u.disabled&&(l=s);var 
d=t===r.length,h=s+"-"+u.label;e&&o.push(a.createElement(Jp,ew({key:h},u,{onChange:n,index:s,active:d}))),d&&(c=!!u.disabled,i=u.children),r.push(s)})),[o,i,l,c]}),[e,t,n])}(i,f,p),b=w[0],y=w[1],x=w[2],Z=w[3];return(0,a.useEffect)((function(){Z&&f!==x&&p(x)}),[f,x,Z,p]),a.createElement(qp,nw({className:n},m,{ref:t}),a.createElement(c,null,a.createElement(Qp,nw({className:"tabs",noDefaultBorder:d},h),b)),a.createElement(u,v,y))})),ow=["plain","open","align","dropProps","content","animation","children","zIndex"];function aw(){return aw=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,ow),g=Ng(m["aria-describedby"]),f=Ag(o),p=f[0],w=f[2],b=f[3],y=(0,a.useRef)(!1),x=(0,a.useRef)(!1),Z=(0,a.useCallback)((function(){return requestAnimationFrame((function(){return!y.current&&!x.current&&b()}))}),[]),B=pg(t),O=B[0],_=B[1],C=zg(d,_,lw(lw({isOpen:p,onMouseOver:w,onMouseLeave:Z,onFocus:w,onBlur:Z},p&&{"aria-describedby":g}),m)),M=(0,a.useCallback)((function(){y.current=!0}),[]),k=(0,a.useCallback)((function(){y.current=!1,Z()}),[]);return a.createElement(a.Fragment,null,C,p&&O.current&&a.createElement(Lg,aw({id:g,hideShadow:!0},c,{align:(null==c?void 0:c.align)||Gg[l],animation:u,onEsc:b,onMouseEnter:M,onMouseLeave:k,target:O.current,zIndex:v}),n?sw(s):a.createElement(mf,{align:l,background:"tooltip",backgroundOpacity:.9,padding:[2,4]},sw(s))))})),dw=uw,hw=["open","icon","label","caret"];function vw(){return vw=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,hw);return a.createElement(mw,vw({gap:2,padding:[2,4],justifyContent:"between",alignItems:"center",role:"button",tabindex:"0","aria-haspopup":"listbox","aria-expanded":n,ref:t},l),a.createElement(rg,{alignItems:"center",gap:2},r,"string"===typeof o?a.createElement(af,null,o):o),!0===i?a.createElement(Xv,{name:"chevron_down",color:"text",width:"12px",height:"12px",rotate:n?2:null}):i)})),fw=n(6811);function pw(e){return function(e){if(Array.isArray(e))return ww(e)}(e)||function(e){if("undefined"!==typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(!e)return;if("string"===typeof e)return ww(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);"Object"===n&&e.constructor&&(n=e.constructor.name);if("Map"===n||"Set"===n)return Array.from(e);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return ww(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function ww(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}(e,yw),s=(0,a.useState)(r),u=s[0],d=s[1];return bw((function(){return o(u)}),300,[u]),a.createElement(up,xw({iconLeft:a.createElement(Xv,{name:"search",color:u?"textFocus":"textLite",width:"14px",height:"14px"}),iconRight:(!!u||!!r)&&a.createElement(Mf,{icon:"x",iconColor:u?"textFocus":"textLite",width:"14px",height:"14px",onClick:function(){return i?i():d("")},padding:[0],neutral:!0}),inputRef:t,value:u,onChange:function(e){return d(e.target.value)},placeholder:l,size:"small"},c))})),Bw=["hideShadow","itemProps","items","onItemClick","dropTitle","dropTitlePadding","Item","Footer","value","hasSearch","searchMargin","gap","estimateSize","close"];function Ow(){return Ow=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return 
o}(e,Bw),x=(0,a.useState)(""),Z=x[0],B=x[1],O=(0,a.useMemo)((function(){if(!Z)return r;var e=Z.toLowerCase();return r.filter((function(t){var n=t.label,r=t.value;return!("string"!==typeof n||!n.toLowerCase().includes(e))||!("string"!==typeof r||!r.toLowerCase().includes(e))}))}),[r,Z]),_=(0,a.useRef)(),C=(0,fw.MG)({count:O.length,getScrollElement:function(){return _.current},scrollOffsetFn:function(e){return e?e.target.scrollTop-_.current.offsetTop:0},overscan:3,enableSmoothScroll:!1,estimateSize:w});return a.createElement(_w,Ow({as:"ul",role:"listbox",background:"dropdown",hideShadow:t,padding:[0],margin:[1,0],column:!0,tabindex:"-1",width:"auto"},y),i&&a.createElement(rg,{padding:c},i),h&&a.createElement(sg,{margin:m},a.createElement(Zw,{"data-testid":"dropdown-search",placeholder:"Search",onChange:B})),a.createElement("div",{ref:_,style:{height:"100%",overflow:"auto"}},a.createElement("div",{style:{minHeight:C.getTotalSize()+"px",width:"100%",position:"relative"}},C.getVirtualItems().map((function(e){return a.createElement("div",{key:e.key,style:{position:"absolute",top:0,left:0,width:"100%",transform:"translateY("+e.start+"px)",padding:2*f,overflow:"hidden"},"data-index":e.index,ref:C.measureElement},a.createElement(s,{item:O[e.index],index:e.index,itemProps:n,value:d,onItemClick:o,close:b}))})))),u&&a.createElement(u,{close:b}))},kw=["value","label","icon","reverse","disabled","onClick"],Hw=["item","value","onItemClick","index","style"];function jw(){return jw=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}var Sw=(0,i.default)(rg).attrs({as:"li",role:"option",padding:[1,4]}).withConfig({displayName:"dropdownItem__ItemContainer",componentId:"sc-vjov6x-0"})(["cursor:",";opacity:",";alignitems:",";pointer-events:",";&:hover{background-color:",";}"],(function(e){var t=e.cursor;return null!=t?t:"pointer"}),(function(e){return e.disabled?.4:1}),(function(e){var t=e.alignItems;return null!=t?t:"center"}),(function(e){return e.disabled?"none":"auto"}),(function(e){return A("borderSecondary")(e)})),Vw=function(e){var t=e.item,n=t.value,r=t.label,o=t.icon,i=t.reverse,l=t.disabled,c=t.onClick,s=Ew(t,kw),u=e.value,d=e.onItemClick,h=e.index,v=e.style,m=Ew(e,Hw),g=u===n;return a.createElement(Sw,jw({"data-index":h,"aria-selected":g,disabled:l||g,onClick:function(e){c&&c(e),d(n)}},s,m,{style:v}),i&&a.createElement(of,null,r),o,!i&&a.createElement(of,null,r))},Pw=["value","onChange","onOpen","onClose","closeOnClick","open","icon","label","caret","children","dropProps","dropdownProps","itemProps","items","Item","Footer","Dropdown","animation","dropTitle","dropTitlePadding","hasSearch"];function Lw(){return Lw=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,Pw),S=Ag(u,{on:o,off:i}),V=S[0],P=S[1],L=S[3],A=pg(t),D=A[0],F=A[1],I=(0,a.useCallback)((function(e){r&&r(e),c&&L()}),[r]),z=(0,a.useCallback)((function(e){D.current===e.target||vg(e.target).some((function(e){return e===D.current}))||L()}),[L]),T=zg(f,F,function(e){for(var t=1;t4)return zw;var n=t.map((function(t){return z(e,t)}));return 1===n.length?{top:n[0],right:n[0],bottom:n[0],left:n[0]}:2===n.length?{top:n[0],right:n[1],bottom:n[0],left:n[1]}:3===n.length?{top:n[0],right:n[1],bottom:n[2],left:n[1]}:{top:n[0],right:n[1],bottom:n[2],left:n[3]}},Rw=function(e,t){return"0"!==e&&"0"!==t?"calc((100% - "+e+") - "+t+")":"0"===e&&"0"===t?"100%":"calc(100% - "+("0"===e?t:e)+")"},Nw=new Set(["top","center","bottom"]),Gw=new Set(["bottom-left","left","top-left"]),Uw=new 
Set(["right","center","left"]),Ww=new Set(["top-left","top","top-right"]),qw=new Set(["top-right","right","bottom-right"]),Qw=new Set(["bottom-right","bottom","bottom-left"]),Yw=i.default.div.attrs((function(e){var t=e.theme,n=e.margin;return{marginDimensions:Tw(t,n)}})).withConfig({displayName:"container__Container",componentId:"sc-k2hlzf-0"})(["position:",";display:flex;outline:none;pointer-events:all;"," "," "," "," "," "," "," "," ",""],(function(e){return e.isAbsolute?"absolute":"fixed"}),(function(e){var t=e.marginDimensions,n=t.top,r=t.bottom;return"max-height: "+Rw(n,r)+";"}),(function(e){var t=e.marginDimensions,n=t.right,r=t.left;return"max-width: "+Rw(r,n)+";"}),(function(e){var t=e.position,n=e.full,r=e.marginDimensions;return"vertical"===n||!0===n||Ww.has(t)?"top: "+r.top+";":Uw.has(t)?"top: 50%;":""}),(function(e){var t=e.position,n=e.full,r=e.marginDimensions;return"horizontal"===n||!0===n||qw.has(t)?"right: "+r.right+";":""}),(function(e){var t=e.position,n=e.full,r=e.marginDimensions;return"vertical"===n||!0===n||Qw.has(t)?"bottom: "+r.bottom+";":""}),(function(e){var t=e.position,n=e.full,r=e.marginDimensions;return"horizontal"===n||!0===n||Gw.has(t)?"left: "+r.left+";":Nw.has(t)?"left: 50%;":""}),(function(e){var t=e.full,n=e.position,r=function(){var e=!0!==t&&"horizontal"!==t&&Nw.has(n),r=!0!==t&&"vertical"!==t&&Uw.has(n);return e||r?e&&!r?"translateX(-50%)":!e&&r?"translateY(-50%)":"translate(-50%, -50%)":""}();return r&&"transform: "+r+";"}),(function(e){return e.borderShadow&&"box-shadow: 0px 2px 68px rgba(0, 0, 0, 0.288);"}),(function(e){var t=e.zIndex,n=void 0===t?35:t;return"z-index: "+n+";"})),Kw=Yw;function Xw(){return Xw=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,tb),f=(0,a.useRef)();gg(f,u,null,l),hg(d);var p=dg(),w=a.createElement(Kw,nb({isAbsolute:l,ref:f,full:o,position:n,margin:s,borderShadow:h,"data-testid":"layer-container"},l?{}:function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,cb),m=(0,a.useMemo)((function(){return function(e){var t=e.toggle,n=e.timing,r=void 0===n?"":n,o=e.speed,a=void 0===o?200:o,l=e.transformOrigin,c=function(e){return(0,i.css)([""," animation:"," ","ms ",";"],l&&"transform-origin: "+l+";",e,a,r)};return{entering:c((0,i.keyframes)(["from{","}"],t)),exiting:c((0,i.keyframes)(["to{","}"],t))}}({toggle:r,timing:s,speed:l,transformOrigin:u})}),[]),g=(0,a.useMemo)((function(){return n&&(0,i.default)(n).withConfig({displayName:"animation",componentId:"sc-6srjcx-0"})(["",""],(function(e){return e.transitionStyling}))}),[]);return a.createElement(lb.ZP,{in:t,timeout:l,mountOnEnter:!d,unmountOnExit:!d},(function(e){var t=m[e];return g?a.createElement(g,sb({transitionStyling:t},v),h({transition:e,transitionStyling:t})):h({transition:e,transitionStyling:t})}))},db=function(e,t){var n=function(){var e=(0,a.useRef)(!1);return(0,a.useEffect)((function(){return e.current=!0,function(){return e.current=!1}}),[]),e}();(0,a.useEffect)((function(){if(n.current)return e()}),t)},hb=["open","duration","children","direction","persist","closedValue","overflow"];function vb(){return vb=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,hb),f=fb[c]||fb.vertical,p=(0,a.useState)(r?"initial":h+"px"),w=p[0],b=p[1],y=(0,a.useState)(r),x=y[0],Z=y[1],B=pg(t),O=B[0],_=B[1];db((function(){if(O.current){b(r?h+"px":O.current.scrollHeight+"px");var e=requestAnimationFrame((function(){O.current&&b(r?O.current.scrollHeight+"px":h+"px")}));r&&Z(r);var 
t=setTimeout((function(){cancelAnimationFrame(e),r?b("initial"):(b(h+"px"),Z(!1))}),i);return function(){cancelAnimationFrame(e),clearTimeout(t)}}}),[r]);var C=(0,a.useMemo)((function(){return(x||u)&&("function"===typeof l?l(x):l)}),[x,u,l]);return a.createElement(pb,vb({open:r,maxDimension:w,measurement:f,duration:i,ref:_,"data-testid":"collapsible",overflow:"initial"===w?m:"hidden"},g),C)})),bb=(0,a.memo)(wb);function yb(){return yb=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,_b);return a.createElement(rg,Cb({column:!0},o&&Mb,i),a.createElement(rg,{margin:[0,0,2],gap:2},a.createElement(Xv,{name:"insights",color:"text",width:"18px",height:"18px"}),a.createElement(af,{strong:!0},t)),"string"===typeof n?a.createElement(af,null,n):n)},jb=function(){return a.createElement(kb,null,a.createElement(Hb,{title:"Mouse Over / Hover",topBorder:!1},a.createElement(af,null,"Mouse over on a chart to show, at its legend, the values for the timestamp under the mouse (the chart will also highlight the point at the chart)."),a.createElement(af,null,"All the other visible charts will also show and highlight their values for the same timestamp.")),a.createElement(Hb,{title:"Drag Chart Contents"},a.createElement(af,null,"Drag the contents of a chart, by pressing the left mouse button and moving the mouse,"),a.createElement(af,null,"All the charts will follow soon after you let the chart alone (this little delay is by design: it speeds up your browser and lets you focus on what you are exploring)."),a.createElement(af,null,"Once a chart is panned, auto refreshing stops for all charts. To enable it again, double click a panned chart.")),a.createElement(Hb,{title:"Double Click"},"Double Click a chart to reset all the charts to their default auto-refreshing state."),a.createElement(Hb,{title:"SHIFT + Drag"},a.createElement(af,null,"While pressing the ",a.createElement("b",null,"SHIFT")," key, press the left mouse button on the contents of a chart and move the mouse to select an area, to zoom in. The other charts will follow too. Zooming is performed in two phases:"),a.createElement(af,{margin:[4,0,0]},"- The already loaded chart contents are zoomed (low resolution)"),a.createElement(af,null,"- New data are transferred from the netdata server, to refresh the chart with possibly more detail."),a.createElement(af,{margin:[4,0,0]},"Once a chart is zoomed, auto refreshing stops for all charts. To enable it again, double click a zoomed chart.")),a.createElement(Hb,{title:"Highlight Timeframe"},a.createElement(af,null,"While pressing the ",a.createElement("b",null,"ALT")," key, press the left mouse button on the contents of a chart and move the mouse to select an area. The selected are will be highlighted on all charts.")),a.createElement(Hb,{title:"SHIFT + Mouse Wheel"},a.createElement(af,null,"While pressing the ",a.createElement("b",null,"SHIFT")," key and the mouse pointer is over the contents of a chart, scroll the mouse wheel to zoom in or out. This kind of zooming is aligned to center below the mouse pointer. The other charts will follow too."),a.createElement(af,null,"Once a chart is zoomed, auto refreshing stops for all charts. 
To enable it again, double click a zoomed chart.")),a.createElement(Hb,{title:"Legend Operations"},a.createElement(af,null,"Click on the label or value of a dimension, will select / un-select this dimension."),a.createElement(af,null,"You can press any of the ",a.createElement("b",null,"SHIFT")," or ",a.createElement("b",null,"CONTROL")," keys and then click on legend labels or values, to select / un-select multiple dimensions.")))},Eb=function(){return a.createElement(kb,null,a.createElement(Hb,{title:"Single tap",topBorder:!1},a.createElement(af,null,"Single Tap on the contents of a chart to show, at its legend, the values for the timestamp tapped (the chart will also highlight the point at the chart)."),a.createElement(af,null,"All the other visible charts will also show and highlight their values for the same timestamp.")),a.createElement(Hb,{title:"Drag Chart Contents"},a.createElement(af,null,"Touch and Drag the contents of a chart to pan it horizontally."),a.createElement(af,null,"All the charts will follow soon after you let the chart alone (this little delay is by design: it speeds up your browser and lets you focus on what you are exploring)."),a.createElement(af,null,"Once a chart is panned, auto refreshing stops for all charts. To enable it again, double tap a panned chart.")),a.createElement(Hb,{title:a.createElement(a.Fragment,null,a.createElement(af,{strong:!0},"Zoom"),a.createElement(af,{margin:[0,0,0,1]},"(does not work on firefox and IE/Edge)"))},a.createElement(af,null,"With two fingers, zoom in or out."),a.createElement(af,null,"Once a chart is zoomed, auto refreshing stops for all charts. To enable it again, double click a zoomed chart.")),a.createElement(Hb,{title:"Double Tap"},"Tap on the label or value of a dimension, will select / un-select this dimension."))},Sb=(0,i.default)(rw).withConfig({displayName:"dashboard__StyledTabs",componentId:"sc-8t9551-0"})(["width:100%;.tabs > *{min-width:initial;max-width:initial;}"]),Vb=function(){return a.createElement(rg,{overflow:{vertical:"auto"},"data-testid":"dashboard"},a.createElement(Sb,null,a.createElement(Jp,{label:"Using a Mouse"},a.createElement(jb,null)),a.createElement(Jp,{label:"Using Touch"},a.createElement(Eb,null))))},Pb=n(64787),Lb=n(6890),Ab=n(50483),Db=n(52861);function Fb(){"use strict";Fb=function(){return t};var e,t={},n=Object.prototype,r=n.hasOwnProperty,o=Object.defineProperty||function(e,t,n){e[t]=n.value},a="function"==typeof Symbol?Symbol:{},i=a.iterator||"@@iterator",l=a.asyncIterator||"@@asyncIterator",c=a.toStringTag||"@@toStringTag";function s(e,t,n){return Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}),e[t]}try{s({},"")}catch(e){s=function(e,t,n){return e[t]=n}}function u(e,t,n,r){var a=t&&t.prototype instanceof p?t:p,i=Object.create(a.prototype),l=new j(r||[]);return o(i,"_invoke",{value:C(e,n,l)}),i}function d(e,t,n){try{return{type:"normal",arg:e.call(t,n)}}catch(e){return{type:"throw",arg:e}}}t.wrap=u;var h="suspendedStart",v="suspendedYield",m="executing",g="completed",f={};function p(){}function w(){}function b(){}var y={};s(y,i,(function(){return this}));var x=Object.getPrototypeOf,Z=x&&x(x(E([])));Z&&Z!==n&&r.call(Z,i)&&(y=Z);var B=b.prototype=p.prototype=Object.create(y);function O(e){["next","throw","return"].forEach((function(t){s(e,t,(function(e){return this._invoke(t,e)}))}))}function _(e,t){function n(o,a,i,l){var c=d(e[o],e,a);if("throw"!==c.type){var s=c.arg,u=s.value;return u&&"object"==typeof 
u&&r.call(u,"__await")?t.resolve(u.__await).then((function(e){n("next",e,i,l)}),(function(e){n("throw",e,i,l)})):t.resolve(u).then((function(e){s.value=e,i(s)}),(function(e){return n("throw",e,i,l)}))}l(c.arg)}var a;o(this,"_invoke",{value:function(e,r){function o(){return new t((function(t,o){n(e,r,t,o)}))}return a=a?a.then(o,o):o()}})}function C(t,n,r){var o=h;return function(a,i){if(o===m)throw new Error("Generator is already running");if(o===g){if("throw"===a)throw i;return{value:e,done:!0}}for(r.method=a,r.arg=i;;){var l=r.delegate;if(l){var c=M(l,r);if(c){if(c===f)continue;return c}}if("next"===r.method)r.sent=r._sent=r.arg;else if("throw"===r.method){if(o===h)throw o=g,r.arg;r.dispatchException(r.arg)}else"return"===r.method&&r.abrupt("return",r.arg);o=m;var s=d(t,n,r);if("normal"===s.type){if(o=r.done?g:v,s.arg===f)continue;return{value:s.arg,done:r.done}}"throw"===s.type&&(o=g,r.method="throw",r.arg=s.arg)}}}function M(t,n){var r=n.method,o=t.iterator[r];if(o===e)return n.delegate=null,"throw"===r&&t.iterator.return&&(n.method="return",n.arg=e,M(t,n),"throw"===n.method)||"return"!==r&&(n.method="throw",n.arg=new TypeError("The iterator does not provide a '"+r+"' method")),f;var a=d(o,t.iterator,n.arg);if("throw"===a.type)return n.method="throw",n.arg=a.arg,n.delegate=null,f;var i=a.arg;return i?i.done?(n[t.resultName]=i.value,n.next=t.nextLoc,"return"!==n.method&&(n.method="next",n.arg=e),n.delegate=null,f):i:(n.method="throw",n.arg=new TypeError("iterator result is not an object"),n.delegate=null,f)}function k(e){var t={tryLoc:e[0]};1 in e&&(t.catchLoc=e[1]),2 in e&&(t.finallyLoc=e[2],t.afterLoc=e[3]),this.tryEntries.push(t)}function H(e){var t=e.completion||{};t.type="normal",delete t.arg,e.completion=t}function j(e){this.tryEntries=[{tryLoc:"root"}],e.forEach(k,this),this.reset(!0)}function E(t){if(t||""===t){var n=t[i];if(n)return n.call(t);if("function"==typeof t.next)return t;if(!isNaN(t.length)){var o=-1,a=function n(){for(;++o=0;--a){var i=this.tryEntries[a],l=i.completion;if("root"===i.tryLoc)return o("end");if(i.tryLoc<=this.prev){var c=r.call(i,"catchLoc"),s=r.call(i,"finallyLoc");if(c&&s){if(this.prev=0;--n){var o=this.tryEntries[n];if(o.tryLoc<=this.prev&&r.call(o,"finallyLoc")&&this.prev=0;--t){var n=this.tryEntries[t];if(n.finallyLoc===e)return this.complete(n.completion,n.afterLoc),H(n),f}},catch:function(e){for(var t=this.tryEntries.length-1;t>=0;--t){var n=this.tryEntries[t];if(n.tryLoc===e){var r=n.completion;if("throw"===r.type){var o=r.arg;H(n)}return o}}throw new Error("illegal catch attempt")},delegateYield:function(t,n,r){return this.delegate={iterator:E(t),resultName:n,nextLoc:r},"next"===this.method&&(this.arg=e),f}},t}function Ib(e,t,n,r,o,a,i){try{var l=e[a](i),c=l.value}catch(qr){return void n(qr)}l.done?t(c):Promise.resolve(c).then(r,o)}var zb={headers:{"Content-Type":"application/json"},transformResponse:function(e){try{var t=JSON.parse(e).topics;return(void 0===t?[]:t).map((function(e){return{id:{raw:e.id},title:{raw:e.title},description:{raw:e.fancy_title},url:{raw:"https://community.netdata.cloud/t/"+e.slug}}}))}catch(qr){return qr}}},Tb=function(){var e,t=(e=Fb().mark((function e(t,n){var r;return Fb().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.prev=0,e.next=3,(0,Db.Z)("https://community.netdata.cloud/search.json?q="+t,zb);case 3:if((r=e.sent).data){e.next=6;break}return e.abrupt("return");case 6:n&&n(r),e.next=12;break;case 9:return e.prev=9,e.t0=e.catch(0),e.abrupt("return");case 12:case"end":return 
e.stop()}}),e,null,[[0,9]])})),function(){var t=this,n=arguments;return new Promise((function(r,o){var a=e.apply(t,n);function i(e){Ib(a,r,o,i,l,"next",e)}function l(e){Ib(a,r,o,i,l,"throw",e)}i(void 0)}))});return function(e,n){return t.apply(this,arguments)}}(),Rb=Tb,Nb=["children"];function Gb(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function Ub(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,Nb),r=(0,a.useState)([]),o=r[0],i=r[1],l=n.searchTerm,c=n.results;return(0,a.useEffect)((function(){if(l){var e=!0;return Rb(l,(function(t){var n=t.data;return e&&i(n)})),function(){return e=!1}}}),[l]),t(Ub(Ub({},n),{},{results:Ub(Ub({},c),{},{discourse:o})}))},Qb=/^https:\/\/((learn.netdata).cloud|www.(netdata.cloud)|github.com\/netdata\/(netdata-cloud)|github.com\/netdata\/(netdata))/,Yb=function(e){return e.reduce((function(e,t){var n=t.url.raw.match(Qb).find((function(e,t){return t>1&&e}));return e[n]=e[n]||[],e[n].push(t),e}),{})};function Kb(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function Xb(e){for(var t=1;t *{min-width:160px;max-width:100%;}"]),cy=["learn","community"],sy={learn:"learn.netdata",community:"discourse","github-cloud":"netdata-cloud","github-agent":"netdata"},uy={learn:"Documentation",community:"Community","github-cloud":"Github / Cloud","github-agent":"Github / Agent"},dy=function(e){var t=e.results;return a.createElement(rg,{overflow:{vertical:"auto"},"data-testid":"searchResults",flex:!0,width:"1000px",height:"60vh"},a.createElement(ly,null,cy.map((function(e){var n=t[sy[e]],r=null==n?void 0:n.length;return a.createElement(Jp,{key:e,label:uy[e]+(r?" 
("+r+")":"")},a.createElement(iy,null,r?n.map((function(e){var t=e.id,n=e.url,r=e.title,o=e.description;return a.createElement(ay,{key:t.raw,url:n.raw,title:r,description:o})})):a.createElement(rg,{padding:[4]},a.createElement(af,{strong:!0},"No results"))))}))))},hy=(0,i.default)(rg).attrs({padding:[6],background:"dropdown",gap:6,column:!0,round:!0,overflow:{vertical:"auto"}}).withConfig({displayName:"documentation__Container",componentId:"sc-13yq5u1-0"})(["box-shadow:0px 4px 4px rgba(0,0,0,0.25);"]),vy=function(e){var t=e.children,n=e.onClose;return a.createElement(rg,{width:"100%",alignItems:"center",justifyContent:"between",padding:[0,0,4],border:{side:"bottom",color:"disabled"}},a.createElement(rg,{gap:2,alignItems:"center"},t),a.createElement(ig,{icon:"x",neutral:!0,small:!0,onClick:n,flavour:"borderless","data-testid":"documentation-help-close"}))},my="general",gy="dashboard",fy="search",py={general:"Need help?",dashboard:"Need help?"},wy=function(e){var t=e.app,n=void 0===t?"cloud":t,r=e.onCloseClick,o=e.onVisitDocumentClick,i=e.onOpenIssueClick,l=e.onOpenBugClick,c=e.onContributeClick,s=e.onSupportClick,u=e.onGoToDemoClick,d=e.children,h=e.demoUrl,v=Ag(),m=v[0],g=v[1],f=(0,a.useState)(my),p=f[0],w=f[1],b=p===my,y=(0,a.useCallback)((function(){return w(gy)}),[]),x=(0,a.useCallback)((function(){return w(my)}),[]),Z=(0,a.useCallback)((function(){return w(fy)}),[]),B=(0,a.useCallback)((function(){g(),r&&r()}),[]);return a.createElement(a.Fragment,null,d(g,m),m&&a.createElement(ib,{position:"bottom-left",backdrop:!0,margin:[5,17],onClickOutside:g,onEsc:g},a.createElement(ty,null,(function(e){var t=e.searchTerm,r=e.setSearchTerm,d=e.results,v=e.reset;return a.createElement(a.Fragment,null,a.createElement(hy,{width:{max:b?"325px":p===gy?"600px":"100%"},"data-testid":"documentation-layer"},a.createElement(vy,{onClose:B},b&&a.createElement(Xv,{color:"text",name:"questionFilled",width:"18px",height:"18px"}),!b&&a.createElement(ig,{icon:"arrow_left",neutral:!0,small:!0,onClick:function(){x(),v()},flavour:"borderless","data-testid":"dashboard-back"}),a.createElement(Jg,{margin:[0]},py[p]||py.general)),p!==gy&&a.createElement(ny,{value:t,setSearchTerm:r,setSearchView:Z}),b&&a.createElement(rg,{gap:6,overflow:{vertical:"auto"},column:!0,padding:[1]},a.createElement(Ob,{app:n,onDashboardClick:y,onVisitDocumentClick:o,onOpenIssueClick:i,onOpenBugClick:l,onContributeClick:c,onSupportClick:s,onGoToDemoClick:u,demoUrl:h})),p===gy&&a.createElement(Vb,null),p===fy&&a.createElement(dy,{results:d})))}))))},by=(0,i.default)(rg).attrs({overflow:{vertical:"auto"},padding:[0,4,0,0]}).withConfig({displayName:"container__Container",componentId:"sc-x7a9ga-0"})(["",""],Cp),yy=function(e){var t=e.onClose;return a.createElement(rg,{border:{side:"bottom",color:"selected"},justifyContent:"between",alignItems:"center",padding:[0,0,4,0]},a.createElement(rg,{gap:2},a.createElement(Xv,{color:"text",name:"insights"}),a.createElement(lf,{strong:!0},"Netdata News")),a.createElement(ig,{flavour:"borderless",neutral:!0,icon:"x",title:"close news",onClick:t}))},xy=(0,i.default)(rg).attrs({as:"img"}).withConfig({displayName:"image__Image",componentId:"sc-1aijjz1-0"})(["object-fit:cover;"]),Zy=(0,i.default)(rg).attrs({as:"a"}).withConfig({displayName:"anchor__Anchor",componentId:"sc-1r3u0zv-0"})(["text-decoration:none;& :hover{text-decoration:none;}"]),By=function(e){var t=e.item,n=t.last_publication_date,r=t.data,o=r.title,i=r.description,l=r.url,c=r.image,s=r.label,u=c&&c.url,d=new Date(n);return 
a.createElement(rg,{column:!0,gap:2},a.createElement(rg,{gap:4},u&&a.createElement(xy,{src:u,width:"160px"}),a.createElement(rg,{column:!0,gap:2},a.createElement(af,{strong:!0},o),a.createElement(af,null,i))),a.createElement(rg,{justifyContent:"between",alignItems:"center"},a.createElement(of,null,d.toLocaleDateString()),a.createElement(Zy,{href:l,target:"_blank",rel:"noopener noreferrer",gap:1,alignItems:"center"},a.createElement(af,{color:"success",strong:!0},s),a.createElement(Xv,{color:"success",rotate:2,name:"arrow_left"}))))},Oy=n(31538),_y=n(70978),Cy=function(){return Oy.eI("https://netdata-news.cdn.prismic.io/api/v2")},My=[],ky=function(e){var t=e.app,n=void 0===t?"cloud":t,r=e.onCloseClick,o=e.children,i=localStorage.getItem("news_last_seen"),l=(0,a.useState)(My),c=l[0],s=l[1],u=(0,a.useState)(),d=u[0],h=u[1],v=Ag(),m=v[0],g=v[1],f=function(){var e=(0,a.useState)(Cy)[0];return function(t,n,r){return e.get({filters:[_y.h.any("document.tags",Array.isArray(t)?t:[t])],pageSize:100,orderings:[{field:"document.last_publication_date",direction:"desc"}]}).then(n).catch(r)}}();(0,a.useEffect)((function(){f(n,(function(e){var t=e.results;return s(t)}),(function(){return h(!0)}))}),[]);var p=(0,a.useMemo)((function(){if(!c.length)return!0;var e=c[0].last_publication_date;return new Date(i)>=new Date(e)}),[i,c]),w=(0,a.useCallback)((function(){g(),localStorage.setItem("news_last_seen",new Date),r&&r()}),[r]);return a.createElement(a.Fragment,null,o({toggle:g,isOpen:m,upToDate:p}),m&&a.createElement(ib,{backdrop:!0,onClickOutside:w,onEsc:w},a.createElement(rg,{background:"dropdown",round:!0,padding:[6],width:"640px",height:{max:"640px"},gap:4,column:!0},a.createElement(yy,{onClose:w}),a.createElement(by,{column:!0,gap:6},d&&a.createElement(of,{textAlign:"center"},"Something went wrong \ud83d\ude14"),!d&&!c.length&&a.createElement(of,{textAlign:"center"},"There are no latest news"),!d&&c.length>0&&c.map((function(e){return a.createElement(By,{key:e.id,item:e})}))))))},Hy=["children"];function jy(){return jy=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,Hy),r=(0,a.useState)(!1),o=r[0],i=r[1],l=(0,a.useRef)();return a.createElement(rg,{column:!0,width:"100%",position:"relative"},a.createElement(rg,{height:"1px",background:"border",width:"100%",position:"absolute",top:0,zIndex:4}),a.createElement(rg,jy({justifyContent:"start",alignSelf:"start",alignItems:"end",width:"100%",height:"100%",overflow:"auto",background:"topBarBg",ref:l,zIndex:1},n),a.Children.map(t,(function(e){return a.cloneElement(e,{collapsed:o,onResize:i,parentRef:l})}))),a.createElement(rg,{height:"1px",background:"border",width:"100%",position:"absolute",bottom:0,zIndex:0}))},Sy=function(){var e=(0,a.useContext)(i.ThemeContext);return function(t){return A(t)({theme:e})}};function Vy(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function Py(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,Fy),O=(0,a.useState)(),_=O[0],C=O[1],M=Dy({active:n,showBorderLeft:h,isDragOverlay:v}).rootStyles,k=(0,a.useCallback)((function(e){e&&e.preventDefault(),r&&r()}),[r]),H=(0,a.useCallback)((function(e){C(!0),i&&i(e)}),[i]),j=(0,a.useCallback)((function(e){C(!1),l&&l(e)}),[l]),E=(0,a.useCallback)((function(e){e.preventDefault(),e.stopPropagation(),c&&c(o,n)}),[c,o,n]),S=(0,a.useCallback)((function(e){return 
a.cloneElement(e,{color:n?"text":"textLite"})}),[n]),V=_&&!s;return a.createElement(rg,Iy({},M,{ref:t},!v&&{onClick:k,onMouseOver:H,onMouseLeave:j},{tabIndex:"0","data-index":o,"data-id":w,style:b},p),a.createElement(rg,null,V&&!v&&a.createElement(Xv,{name:"x",size:"small",color:n?"text":"textLite",onClick:E}),!V&&u&&S(u)),!Z&&a.createElement(rg,B,d),(m||v)&&a.createElement(Xv,Iy({name:"rearrange",width:"10px",height:"10px",color:_?n?"text":"textLite":"textNoFocus"},g,f,{cursor:x||y?"grabbing":"grab"})))}));zy.displayName="Tab";var Ty=zy,Ry=function(){return a.createElement(rg,{padding:[1,2],height:"100%"},a.createElement(rg,{width:"1px",background:"selected",height:"100%"}))},Ny=n(79752),Gy=n(45587),Uy=n(27856),Wy=function(e,t,n,r){var o=(0,a.useState)(!1),i=o[0],l=o[1],c=(0,a.useState)(!1),s=c[0],u=c[1];return[i,s,(0,a.useCallback)((function(){if(e.current&&t.current&&r){var n=e.current.scrollLeft,o=e.current.getBoundingClientRect().right,a=t.current.getBoundingClientRect().right;u(a>o+20),l(n>20)}}),[r,n])]};function qy(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function Qy(e){for(var t=1;to))return i.current=Qy(Qy({},i.current),{},{width:o,tabRight:d,containerRight:c}),d>=c&&!i.current.collapse?(i.current.collapse=!0,r(!0)):d+h0||(o.disconnect(),gx.delete(e),delete vx[r],delete mx[r])}}((function(e){o&&o(e),h(e)}),e,a)}}),[t,n,r,o]);return(0,a.useEffect)((function(){return function(){null==s.current||s.current(),s.current=null}}),[]),[v,c,d]},bx=["height","width","fallback","root","rootMargin","threshold","onVisibility","children"];function yx(){return yx=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,bx),w=wx({root:u,rootMargin:h,threshold:m,onVisibility:g}),b=w[0],y=w[1],x=w[2],Z=function(e,t){void 0===t&&(t=!1);var n=(0,a.useRef)();return(0,a.useEffect)((function(){n.current=e}),[t?e:""]),n.current}(x),B=(0,a.useRef)(o);return x!==Z&&!x&&y.current&&(B.current=y.current.clientHeight+"px"),a.createElement(rg,yx({ref:function(e){b(e),fg(t,e)},width:l,height:x?o:{min:B.current}},p),"function"===typeof(n=x?f:s)?n():n)})),Zx=xx,Bx={background:{neutral:"nodeBadgeBackground",success:"success",clear:"success",warning:"warning",error:"error",critical:"error",stale:"stale",idleClear:"idleClear",idleError:"idleError",idleWarning:"idleWarning"},border:{neutral:"nodeBadgeBackground",success:"success",clear:"success",warning:"warning",error:"error",critical:"error",stale:"stale",idleClear:"idleClear",idleError:"idleError",idleWarning:"idleWarning"},hollow:{neutral:"neutralPillBorder",success:"successSemi",clear:"successSemi",warning:"warningSemi",error:"errorSemi",critical:"errorSemi",stale:"staleSemi"},color:{neutral:"neutralPillColor",success:"success",clear:"success",warning:"warning",error:"error",critical:"error",stale:"stale",idleClear:"idleClear",idleError:"idleError",idleWarning:"idleWarning"}},Ox={alert:"alertIcon",disabledClear:"idleClear",disabledError:"errorSemi",disabledWarning:"idleWarning",clear:"success",error:"error",warning:"warning"},_x=function(e,t){return Bx[e][t]},Cx=["icon","color","hollow","flavour","size"];function Mx(){return Mx=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,Cx);return t?"string"!==typeof 
t?t:a.createElement(Xv,Mx({color:n||(r?_x("color",o):"bright"),"data-testid":"pill-icon",height:kx[i]||kx.default,width:kx[i]||kx.default,name:t},l)):null},jx=function(e,t){return e||function(e){return Ox[e]}(t)},Ex=function(e){var t=e.background,n=e.flavour,r=void 0===n?"neutral":n,o=e.hollow,a=e.semi;return t||(o?a?_x("hollow",r):"transparent":_x("background",r))},Sx={default:[.5,2],large:[1,2.5]},Vx=function(e,t,n){return e||(n?[.25,.5]:Sx[t]||Sx.default)},Px=["round","hollow","flavour","borderColor","onClick","padding","size","tiny","position","zIndex","justifyContent","alignItems","background","semi"];function Lx(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function Ax(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,Px);return Ax(Ax({padding:Vx(l,c,s),round:!1===n?1:!0===n?7.5:n,border:{side:"all",color:a||_x(p?"hollow":"border",o),size:"1px"}},i&&{cursor:"pointer"}),{},{justifyContent:v,alignItems:g,position:u,zIndex:d,background:Ex({background:f,flavour:o,hollow:r,semi:p})},w)})).withConfig({displayName:"styled__PillContainer",componentId:"sc-1pihee3-1"})([""]),zx=["children","background","color","data-testid","flavour","hollow","icon","iconSize","normal","reverse","size","textSize","tiny","textProps","semi"];function Tx(){return Tx=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,zx),x={color:o,flavour:c,hollow:s,icon:u,size:d},Z=p?Rx.tiny:f?Rx[f]:Rx[g]||Rx.default;return a.createElement(Ix,Tx({background:r,"data-testid":l,flavour:c,gap:1,hollow:s,ref:t,size:g,tiny:p,semi:b},y),!m&&a.createElement(Hx,Tx({"data-testid":l+"-icon-left"},x)),n&&a.createElement(rg,Tx({as:Z,color:o||(s?_x("color",c):"bright"),"data-testid":l+"-text",strong:!v,whiteSpace:"nowrap"},w),n),m&&a.createElement(Hx,Tx({"data-testid":l+"-icon-right"},x)))})),Gx=Nx,Ux=["background","className","color","containerWidth","height","value","width"];function Wx(){return Wx=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,Ux);return h=Array.isArray(h)?h:[h||{width:v,color:l}],a.createElement(rg,Wx({background:r,border:{side:"all",color:r},className:o,"data-testid":"progressBar",height:d,ref:t,round:"2px",width:s},m),h.map((function(e,t){var n=e.color,r=e.width;return"0%"===r?null:a.createElement(sg,{background:n,border:{side:"all",color:n},"data-testid":"progressBar-progress"+r,height:"100%",key:r+"-"+t,position:"relative",round:"2px",width:r})})))})),Qx=qx,Yx=["background","icon","text"];function Kx(){return Kx=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,Yx);return a.createElement(Gx,Kx({background:n,borderColor:n,"data-testid":"mastercard-pill",icon:r,ref:t},i),!r&&(o||"-"))})),$x=Xx,Jx=["data-testid","height","normal","onClick","labelProps","pillLeft","pillRight","pillEnd","round","size"];function eZ(){return eZ=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return 
o}(e,Jx),w={height:o,round:g,size:f},b=nZ(nZ({background:Ox.alert,color:"text",icon:"alarm_bell",zIndex:4},s),w),y=nZ(nZ({normal:i},w),p),x=jx(d.background,d.flavour||"disabledError"),Z=jx(v.background,v.flavour||"disabledWarning"),B=m&&jx(m.background,m.flavour||"disabledClear"),O=nZ(nZ(nZ({background:x,position:"relative",margin:[0,0,0,"large"===f?-4.5:-3.5],padding:[0,2,0,"large"===f?5:4],zIndex:3},y),d),{},{round:s.hidden}),_=nZ(nZ({background:Z,margin:[0,0,0,"large"===f?-4.5:-3.5],padding:[0,2,0,"large"===f?5:4],zIndex:2},y),v),C=m&&nZ(nZ({background:B,margin:[0,0,0,"large"===f?-4.5:-3.5],padding:[0,2,0,"large"===f?5:4],zIndex:1},y),m);return a.createElement(Fx,eZ({"data-testid":r,onClick:l,ref:t},w),!s.hidden&&a.createElement($x,eZ({"data-testid":r+"-icon-pill"},b)),a.createElement($x,eZ({"data-testid":r+"-left-pill"},O)),a.createElement($x,eZ({"data-testid":r+"-right-pill"},_)),C&&a.createElement($x,eZ({"data-testid":r+"-end-pill"},C)))})),aZ=oZ,iZ=["children","data-testid","height","normal","onClick","pillLeft","pillRight","pillEnd","round","size","zIndex"];function lZ(){return lZ=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,iZ)),p={height:i,round:m,size:g},w=sZ(sZ({normal:l},p),f),b=jx(h.background,h.flavour||"disabledWarning"),y=v&&jx(v.background,v.flavour||"disabledClear"),x=sZ(sZ(sZ({background:jx(u.background,u.flavour||"disabledError"),position:"relative",width:{min:dZ[f.size]||dZ.default}},w),u),{},{zIndex:3}),Z=sZ(sZ(sZ({background:b,margin:[0,0,0,"large"===g?-4.5:-3.5],padding:[0,2,0,"large"===g?5:4],width:{min:dZ[f.size]||dZ.default}},w),h),{},{zIndex:2}),B=v&&sZ(sZ(sZ({background:y,margin:[0,0,0,"large"===g?-4.5:-3.5],padding:[0,2,0,"large"===g?5:4],width:{min:dZ[f.size]||dZ.default}},w),v),{},{zIndex:1});return a.createElement(Fx,lZ({"data-testid":o,onClick:c,ref:t},p),n||a.createElement(a.Fragment,null,a.createElement($x,lZ({"data-testid":o+"-left-pill"},x)),a.createElement($x,lZ({"data-testid":o+"-right-pill"},Z)),B&&a.createElement($x,lZ({"data-testid":o+"-end-pill"},B))))})),vZ=hZ,mZ=["background","children","testId"],gZ=["children","testId"],fZ=["children","testId"],pZ=["children","hasBorder","parentPadding","testId"],wZ=["iconName","onClick","testId"],bZ=["onClose","testId"],yZ=["children"];function xZ(){return xZ=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}var BZ=function(e){var t=e.background,n=void 0===t?"mainBackground":t,r=e.children,o=e.testId,i=ZZ(e,mZ);return a.createElement(rg,xZ({background:n,column:!0,"data-testid":o,round:!0},i),r)},OZ=function(e){var t=e.children,n=e.testId,r=ZZ(e,gZ);return a.createElement(rg,xZ({background:"modalHeaderBackground","data-testid":n,padding:[2,4]},r),t)},_Z=function(e){var t=e.children,n=e.testId,r=ZZ(e,fZ);return a.createElement(rg,xZ({"data-testid":n,padding:[2,4],column:!0},r),t)},CZ=function(e){var t=e.children,n=e.hasBorder,r=void 0===n||n,o=e.parentPadding,i=e.testId,l=ZZ(e,pZ);return a.createElement(rg,{column:!0,padding:o||[0,4],flex:1},a.createElement(rg,xZ({"data-testid":i,flex:1,padding:[2,0],alignItems:"center",justifyContent:"end",border:r?{size:"1px",type:"solid",side:"top",color:"borderSecondary"}:{}},l),t))},MZ=function(e){var t=e.iconName,n=e.onClick,r=e.testId,o=ZZ(e,wZ);return a.createElement(Mf,xZ({"data-testid":r,icon:t,neutral:!0,onClick:n,cursor:"pointer",flavour:"borderless"},o))},kZ=function(e){var t=e.onClose,n=e.testId,r=ZZ(e,bZ);return 
a.createElement(MZ,xZ({iconName:"x",onClick:t,testId:n,position:"absolute",height:"14px",width:"14px",top:2,right:2},r))},HZ=function(e){var t=e.children,n=ZZ(e,yZ);return a.createElement(ib,n,t)},jZ=(0,i.default)(CZ).attrs({gap:3,hasBorder:!1,parentPadding:[0],padding:[0]}).withConfig({displayName:"styled__Actions",componentId:"sc-bc708g-0"})([""]),EZ=(0,i.default)(af).attrs({as:BZ,background:"dropdown",gap:4,height:{max:"calc(100vh - 32px)"},padding:[4],width:{base:128,max:140,min:70}}).withConfig({displayName:"styled__Content",componentId:"sc-bc708g-1"})([""]),SZ=(0,i.default)(_Z).attrs({column:!1,padding:[0]}).withConfig({displayName:"styled__Body",componentId:"sc-bc708g-2"})(["display:block;strong{font-weight:bold;}"]),VZ=(0,i.default)(HZ).attrs({backdropProps:{backdropBlur:8}}).withConfig({displayName:"styled__Dialog",componentId:"sc-bc708g-3"})(["box-shadow:0 11px 15px -7px rgb(0 0 0 / 20%),0px 24px 38px 3px rgb(0 0 0 / 14%),0px 9px 46px 8px rgb(0 0 0 / 12%);"]),PZ=(0,i.default)(OZ).attrs({alignItems:"center",padding:[0],background:""}).withConfig({displayName:"styled__Header",componentId:"sc-bc708g-4"})([""]),LZ=(0,i.default)(Xg).attrs({margin:[0]}).withConfig({displayName:"styled__Title",componentId:"sc-bc708g-5"})([""]),AZ=(0,i.default)(Xv).attrs({color:"main",height:"24px",width:"24px"}).withConfig({displayName:"styled__TitleIcon",componentId:"sc-bc708g-6"})([""]),DZ=["children"];var FZ=function(e){var t=e.children,n=function(e,t){if(null==e)return{};var n,r,o={},a=Object.keys(e);for(r=0;r=0||(o[n]=e[n]);return o}(e,DZ);return"object"===typeof t?t:a.createElement(af,n,t)},IZ=function(e){var t=e.confirmLabel,n=void 0===t?"Yes, remove":t,r=e.confirmWidth,o=void 0===r?"128px":r,i=e["data-ga"],l=void 0===i?"confirmation-dialog":i,c=e["data-testid"],s=void 0===c?"confirmationDialog":c,u=e.declineLabel,d=void 0===u?"Cancel":u,h=e.declineWidth,v=void 0===h?"128px":h,m=e.handleConfirm,g=e.handleDecline,f=e.hideIcon,p=e.iconName,w=void 0===p?"warning_triangle_hollow":p,b=e.isConfirmDisabled,y=e.isConfirmLoading,x=e.isDeclineDisabled,Z=e.isConfirmPositive,B=e.message,O=e.title;return a.createElement(VZ,{onEsc:g},a.createElement(EZ,{"data-testid":s},a.createElement(PZ,{"data-testid":s+"-headerContainer"},a.createElement(rg,{"data-testid":s+"-header",gap:2},!f&&a.createElement(AZ,{"data-testid":s+"-headerIcon",name:w}),a.createElement(LZ,{"data-testid":s+"-headerText"},O)),g&&a.createElement(kZ,{"data-testid":s+"-headerClose",onClose:g})),a.createElement(SZ,{"data-testid":s+"-body"},a.createElement(FZ,{"data-testid":s+"-bodyMessage"},B)),a.createElement(jZ,{"data-testid":s+"-actions"},g&&a.createElement(ig,{"data-ga":l+"-::click-cancel::global-view","data-testid":s+"-cancelAction",flavour:"hollow",disabled:x,label:d,onClick:g,width:v}),a.createElement(ig,{"data-ga":l+"-::click-confirm::global-view","data-testid":s+"-confirmAction",danger:!Z&&!0,disabled:b,label:n,onClick:m,width:o,isLoading:y}))))},zZ=n(74094),TZ=n(27060),RZ=n(18446),NZ=n.n(RZ),GZ=n(6557),UZ=n.n(GZ),WZ={gt:function(e,t){return e>t},eq:function(e,t){return e===t},lt:function(e,t){return e=0||(o[n]=e[n]);return o}(e,KZ),j=(0,a.useState)(!1),E=j[0],S=j[1];if(!1===w)return null;var V=C?ig:Mf;return a.createElement(a.Fragment,null,E&&_&&a.createElement(_,{handleAction:i(),onClose:function(){return S(!1)},data:(null==b?void 0:b.original)||y}),E&&!_&&a.createElement(IZ,{actionButtonDirection:m,declineLabel:v,confirmLabel:h,title:"function"===typeof s?s(null==b?void 0:b.original,y):s,message:"function"===typeof u?u(null==b?void 
0:b.original,y):u,handleDecline:function(){S(!1),null==d||d()},handleConfirm:function(){S(!1),null==i||i()}}),a.createElement(k,{content:p?x:l},a.createElement(rg,{ref:t,alignItems:"center",justifyContent:"center",_hover:{background:p||C?null:"borderSecondary"},cursor:p?"auto":"pointer",key:n,round:!0,background:C?null:o},a.createElement(V,XZ({iconSize:"small","data-testid":"netdata-table-action-"+n+g,"data-ga":f,disabled:p,onClick:function(e){e.stopPropagation(),c||_?S(!0):i()},icon:r,flavour:O,iconColor:Z,label:C,padding:[.5]},H)))))})),JZ=$Z,eB=["id","handleAction","isDisabled","isVisible","dataGa"];function tB(){return tB=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,eB);return a.createElement(JZ,tB({},d,{disabled:"function"===typeof l?l(t.original):l,visible:"function"===typeof s?s(t.original):s,dataGa:"function"===typeof u?u(t.original):u,key:o,id:o,handleAction:function(){return i(t.original,n)},testPrefix:r,currentRow:t}))})))},enableColumnFilter:!1,enableSorting:!1,tableMeta:o,size:35*i.length<60?60:35*i.length,meta:{cellStyles:{justifyContent:"end"}},notFlex:!0}}),[i])};function lB(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function cB(e){for(var t=1;t=0||(o[n]=e[n]);return o}(n,xB);return OB(OB({control:function(t,n){return OB(OB({},t),{},{borderColor:n.isFocused?e.colors.inputBorderFocus:e.colors.inputBorder,boxShadow:"none",minHeight:18,minWidth:r||160,":hover":{borderColor:e.colors.inputBorderHover}})},input:function(t,n){return OB(OB({},t),{},{color:n.isDisabled?e.colors.placeholder:e.colors.textDescription},"tiny"===o?{lineHeight:"18px",paddingBottom:0,paddingTop:0}:{})},menu:function(e){return OB(OB({},e),{},{zIndex:100})},menuPortal:function(e){return OB(OB({},e),{},{zIndex:9999})},multiValue:function(e){return OB(OB({},e),{},{fontSize:"tiny"===o?"12px":"14px",flexDirection:"row-reverse"},"tiny"===o?{minHeight:18}:{})},multiValueLabel:function(t,n){return OB(OB(OB({},t),{},{backgroundColor:e.colors.disabled,borderRadius:"0 2px 2px 0",color:n.isDisabled?e.colors.placeholder:e.colors.textDescription},"tiny"===o?{padding:"1px"}:{}),{},{paddingRight:n.data.isDisabled?"8px":""})},multiValueRemove:function(t,n){return OB({color:n.isDisabled?e.colors.placeholder:e.colors.textDescription},n.data.isDisabled?OB(OB({},t),{},{display:"none"}):OB(OB({},t),{},{borderRadius:"2px 0 0 2px",background:e.colors.disabled,":hover":{background:e.colors.tabsBorder}}))},option:function(t,n){return OB(OB({},t),{},{color:EB(e,n)},"tiny"===o?{fontSize:"12px",minHeight:28,padding:"4px 8px"}:{})},placeholder:function(t){return OB(OB({},t),{},{color:e.colors.placeholder},"tiny"===o?{fontSize:"12px",lineHeight:"18px"}:{})},singleValue:function(t,n){return OB(OB({},t),{},{color:n.isDisabled?e.colors.placeholder:e.colors.textDescription,fontSize:"tiny"===o?"12px":"14px"})}},"tiny"===o?{dropdownIndicator:function(e){return OB(OB({},e),{},{padding:"3px"})},clearIndicator:function(e){return OB(OB({},e),{},{padding:"3px"})},indicatorsContainer:function(e){return OB(OB({},e),{},{minHeight:18})},valueContainer:function(e){return OB(OB({},e),{},{minHeight:18,padding:"1px 6px"})}}:{dropdownIndicator:function(e){return OB(OB({},e),{},{padding:"3px"})},clearIndicator:function(e){return OB(OB({},e),{},{padding:"3px"})},indicatorsContainer:function(e){return 
OB(OB({},e),{},{minHeight:28})},valueContainer:function(e){return OB(OB({},e),{},{minHeight:28,padding:"1px 6px"})}}),a)},VB=(0,i.default)(yB.ZP).attrs((function(e){return OB(OB({},e),{},{components:OB(OB({},jB),e.components),theme:(t=e.theme,function(e){return OB(OB({},e),{},{borderRadius:4,colors:OB(OB({},e.colors),{},{primary:t.colors.border,primary25:t.colors.selected,primary50:t.colors.border,primary75:t.colors.tooltip,danger:t.colors.text,dangerLight:t.colors.border,neutral0:t.colors.mainBackground,neutral5:t.colors.mainBackgroundDisabled,neutral30:t.colors.controlFocused,neutral60:t.colors.border,neutral80:t.colors.text,neutral10:t.colors.border,neutral20:t.colors.border})})}),styles:SB(e.theme,e.styles)});var t})).withConfig({displayName:"select__Select",componentId:"sc-4oqdin-0"})([""]);function PB(){return PB=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,QB),m=(0,a.useRef)(),g="function"===typeof n?n():n,f="function"===typeof r?r():r;return a.createElement(JZ,YB({ref:qB(m,t),testPrefix:o+"-bulk",key:i,visible:f,id:i,icon:l,handleAction:function(){return c(s,u)},tooltipText:d,disabled:!h&&(null==s?void 0:s.length)<1||g,background:"elementBackground",selectedRows:s},v))})),XB=KB,$B=function(e){var t=e.column,n=e.dataGa,r=e.disabled,o=t.getIsVisible();return a.createElement(rg,{alignItems:"center",as:qg,justifyContent:"between",padding:[1]},a.createElement($f,{checked:o,disabled:r,label:t.columnDef.name||t.id,onChange:function(e){return t.getToggleVisibilityHandler()({target:{checked:e}})},"data-ga":"columns-menu::click-"+(o?"disable":"enable")+"-"+t.id+"-::"+n}))},JB=function(e){var t=e.dataGa,n=e.parentRef,r=e.isOpen,o=e.columns,i=e.onClose,l=e.pinnedColumns;return n.current&&r?a.createElement(Lg,{background:"dropdown",height:{max:"400px"},onClickOutside:i,overflow:{vertical:"auto"},round:1,target:n.current,width:50,align:{top:"bottom",right:"right"}},a.createElement(rg,{border:{size:"1px",type:"solid",side:"bottom",color:"borderSecondary"},padding:[3,3,1]},a.createElement(af,{color:"textLite"},"Edit columns")),a.createElement(rg,{column:!0,padding:[1,3]},l.length?a.createElement(rg,{border:{size:"1px",type:"solid",side:"bottom",color:"borderSecondary"},column:!0},l.map((function(e){return a.createElement($B,{column:e,dataGa:t,key:e.id})}))):null,o.map((function(e){return a.createElement($B,{column:e,dataGa:t,key:e.id})})))):null},eO=["alwaysEnabled","columnPinning","dataGa","enableColumnPinning","handleAction","id","icon","isDisabled","isOpen","isVisible","onClose","selectedRows","table","testPrefix","tooltipText"],tO=["isVisible"];function nO(){return nO=Object.assign?Object.assign.bind():function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}var sO=function(e){return e.columnVisibility},uO=function(e){var t=e.alwaysEnabled,n=e.columnPinning,r=void 0===n?{}:n,o=e.dataGa,i=e.enableColumnPinning,l=e.handleAction,c=e.id,s=e.icon,u=e.isDisabled,d=e.isOpen,h=e.isVisible,v=e.onClose,m=e.selectedRows,g=e.table,f=e.testPrefix,p=e.tooltipText,w=cO(e,eO);mB(sO);var b=(0,a.useRef)(),y="function"===typeof u?u():u,x="function"===typeof h?h():h,Z=(0,a.useMemo)((function(){return iO(g.getAllLeafColumns()).sort((function(e,t){return e.id.localeCompare(t.id,void 0,{sensitivity:"accent",ignorePunctuation:!0})}))}),[g.getAllLeafColumns()]),B=i?[].concat(iO((null==r?void 0:r.left)||[]),iO((null==r?void 0:r.right)||[])):[],O=i?Z.reduce((function(e,t){var n;if(!t.getCanHide())return e;var r="columns";return 
B.includes(t.id)&&(r="pinnedColumns"),oO(oO({},e),{},((n={})[r]=[].concat(iO(e[r]),[t]),n))}),{columns:[],pinnedColumns:[]}):{columns:Z,pinnedColumns:[]},_=O.columns,C=O.pinnedColumns;return a.createElement(a.Fragment,null,a.createElement(XB,nO({ref:b,testPrefix:"-bulk"+f,visible:x,id:c,icon:s,handleAction:function(){return l(m,g)},tooltipText:p,disabled:!t&&(null==m?void 0:m.length)<1||y,background:"elementBackground",selectedRows:m,dataGa:o},w)),a.createElement(JB,{columns:_,dataGa:o,isOpen:d,onClose:v,parentRef:b,pinnedColumns:C}))},dO=function(e){var t=e.isVisible,n=cO(e,tO),r=Ag(!1),o=r[0],i=r[1],l=r[3];return a.createElement(uO,nO({key:"columnVisibility"},WB.columnVisibility,{handleAction:i,isOpen:o,isVisible:t,id:"columnVisibility",onClose:l},n))};function hO(){return hO=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,wO);mB(OO);var h=(0,a.useMemo)((function(){return"function"===typeof n&&(!t||!t({data:r.original,table:o,fullRow:r}))}),[r,n]);return a.createElement(rg,{"data-testid":"netdata-table-row"+i+(l?"-"+l(r.original):""),onClick:(0,a.useCallback)(h?function(){return n({data:r.original,table:o,fullRow:r})}:void 0,[h,r,n]),cursor:h?"pointer":"default",onMouseEnter:function(){return null==u?void 0:u({row:r.index})},onMouseLeave:function(){return null==u?void 0:u({row:null})},flex:!0},!!r.getLeftVisibleCells().length&&a.createElement(rg,{position:"sticky",left:0,border:{side:"right"},zIndex:s||1,basis:o.getLeftTotalSize()+"px",flex:"grow",background:c%2===0?"tableRowBg2":"tableRowBg",_hover:{background:c%2===0?"tableRowBg2Hover":"tableRowBgHover"}},r.getLeftVisibleCells().map((function(e,t){return a.createElement(BO,bO({cell:e,row:r,key:e.id,testPrefix:i,header:o.getLeftLeafHeaders()[t]},d))}))),a.createElement(rg,{width:o.getCenterTotalSize()+"px",flex:"grow",background:c%2===0?"tableRowBg2":"tableRowBg",_hover:{background:c%2===0?"tableRowBg2Hover":"tableRowBgHover"}},a.createElement(rg,{flex:!0},r.getCenterVisibleCells().map((function(e,t){return a.createElement(BO,bO({cell:e,row:r,key:e.id,testPrefix:i,header:o.getCenterLeafHeaders()[t]},d))})))),!!r.getRightVisibleCells().length&&a.createElement(rg,{position:"sticky",right:0,border:{side:"left"},zIndex:s||1,basis:o.getRightTotalSize()+"px",flex:"grow",background:c%2===0?"tableRowBg2":"tableRowBg",_hover:{background:c%2===0?"tableRowBg2Hover":"tableRowBgHover"}},r.getRightVisibleCells().map((function(e,t){return a.createElement(BO,bO({cell:e,row:r,key:e.id,testPrefix:i,header:o.getRightLeafHeaders()[t]},d))}))))}));function CO(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function MO(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,EO);return n?a.createElement(AO,{sorting:r},a.createElement(Xv,SO({height:"12px",width:"12px",color:r?"text":"textLite",name:null!=(t=LO[r||"indicator"])?t:null},o))):null},FO=function(e){var t=e.meta,n=t&&null!=t&&t.tooltip?null==t?void 0:t.tooltip:"";return n?a.createElement(rg,{position:"absolute",top:"1px",right:"1px",width:3,height:3},a.createElement(Zf,{align:"bottom",content:n},a.createElement(Xv,{color:"nodeBadgeColor",size:"small",name:"information"}))):null};function IO(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function 
zO(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}(e,GO),u=t.setFilterValue,d=(0,t.getFilterValue)(),h=r?i:[QO].concat(WO(i)),v=r?d:h[0];return a.createElement(RO,UO({value:d||v,isMulti:r,options:h,onChange:function(e){return u(e)},styles:c&&{size:"tiny"}},s))},default:function(e){var t=e.column,n=e.testPrefix,r=t.id,o=void 0===r?"":r;return a.createElement(sg,{"data-testid":"netdata-table-filter-"+o+n,as:Zw,value:t.getFilterValue(),width:{max:50},placeholder:"Search...",iconRight:a.createElement(Xv,{color:"textLite",name:"magnify",height:"18px",width:"18px"}),onChange:t.setFilterValue})}},XO=function(e){var t,n,r=e.column,o=e.testPrefix,i=e.index;if(!r.getCanFilter())return null;var l="function"===typeof r.columnDef.meta?r.columnDef.meta({},r,i):r.columnDef.meta,c=l&&null!=l&&null!=(t=l.filter)&&t.component?null==l||null==(n=l.filter)?void 0:n.component:"default",s=l&&null!=l&&l.filter?null==l?void 0:l.filter:{},u=KO[c];return a.createElement(u,YO({column:r,testPrefix:o},s))};function $O(){return $O=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}var d_=function(e){return{sizing:e.columnSizing,expanded:e.expanded,columnVisibility:e.columnVisibility,selectedRows:e.selectedRows,grouping:e.grouping}},h_=function(e){var t=e.headerGroup,n=e.testPrefix,r=u_(e,i_);return a.createElement(rg,{id:t.id,"data-testid":"netdata-table-headRow"+n,flex:!0,background:"tableRowBg2"},t.headers.map((function(e,t){return a.createElement(a_,s_({key:e.id,index:t},r,{header:e,testPrefix:n}))})))},v_=function(e){var t=e.groups,n=e.size,r=e.side,o=u_(e,l_);return t[0].headers.length?a.createElement(rg,s_({position:r?"sticky":"relative"},"right"===r?{right:0,border:{side:"left"}}:{left:0,border:{side:"right"}},{zIndex:r?11:void 0,width:n+"px",flex:"grow"}),t.map((function(e){return a.createElement(h_,s_({headerGroup:e,key:e.id},o))}))):null},m_=(0,a.memo)((function(e){var t=e.table,n=e.testPrefix,r=u_(e,c_);return mB(d_),a.createElement(rg,{"data-testid":"netdata-table-head"+n,flex:!0,border:{size:"1px",type:"solid",side:"bottom",color:"border"}},a.createElement(v_,s_({groups:t.getLeftHeaderGroups(),side:"left",size:t.getLeftTotalSize(),testPrefix:n},r,{table:t})),a.createElement(v_,s_({groups:t.getCenterHeaderGroups(),size:t.getCenterTotalSize(),testPrefix:n},r,{table:t})),a.createElement(v_,s_({groups:t.getRightHeaderGroups(),side:"right",size:t.getRightTotalSize(),testPrefix:n},r,{table:t})))})),g_=m_,f_=["dataGa","table","testPrefix","testPrefixCallback","coloredSortedColumn","meta","overscan","getHasNextPage","getHasPrevPage","getItemKey","loading","loadMore","onVirtualChange","virtualRef","initialOffset"];function p_(){return p_=Object.assign?Object.assign.bind():function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}(e,f_);mB(x_);var B=(0,a.useRef)(),O=n.getRowModel().rows,_=(0,fw.MG)({count:O.length?O.length+1:1,getScrollElement:function(){return B.current},enableSmoothScroll:!1,estimateSize:function(){var e,t;return!(null==(e=l.styles)||!e.height)&&parseInt(l.styles.height)||!(null==(t=l.cellStyles)||!t.height)&&parseInt(l.cellStyles.height)||35},overscan:c||15,onChange:w,initialOffset:x,getItemKey:function(e){return 0===e?"header":m(e-1)},rangeExtractor:(0,a.useCallback)((function(e){if(e.count&&e.startIndex>=0){var t=new Set([0].concat(w_((0,pO.MK)(e))));return w_(t).sort((function(e,t){return e-t}))}return(0,pO.MK)(e)}),[])});b&&(b.current=_);var 
C=_.getVirtualItems();return(0,a.useEffect)((function(){if(p){var e=C[C.length-1];e&&e.index>=O.length&&u()&&!g&&p("backward")}}),[C,g]),(0,a.useEffect)((function(){if(p){var e=C[1];e&&e.index<=1&&h()&&!g&&p("forward")}}),[C,h(),g]),a.createElement("div",{ref:B,style:{display:"flex",height:"100%",overflow:"auto"},"data-testid":"netdata-table"+r},a.createElement("div",{style:{height:_.getTotalSize()+"px",position:"relative",display:"flex",flex:"1 0 auto"}},C.map((function(e){return a.createElement("div",{key:e.key,style:{transform:"translateY("+e.start+"px)",top:0,left:0,position:"header"===e.key?"sticky":"absolute",zIndex:"header"===e.key?1:0,minWidth:"100%",alignSelf:"start",flex:"1 1 auto",display:"flex"},"data-index":e.index,ref:_.measureElement},0===e.index?a.createElement(g_,p_({dataGa:t,table:n,testPrefix:r,coloredSortedColumn:i,index:e.index},Z)):a.createElement(_O,p_({dataGa:t,table:n,testPrefix:r,testPrefixCallback:o,coloredSortedColumn:i,meta:l,row:O[e.index-1],index:e.index},Z)))}))))})),B_=Z_,O_=function(){},__={},C_=function(){},M_={},k_=function(){},H_={},j_=function(){},E_={pageIndex:0,pageSize:0},S_=function(){},V_=function(){},P_={},L_=function(){},A_=[],D_=function(){},F_=["bulkActions","headerChildren","data","dataColumns","dataGa","enableColumnPinning","columnPinning","onColumnPinningChange","enableColumnVisibility","columnVisibility","onColumnVisibilityChange","enablePagination","enableResizing","enableSelection","enableSubRowSelection","rowSelection","onRowSelectionChange","expanded","onExpandedChange","enableSorting","sortBy","onSortingChange","globalFilter","onSearch","globalFilterFn","enableCustomSearch","grouping","onGroupByChange","groupByColumns","onRowSelected","paginationOptions","onPaginationChange","rowActions","testPrefix","meta","title","virtualizeOptions","tableRef","className"];function I_(){return I_=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,F_),G=function(e,t){void 0===e&&(e=M_),void 0===t&&(t=C_);var n=(0,a.useState)((function(){return e})),r=n[0],o=n[1],i=(0,a.useRef)(!1);(0,a.useEffect)((function(){r!==e&&(i.current=!0,o(e))}),[e]);var l=(0,a.useCallback)((function(e){t(e(r)),o(e(r))}),[r]);return[r,l]}(h,v),U=G[0],W=G[1],q=function(e,t){void 0===e&&(e=__),void 0===t&&(t=O_);var n=(0,a.useState)((function(){return e})),r=n[0],o=n[1],i=(0,a.useRef)(!1);(0,a.useEffect)((function(){e&&r!==e&&(i.current=!0,o(e))}),[e]);var l=(0,a.useCallback)((function(e){t(e),o(e)}),[]);return[r,l]}(s,u),Q=q[0],Y=q[1],K=function(e,t){void 0===e&&(e=H_),void 0===t&&(t=k_);var n=(0,a.useRef)(!1),r=(0,a.useState)((function(){return e})),o=r[0],i=r[1];(0,a.useEffect)((function(){e&&o!==e&&(n.current=!0,i(e))}),[e]);var l=(0,a.useCallback)((function(e){t(e),i(e)}),[]);return[o,l]}(y,x),X=K[0],$=K[1],J=function(e,t){void 0===e&&(e=P_),void 0===t&&(t=V_);var n=(0,a.useState)((function(){return e})),r=n[0],o=n[1];(0,a.useRef)(!1),(0,a.useEffect)((function(){r!==e&&o(e)}),[e]);var i=(0,a.useCallback)((function(e){t(e(r)),o(e(r))}),[r]);return[r,i]}(w,b),ee=J[0],te=J[1],ne=function(e,t){void 0===e&&(e=A_),void 0===t&&(t=L_);var n=(0,a.useState)((function(){return e})),r=n[0],o=n[1],i=(0,a.useRef)(!1);(0,a.useEffect)((function(){r!==e&&(i.current=!0,o(e))}),[e]);var l=(0,a.useCallback)((function(e){t(e(r)),o(e(r))}),[r]);return[r,l]}(B,O),re=ne[0],oe=ne[1],ae=function(e,t){void 0===e&&(e=E_),void 0===t&&(t=j_);var n=(0,a.useState)((function(){return 
e})),r=n[0],o=n[1],i=(0,a.useRef)(!1);(0,a.useEffect)((function(){r!==e&&(i.current=!0,o(e))}),[e]);var l=(0,a.useCallback)((function(e){t(e),o(e)}),[]);return[r,l]}(P,L),ie=ae[0],le=ae[1],ce=function(e,t){void 0===e&&(e=""),void 0===t&&(t=D_);var n=(0,a.useState)(e),r=n[0],o=n[1],i=(0,a.useRef)(!1);(0,a.useEffect)((function(){r!==e&&(i.current=!0,o(e))}),[e]);var l=(0,a.useCallback)((function(e){t(e),o(e)}),[]);return[r,l]}(j,E),se=ce[0],ue=ce[1],de=function(e,t){void 0===e&&(e=""),void 0===t&&(t=S_);var n=(0,a.useState)(e),r=n[0],o=n[1],i=(0,a.useRef)(!1);(0,a.useEffect)((function(){r!==e&&(i.current=!0,o(e))}),[e]);var l=(0,a.useCallback)((function(e){t(e),o(e)}),[]);return[r,l]}(_,C),he=de[0],ve=de[1],me=uB(i,{testPrefix:D,enableSelection:f,enableResizing:g,enableSorting:Z,rowActions:A,tableMeta:F}),ge=(0,zZ.b7)(T_(T_({columns:me,data:o,manualPagination:!m,columnResizeMode:"onEnd",filterFns:U_,state:{columnVisibility:U,rowSelection:ee,globalFilter:H?"":he,sorting:re,pagination:ie,columnPinning:Q,expanded:X,grouping:(0,a.useMemo)((function(){var e;return Array.isArray(se)?[se].filter(Boolean):(null==S||null==(e=S[se])?void 0:e.columns)||[]}),[se]),columnOrder:[]},onExpandedChange:$},!H&&k?{globalFilterFn:k}:{}),{},{getCoreRowModel:(0,TZ.sC)(),getFilteredRowModel:(0,TZ.vL)(),onRowSelectionChange:te,onGlobalFilterChange:H?void 0:ve,onSortingChange:oe,getSortedRowModel:(0,TZ.tj)(),getPaginationRowModel:(0,TZ.G_)(),getExpandedRowModel:(0,TZ.rV)(),getGroupedRowModel:(0,TZ.qe)(),getSubRows:(0,a.useCallback)((function(e){return e.children}),[]),onPaginationChange:le,onColumnVisibilityChange:W,onColumnPinningChange:Y,enableSubRowSelection:p,columnGroupingMode:"reorder"})),fe=(0,a.useRef)(ge.getState());ge.isEqual=function(e){if(void 0===e&&(e=UZ()),!fe.current)return fe.current=ge.getState(),!1;var t=NZ()(e(fe.current),e(ge.getState()));return fe.current=ge.getState(),t};var pe=hB(vB,(function(e){return e.setState})),we=(0,a.useCallback)((0,Uy.P)(10,pe),[]);(0,a.useLayoutEffect)((function(){we(T_(T_({},ge.getState()),{},{rowsById:ge.getRowModel().rowsById,table:ge,selectedRows:ge.getSelectedRowModel().rows}))}),[ge.getState()]),T&&(T.current=ge);var be=z.getHasNextPage,ye=z.loading,xe=z.warning;return a.createElement(rg,{height:{max:"100%"},overflow:"hidden",column:!0,ref:t,className:R},a.createElement(RB,{q:he,hasSearch:!!C,onSearch:ve,groupByColumns:S,onGroupBy:ue,grouping:se,tableMeta:F,title:I,dataColumns:i,enableColumnVisibility:d,bulkActions:n},r||null,a.createElement(fO,{rowSelection:ee,bulkActions:n,columnPinning:Q,dataGa:l,enableColumnVisibility:d,enableColumnPinning:c,table:ge,testPrefix:D,onRowSelected:V})),a.createElement(B_,I_({table:ge,dataGa:l,testPrefix:D,meta:F},N,z)),!(null!=be&&be())&&!ye&&!!xe&&a.createElement(rg,{alignItems:"center",justifyContent:"center",gap:2,padding:[4],width:"100%"},a.createElement(Xv,{name:"warning_triangle_hollow",color:"warning"})," ",a.createElement(af,{color:"warningText"},xe)),ye&&a.createElement(ib,{backdrop:!1,position:"bottom",margin:[0,0,10],padding:[0,0,10],zIndex:20},a.createElement(rg,{background:"tooltip",padding:[1,2],gap:2},a.createElement(af,{strong:!0},"Loading more..."))),m&&a.createElement(wB,{table:ge}))})));W_.defaultProps={coloredSortedColumn:!0,enableColumnPinning:!1,enableColumnVisibility:!1,enableResizing:!1,onColumnVisibilityChange:N_,onSortingChange:N_,onExpandedChange:N_,paginationOptions:{pageIndex:0,pageSize:100},expanded:G_,rowSelection:G_,rowActions:G_,meta:G_,globalFilter:"",testPrefix:"",virtualizeOptions:{}};var 
q_=function(e){return(0,a.forwardRef)((function(t,n){return a.createElement(fB,null,a.createElement(e,I_({},t,{ref:n})))}))}(W_),Q_=(0,i.createGlobalStyle)(['body{font-family:"IBM Plex Sans",sans-serif;background-color:',";}*{box-sizing:border-box;}"],(function(e){return e.theme.colors.mainBackground}))},15986:function(e,t){t.__esModule=!0,t.default=void 0;var n={end:"flex-end",start:"flex-start",center:"center",stretch:"stretch"};t.default=function(e){var t=e.alignSelf;return t in n&&"align-self: "+n[t]+";"}},61430:function(e,t){t.__esModule=!0,t.default=void 0;t.default=function(e){var t=e.cursor;return t?"cursor: "+t+";":""}},38249:function(e,t,n){t.__esModule=!0,t.default=void 0;var r=n(22837);t.default=function(e){var t=e.theme,n=e.margin;return n?Array.isArray(n)&&n.length>=1&&n.length<=4?"margin: "+(0,r.getDimensions)(t,n)+";":(console.error("Please provide an array (max 4 elements) for `margin` style helper."),""):""}},22837:function(e,t){t.__esModule=!0,t.getDimensions=t.getDimension=void 0;var n=t.getDimension=function(e,t){return"number"===typeof t?0===(n=e.constants.SIZE_SUB_UNIT*t)?"0":n+"px":"auto";var n};t.getDimensions=function(e,t){return t.map((function(t){return n(e,t)})).join(" ")}},47885:function(e,t,n){t.__esModule=!0,t.default=void 0;var r,o=(r=n(31749))&&r.__esModule?r:{default:r};function a(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n>16&255)+", "+(a>>8&255)+", "+(255&a)+", "+t+")"}},t.getSizeBy=function(e){return void 0===e&&(e=1),function(t){return isNaN(e)?e:(c(t)||0)*e+"px"}},t.getValidatedControlColor=function(e,t){return void 0===e&&(e="border"),void 0===t&&(t="disabled"),function(n){var r=n.theme,o=n.success,a=n.error,i=n.disabled;return o?s(["success"])({theme:r}):a?s(["error"])({theme:r}):i?s([t])({theme:r}):s([e])({theme:r})}}},37517:function(e,t,n){n.r(t);var r=n(87854),o=n.n(r),a=n(95348),i=n.n(a),l=new(o())({id:"area_chart",use:"area_chart-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},54216:function(e,t,n){n.r(t);var r=n(87854),o=n.n(r),a=n(95348),i=n.n(a),l=new(o())({id:"bar_chart",use:"bar_chart-usage",viewBox:"0 0 18 18",content:''});i().add(l);t.default=l},47615:function(e,t,n){n.r(t);var r=n(87854),o=n.n(r),a=n(95348),i=n.n(a),l=new(o())({id:"collapse",use:"collapse-usage",viewBox:"0 0 16 2",content:''});i().add(l);t.default=l},5766:function(e,t,n){n.r(t);var r=n(87854),o=n.n(r),a=n(95348),i=n.n(a),l=new(o())({id:"expand",use:"expand-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},6615:function(e,t,n){n.r(t);var r=n(87854),o=n.n(r),a=n(95348),i=n.n(a),l=new(o())({id:"heatmap_chart",use:"heatmap_chart-usage",viewBox:"0 0 18 18",content:''});i().add(l);t.default=l},30075:function(e,t,n){n.r(t);var r=n(87854),o=n.n(r),a=n(95348),i=n.n(a),l=new(o())({id:"information",use:"information-usage",viewBox:"0 0 18 18",content:''});i().add(l);t.default=l},55118:function(e,t,n){n.r(t);var r=n(87854),o=n.n(r),a=n(95348),i=n.n(a),l=new(o())({id:"line_chart2",use:"line_chart2-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},23451:function(e,t,n){n.r(t);var r=n(87854),o=n.n(r),a=n(95348),i=n.n(a),l=new(o())({id:"loading",use:"loading-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},79238:function(e,t,n){n.r(t);var 
r=n(87854),o=n.n(r),a=n(95348),i=n.n(a),l=new(o())({id:"nav_dots",use:"nav_dots-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},7849:function(e,t,n){n.r(t);var r=n(87854),o=n.n(r),a=n(95348),i=n.n(a),l=new(o())({id:"rearrange",use:"rearrange-usage",viewBox:"0 0 8 14",content:''});i().add(l);t.default=l},90038:function(e,t,n){n.r(t);var r=n(87854),o=n.n(r),a=n(95348),i=n.n(a),l=new(o())({id:"reload2",use:"reload2-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l},87931:function(e,t,n){n.r(t);var r=n(87854),o=n.n(r),a=n(95348),i=n.n(a),l=new(o())({id:"stacked_bar_chart",use:"stacked_bar_chart-usage",viewBox:"0 0 18 18",content:''});i().add(l);t.default=l},55591:function(e,t,n){n.r(t);var r=n(87854),o=n.n(r),a=n(95348),i=n.n(a),l=new(o())({id:"stacked_chart",use:"stacked_chart-usage",viewBox:"0 0 24 24",content:''});i().add(l);t.default=l}}]); \ No newline at end of file diff --git a/web/gui/v2/9513.68ac17c54e2a98d13112.js.LICENSE.txt b/web/gui/v2/9513.68ac17c54e2a98d13112.js.LICENSE.txt new file mode 100644 index 00000000000000..ae386fb79c9744 --- /dev/null +++ b/web/gui/v2/9513.68ac17c54e2a98d13112.js.LICENSE.txt @@ -0,0 +1 @@ +/*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */ diff --git a/web/gui/v2/9594.89070793921be1288bb5.css b/web/gui/v2/9594.89070793921be1288bb5.css new file mode 100644 index 00000000000000..e5d02668feebc7 --- /dev/null +++ b/web/gui/v2/9594.89070793921be1288bb5.css @@ -0,0 +1,2 @@ +.default .dygraph-axis-label{color:#35414a}.dark .dygraph-axis-label{color:#fff}.dygraph-label-rotate-right{text-align:center;transform:rotate(-90deg);-webkit-transform:rotate(-90deg);-moz-transform:rotate(-90deg);-o-transform:rotate(-90deg);-ms-transform:rotate(-90deg)}.dygraph-annotation{position:absolute;z-index:10;overflow:hidden;border:1px solid} + diff --git a/web/gui/v2/9594.b5b73051fd8e1b9901f1.chunk.js b/web/gui/v2/9594.b5b73051fd8e1b9901f1.chunk.js new file mode 100644 index 00000000000000..8782fa6c883f7b --- /dev/null +++ b/web/gui/v2/9594.b5b73051fd8e1b9901f1.chunk.js @@ -0,0 +1 @@ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="6bbdc889-00be-4090-b1e5-120561684110",e._sentryDebugIdIdentifier="sentry-dbid-6bbdc889-00be-4090-b1e5-120561684110")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[9594],{33244:function(e,t,n){var a=n(67294),r=n(93416),o=n(22648);t.Z=function(e){var t=e.children;return(0,o.m)()?a.createElement(r.Layer,{full:!0},a.createElement(r.Flex,{width:"100%",background:"mainBackground","data-testid":"alertView-mobileContainer"},t)):t}},65840:function(e,t,n){n.d(t,{HK:function(){return d},M5:function(){return u},vS:function(){return i},x4:function(){return c}});var a=n(29439),r=n(4480),o=(0,r.cn)({key:"spaceKeyAtom",default:0}),l=(0,r.cn)({key:"roomViewLoading",default:!0}),i=function(){return(0,r.sJ)(o)},d=function(){var e=(0,r.FV)(o),t=(0,a.Z)(e,2),n=t[0],l=t[1];return function(){return l(n+1)}},c=function(){return(0,r.sJ)(l)},u=function(){return(0,r.FV)(l)}},44695:function(e,t,n){n.r(t);var 
a=n(67294),r=n(89250),o=n(62567),l=n(33244);t.default=function(e){var t=e.spaceId,n=e.roomId,i=e.nodeName,d=(0,r.UO)().alertId;return a.createElement(l.Z,null,a.createElement(o.Z,{alertId:d,spaceId:t,roomId:n,nodeName:i,isWebview:!0}))}},62567:function(e,t,n){n.d(t,{Z:function(){return L}});var a=n(67294),r=n(93416),o=n(12008),l=n(78266),i=n(22648),d=n(37518),c=n(92253),u=n(80854),s=n(32071),m=n(39840),f=n(89250),g=n(4822),v=n(97945),p=n(71893),h=(0,p.default)(r.Flex).attrs({alignItems:"center"}).withConfig({displayName:"styled__StyledButtonContainer",componentId:"sc-1glv09p-0"})(["position:sticky;bottom:0;"]),b=function(e){var t=e.disabled,n=e.nodeId,o=e.alertId,l=e.context,i=e.lastStatusChange,d=e.onClose,c=e.isLoading,u=e.small,s=void 0!==u&&u,m=e.testid,p=void 0===m?"alertDetailsModal":m,b=(0,f.s0)(),E=(0,v.m3)(n),y=(0,a.useCallback)((function(){d&&d(),b(E,o?{state:{alertId:o}}:{state:{contextToGo:l}})}),[E,o]),w=(0,g.TQ)(),x=(0,a.useCallback)((function(){var e=1e3*i;w({highlight:{after:e-6e4,before:e},correlation:!0}),y()}),[i,l,y,o]);return a.createElement(h,{justifyContent:"end",gap:2},a.createElement(r.Flex,{gap:2,justifyContent:"end"},a.createElement(r.Button,{small:s,label:"Run correlations",onClick:x,flavour:"hollow",isLoading:c,width:s?"112px":"170px","data-testid":"".concat(p,"-runCorrelations-button"),"data-ga":"alert-modal::click-run-correlations::alerts-view"}),a.createElement(r.Button,{small:s,label:"Go to chart",onClick:y,isLoading:c,disabled:c||t,width:s?"112px":"150px","data-testid":"".concat(p,"-goToNode-button"),"data-ga":"alert-modal::click-goto-chart::alerts-view"})))},E=(0,a.memo)(b),y=n(29876),w=n(54244),x=function(e){var t=e.alertId,n=e.context,o=e.name,l=e.nodeId,d=e.status,c=e.lastStatusChange,u=e.fullyLoaded,s=e.isWebview,m=(0,i.m)(),f=m?r.H4:r.H0;return a.createElement(r.Flex,{column:!0,gap:4},a.createElement(r.Flex,{justifyContent:"between"},a.createElement(r.Flex,{alignItems:"center",gap:2},a.createElement(w.Z,{margin:m?null:[.5,0,0],flavour:d,"data-testid":"alertView-statusPill"},d),a.createElement(f,{"data-testid":"alertView-alertName"},o)),!1),a.createElement(r.Flex,{justifyContent:"between",alignItems:"center"},a.createElement(y.Z,{alertId:t}),!s&&u&&!m&&a.createElement(E,{lastStatusChange:c,alertId:t,context:n,name:o,nodeId:l,small:!0,testid:"alertView"})))},I=n(46189),C=n(63346),Z=p.default.img.withConfig({displayName:"sc-404__Illustration",componentId:"sc-4w81fg-0"})(["height:35%;width:35%;"]),k=p.default.div.withConfig({displayName:"sc-404__ButtonContainer",componentId:"sc-4w81fg-1"})(["margin:",";"],(0,r.getSizeBy)(4)),S=function(){var e="".concat(I.Z.assetsBaseURL,"/img/no-nodes-room.svg");return a.createElement(C.Ht,null,a.createElement(r.Flex,{column:!0,alignItems:"center",justifyItems:"center",justifyContent:"center",height:"100%",width:"100%",padding:[0,0,"10%"]},a.createElement(Z,{src:e,alt:"Unreachable alert",title:"Unreachable alert"}),a.createElement(r.H3,null,"We couldn't find the alert"),a.createElement(r.Text,null,"This can be a temporary problem of that specific alert."),a.createElement(k,null,a.createElement(r.Button,{label:"Retry",icon:"reload"}))))},D=n(50709),V=n(34735),T=n(82210),F=function(e){var t=e.children;return a.createElement(r.Flex,{background:"modalHeaderBackground",height:12,flex:!1,gap:4,padding:[0,2,0,4],alignItems:"center"},a.createElement(r.Icon,{name:"logo_s",color:"success",width:"23px"}),t)},L=function(e){var 
t=e.alertId,n=e.spaceId,f=e.roomId,g=e.isWebview,v=e.nodeName,p=(0,d.Hm)("name"),h=(0,V.Z)().isNodeRestricted,b=(0,o.E5)(t),E=b.fullyLoaded,y=void 0!==E&&E,w=b.fullyLoading,I=void 0===w||w,Z=b.info,k=b.units,L=b.lastStatusChangeValue,R=b.lastStatusChange,_=b.context,N=b.instance,B=b.name,M=b.nodeId,H=b.status,j=b.lastUpdated,A=b.value;(0,o.yx)(t,{spaceId:n,roomId:f});var U=(0,m.pK)(A,k),O=(0,m.pK)(L,k),W=(0,i.m)();return M&&h(M)?a.createElement(C.ZP,{feature:"AlertDetailsViewRestricted"},a.createElement(T.Z,null)):a.createElement(C.ZP,{feature:"AlertDetailsView"},a.createElement(r.Flex,{column:!0,width:W?"100%":{max:280},padding:W?null:[0,0,10],background:W?"modalBackground":null},!g&&W&&a.createElement(F,null,a.createElement(r.Flex,{column:!0},a.createElement(r.H6,{color:"textLite"},"ROOM"),a.createElement(r.Text,{"data-testid":"alertView-mobile-roomName"},p))),a.createElement(r.Flex,{column:!0,padding:W?[3]:[0],overflow:W?"auto":"visible",gap:3},a.createElement(x,{alertId:t,context:_,status:H,name:B,nodeId:M,lastStatusChange:R,fullyLoaded:y,isWebview:g}),y?null:I?a.createElement(l.Z,{title:"Loading alert..."}):a.createElement(S,null),y&&Z&&a.createElement(a.Fragment,null,a.createElement(u.Z,{iconName:"documentation"},"Alert Description"),a.createElement(r.Text,{"data-testid":"alertView-info"},Z),a.createElement(D.Z,{alertId:t})),y&&a.createElement(c.Z,{id:t,context:_,instance:N,formattedLastValue:U,formattedLastStatusChangeValue:O,lastStatusChange:R,lastUpdated:j,isFormattedValueLoaded:y,nodeId:M,status:H,testid:"alertView",spaceId:n,roomId:f}),y&&a.createElement(s.Z,{id:t,nodeName:v,testid:"alertView"})),W&&a.createElement(r.Box,{position:"sticky",padding:[4],background:"modalBackground",bottom:0,border:{side:"top",color:"border"}},a.createElement(r.TextSmall,null,"In order to ",a.createElement(r.TextSmall,{strong:!0},"Run Correlations")," or"," ",a.createElement(r.TextSmall,{strong:!0},"View the Chart")," you will have to visit this alert from its' dedicated page on a desktop device."))))}},52149:function(e,t,n){function a(e,t){if(null==e)throw new TypeError("assign requires that input parameter not be null or undefined");for(var n in t)Object.prototype.hasOwnProperty.call(t,n)&&(e[n]=t[n]);return e}n.d(t,{Z:function(){return a}})},82268:function(e,t,n){n.d(t,{Z:function(){return r}});var a=n(52149);function r(e){return(0,a.Z)({},e)}},49474:function(e,t,n){n.d(t,{Z:function(){return o}});var a=n(19013),r=n(13882);function o(e,t){(0,r.Z)(2,arguments);var n=(0,a.default)(e),o=(0,a.default)(t),l=n.getTime()-o.getTime();return l<0?-1:l>0?1:l}},12195:function(e,t,n){n.d(t,{Z:function(){return v}});var a=n(84314),r=n(24262),o=n(49474),l=n(19013),i=n(82268),d=n(52149),c=n(8958),u=n(13882),s=6e4,m=1440,f=30*m,g=365*m;function v(e,t,n){var v,p,h;(0,u.Z)(2,arguments);var b=(0,a.j)(),E=null!==(v=null!==(p=null===n||void 0===n?void 0:n.locale)&&void 0!==p?p:b.locale)&&void 0!==v?v:c.Z;if(!E.formatDistance)throw new RangeError("locale must contain localize.formatDistance property");var y=(0,o.Z)(e,t);if(isNaN(y))throw new RangeError("Invalid time value");var w,x,I=(0,d.Z)((0,i.Z)(n),{addSuffix:Boolean(null===n||void 0===n?void 0:n.addSuffix),comparison:y});y>0?(w=(0,l.default)(t),x=(0,l.default)(e)):(w=(0,l.default)(e),x=(0,l.default)(t));var C,Z=String(null!==(h=null===n||void 0===n?void 0:n.roundingMethod)&&void 0!==h?h:"round");if("floor"===Z)C=Math.floor;else if("ceil"===Z)C=Math.ceil;else{if("round"!==Z)throw new RangeError("roundingMethod must be 'floor', 'ceil' or 'round'");C=Math.round}var 
k,S=x.getTime()-w.getTime(),D=S/s,V=(S-((0,r.Z)(x)-(0,r.Z)(w)))/s,T=null===n||void 0===n?void 0:n.unit;if("second"===(k=T?String(T):D<1?"second":D<60?"minute":D0&&r.getRangeAt(0);t.append(n),n.select(),n.selectionStart=0,n.selectionEnd=e.length;let l=!1;try{l=document.execCommand("copy")}catch{}return n.remove(),o&&(r.removeAllRanges(),r.addRange(o)),a&&a.focus(),l}n.d(t,{Z:function(){return a}})}}]); \ No newline at end of file diff --git a/web/gui/v2/9768.3e539b24a5eb4979ea85.chunk.js b/web/gui/v2/9768.3e539b24a5eb4979ea85.chunk.js new file mode 100644 index 00000000000000..0545391567148e --- /dev/null +++ b/web/gui/v2/9768.3e539b24a5eb4979ea85.chunk.js @@ -0,0 +1 @@ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="8ba2d2b1-5d36-4131-8386-688d2b629dac",e._sentryDebugIdIdentifier="sentry-dbid-8ba2d2b1-5d36-4131-8386-688d2b629dac")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[9768],{39768:function(e,t,n){n.r(t),n.d(t,{default:function(){return C}});var r=n(45987),o=n(29439),a=(n(92222),n(9170),n(66992),n(41539),n(88674),n(34668),n(78783),n(33948),n(21249),n(57640),n(9924),n(69826),n(31672),n(2490),n(59461),n(26699),n(32023),n(74916),n(64765),n(15306),n(67294)),i=n(89250),c=n(17563),u=n(26398),s=n(62200),d=n(13477),f=n(25819),l=n(93017),p=n(76201),g=n(74059),v=n(6308),w=n(4942);n(47941),n(82526),n(57327),n(88449),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070);function h(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function m(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:{},r=e.origin,o=e.id,a=e.name;if(r){var i=decodeURIComponent(o),c=decodeURIComponent(r);(0,E.kq)(o).then((function(e){var r=e.data,o=void 0===r?[]:r;return n(i,c,a),o.length?u.Z.get(s.MC).then((function(e){var t=e.data;return Promise.any(t.map((function(e){return u.Z.get((0,O.W)({spaceId:e.id})).then((function(t){var n=t.data;return Promise.any(n.map((function(t){return(0,f.Fz)({roomId:t.id,spaceId:e.id}).then((function(n){var r=n.nodes.find((function(e){var t=e.id;return o.includes(t)}));if(!r)throw new Error("can't find matching node");var a=window.location,i=a.protocol,c=a.host;return"".concat(i,"//").concat(c,"/spaces/").concat(e.slug,"/rooms/").concat(t.slug,"/nodes/").concat(r.id)}))})))}))})))})).catch((function(){})):(0,E.YZ)(i).then((function(e){var n=e.data.claimed;throw t(n?{errorMsgKey:"ErrForbidden",errorMessage:"You tried to access this Node on Netdata and you don't have access to it. Please contact your Space admin to give you access to it."}:{errorMsgKey:"ErrForbidden",errorMessage:"This Node isn't connected to Netdata. 
Please connect it, if you have permission for it, or contact your Space admin."}),"no access"}))})).then((function(e){e&&setTimeout(location.assign(e))})).catch((function(){}))}}),[])}(),m=(0,P.Z)();if(!e||m.isFetching||m.hasAccess&&!w||n)return null;var _=window.location,C=_.pathname,D=_.search,R=_.hash,A=c.parse(R),N=A.error_retry,U=A.token,F=c.parse(D),z=F.cloudRoute,T=F.redirect_uri,M=(0,r.Z)(F,x);if(!t&&N)return a.createElement(Z,{errorRetry:N,token:U,redirectUri:T});if(!t&&w){if(z){var L=z.includes("join-callback")?decodeURI(z):z,H=Array.isArray(L)?L[0]:L;return a.createElement(i.Fg,{replace:!0,to:H})}return T?(h(M),window.location.replace(decodeURIComponent(T)),null):a.createElement(i.Fg,{replace:!0,to:"/spaces"})}if(I.ZP)return null;if(t&&!w){var G=R.includes("join-callback")?R:encodeURIComponent(R),K="".concat(D).concat(D?"&":"?","cloudRoute=").concat(C);return a.createElement(i.Fg,{replace:!0,to:{pathname:"/sign-in",search:K,hash:G}})}return null}},7335:function(e,t,n){n.d(t,{aG:function(){return E},H3:function(){return _},XU:function(){return O},DH:function(){return k},z2:function(){return y},lL:function(){return b}});var r=n(29439),o=n(93433),a=n(15861),i=n(4942),c=n(64687),u=n.n(c),s=(n(21249),n(57640),n(9924),n(85827),n(41539),n(25387),n(2490),n(72608),n(57327),n(88449),n(59849),n(26699),n(32023),n(92222),n(66992),n(70189),n(78783),n(88921),n(96248),n(13599),n(11477),n(64362),n(15389),n(90401),n(45164),n(91238),n(54837),n(87485),n(56767),n(76651),n(61437),n(35285),n(39865),n(33948),n(69826),n(31672),n(59461),n(2707),n(47941),n(82526),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(67294)),d=n(4480),f=n(89250),l=n(13477),p=(0,d.xu)({key:"visitedNodes",default:function(){return[]}}),g=n(97945),v=n(18761);function w(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function h(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{}).autoFetch,n=void 0===t||t,o=(0,d.FV)(p(e)),a=(0,r.Z)(o,2)[1],i=(0,s.useState)(0),c=(0,r.Z)(i,2),u=c[0],f=c[1],l=(0,s.useCallback)((function(){f((function(e){return e+1}))}),[f]);return(0,s.useEffect)((function(){e&&(n||u)&&(0,v.Fz)(e).then((function(e){if(e){var t=e.data.results.sort((function(e,t){return new Date(t.lastAccessTime)-new Date(e.lastAccessTime)}));a(t)}}))}),[n,u,e]),l},O=function(){var e=(0,l.jr)(),t=p(e);return(0,d._8)((function(e){var n=e.snapshot,r=e.set;return function(){var e=(0,a.Z)(u().mark((function e(o){var a,i,c,s;return u().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.next=2,n.getPromise((0,g.$E)(o));case 2:return a=e.sent,i=a.map((function(e){return e.machineGUID})),e.next=6,n.getPromise(t);case 6:c=e.sent,(s=c.filter((function(e){return!i.includes(e.id)}))).length!==c.length&&r(t,s);case 9:case"end":return e.stop()}}),e)})));return function(t){return e.apply(this,arguments)}}()}),[e])}},11060:function(e,t,n){var r=n(1702),o=Error,a=r("".replace),i=String(new o("zxcasd").stack),c=/\n\s*at [^:]*:[^\n]*/,u=c.test(i);e.exports=function(e,t){if(u&&"string"==typeof e&&!o.prepareStackTrace)for(;t--;)e=a(e,c,"");return e}},5392:function(e,t,n){var r=n(68880),o=n(11060),a=n(22914),i=Error.captureStackTrace;e.exports=function(e,t,n,c){a&&(i?i(e,t):r(e,"stack",o(n,c)))}},22914:function(e,t,n){var r=n(47293),o=n(79114);e.exports=!r((function(){var e=new Error("a");return!("stack"in 
e)||(Object.defineProperty(e,"stack",o(1,7)),7!==e.stack)}))},58340:function(e,t,n){var r=n(70111),o=n(68880);e.exports=function(e,t){r(t)&&"cause"in t&&o(e,"cause",t.cause)}},56277:function(e,t,n){var r=n(41340);e.exports=function(e,t){return void 0===e?arguments.length<2?"":t:r(e)}},56967:function(e,t,n){var r=n(82109),o=n(47976),a=n(79518),i=n(27674),c=n(99920),u=n(70030),s=n(68880),d=n(79114),f=n(58340),l=n(5392),p=n(20408),g=n(56277),v=n(5112)("toStringTag"),w=Error,h=[].push,m=function(e,t){var n,r=o(y,this);i?n=i(new w,r?a(this):y):(n=r?this:u(y),s(n,v,"Error")),void 0!==t&&s(n,"message",g(t)),l(n,m,n.stack,1),arguments.length>2&&f(n,arguments[2]);var c=[];return p(e,h,{that:c}),s(n,"errors",c),n};i?i(m,w):c(m,w,{name:!0});var y=m.prototype=u(w.prototype,{constructor:d(1,m),message:d(1,""),name:d(1,"AggregateError")});r({global:!0,constructor:!0,arity:2},{AggregateError:m})},9170:function(e,t,n){n(56967)},34668:function(e,t,n){var r=n(82109),o=n(46916),a=n(19662),i=n(35005),c=n(78523),u=n(12534),s=n(20408),d=n(80612),f="No one promise resolved";r({target:"Promise",stat:!0,forced:d},{any:function(e){var t=this,n=i("AggregateError"),r=c.f(t),d=r.resolve,l=r.reject,p=u((function(){var r=a(t.resolve),i=[],c=0,u=1,p=!1;s(e,(function(e){var a=c++,s=!1;u++,o(r,t,e).then((function(e){s||p||(p=!0,d(e))}),(function(e){s||p||(s=!0,i[a]=e,--u||l(new n(i,f)))}))})),--u||l(new n(i,f))}));return p.error&&l(p.value),r.promise}})}}]); \ No newline at end of file diff --git a/web/gui/v2/9886.7d6c2ffca80bd567e52d.chunk.js b/web/gui/v2/9886.7d6c2ffca80bd567e52d.chunk.js new file mode 100644 index 00000000000000..ec07d84e66ae0e --- /dev/null +++ b/web/gui/v2/9886.7d6c2ffca80bd567e52d.chunk.js @@ -0,0 +1 @@ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="9a380c62-3ba1-4bf6-b652-ab003fad57c0",e._sentryDebugIdIdentifier="sentry-dbid-9a380c62-3ba1-4bf6-b652-ab003fad57c0")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[9886],{39886:function(e,t,n){n.r(t),n.d(t,{default:function(){return C}});var l=n(87462),a=n(29439),o=n(67294),r=n(93416),i=n(13477),c=n(71893),d=(c.default.ul.withConfig({displayName:"styled__StyledList",componentId:"sc-z791f1-0"})(["list-style:disc outside none;margin-left:16px;"]),(0,c.default)(r.ModalContent).attrs((function(e){return{width:e.isMobile?{base:"95vw"}:{}}})).withConfig({displayName:"styled__TrialWelcomeModalContent",componentId:"sc-z791f1-1"})([""])),u=n(62195),s=n(29292),f=function(e){var t=e.onOptOutClick,n=e.isFailure;return o.createElement(r.Flex,{column:!0,gap:4,width:{max:120},padding:[0,0,4,0]},n?o.createElement(o.Fragment,null,o.createElement(r.TextBigger,null,"Enrolment to 30-day free Business trial failed."),o.createElement(r.TextBigger,{lineHeight:1.5},"Something unexpected happened when trying to enrol you to the free Business trial.")):o.createElement(o.Fragment,null,o.createElement(r.TextBigger,{lineHeight:1.5},"We are happy to upgrade your account to Netdata Business, for free, for"," ",o.createElement(r.TextBigger,{strong:!0},"30 days"),"."),o.createElement(r.TextBigger,null,"Enjoy the best of Netdata!"),"function"===typeof 
t&&o.createElement(s.Z,{onOptOutClick:t})))},m=n(46667),g=n(39979),b=n(63346),p=n(22648),E=n(60511),y=n(96009),h=n(80699),w=n(36285),_=n(33937),k=(0,g.Z)(r.Button),C=(0,o.memo)((function(){var e=(0,E.Z)().trialWelcomeVisible,t=(0,p.m)(),n=(0,i.Iy)("email"),c=(0,m.Z)(!0),s=(0,a.Z)(c,4),g=s[0],C=s[3],Z=(0,m.Z)(),x=(0,a.Z)(Z,3),B=x[0],F=x[2],I=(0,h.Z)(),T=I.isFailure,v=I.reset,M=(0,m.Z)(),P=(0,a.Z)(M,4),D=P[0],N=P[2],S=P[3],O=(0,E.Z)().refreshPlan,W=(0,_.pI)("trialModalDismissed"),H=(0,a.Z)(W,2)[1],R=(0,w.kK)(),L=(0,o.useCallback)((function(){N(),R({email:n}).then((function(){setTimeout((function(){O(),v(),S()}),2e3)})).catch((function(){S()}))}),[n]),j=(0,o.useMemo)((function(){return T?{feature:"CloseTrialEnrolmentErrorModal",label:"Close",flavour:"hollow"}:{feature:"TrialAccept",label:"OK",textTransform:"uppercase",flavour:"hollow",icon:"thumb_up"}}),[T]);return e?B?o.createElement(u.Z,{onDecline:F,onCancellingEnd:function(){localStorage.setItem(y.Bk,!0),F(),C()}}):g?o.createElement(r.Modal,{backdropProps:{backdropBlur:!0}},o.createElement(b.ZP,{feature:"TrialWelcome"},o.createElement(d,{isMobile:t},o.createElement(r.ModalHeader,null,o.createElement(r.Flex,{gap:2,alignItems:"center"},o.createElement(r.Icon,{name:"netdataPress",color:"text"}),o.createElement(r.H4,null,"Welcome to Netdata!"))),o.createElement(r.ModalBody,null,o.createElement(f,{isFailure:T})),o.createElement(r.ModalFooter,null,o.createElement(r.Flex,{gap:4,justifyContent:"end",padding:[1,2]},T&&o.createElement(k,{feature:"RetryTrialEnrolment",label:"Retry",onClick:L,disabled:D,isLoading:D}),o.createElement(k,(0,l.Z)({onClick:function(){var e,t;(C(),v(),localStorage.setItem(y.Bk,!0),!T&&null!==(e=window.posthog)&&void 0!==e&&e.setPersonPropertiesForFlags)&&(null===(t=window.posthog)||void 0===t||t.setPersonPropertiesForFlags({netdata_cloud_trial_modal_seen:!0}));H(!0)},disabled:D},j))))))):null:null}))}}]); \ No newline at end of file diff --git a/web/gui/v2/9893.b40d1bffe447771a2e43.chunk.js b/web/gui/v2/9893.b40d1bffe447771a2e43.chunk.js new file mode 100644 index 00000000000000..a30201dfcce94a --- /dev/null +++ b/web/gui/v2/9893.b40d1bffe447771a2e43.chunk.js @@ -0,0 +1 @@ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="cbaa299a-848b-4e08-8c81-c467adba81ee",e._sentryDebugIdIdentifier="sentry-dbid-cbaa299a-848b-4e08-8c81-c467adba81ee")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[9893],{99893:function(e,t,n){function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);t&&(i=i.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,i)}return n}function r(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,i=new Array(t);n=e.length?{done:!0}:{done:!1,value:e[i++]}},e:function(e){throw e},f:r}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var o,s=!0,a=!1;return{s:function(){n=n.call(e)},n:function(){var e=n.next();return s=e.done,e},e:function(e){a=!0,o=e},f:function(){try{s||null==n.return||n.return()}finally{if(a)throw 
o}}}}n.r(t),n.d(t,{Compression:function(){return Xt},PostHog:function(){return Qn},SurveyQuestionType:function(){return Xn},SurveyType:function(){return Jn},default:function(){return Zn},posthog:function(){return Zn}});var y={DEBUG:!1,LIB_VERSION:"1.88.2"},b=Array.isArray,w=Object.prototype,k=w.hasOwnProperty,S=w.toString,F=b||function(e){return"[object Array]"===S.call(e)},P=function(e){return"[object Uint8Array]"===S.call(e)},I=function(e){try{return/^\s*\bfunction\b/.test(e)}catch(e){return!1}},E=function(e){return e===Object(e)&&!F(e)},x=function(e){return void 0===e},O=function(e){return"[object String]"==S.call(e)},R=function(e){return null===e},T=function(e){return"[object Number]"==S.call(e)},$=function(e){return"[object Boolean]"===S.call(e)},C=Array.prototype,M=C.forEach,A=C.indexOf,D="undefined"!=typeof window?window:{},L=D.navigator||{userAgent:""},N=D.document||{},B=L.userAgent,q="[PostHog.js]",j={_log:function(e){if((y.DEBUG||D.POSTHOG_DEBUG)&&!x(D.console)&&D.console){for(var t=("__rrweb_original__"in D.console[e]?D.console[e].__rrweb_original__:D.console[e]),n=arguments.length,i=new Array(n>1?n-1:0),r=1;r1?t-1:0),i=1;i0&&(t[n]=e)})),t},K=["$performance_raw"];function Z(e){var t,n,i,r,o,s="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=",a=0,u=0,c="",l=[];if(!e)return e;e=ee(e);do{t=(o=e.charCodeAt(a++)<<16|e.charCodeAt(a++)<<8|e.charCodeAt(a++))>>18&63,n=o>>12&63,i=o>>6&63,r=63&o,l[u++]=s.charAt(t)+s.charAt(n)+s.charAt(i)+s.charAt(r)}while(a127&&s<2048?String.fromCharCode(s>>6|192,63&s|128):String.fromCharCode(s>>12|224,s>>6&63|128,63&s|128),R(a)||(n>t&&(o+=e.substring(t,n)),o+=a,t=n=r+1)}return n>t&&(o+=e.substring(t,e.length)),o},te=["ahrefsbot","applebot","baiduspider","bingbot","bingpreview","bot.htm","bot.php","crawler","duckduckbot","facebookexternal","facebookcatalog","gptbot","hubspot","linkedinbot","mj12bot","petalbot","pinterest","prerender","rogerbot","screaming frog","semrushbot","sitebulb","twitterbot","yahoo! 
slurp","yandexbot","adsbot-google","apis-google","duplexweb-google","feedfetcher-google","google favicon","google web preview","google-read-aloud","googlebot","googleweblight","mediapartners-google","storebot-google"],ne=function(){function e(t){return t&&(t.preventDefault=e.preventDefault,t.stopPropagation=e.stopPropagation),t}return e.preventDefault=function(){this.returnValue=!1},e.stopPropagation=function(){this.cancelBubble=!0},function(t,n,i,r,o){if(t)if(t.addEventListener&&!r)t.addEventListener(n,i,!!o);else{var s="on"+n,a=t[s];t[s]=function(t,n,i){return function(r){if(r=r||e(window.event)){var o,s=!0;I(i)&&(o=i(r));var a=n.call(t,r);return!1!==o&&!1!==a||(s=!1),s}}}(t,i,a)}else j.error("No valid element provided to register_event")}}();function ie(e,t){var n=function(){var n=N.createElement("script");n.type="text/javascript",n.src=e,n.onload=function(e){return t(void 0,e)},n.onerror=function(e){return t(e)};var i,r=N.querySelectorAll("body > script");r.length>0?null===(i=r[0].parentNode)||void 0===i||i.insertBefore(n,r[0]):N.body.appendChild(n)};N.body?n():N.addEventListener("DOMContentLoaded",n)}function re(e){switch(o(e.className)){case"string":return e.className;case"object":return("baseVal"in e.className?e.className.baseVal:null)||e.getAttribute("class")||"";default:return""}}function oe(e){var t="";return de(e)&&!fe(e)&&e.childNodes&&e.childNodes.length&&W(e.childNodes,(function(e){ue(e)&&e.textContent&&(t+=V(e.textContent).split(/(\s+)/).filter(he).join("").replace(/[\r\n]/g," ").replace(/[ ]+/g," ").substring(0,255))})),V(t)}function se(e){return!!e&&1===e.nodeType}function ae(e,t){return!!e&&!!e.tagName&&e.tagName.toLowerCase()===t.toLowerCase()}function ue(e){return!!e&&3===e.nodeType}function ce(e){return!!e&&11===e.nodeType}var le=["a","button","form","input","select","textarea","label"];function de(e){for(var t=e;t.parentNode&&!ae(t,"body");t=t.parentNode){var n=re(t).split(" ");if(z(n,"ph-sensitive")||z(n,"ph-no-capture"))return!1}if(z(re(e).split(" "),"ph-include"))return!0;var i=e.type||"";if(O(i))switch(i.toLowerCase()){case"hidden":case"password":return!1}var r=e.name||e.id||"";return!O(r)||!/^cc|cardnum|ccnum|creditcard|csc|cvc|cvv|exp|pass|pwd|routing|seccode|securitycode|securitynum|socialsec|socsec|ssn/i.test(r.replace(/[^a-zA-Z0-9]/g,""))}function fe(e){return!!(ae(e,"input")&&!["button","checkbox","submit","reset"].includes(e.type)||ae(e,"select")||ae(e,"textarea")||"true"===e.getAttribute("contenteditable"))}function he(e){if(R(e)||x(e))return!1;if(O(e)){if(e=V(e),/^(?:(4[0-9]{12}(?:[0-9]{3})?)|(5[1-5][0-9]{14})|(6(?:011|5[0-9]{2})[0-9]{12})|(3[47][0-9]{13})|(3(?:0[0-5]|[68][0-9])[0-9]{11})|((?:2131|1800|35[0-9]{3})[0-9]{11}))$/.test((e||"").replace(/[- ]/g,"")))return!1;if(/(^\d{3}-?\d{2}-?\d{4}$)/.test(e))return!1}return!0}function pe(e){var t=oe(e);return he(t="".concat(t," ").concat(_e(e)).trim())?t:""}function _e(e){var t="";return e&&e.childNodes&&e.childNodes.length&&W(e.childNodes,(function(e){var n;if(e&&"span"===(null===(n=e.tagName)||void 0===n?void 0:n.toLowerCase()))try{var i=oe(e);t="".concat(t," ").concat(i).trim(),e.childNodes&&e.childNodes.length&&(t="".concat(t," ").concat(_e(e)).trim())}catch(e){j.error(e)}})),t}var ge=function(){function e(t){s(this,e),this.clicks=[],this.enabled=t}return u(e,[{key:"isRageClick",value:function(e,t,n){if(!this.enabled)return!1;var i=this.clicks[this.clicks.length-1];if(i&&Math.abs(e-i.x)+Math.abs(t-i.y)<30&&n-i.timestamp<1e3){if(this.clicks.push({x:e,y:t,timestamp:n}),3===this.clicks.length)return!0}else 
this.clicks=[{x:e,y:t,timestamp:n}];return!1}}]),e}(),ve="$people_distinct_id",me="__alias",ye="__timers",be="$autocapture_disabled_server_side",we="$session_recording_enabled_server_side",ke="$console_log_recording_enabled_server_side",Se="$session_recording_recorder_version_server_side",Fe="$sesid",Pe="$session_is_sampled",Ie="$enabled_feature_flags",Ee="$early_access_features",xe="$stored_person_properties",Oe="$stored_group_properties",Re="$surveys",Te="$flag_call_reported",$e="$user_state",Ce="$posthog_quota_limited",Me="$client_session_props",Ae=[ve,me,"__cmpns",ye,we,Fe,Ie,$e,Ce,Ee,Oe,xe,Re,Te,Me];function De(e,t){return t.length>e?t.slice(0,e)+"...":t}var Le={_initializedTokens:[],_isDisabledServerSide:null,_isAutocaptureEnabled:!1,_setIsAutocaptureEnabled:function(e){var t,n=R(this._isDisabledServerSide)?!(null===(t=e.persistence)||void 0===t||!t.props[be]):this._isDisabledServerSide,i=!!e.config.autocapture;this._isAutocaptureEnabled=i&&!n},_previousElementSibling:function(e){if(e.previousElementSibling)return e.previousElementSibling;var t=e;do{t=t.previousSibling}while(t&&!se(t));return t},_getAugmentPropertiesFromElement:function(e){if(!de(e))return{};var t={};return W(e.attributes,(function(e){if(0===e.name.indexOf("data-ph-capture-attribute")){var n=e.name.replace("data-ph-capture-attribute-",""),i=e.value;n&&i&&he(i)&&(t[n]=i)}})),t},_getPropertiesFromElement:function(e,t,n){var i,r=e.tagName.toLowerCase(),o={tag_name:r};le.indexOf(r)>-1&&!n&&("a"===r.toLowerCase()||"button"===r.toLowerCase()?o.$el_text=De(1024,pe(e)):o.$el_text=De(1024,oe(e)));var s=re(e);s.length>0&&(o.classes=s.split(" ").filter((function(e){return""!==e})));var a=null===(i=this.config)||void 0===i?void 0:i.element_attribute_ignorelist;W(e.attributes,(function(n){var i;fe(e)&&-1===["name","id","class"].indexOf(n.name)||null!=a&&a.includes(n.name)||!t&&he(n.value)&&(i=n.name,!O(i)||"_ngcontent"!==i.substring(0,10)&&"_nghost"!==i.substring(0,7))&&(o["attr__"+n.name]=De(1024,n.value))}));for(var u=1,c=1,l=e;l=this._previousElementSibling(l);)u++,l.tagName===e.tagName&&c++;return o.nth_child=u,o.nth_of_type=c,o},_getDefaultProperties:function(e){return{$event_type:e,$ce_version:1}},_extractCustomPropertyValue:function(e){var t=[];return W(document.querySelectorAll(e.css_selector),(function(e){var n;["input","select"].indexOf(e.tagName.toLowerCase())>-1?n=e.value:e.textContent&&(n=e.textContent),he(n)&&t.push(n)})),t.join(", ")},_getCustomProperties:function(e){var t=this,n={};return W(this._customProperties,(function(i){W(i.event_selectors,(function(r){W(document.querySelectorAll(r),(function(r){z(e,r)&&de(r)&&(n[i.name]=t._extractCustomPropertyValue(i))}))}))})),n},_getEventTarget:function(e){return x(e.target)?e.srcElement||null:null!==(t=e.target)&&void 0!==t&&t.shadowRoot?e.composedPath()[0]||null:e.target||null;var t},_captureEvent:function(e,t){var n,i=this,r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"$autocapture",o=this._getEventTarget(e);if(ue(o)&&(o=o.parentNode||null),"$autocapture"===r&&"click"===e.type&&e instanceof MouseEvent&&null!==(n=this.rageclicks)&&void 0!==n&&n.isRageClick(e.clientX,e.clientY,(new Date).getTime())&&this._captureEvent(e,t,"$rageclick"),o&&function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:void 0;if(!e||ae(e,"html")||!se(e))return!1;if(null!=n&&n.url_allowlist){var i=window.location.href,r=n.url_allowlist;if(r&&!r.some((function(e){return i.match(e)})))return!1}if(null!=n&&n.dom_event_allowlist){var 
o=n.dom_event_allowlist;if(o&&!o.some((function(e){return t.type===e})))return!1}if(null!=n&&n.element_allowlist){var s=n.element_allowlist;if(s&&!s.some((function(t){return e.tagName.toLowerCase()===t})))return!1}if(null!=n&&n.css_selector_allowlist){var a=n.css_selector_allowlist;if(a&&!a.some((function(t){return e.matches(t)})))return!1}for(var u=!1,c=[e],l=!0,d=e;d.parentNode&&!ae(d,"body");)if(ce(d.parentNode))c.push(d.parentNode.host),d=d.parentNode.host;else{if(!(l=d.parentNode||!1))break;if(le.indexOf(l.tagName.toLowerCase())>-1)u=!0;else{var f=window.getComputedStyle(l);f&&"pointer"===f.getPropertyValue("cursor")&&(u=!0)}c.push(l),d=l}var h=window.getComputedStyle(e);if(h&&"pointer"===h.getPropertyValue("cursor")&&"click"===t.type)return!0;var p=e.tagName.toLowerCase();switch(p){case"html":return!1;case"form":return"submit"===t.type;case"input":case"select":case"textarea":return"change"===t.type||"click"===t.type;default:return u?"click"===t.type:"click"===t.type&&(le.indexOf(p)>-1||"true"===e.getAttribute("contenteditable"))}}(o,e,this.config)){for(var s,a,u=[o],c=o;c.parentNode&&!ae(c,"body");)ce(c.parentNode)?(u.push(c.parentNode.host),c=c.parentNode.host):(u.push(c.parentNode),c=c.parentNode);var l,d=[],f={},h=!1;if(W(u,(function(e){var n=de(e);"a"===e.tagName.toLowerCase()&&(l=e.getAttribute("href"),l=n&&he(l)&&l),z(re(e).split(" "),"ph-no-capture")&&(h=!0),d.push(i._getPropertiesFromElement(e,t.config.mask_all_element_attributes,t.config.mask_all_text));var r=i._getAugmentPropertiesFromElement(e);G(f,r)})),t.config.mask_all_text||("a"===o.tagName.toLowerCase()||"button"===o.tagName.toLowerCase()?d[0].$el_text=pe(o):d[0].$el_text=oe(o)),l&&(d[0].attr__href=l),h)return!1;var p=G(this._getDefaultProperties(e.type),{$elements:d},null!==(s=d[0])&&void 0!==s&&s.$el_text?{$el_text:null===(a=d[0])||void 0===a?void 0:a.$el_text}:{},this._getCustomProperties(u),f);return t.capture(r,p),!0}},_navigate:function(e){window.location.href=e},_addDomEventHandlers:function(e){var t=this,n=function(n){n=n||window.event,t._captureEvent(n,e)};ne(document,"submit",n,!1,!0),ne(document,"change",n,!1,!0),ne(document,"click",n,!1,!0)},_customProperties:[],rageclicks:null,config:void 0,init:function(e){var t;$(e.__autocapture)||(this.config=e.__autocapture),null!==(t=this.config)&&void 0!==t&&t.url_allowlist&&(this.config.url_allowlist=this.config.url_allowlist.map((function(e){return new RegExp(e)}))),this.rageclicks=new ge(e.config.rageclick)},afterDecideResponse:function(e,t){var n=t.config.token;this._initializedTokens.indexOf(n)>-1?j.info('autocapture already initialized for token "'+n+'"'):(t.persistence&&t.persistence.register(c({},be,!!e.autocapture_opt_out)),this._isDisabledServerSide=!!e.autocapture_opt_out,this._setIsAutocaptureEnabled(t),this._initializedTokens.push(n),e&&e.config&&e.config.enable_collect_everything&&this._isAutocaptureEnabled?(e.custom_properties&&(this._customProperties=e.custom_properties),this._addDomEventHandlers(t)):t.__autocapture=!1)},enabledForProject:function(e,t,n){if(!e)return!0;t=x(t)?10:t,n=x(n)?10:n;for(var i=0,r=0;r1&&void 0!==arguments[1]?arguments[1]:{};if(this.instance.decideEndpointWasHit||this.getFlags()&&this.getFlags().length>0){var n,i=this.getFlagVariants()[e],r="".concat(i),o=this.instance.get_property(Te)||{};return!t.send_event&&"send_event"in t||e in o&&o[e].includes(r)||(F(o[e])?o[e].push(r):o[e]=[r],null===(n=this.instance.persistence)||void 
0===n||n.register(c({},Te,o)),this.instance.capture("$feature_flag_called",{$feature_flag:e,$feature_flag_response:i})),i}j.warn('getFeatureFlag for key "'+e+"\" failed. Feature flags didn't load in time.")}},{key:"getFeatureFlagPayload",value:function(e){return this.getFlagPayloads()[e]}},{key:"isFeatureEnabled",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if(this.instance.decideEndpointWasHit||this.getFlags()&&this.getFlags().length>0)return!!this.getFeatureFlag(e,t);j.warn('isFeatureEnabled for key "'+e+"\" failed. Feature flags didn't load in time.")}},{key:"addFeatureFlagsHandler",value:function(e){this.featureFlagEventHandlers.push(e)}},{key:"removeFeatureFlagsHandler",value:function(e){this.featureFlagEventHandlers=this.featureFlagEventHandlers.filter((function(t){return t!==e}))}},{key:"receivedFeatureFlags",value:function(e){if(this.instance.persistence){this.instance.decideEndpointWasHit=!0;var t=this.getFlagVariants(),n=this.getFlagPayloads();!function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},i=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},o=e.featureFlags,s=e.featureFlagPayloads;if(o)if(F(o)){var a,u={};if(o)for(var l=0;l1&&void 0!==arguments[1]&&arguments[1],i=this.instance.get_property(Ee);if(i&&!n)return e(i);this.instance._send_request("".concat(this.instance.config.api_host,"/api/early_access_features/?token=").concat(this.instance.config.token),{},{method:"GET"},(function(n){var i,r=n.earlyAccessFeatures;return null===(i=t.instance.persistence)||void 0===i||i.register(c({},Ee,r)),e(r)}))}},{key:"_prepareFeatureFlagsForCallbacks",value:function(){var e=this.getFlags(),t=this.getFlagVariants();return{flags:e.filter((function(e){return t[e]})),flagVariants:Object.keys(t).filter((function(e){return t[e]})).reduce((function(e,n){return e[n]=t[n],e}),{})}}},{key:"_fireFeatureFlagsCallbacks",value:function(){var e=this._prepareFeatureFlagsForCallbacks(),t=e.flags,n=e.flagVariants;this.featureFlagEventHandlers.forEach((function(e){return e(t,n)}))}},{key:"setPersonPropertiesForFlags",value:function(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1],n=this.instance.get_property(xe)||{};this.instance.register(c({},xe,r(r({},n),e))),t&&this.instance.reloadFeatureFlags()}},{key:"resetPersonPropertiesForFlags",value:function(){this.instance.unregister(xe)}},{key:"setGroupPropertiesForFlags",value:function(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1],n=this.instance.get_property(Oe)||{};0!==Object.keys(n).length&&Object.keys(n).forEach((function(t){n[t]=r(r({},n[t]),e[t]),delete e[t]})),this.instance.register(c({},Oe,r(r({},n),e))),t&&this.instance.reloadFeatureFlags()}},{key:"resetGroupPropertiesForFlags",value:function(e){if(e){var t=this.instance.get_property(Oe)||{};this.instance.register(c({},Oe,r(r({},t),{},c({},e,{}))))}else this.instance.unregister(Oe)}}]),e}(),Ue=/[a-z0-9][a-z0-9-]+\.[a-z]{2,}$/i,We={is_supported:function(){return!0},error:function(e){j.error("cookieStore error: "+e)},get:function(e){try{for(var t=e+"=",n=document.cookie.split(";").filter((function(e){return e.length})),i=0;i1e3?B.substring(0,997)+"...":B,$browser_version:it.browserVersion(B,navigator.vendor,window.opera),$browser_language:it.browserLanguage(),$screen_height:null===(i=window)||void 0===i?void 0:i.screen.height,$screen_width:null===(r=window)||void 0===r?void 0:r.screen.width,$viewport_height:null===(o=window)||void 0===o?void 
0:o.innerHeight,$viewport_width:null===(s=window)||void 0===s?void 0:s.innerWidth,$lib:"web",$lib_version:y.LIB_VERSION,$insert_id:Math.random().toString(36).substring(2,10)+Math.random().toString(36).substring(2,10),$time:Y()/1e3})},people_properties:function(){var e=it.os(B),t=e.os_name,n=e.os_version;return G(X({$os:t,$os_version:n,$browser:it.browser(B,navigator.vendor,window.opera)}),{$browser_version:it.browserVersion(B,navigator.vendor,window.opera)})}},rt=["cookie","localstorage","localstorage+cookie","sessionstorage","memory"],ot=function(){function e(t){s(this,e);var n="";t.token&&(n=t.token.replace(/\+/g,"PL").replace(/\//g,"SL").replace(/=/g,"EQ")),this.props={},this.campaign_params_saved=!1,this.custom_campaign_params=t.custom_campaign_params||[],t.persistence_name?this.name="ph_"+t.persistence_name:this.name="ph_"+n+"_posthog",-1===rt.indexOf(t.persistence.toLowerCase())&&(j.critical("Unknown persistence type "+t.persistence+"; falling back to cookie"),t.persistence="cookie");var i=t.persistence.toLowerCase();"localstorage"===i&&ze.is_supported()?this.storage=ze:"localstorage+cookie"===i&&Qe.is_supported()?this.storage=Qe:"sessionstorage"===i&&Ze.is_supported()?this.storage=Ze:this.storage="memory"===i?Xe:We,this.user_state="anonymous",this.load(),this.update_config(t),this.save()}return u(e,[{key:"properties",value:function(){var e={};return W(this.props,(function(t,n){if(n===Ie&&E(t))for(var i=Object.keys(t),r=0;r1&&void 0!==arguments[1]?arguments[1]:{};s(this,e),c(this,"bucketSize",100),c(this,"refillRate",10),c(this,"mutationBuckets",{}),c(this,"loggedTracker",{}),c(this,"refillBuckets",(function(){Object.keys(r.mutationBuckets).forEach((function(e){r.mutationBuckets[e]=r.mutationBuckets[e]+r.refillRate,r.mutationBuckets[e]>=r.bucketSize&&delete r.mutationBuckets[e]}))})),c(this,"getNodeOrRelevantParent",(function(e){var t=r.rrweb.mirror.getNode(e);if("svg"!==(null==t?void 0:t.nodeName)&&t instanceof Element){var n=t.closest("svg");if(n)return[r.rrweb.mirror.getId(n),n]}return[e,t]})),c(this,"numberOfChanges",(function(e){var t,n,i,r,o,s,a,u;return(null!==(t=null===(n=e.removes)||void 0===n?void 0:n.length)&&void 0!==t?t:0)+(null!==(i=null===(r=e.attributes)||void 0===r?void 0:r.length)&&void 0!==i?i:0)+(null!==(o=null===(s=e.texts)||void 0===s?void 0:s.length)&&void 0!==o?o:0)+(null!==(a=null===(u=e.adds)||void 0===u?void 0:u.length)&&void 0!==a?a:0)})),c(this,"throttleMutations",(function(e){if(3!==e.type||0!==e.data.source)return e;var t=e.data,n=r.numberOfChanges(t);t.attributes&&(t.attributes=t.attributes.filter((function(e){var t,n,i,o=p(r.getNodeOrRelevantParent(e.id),2),s=o[0],a=o[1];return 0!==r.mutationBuckets[s]&&(r.mutationBuckets[s]=null!==(t=r.mutationBuckets[s])&&void 0!==t?t:r.bucketSize,r.mutationBuckets[s]=Math.max(r.mutationBuckets[s]-1,0),0===r.mutationBuckets[s]&&(r.loggedTracker[s]||(r.loggedTracker[s]=!0,null===(n=(i=r.options).onBlockedNode)||void 0===n||n.call(i,s,a))),e)})));var i=r.numberOfChanges(t);return 0!==i||n===i?e:void 0})),this.rrweb=t,this.options=o,this.refillRate=null!==(n=this.options.refillRate)&&void 0!==n?n:this.refillRate,this.bucketSize=null!==(i=this.options.bucketSize)&&void 0!==i?i:this.bucketSize,setInterval((function(){r.refillBuckets()}),1e3)})),ct=function(e){return 
e[e.DomContentLoaded=0]="DomContentLoaded",e[e.Load=1]="Load",e[e.FullSnapshot=2]="FullSnapshot",e[e.IncrementalSnapshot=3]="IncrementalSnapshot",e[e.Meta=4]="Meta",e[e.Custom=5]="Custom",e[e.Plugin=6]="Plugin",e}(ct||{});!function(e){e[e.Mutation=0]="Mutation",e[e.MouseMove=1]="MouseMove",e[e.MouseInteraction=2]="MouseInteraction",e[e.Scroll=3]="Scroll",e[e.ViewportResize=4]="ViewportResize",e[e.Input=5]="Input",e[e.TouchMove=6]="TouchMove",e[e.MediaInteraction=7]="MediaInteraction",e[e.StyleSheetRule=8]="StyleSheetRule",e[e.CanvasMutation=9]="CanvasMutation",e[e.Font=10]="Font",e[e.Log=11]="Log",e[e.Drag=12]="Drag",e[e.StyleDeclaration=13]="StyleDeclaration",e[e.Selection=14]="Selection",e[e.AdoptedStyleSheet=15]="AdoptedStyleSheet"}(Ne||(Ne={}));var lt=[Ne.MouseMove,Ne.MouseInteraction,Ne.Scroll,Ne.ViewportResize,Ne.Input,Ne.TouchMove,Ne.MediaInteraction,Ne.Drag],dt=function(){function e(t){var n=this;if(s(this,e),c(this,"isIdle",!1),c(this,"_linkedFlagSeen",!1),c(this,"_lastActivityTimestamp",Date.now()),c(this,"windowId",null),c(this,"sessionId",null),c(this,"_linkedFlag",null),c(this,"_sampleRate",null),c(this,"_minimumDuration",null),this.instance=t,this._captureStarted=!1,this._endpoint="/s/",this.stopRrweb=void 0,this.receivedDecide=!1,window.addEventListener("beforeunload",(function(){n._flushBuffer()})),!this.instance.sessionManager)throw j.error("Session recording started without valid sessionManager"),new Error("Session recording started without valid sessionManager. This is a bug.");this.buffer=this.clearBuffer()}return u(e,[{key:"started",get:function(){return this._captureStarted}},{key:"sessionManager",get:function(){if(!this.instance.sessionManager)throw j.error("Session recording started without valid sessionManager"),new Error("Session recording started without valid sessionManager. 
This is a bug.");return this.instance.sessionManager}},{key:"isSampled",get:function(){return T(this._sampleRate)?this.instance.get_property(Pe):null}},{key:"sessionDuration",get:function(){var e,t,n=null===(e=this.buffer)||void 0===e?void 0:e.data[(null===(t=this.buffer)||void 0===t?void 0:t.data.length)-1],i=this.sessionManager.checkAndGetSessionAndWindowId(!0).sessionStartTimestamp;return n?n.timestamp-i:null}},{key:"isRecordingEnabled",get:function(){var e=!!this.instance.get_property(we),t=!this.instance.config.disable_session_recording;return e&&t}},{key:"isConsoleLogCaptureEnabled",get:function(){var e=!!this.instance.get_property(ke),t=this.instance.config.enable_recording_console_log;return null!=t?t:e}},{key:"recordingVersion",get:function(){var e,t=this.instance.get_property(Se);return(null===(e=this.instance.config.session_recording)||void 0===e?void 0:e.recorderVersion)||t||"v1"}},{key:"status",get:function(){return this.receivedDecide?this.isRecordingEnabled?O(this._linkedFlag)&&!this._linkedFlagSeen?"buffering":$(this.isSampled)?this.isSampled?"sampled":"disabled":"active":"disabled":"buffering"}},{key:"startRecordingIfEnabled",value:function(){this.isRecordingEnabled?this.startCaptureAndTrySendingQueuedSnapshots():(this.stopRecording(),this.clearBuffer())}},{key:"stopRecording",value:function(){this._captureStarted&&this.stopRrweb&&(this.stopRrweb(),this.stopRrweb=void 0,this._captureStarted=!1)}},{key:"makeSamplingDecision",value:function(e){var t,n=this.sessionId!==e;if(T(this._sampleRate)){var i,r=this.isSampled;(i=n||!$(r)?Math.random()1&&void 0!==arguments[1]?arguments[1]:"log";null===(t=this.instance.sessionRecording)||void 0===t||t.onRRwebEmit({type:6,data:{plugin:"rrweb/console@1",payload:{level:n,trace:[],payload:[JSON.stringify(e)]}},timestamp:Y()})}},{key:"startCaptureAndTrySendingQueuedSnapshots",value:function(){this._startCapture()}},{key:"_startCapture",value:function(){var e=this;if(!x(Object.assign)&&!this._captureStarted&&!this.instance.config.disable_session_recording){this._captureStarted=!0,this.sessionManager.checkAndGetSessionAndWindowId();var t="v2"===this.recordingVersion?"recorder-v2.js":"recorder.js";this.instance.__loaded_recorder_version!==this.recordingVersion?ie(this.instance.config.api_host+"/static/".concat(t,"?v=").concat(y.LIB_VERSION),(function(n){if(n)return j.error("Could not load ".concat(t),n);e._onScriptLoaded()})):this._onScriptLoaded()}}},{key:"_isInteractiveEvent",value:function(e){var t;return 3===e.type&&-1!==lt.indexOf(null===(t=e.data)||void 0===t?void 0:t.source)}},{key:"_updateWindowAndSessionIds",value:function(e){var t=this._isInteractiveEvent(e);if(t||this.isIdle||e.timestamp-this._lastActivityTimestamp>3e5&&(this.isIdle=!0),t&&(this._lastActivityTimestamp=e.timestamp,this.isIdle&&(this.isIdle=!1,this._tryTakeFullSnapshot())),!this.isIdle){var n=this.sessionManager.checkAndGetSessionAndWindowId(!t,e.timestamp),i=n.windowId,r=n.sessionId,o=this.sessionId!==r,s=this.windowId!==i;-1===[st,at].indexOf(e.type)&&(s||o)&&this._tryTakeFullSnapshot(),this.windowId=i,this.sessionId=r}}},{key:"_tryTakeFullSnapshot",value:function(){if(!this._captureStarted)return!1;try{var e;return null===(e=this.rrwebRecord)||void 0===e||e.takeFullSnapshot(),!0}catch(e){return j.error("Error taking full snapshot.",e),!1}}},{key:"_onScriptLoaded",value:function(){var e,t=this,n={blockClass:"ph-no-capture",blockSelector:void 0,ignoreClass:"ph-ignore-input",maskTextClass:"ph-mask",maskTextSelector:void 0,maskTextFn:void 
0,maskAllInputs:!0,maskInputOptions:{},maskInputFn:void 0,slimDOMOptions:{},collectFonts:!1,inlineStylesheet:!0,recordCrossOriginIframes:!1};this.rrwebRecord=window.rrweb?window.rrweb.record:window.rrwebRecord;for(var i=this.instance.config.session_recording,o=0,s=Object.entries(i||{});o10&&(t.data.payload.payload=t.data.payload.payload.slice(0,10),t.data.payload.payload.push("...[truncated]"));for(var n=[],i=0;i2e3?n.push(t.data.payload.payload[i].slice(0,2e3)+"...[truncated]"):n.push(t.data.payload.payload[i]);return t.data.payload.payload=n,e}return e}(n),r=JSON.stringify(i).length;if(this._updateWindowAndSessionIds(i),!this.isIdle){var o={$snapshot_bytes:r,$snapshot_data:i,$session_id:this.sessionId,$window_id:this.windowId};"disabled"!==this.status?this._captureSnapshotBuffered(o):this.clearBuffer()}}}}},{key:"_maskUrl",value:function(e){var t=this.instance.config.session_recording;if(t.maskNetworkRequestFn){var n,i={url:e};return null===(n=i=t.maskNetworkRequestFn(i))||void 0===n?void 0:n.url}return e}},{key:"clearBuffer",value:function(){return this.buffer=void 0,{size:0,data:[],sessionId:this.sessionId,windowId:this.windowId}}},{key:"_flushBuffer",value:function(){var e=this;this.flushBufferTimer&&(clearTimeout(this.flushBufferTimer),this.flushBufferTimer=void 0);var t=this._minimumDuration,n=this.sessionDuration,i=T(t)&&T(n)&&n943718.4||this.buffer.sessionId&&this.buffer.sessionId!==this.sessionId)&&(this.buffer=this._flushBuffer()),R(this.buffer.sessionId)&&!R(this.sessionId)&&(this.buffer.sessionId=this.sessionId,this.buffer.windowId=this.windowId),this.buffer.size+=e.$snapshot_bytes,this.buffer.data.push(e.$snapshot_data),this.flushBufferTimer||(this.flushBufferTimer=setTimeout((function(){n._flushBuffer()}),2e3))}},{key:"_captureSnapshot",value:function(e){this.instance.capture("$snapshot",e,{transport:"XHR",method:"POST",endpoint:this._endpoint,_noTruncate:!0,_batchKey:"recordings",_metrics:{rrweb_full_snapshot:e.$snapshot_data.type===st}})}}]),e}(),ft={entryType:0,timeOrigin:1,name:2,startTime:3,redirectStart:4,redirectEnd:5,workerStart:6,fetchStart:7,domainLookupStart:8,domainLookupEnd:9,connectStart:10,secureConnectionStart:11,connectEnd:12,requestStart:13,responseStart:14,responseEnd:15,decodedBodySize:16,encodedBodySize:17,initiatorType:18,nextHopProtocol:19,renderBlockingStatus:20,responseStatus:21,transferSize:22,element:23,renderTime:24,loadTime:25,size:26,id:27,url:28,domComplete:29,domContentLoadedEvent:30,domInteractive:31,loadEventEnd:32,loadEventStart:33,redirectCount:34,navigationType:35,unloadEventEnd:36,unloadEventStart:37,duration:39,timestamp:40},ht=["first-input","navigation","paint","resource"],pt=["/s/","/e/"],_t=function(){function e(t){s(this,e),c(this,"_forceAllowLocalhost",!1),this.instance=t}return u(e,[{key:"startObservingIfEnabled",value:function(){this.isEnabled()?this.startObserving():this.stopObserving()}},{key:"startObserving",value:function(){var e,t,n=this;if(!this.observer)if(x(null===(e=window)||void 0===e||null===(t=e.PerformanceObserver)||void 0===t?void 0:t.supportedEntryTypes))j.info("[PerformanceObserver] not started because PerformanceObserver is not supported by this browser.");else if(!et.includes(location.hostname)||this._forceAllowLocalhost)try{this.observer=new PerformanceObserver((function(e){e.getEntries().forEach((function(e){n._capturePerformanceEvent(e)}))}));var i=PerformanceObserver.supportedEntryTypes.filter((function(e){return ht.includes(e)}));i.forEach((function(e){var t;null===(t=n.observer)||void 
0===t||t.observe({type:e,buffered:!0})}))}catch(e){j.error("PostHog failed to start performance observer",e),this.stopObserving()}else j.info("[PerformanceObserver] not started because we are on localhost.")}},{key:"stopObserving",value:function(){this.observer&&(this.observer.disconnect(),this.observer=void 0)}},{key:"isObserving",value:function(){return!!this.observer}},{key:"isEnabled",value:function(){var e,t;return null!==(e=null!==(t=this.instance.config.capture_performance)&&void 0!==t?t:this.remoteEnabled)&&void 0!==e&&e}},{key:"afterDecideResponse",value:function(e){this.remoteEnabled=e.capturePerformance||!1,this.isEnabled()&&this.startObserving()}},{key:"_capturePerformanceEvent",value:function(e){if(0===e.name.indexOf(this.instance.config.api_host)){var t=e.name.replace(this.instance.config.api_host,"");if(pt.find((function(e){return 0===t.indexOf(e)})))return}var n={url:e.name},i=this.instance.config.session_recording;if(i.maskNetworkRequestFn&&(n=i.maskNetworkRequestFn(n)),n){var r=e.toJSON();r.name=n.url;var o={},s=Math.floor(Date.now()-performance.now());for(var a in o[ft.timeOrigin]=s,o[ft.timestamp]=Math.floor(s+e.startTime),ft)x(r[a])||(o[ft[a]]=r[a]);if(this.capturePerformanceEvent(o),gt(e)){var u,l=m(e.serverTiming||[]);try{for(l.s();!(u=l.n()).done;){var d,f=u.value;this.capturePerformanceEvent((c(d={},ft.timeOrigin,s),c(d,ft.timestamp,Math.floor(s+e.startTime)),c(d,ft.name,f.name),c(d,ft.duration,f.duration),c(d,ft.entryType,"serverTiming"),d))}}catch(e){l.e(e)}finally{l.f()}}}}},{key:"capturePerformanceEvent",value:function(e){var t,n=e[ft.timestamp];null===(t=this.instance.sessionRecording)||void 0===t||t.onRRwebEmit({type:6,data:{plugin:"posthog/network@1",payload:e},timestamp:n})}}]),e}(),gt=function(e){return"navigation"===e.entryType||"resource"===e.entryType},vt=function(){function e(t){s(this,e),this.instance=t,this.instance.decideEndpointWasHit=this.instance._hasBootstrappedFeatureFlags()}return u(e,[{key:"call",value:function(){var e=this,t=Z(JSON.stringify({token:this.instance.config.token,distinct_id:this.instance.get_distinct_id(),groups:this.instance.getGroups(),person_properties:this.instance.get_property(xe),group_properties:this.instance.get_property(Oe),disable_flags:this.instance.config.advanced_disable_feature_flags||this.instance.config.advanced_disable_feature_flags_on_first_load||void 0}));this.instance._send_request("".concat(this.instance.config.api_host,"/decide/?v=3"),{data:t,verbose:!0},{method:"POST"},(function(t){return e.parseDecideResponse(t)}))}},{key:"parseDecideResponse",value:function(e){var t,n,i,r,o=this;if(this.instance.featureFlags.setReloadingPaused(!1),this.instance.featureFlags._startReloadTimer(),0!==(null==e?void 0:e.status)){if(!document||!document.body)return j.info("document not ready yet, trying again in 500 milliseconds..."),void setTimeout((function(){o.parseDecideResponse(e)}),500);this.instance.toolbar.afterDecideResponse(e),null===(t=this.instance.sessionRecording)||void 0===t||t.afterDecideResponse(e),Le.afterDecideResponse(e,this.instance),null===(n=this.instance.webPerformance)||void 0===n||n.afterDecideResponse(e),this.instance._afterDecideResponse(e),this.instance.config.advanced_disable_feature_flags_on_first_load||this.instance.featureFlags.receivedFeatureFlags(e);var s=null===(i=window)||void 0===i?void 0:i.extendPostHogWithSurveys;e.surveys&&!s&&ie(this.instance.config.api_host+"/static/surveys.js",(function(e){if(e)return j.error("Could not load surveys 
script",e);window.extendPostHogWithSurveys(o.instance)}));var a=null===(r=window)||void 0===r?void 0:r.extendPostHogWithExceptionAutoCapture;if(e.autocaptureExceptions&&e.autocaptureExceptions&&x(a)&&ie(this.instance.config.api_host+"/static/exception-autocapture.js",(function(t){if(t)return j.error("Could not load exception autocapture script",t);window.extendPostHogWithExceptionAutocapture(o.instance,e)})),e.siteApps)if(this.instance.config.opt_in_site_apps){var u,c=this.instance.config.api_host,l=m(e.siteApps);try{var d=function(){var e=u.value,t=e.id,n=e.url,i=[c,"/"===c[c.length-1]&&"/"===n[0]?n.substring(1):n].join("");window["__$$ph_site_app_".concat(t)]=o.instance,ie(i,(function(e){e&&j.error("Error while initializing PostHog app with config id ".concat(t),e)}))};for(l.s();!(u=l.n()).done;)d()}catch(e){l.e(e)}finally{l.f()}}else e.siteApps.length>0&&j.error('PostHog site apps are disabled. Enable the "opt_in_site_apps" config to proceed.')}else j.error("Failed to fetch feature flags from PostHog.")}}]),e}(),mt=["https://app.posthog.com","https://eu.posthog.com"],yt=["source"],bt=D.location?nt(D.location.hash,"__posthog")||nt(location.hash,"state"):null,wt=function(){function e(t){s(this,e),this.instance=t}return u(e,[{key:"afterDecideResponse",value:function(e){var t=e.toolbarParams||e.editorParams||(e.toolbarVersion?{toolbarVersion:e.toolbarVersion}:{});e.isAuthenticated&&t.toolbarVersion&&0===t.toolbarVersion.indexOf("toolbar")&&this.loadToolbar(r({},t))}},{key:"maybeLoadToolbar",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:D.location,t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:void 0,n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:D.history;try{if(!t){try{D.localStorage.setItem("test","test"),D.localStorage.removeItem("test")}catch(e){return!1}t=D.localStorage}var i,r=bt||nt(e.hash,"__posthog")||nt(e.hash,"state"),o=r?Q((function(){return JSON.parse(atob(decodeURIComponent(r)))}))||Q((function(){return JSON.parse(decodeURIComponent(r))})):null;return o&&"ph_authorize"===o.action?((i=o).source="url",i&&Object.keys(i).length>0&&(o.desiredHash?e.hash=o.desiredHash:n?n.replaceState("",document.title,e.pathname+e.search):e.hash="")):((i=JSON.parse(t.getItem("_postHogToolbarParams")||"{}")).source="localstorage",delete i.userIntent),!(!i.token||this.instance.config.token!==i.token)&&(this.loadToolbar(i),!0)}catch(e){return!1}}},{key:"loadToolbar",value:function(e){var t=this;if(D._postHogToolbarLoaded)return!1;D._postHogToolbarLoaded=!0;var n=this.instance.config.api_host,i=3e5,o=Math.floor(Date.now()/i)*i,s="".concat(n).concat(n.endsWith("/")?"":"/","static/toolbar.js?t=").concat(o),a=!mt.includes(this.instance.config.api_host)&&this.instance.config.advanced_disable_toolbar_metrics,u=r(r({token:this.instance.config.token},e),{},{apiURL:n},a?{instrument:!1}:{});u.source;var c=function(e,t){if(null==e)return{};var n,i,r=function(e,t){if(null==e)return{};var n,i,r={},o=Object.keys(e);for(i=0;i=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(i=0;i=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}(u,yt);return D.localStorage.setItem("_postHogToolbarParams",JSON.stringify(c)),ie(s,(function(e){e?j.error("Failed to load toolbar",e):(D.ph_load_toolbar||D.ph_load_editor)(u,t.instance)})),ne(D,"turbolinks:load",(function(){D._postHogToolbarLoaded=!1,t.loadToolbar(u)})),!0}},{key:"_loadEditor",value:function(e){return 
this.loadToolbar(e)}},{key:"maybeLoadEditor",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:D.location,t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:void 0,n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:D.history;return this.maybeLoadToolbar(e,t,n)}}]),e}(),kt="__ph_opt_in_out_";function St(e,t){Tt(!0,e,t)}function Ft(e,t){Tt(!1,e,t)}function Pt(e,t){return"1"===Rt(e,t)}function It(e,t){return!!function(e){if(e&&e.respectDnt){var t=e&&e.window||D,n=t.navigator||{},i=!1;return W([n.doNotTrack,n.msDoNotTrack,t.doNotTrack],(function(e){z([!0,1,"1","yes"],e)&&(i=!0)})),i}return!1}(t)||"0"===Rt(e,t)}function Et(e,t){xt(t=t||{}).remove(Ot(e,t),!!t.crossSubdomainCookie)}function xt(e){return"localStorage"===(e=e||{}).persistenceType?ze:"localStorage+cookie"===e.persistenceType?Qe:We}function Ot(e,t){return((t=t||{}).persistencePrefix||kt)+e}function Rt(e,t){return xt(t).get(Ot(e,t))}function Tt(e,t,n){O(t)&&t.length?(xt(n=n||{}).set(Ot(t,n),e?1:0,T(n.cookieExpiration)?n.cookieExpiration:null,n.crossSubdomainCookie,n.secureCookie),n.capture&&e&&n.capture(n.captureEventName||"$opt_in",n.captureProperties||{},{send_instantly:!0})):j.error("gdpr."+(e?"optIn":"optOut")+" called with an invalid token")}var $t=function(){function e(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:3e3;s(this,e),this.isPolling=!0,this._event_queue=[],this._empty_queue_count=0,this._poller=void 0,this._pollInterval=t}return u(e,[{key:"setPollInterval",value:function(e){this._pollInterval=e,this.isPolling&&this.poll()}},{key:"poll",value:function(){}},{key:"unload",value:function(){}},{key:"getTime",value:function(){return(new Date).getTime()}}]),e}(),Ct=function(e){l(n,$t);var t=h(n);function n(e){var i,r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:3e3;return s(this,n),(i=t.call(this,r)).handlePollRequest=e,i}return u(n,[{key:"enqueue",value:function(e,t,n){this._event_queue.push({url:e,data:t,options:n}),this.isPolling||(this.isPolling=!0,this.poll())}},{key:"poll",value:function(){var e=this;clearTimeout(this._poller),this._poller=setTimeout((function(){if(e._event_queue.length>0){var t=e.formatQueue(),n=function(n){var i=t[n],r=i.url,o=i.data,s=i.options;W(o,(function(t,n){o[n].offset=Math.abs(o[n].timestamp-e.getTime()),delete o[n].timestamp})),e.handlePollRequest(r,o,s)};for(var i in t)n(i);e._event_queue.length=0,e._empty_queue_count=0}else e._empty_queue_count++;e._empty_queue_count>4&&(e.isPolling=!1,e._empty_queue_count=0),e.isPolling&&e.poll()}),this._pollInterval)}},{key:"unload",value:function(){var e=this;clearTimeout(this._poller);var t=this._event_queue.length>0?this.formatQueue():{};this._event_queue.length=0;var n=Object.values(t);[].concat(_(n.filter((function(e){return 0===e.url.indexOf("/e")}))),_(n.filter((function(e){return 0!==e.url.indexOf("/e")})))).map((function(t){var n=t.url,i=t.data,o=t.options;e.handlePollRequest(n,i,r(r({},o),{},{transport:"sendBeacon"}))}))}},{key:"formatQueue",value:function(){var e={};return W(this._event_queue,(function(t){var n=t.url,i=t.data,r=t.options,o=(r?r._batchKey:null)||n;x(e[o])&&(e[o]={data:[],url:n,options:r}),r&&e[o].options&&e[o].options._metrics&&!e[o].options._metrics.rrweb_full_snapshot&&(e[o].options._metrics.rrweb_full_snapshot=r._metrics.rrweb_full_snapshot),e[o].data.push(i)})),e}}]),n}(),Mt=Uint8Array,At=Uint16Array,Dt=Uint32Array,Lt=new Mt([0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0,0]),Nt=new 
Mt([0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13,0,0]),Bt=new Mt([16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15]),qt=function(e,t){for(var n=new At(31),i=0;i<31;++i)n[i]=t+=1<>>1|(21845&Gt)<<1;zt=(61680&(zt=(52428&zt)>>>2|(13107&zt)<<2))>>>4|(3855&zt)<<4,Wt[Gt]=((65280&zt)>>>8|(255&zt)<<8)>>>1}var Yt=function(e,t,n){for(var i=e.length,r=0,o=new At(t);r>>u]=c}else for(s=new At(i),r=0;r>>15-e[r];return s},Qt=new Mt(288);for(Gt=0;Gt<144;++Gt)Qt[Gt]=8;for(Gt=144;Gt<256;++Gt)Qt[Gt]=9;for(Gt=256;Gt<280;++Gt)Qt[Gt]=7;for(Gt=280;Gt<288;++Gt)Qt[Gt]=8;var Jt=new Mt(32);for(Gt=0;Gt<32;++Gt)Jt[Gt]=5;var Xt,Kt=Yt(Qt,9,0),Zt=Yt(Jt,5,0),en=function(e){return(e/8>>0)+(7&e&&1)},tn=function(e,t,n){(null==t||t<0)&&(t=0),(null==n||n>e.length)&&(n=e.length);var i=new(e instanceof At?At:e instanceof Dt?Dt:Mt)(n-t);return i.set(e.subarray(t,n)),i},nn=function(e,t,n){n<<=7&t;var i=t/8>>0;e[i]|=n,e[i+1]|=n>>>8},rn=function(e,t,n){n<<=7&t;var i=t/8>>0;e[i]|=n,e[i+1]|=n>>>8,e[i+2]|=n>>>16},on=function(e,t){for(var n=[],i=0;if&&(f=o[i].s);var h=new At(f+1),p=sn(n[l-1],h,0);if(p>t){i=0;var _=0,g=p-t,v=1<t))break;_+=v-(1<>>=g;_>0;){var y=o[i].s;h[y]=0&&_;--i){var b=o[i].s;h[b]==t&&(--h[b],++_)}p=t}return[new Mt(h),p]},sn=function e(t,n,i){return-1==t.s?Math.max(e(t.l,n,i+1),e(t.r,n,i+1)):n[t.s]=i},an=function(e){for(var t=e.length;t&&!e[--t];);for(var n=new At(++t),i=0,r=e[0],o=1,s=function(e){n[i++]=e},a=1;a<=t;++a)if(e[a]==r&&a!=t)++o;else{if(!r&&o>2){for(;o>138;o-=138)s(32754);o>2&&(s(o>10?o-11<<5|28690:o-3<<5|12305),o=0)}else if(o>3){for(s(r),--o;o>6;o-=6)s(8304);o>2&&(s(o-3<<5|8208),o=0)}for(;o--;)s(r);o=1,r=e[a]}return[n.subarray(0,i),t]},un=function(e,t){for(var n=0,i=0;i>>8,e[r+2]=255^e[r],e[r+3]=255^e[r+1];for(var o=0;o4&&!I[Bt[x-1]];--x);var O,R,T,$,C=c+5<<3,M=un(r,Qt)+un(o,Jt)+s,A=un(r,f)+un(o,_)+s+14+3*x+un(S,I)+(2*S[16]+3*S[17]+7*S[18]);if(C<=M&&C<=A)return cn(t,l,e.subarray(u,u+c));if(nn(t,l,1+(A15&&(nn(t,l,B[F]>>>5&127),l+=B[F]>>>12)}}}else O=Kt,R=Qt,T=Zt,$=Jt;for(F=0;F255){q=i[F]>>>18&31,rn(t,l,O[q+257]),l+=R[q+257],q>7&&(nn(t,l,i[F]>>>23&31),l+=Lt[q]);var j=31&i[F];rn(t,l,T[j]),l+=$[j],j>3&&(rn(t,l,i[F]>>>5&8191),l+=Nt[j])}else rn(t,l,O[i[F]]),l+=R[i[F]];return rn(t,l,O[256]),l+R[256]},dn=new Dt([65540,131080,131088,131104,262176,1048704,1048832,2114560,2117632]),fn=new Mt(0),hn=function(){for(var e=new Dt(256),t=0;t<256;++t){for(var n=t,i=9;--i;)n=(1&n&&3988292384)^n>>>1;e[t]=n}return e}(),pn=function(e,t,n){for(;n;++t)e[t]=n,n>>>=8};function _n(e,t){void 0===t&&(t={});var n=function(){var e=4294967295;return{p:function(t){for(var n=e,i=0;i>>8;e=n},d:function(){return 4294967295^e}}}(),i=e.length;n.p(e);var r=function(e,t,n,i,r){return function(e,t,n,i,r,o){var s=e.length,a=new Mt(i+s+5*(1+Math.floor(s/7e3))+r),u=a.subarray(i,a.length-r),c=0;if(!t||s<8)for(var l=0;l<=s;l+=65535){var d=l+65535;d>>13,p=8191&f,_=(1<7e3||I>24576)&&$>423){c=ln(e,u,0,w,k,S,P,I,x,l-x,c),I=F=P=0,x=l;for(var C=0;C<286;++C)k[C]=0;for(C=0;C<30;++C)S[C]=0}var M=2,A=0,D=p,L=R-T&32767;if($>2&&O==b(l-L))for(var N=Math.min(h,$)-1,B=Math.min(32767,l),q=Math.min(258,$);L<=B&&--D&&R!=T;){if(e[l+M]==e[l+M-L]){for(var j=0;jM){if(M=j,A=L,j>N)break;var H=Math.min(L,j-2),V=0;for(C=0;CV&&(V=W,T=U)}}}L+=(R=T)-(T=g[R])+32768&32767}if(A){w[I++]=268435456|Vt[M]<<18|Ut[A];var G=31&Vt[M],z=31&Ut[A];P+=Lt[G]+Nt[z],++k[257+G],++S[z],E=l+M,++F}else w[I++]=e[l],++k[e[l]]}}c=ln(e,u,o,w,k,S,P,I,x,l-x,c),o||(c=cn(u,c,fn))}return 
tn(a,0,i+en(c)+r)}(e,null==t.level?6:t.level,null==t.mem?Math.ceil(1.5*Math.max(8,Math.min(13,Math.log(e.length)))):12+t.mem,n,i,!r)}(e,t,function(e){return 10+(e.filename&&e.filename.length+1||0)}(t),8),o=r.length;return function(e,t){var n=t.filename;if(e[0]=31,e[1]=139,e[2]=8,e[8]=t.level<2?4:9==t.level?2:0,e[9]=3,0!=t.mtime&&pn(e,4,Math.floor(new Date(t.mtime||Date.now())/1e3)),n){e[3]=8;for(var i=0;i<=n.length;++i)e[i+10]=n.charCodeAt(i)}}(r,t),pn(r,o-8,n.d()),pn(r,o-4,i),r}function gn(e,t){var n=e.length;if(!t&&"undefined"!=typeof TextEncoder)return(new TextEncoder).encode(e);for(var i=new Mt(e.length+(e.length>>>1)),r=0,o=function(e){i[r++]=e},s=0;si.length){var a=new Mt(r+8+(n-s<<1));a.set(i),i=a}var u=e.charCodeAt(s);u<128||t?o(u):u<2048?(o(192|u>>>6),o(128|63&u)):u>55295&&u<57344?(o(240|(u=65536+(1047552&u)|1023&e.charCodeAt(++s))>>>18),o(128|u>>>12&63),o(128|u>>>6&63),o(128|63&u)):(o(224|u>>>12),o(128|u>>>6&63),o(128|63&u))}return tn(i,0,r)}!function(e){e.GZipJS="gzip-js",e.Base64="base64"}(Xt||(Xt={}));var vn=function(e,t,n){var i=t||{};i.ip=n.ip?1:0,i._=(new Date).getTime().toString(),i.ver=y.LIB_VERSION;var r=e.split("?");if(r.length>1){var o,s=m(r[1].split("&"));try{for(s.s();!(o=s.n()).done;){var a=o.value.split("=")[0];x(i[a])||delete i[a]}}catch(e){s.e(e)}finally{s.f()}}var u=e.indexOf("?")>-1?"&":"?";return e+u+function(e){var t,n,i=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"&",r=[];return W(e,(function(e,i){x(e)||x(i)||"undefined"===i||(t=encodeURIComponent(e.toString()),n=encodeURIComponent(i),r[r.length]=n+"="+t)})),r.join(i)}(i)},mn=function e(t,n){if(n.blob&&t.buffer)return new Blob([P(t)?t:t.buffer],{type:"text/plain"});if(n.sendBeacon||n.blob){var i=e(t,{method:"POST"});return new Blob([i],{type:"application/x-www-form-urlencoded"})}return"POST"!==n.method?null:(r=F(t)||P(t)?"data="+encodeURIComponent(t):"data="+encodeURIComponent(t.data),"compression"in t&&t.compression&&(r+="&compression="+t.compression),r);var r},yn=function(e){var t=e.url,n=e.data,i=e.headers,r=e.options,o=e.callback,s=e.retriesPerformedSoFar,a=e.retryQueue,u=e.onXHRError,c=e.timeout,l=void 0===c?6e4:c,d=e.onResponse;T(s)&&s>0&&(t=vn(t,{retry_count:s},{}));var f=new XMLHttpRequest;f.open(r.method||"GET",t,!0);var h=mn(n,r);W(i,(function(e,t){f.setRequestHeader(t,e)})),"POST"!==r.method||r.blob||f.setRequestHeader("Content-Type","application/x-www-form-urlencoded"),f.timeout=l,f.withCredentials=!0,f.onreadystatechange=function(){if(4===f.readyState)if(null==d||d(f),200===f.status){if(o){var e;try{e=JSON.parse(f.responseText)}catch(e){return void j.error(e)}o(e)}}else I(u)&&u(f),(f.status<400||f.status>500)&&a.enqueue({url:t,data:n,options:r,headers:i,retriesPerformedSoFar:(s||0)+1,callback:o}),null==o||o({status:0})},f.send(h)},bn=function(e){l(n,$t);var t=h(n);function n(e,i){var r;return s(this,n),(r=t.call(this)).isPolling=!1,r.queue=[],r.areWeOnline=!0,r.onXHRError=e,r.rateLimiter=i,!x(window)&&"onLine"in window.navigator&&(r.areWeOnline=window.navigator.onLine,window.addEventListener("online",(function(){r._handleWeAreNowOnline()})),window.addEventListener("offline",(function(){r.areWeOnline=!1}))),r}return u(n,[{key:"enqueue",value:function(e){var t=e.retriesPerformedSoFar||0;if(!(t>=10)){var n=function(e){var t=3e3*Math.pow(2,e),n=t/2,i=Math.min(18e5,t),r=(Math.random()-.5)*(i-n);return Math.ceil(i+r)}(t),i=new Date(Date.now()+n);this.queue.push({retryAt:i,requestData:e});var r="Enqueued failed request for retry in ".concat(n);navigator.onLine||(r+=" (Browser is 
offline)"),j.warn(r),this.isPolling||(this.isPolling=!0,this.poll())}}},{key:"poll",value:function(){var e=this;this._poller&&clearTimeout(this._poller),this._poller=setTimeout((function(){e.areWeOnline&&e.queue.length>0&&e.flush(),e.poll()}),this._pollInterval)}},{key:"flush",value:function(){var e=new Date(Date.now()),t=this.queue.filter((function(t){return t.retryAt0){this.queue=this.queue.filter((function(t){return t.retryAt>=e}));var n,i=m(t);try{for(i.s();!(n=i.n()).done;){var r=n.value.requestData;this._executeXhrRequest(r)}}catch(e){i.e(e)}finally{i.f()}}}},{key:"unload",value:function(){this._poller&&(clearTimeout(this._poller),this._poller=void 0);var e,t=m(this.queue);try{for(t.s();!(e=t.n()).done;){var n=e.value.requestData,i=n.url,o=n.data,s=n.options;if(this.rateLimiter.isRateLimited(s._batchKey))j.warn("[RetryQueue] is quota limited. Dropping request.");else try{window.navigator.sendBeacon(i,mn(o,r(r({},s),{},{sendBeacon:!0})))}catch(e){j.error(e)}}}catch(e){t.e(e)}finally{t.f()}this.queue=[]}},{key:"_executeXhrRequest",value:function(e){var t=e.url,n=e.data,i=e.options,r=e.headers,o=e.callback,s=e.retriesPerformedSoFar;this.rateLimiter.isRateLimited(i._batchKey)||yn({url:t,data:n||{},options:i||{},headers:r||{},retriesPerformedSoFar:s||0,callback:o,retryQueue:this,onXHRError:this.onXHRError,onResponse:this.rateLimiter.checkForLimiting})}},{key:"_handleWeAreNowOnline",value:function(){this.areWeOnline=!0,this.flush()}}]),n}();Math.trunc||(Math.trunc=function(e){return e<0?Math.ceil(e):Math.floor(e)}),Number.isInteger||(Number.isInteger=function(e){return T(e)&&isFinite(e)&&Math.floor(e)===e});var wn="0123456789abcdef",kn=function(){function e(t){if(s(this,e),this.bytes=t,16!==t.length)throw new TypeError("not 128-bit length")}return u(e,[{key:"toString",value:function(){for(var e="",t=0;t>>4)+wn.charAt(15&this.bytes[t]),3!==t&&5!==t&&7!==t&&9!==t||(e+="-");if(36!==e.length)throw new Error("Invalid UUIDv7 was generated");return e}},{key:"clone",value:function(){return new e(this.bytes.slice(0))}},{key:"equals",value:function(e){return 0===this.compareTo(e)}},{key:"compareTo",value:function(e){for(var t=0;t<16;t++){var n=this.bytes[t]-e.bytes[t];if(0!==n)return Math.sign(n)}return 0}}],[{key:"fromFieldsV7",value:function(t,n,i,r){if(!Number.isInteger(t)||!Number.isInteger(n)||!Number.isInteger(i)||!Number.isInteger(r)||t<0||n<0||i<0||r<0||t>0xffffffffffff||n>4095||i>1073741823||r>4294967295)throw new RangeError("invalid field value");var o=new Uint8Array(16);return o[0]=t/Math.pow(2,40),o[1]=t/Math.pow(2,32),o[2]=t/Math.pow(2,24),o[3]=t/Math.pow(2,16),o[4]=t/Math.pow(2,8),o[5]=t,o[6]=112|n>>>8,o[7]=n,o[8]=128|i>>>24,o[9]=i>>>16,o[10]=i>>>8,o[11]=i,o[12]=r>>>24,o[13]=r>>>16,o[14]=r>>>8,o[15]=r,new e(o)}}]),e}(),Sn=function(){function e(){s(this,e),c(this,"timestamp",0),c(this,"counter",0),c(this,"random",new In)}return u(e,[{key:"generate",value:function(){var e=this.generateOrAbort();if(x(e)){this.timestamp=0;var t=this.generateOrAbort();if(x(t))throw new Error("Could not generate UUID after timestamp reset");return t}return e}},{key:"generateOrAbort",value:function(){var e=Date.now();if(e>this.timestamp)this.timestamp=e,this.resetCounter();else{if(!(e+1e4>this.timestamp))return;this.counter++,this.counter>4398046511103&&(this.timestamp++,this.resetCounter())}return 
kn.fromFieldsV7(this.timestamp,Math.trunc(this.counter/Math.pow(2,30)),this.counter&Math.pow(2,30)-1,this.random.nextUint32())}},{key:"resetCounter",value:function(){this.counter=1024*this.random.nextUint32()+(1023&this.random.nextUint32())}}]),e}(),Fn=function(e){if("undefined"!=typeof UUIDV7_DENY_WEAK_RNG&&UUIDV7_DENY_WEAK_RNG)throw new Error("no cryptographically strong RNG available");for(var t=0;t=this.buffer.length&&(Fn(this.buffer),this.cursor=0),this.buffer[this.cursor++]}}]),e}(),En=function(){return xn().toString()},xn=function(){return(Pn||(Pn=new Sn)).generate()},On=1800,Rn=function(){function e(t,n,i,r){s(this,e),c(this,"_sessionIdChangedHandlers",[]),this.config=t,this.persistence=n,this._windowId=void 0,this._sessionId=void 0,this._sessionStartTimestamp=null,this._sessionActivityTimestamp=null,this._sessionIdGenerator=i||En,this._windowIdGenerator=r||En;var o=t.persistence_name||t.token,a=t.session_idle_timeout_seconds||On;if(T(a)?a>On?j.warn("session_idle_timeout_seconds cannot be greater than 30 minutes. Using 30 minutes instead."):a<60&&j.warn("session_idle_timeout_seconds cannot be less than 60 seconds. Using 60 seconds instead."):(j.warn("session_idle_timeout_seconds must be a number. Defaulting to 30 minutes."),a=On),this._sessionTimeoutMs=1e3*Math.min(Math.max(a,60),On),this._window_id_storage_key="ph_"+o+"_window_id",this._primary_window_exists_storage_key="ph_"+o+"_primary_window_exists",this._canUseSessionStorage()){var u=Ze.parse(this._window_id_storage_key),l=Ze.parse(this._primary_window_exists_storage_key);u&&!l?this._windowId=u:Ze.remove(this._window_id_storage_key),Ze.set(this._primary_window_exists_storage_key,!0)}this._listenToReloadWindow()}return u(e,[{key:"onSessionId",value:function(e){var t=this;return x(this._sessionIdChangedHandlers)&&(this._sessionIdChangedHandlers=[]),this._sessionIdChangedHandlers.push(e),this._sessionId&&e(this._sessionId,this._windowId),function(){t._sessionIdChangedHandlers=t._sessionIdChangedHandlers.filter((function(t){return t!==e}))}}},{key:"_canUseSessionStorage",value:function(){return"memory"!==this.config.persistence&&!this.persistence.disabled&&Ze.is_supported()}},{key:"_setWindowId",value:function(e){e!==this._windowId&&(this._windowId=e,this._canUseSessionStorage()&&Ze.set(this._window_id_storage_key,e))}},{key:"_getWindowId",value:function(){return this._windowId?this._windowId:this._canUseSessionStorage()?Ze.parse(this._window_id_storage_key):null}},{key:"_setSessionId",value:function(e,t,n){e===this._sessionId&&t===this._sessionActivityTimestamp&&n===this._sessionStartTimestamp||(this._sessionStartTimestamp=n,this._sessionActivityTimestamp=t,this._sessionId=e,this.persistence.register(c({},Fe,[t,e,n])))}},{key:"_getSessionId",value:function(){if(this._sessionId&&this._sessionActivityTimestamp&&this._sessionStartTimestamp)return[this._sessionActivityTimestamp,this._sessionId,this._sessionStartTimestamp];var e=this.persistence.props[Fe];return F(e)&&2===e.length&&e.push(e[0]),e||[0,null,0]}},{key:"resetSessionId",value:function(){this._setSessionId(null,null,null)}},{key:"_listenToReloadWindow",value:function(){var e=this;D.addEventListener("beforeunload",(function(){e._canUseSessionStorage()&&Ze.remove(e._primary_window_exists_storage_key)}))}},{key:"checkAndGetSessionAndWindowId",value:function(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0],t=(arguments.length>1&&void 0!==arguments[1]?arguments[1]:null)||(new 
Date).getTime(),n=p(this._getSessionId(),3),i=n[0],r=n[1],o=n[2],s=this._getWindowId(),a=o&&o>0&&Math.abs(t-o)>864e5,u=!1,c=!r,l=!e&&Math.abs(t-i)>this._sessionTimeoutMs;c||l||a?(r=this._sessionIdGenerator(),s=this._windowIdGenerator(),o=t,u=!0):s||(s=this._windowIdGenerator(),u=!0);var d=0===i||!e||a?t:i,f=0===o?(new Date).getTime():o;return this._setWindowId(s),this._setSessionId(r,d,f),u&&this._sessionIdChangedHandlers.forEach((function(e){return e(r,s)})),{sessionId:r,windowId:s,sessionStartTimestamp:f}}}]),e}(),Tn=u((function e(t,n,i,r){s(this,e),this.name="posthog-js",this.setupOnce=function(e){e((function(e){var o,s,a,u,c;if("error"!==e.level||!t.__loaded)return e;e.tags||(e.tags={});var l=t.config.ui_host||t.config.api_host;e.tags["PostHog Person URL"]=l+"/person/"+t.get_distinct_id(),t.sessionRecordingStarted()&&(e.tags["PostHog Recording URL"]=t.get_session_replay_url({withTimestamp:!0}));var d=(null===(o=e.exception)||void 0===o?void 0:o.values)||[],f={$exception_message:null===(s=d[0])||void 0===s?void 0:s.value,$exception_type:null===(a=d[0])||void 0===a?void 0:a.type,$exception_personURL:l+"/person/"+t.get_distinct_id(),$sentry_event_id:e.event_id,$sentry_exception:e.exception,$sentry_exception_message:null===(u=d[0])||void 0===u?void 0:u.value,$sentry_exception_type:null===(c=d[0])||void 0===c?void 0:c.type,$sentry_tags:e.tags};return n&&i&&(f.$sentry_url=(r||"https://sentry.io/organizations/")+n+"/issues/?project="+i+"&query="+e.event_id),t.capture("$exception",f),e}))}})),$n=function(){function e(){var t=this;s(this,e),c(this,"_hasSeenPageView",!1),c(this,"_updateScrollData",(function(){var e,n,i,r;t._pageViewData||(t._pageViewData=t._createPageViewData());var o=t._pageViewData,s=t._scrollY(),a=t._scrollHeight(),u=t._contentY(),c=t._contentHeight();o.lastScrollY=s,o.maxScrollY=Math.max(s,null!==(e=o.maxScrollY)&&void 0!==e?e:0),o.maxScrollHeight=Math.max(a,null!==(n=o.maxScrollHeight)&&void 0!==n?n:0),o.lastContentY=u,o.maxContentY=Math.max(u,null!==(i=o.maxContentY)&&void 0!==i?i:0),o.maxContentHeight=Math.max(c,null!==(r=o.maxContentHeight)&&void 0!==r?r:0)}))}return u(e,[{key:"_createPageViewData",value:function(){return{pathname:D.location.pathname}}},{key:"doPageView",value:function(){var e,t;return this._hasSeenPageView?(t=this._pageViewData,this._pageViewData=this._createPageViewData()):(this._hasSeenPageView=!0,t=void 0,this._pageViewData||(this._pageViewData=this._createPageViewData())),setTimeout(this._updateScrollData,0),r({$prev_pageview_pathname:null===(e=t)||void 0===e?void 0:e.pathname},this._calculatePrevPageScrollProperties(t))}},{key:"doPageLeave",value:function(){var e=this._pageViewData;return r({$prev_pageview_pathname:null==e?void 0:e.pathname},this._calculatePrevPageScrollProperties(e))}},{key:"_calculatePrevPageScrollProperties",value:function(e){if(!e||null==e.maxScrollHeight||null==e.lastScrollY||null==e.maxScrollY||null==e.maxContentHeight||null==e.lastContentY||null==e.maxContentY)return{};var t=e.maxScrollHeight,n=e.lastScrollY,i=e.maxScrollY,r=e.maxContentHeight,o=e.lastContentY,s=e.maxContentY;return 
t=Math.ceil(t),n=Math.ceil(n),i=Math.ceil(i),r=Math.ceil(r),o=Math.ceil(o),s=Math.ceil(s),{$prev_pageview_last_scroll:n,$prev_pageview_last_scroll_percentage:t<=1?1:Cn(n/t,0,1),$prev_pageview_max_scroll:i,$prev_pageview_max_scroll_percentage:t<=1?1:Cn(i/t,0,1),$prev_pageview_last_content:o,$prev_pageview_last_content_percentage:r<=1?1:Cn(o/r,0,1),$prev_pageview_max_content:s,$prev_pageview_max_content_percentage:r<=1?1:Cn(s/r,0,1)}}},{key:"startMeasuringScrollPosition",value:function(){D.addEventListener("scroll",this._updateScrollData),D.addEventListener("scrollend",this._updateScrollData),D.addEventListener("resize",this._updateScrollData)}},{key:"stopMeasuringScrollPosition",value:function(){D.removeEventListener("scroll",this._updateScrollData),D.removeEventListener("scrollend",this._updateScrollData),D.removeEventListener("resize",this._updateScrollData)}},{key:"_scrollHeight",value:function(){return Math.max(0,D.document.documentElement.scrollHeight-D.document.documentElement.clientHeight)}},{key:"_scrollY",value:function(){return D.scrollY||D.pageYOffset||D.document.documentElement.scrollTop||0}},{key:"_contentHeight",value:function(){return D.document.documentElement.scrollHeight||0}},{key:"_contentY",value:function(){var e=D.document.documentElement.clientHeight||0;return this._scrollY()+e}}]),e}();function Cn(e,t,n){return Math.max(t,Math.min(e,n))}var Mn,An,Dn,Ln={icontains:function(e){return window.location.href.toLowerCase().indexOf(e.toLowerCase())>-1},regex:function(e){return function(e,t){return!!function(e){try{new RegExp(e)}catch(e){return!1}return!0}(t)&&new RegExp(t).test(e)}(window.location.href,e)},exact:function(e){return window.location.href===e}},Nn=function(){function e(t){s(this,e),this.instance=t}return u(e,[{key:"getSurveys",value:function(e){var t=this,n=arguments.length>1&&void 0!==arguments[1]&&arguments[1],i=this.instance.get_property(Re);if(i&&!n)return e(i);this.instance._send_request("".concat(this.instance.config.api_host,"/api/surveys/?token=").concat(this.instance.config.token),{},{method:"GET"},(function(n){var i,r=n.surveys||[];return null===(i=t.instance.persistence)||void 0===i||i.register(c({},Re,r)),e(r)}))}},{key:"getActiveMatchingSurveys",value:function(e){var t=this,n=arguments.length>1&&void 0!==arguments[1]&&arguments[1];this.getSurveys((function(n){var i=n.filter((function(e){return!(!e.start_date||e.end_date)})).filter((function(e){var t,n,i,r;if(!e.conditions)return!0;var o=null===(t=e.conditions)||void 0===t||!t.url||Ln[null!==(n=null===(i=e.conditions)||void 0===i?void 0:i.urlMatchType)&&void 0!==n?n:"icontains"](e.conditions.url),s=null===(r=e.conditions)||void 0===r||!r.selector||document.querySelector(e.conditions.selector);return o&&s})).filter((function(e){if(!e.linked_flag_key&&!e.targeting_flag_key)return!0;var n=!e.linked_flag_key||t.instance.featureFlags.isFeatureEnabled(e.linked_flag_key),i=!e.targeting_flag_key||t.instance.featureFlags.isFeatureEnabled(e.targeting_flag_key);return n&&i}));return e(i)}),n)}}]),e}(),Bn=function(){function e(){var t=this;s(this,e),c(this,"limits",{}),c(this,"checkForLimiting",(function(e){try{var n=e.responseText;if(!n||!n.length)return;(JSON.parse(n).quota_limited||[]).forEach((function(e){j.info("[RateLimiter] ".concat(e||"events"," is quota limited.")),t.limits[e]=(new Date).getTime()+6e4}))}catch(e){return void j.error(e)}}))}return u(e,[{key:"isRateLimited",value:function(e){var t=this.limits[e||"events"]||!1;return!1!==t&&(new Date).getTime()1&&void 
0!==arguments[1]?arguments[1]:{},s=arguments.length>2?arguments[2]:void 0,a=arguments.length>3?arguments[3]:void 0;this.__loaded=!0,this.config={},this._triggered_notifs=[];var u,c={segmentRegister:!1,syncCode:!1},l=function(e){return function(){c[e]||(c[e]=!0,c.segmentRegister&&c.syncCode&&(null==a||a(i)))}};if(this.set_config(G({},zn(),o,{name:s,token:e,callback_fn:(s===Un?s:Un+"."+s)+"._jsc"})),this._jsc=function(){},(null!=D&&null!==(t=D.rrweb)&&void 0!==t&&t.record||null!=D&&D.rrwebRecord)&&(this.__loaded_recorder_version=null==D||null===(u=D.rrweb)||void 0===u?void 0:u.version),this.persistence=new ot(this.config),this._requestQueue=new Ct(this._handle_queued_event.bind(this)),this._retryQueue=new bn(this.config.on_xhr_error,this.rateLimiter),this.__captureHooks=[],this.__request_queue=[],this.sessionManager=new Rn(this.config,this.persistence),this.sessionPropsManager=new jn(this.sessionManager,this.persistence),this.sessionPersistence="sessionStorage"===this.config.persistence?this.persistence:new ot(r(r({},this.config),{},{persistence:"sessionStorage"})),this._gdpr_init(),o.segment?(this.config.get_device_id=function(){return o.segment.user().anonymousId()},o.segment.user().id()&&(this.register({distinct_id:o.segment.user().id()}),this.persistence.set_user_state("identified")),o.segment.register(this.segmentIntegration()).then(l("segmentRegister"))):l("segmentRegister")(),void 0!==(null===(n=o.bootstrap)||void 0===n?void 0:n.distinctID)){var d,f,h=this.config.get_device_id(En()),p=null!==(d=o.bootstrap)&&void 0!==d&&d.isIdentifiedID?h:o.bootstrap.distinctID;this.persistence.set_user_state(null!==(f=o.bootstrap)&&void 0!==f&&f.isIdentifiedID?"identified":"anonymous"),this.register({distinct_id:o.bootstrap.distinctID,$device_id:p})}if(this._hasBootstrappedFeatureFlags()){var _,g,v=Object.keys((null===(_=o.bootstrap)||void 0===_?void 0:_.featureFlags)||{}).filter((function(e){var t,n;return!(null===(t=o.bootstrap)||void 0===t||null===(n=t.featureFlags)||void 0===n||!n[e])})).reduce((function(e,t){var n,i;return e[t]=(null===(n=o.bootstrap)||void 0===n||null===(i=n.featureFlags)||void 0===i?void 0:i[t])||!1,e}),{}),m=Object.keys((null===(g=o.bootstrap)||void 0===g?void 0:g.featureFlagPayloads)||{}).filter((function(e){return v[e]})).reduce((function(e,t){var n,i,r,s;return null!==(n=o.bootstrap)&&void 0!==n&&null!==(i=n.featureFlagPayloads)&&void 0!==i&&i[t]&&(e[t]=null===(r=o.bootstrap)||void 0===r||null===(s=r.featureFlagPayloads)||void 0===s?void 0:s[t]),e}),{});this.featureFlags.receivedFeatureFlags({featureFlags:v,featureFlagPayloads:m})}if(!this.get_distinct_id()){var y=this.config.get_device_id(En());this.register_once({distinct_id:y,$device_id:y},""),this.persistence.set_user_state("anonymous")}D.addEventListener&&D.addEventListener("onpagehide"in self?"pagehide":"unload",this._handle_unload.bind(this)),l("syncCode")()}},{key:"_afterDecideResponse",value:function(e){var t;if(this.compression={},e.supportedCompression&&!this.config.disable_compression){var n,i={},r=m(e.supportedCompression);try{for(r.s();!(n=r.n()).done;)i[n.value]=!0}catch(e){r.e(e)}finally{r.f()}this.compression=i}null!==(t=e.analytics)&&void 0!==t&&t.endpoint&&(this.analyticsDefaultEndpoint=e.analytics.endpoint)}},{key:"_loaded",value:function(){var e=this.config.advanced_disable_decide;e||this.featureFlags.setReloadingPaused(!0);try{this.config.loaded(this)}catch(e){j.critical("`loaded` function 
failed",e)}this._start_queue_if_opted_in(),this.config.capture_pageview&&this.capture("$pageview",{title:N.title},{send_instantly:!0}),e||(new vt(this).call(),this.featureFlags.resetRequestQueue())}},{key:"_start_queue_if_opted_in",value:function(){var e;this.has_opted_out_capturing()||this.config.request_batching&&(null===(e=this._requestQueue)||void 0===e||e.poll())}},{key:"_dom_loaded",value:function(){var e=this;this.has_opted_out_capturing()||U(this.__request_queue,(function(t){e._send_request.apply(e,_(t))})),this.__request_queue=[],this._start_queue_if_opted_in()}},{key:"_prepare_callback",value:function(e,t){if(x(e))return null;if(Wn)return function(n){e(n,t)};var n=this._jsc,i=""+Math.floor(1e8*Math.random()),r=this.config.callback_fn+"["+i+"]";return n[i]=function(r){delete n[i],e(r,t)},r}},{key:"_handle_unload",value:function(){var e,t;this.config.request_batching?(this.config.capture_pageview&&this.config.capture_pageleave&&this.capture("$pageleave"),null===(e=this._requestQueue)||void 0===e||e.unload(),null===(t=this._retryQueue)||void 0===t||t.unload()):this.config.capture_pageview&&this.config.capture_pageleave&&this.capture("$pageleave",null,{transport:"sendBeacon"})}},{key:"_handle_queued_event",value:function(e,t,n){var i=JSON.stringify(t);this.__compress_and_send_json_request(e,i,n||Vn,Hn)}},{key:"__compress_and_send_json_request",value:function(e,t,n,i){var o=function(e,t,n){return e===Xt.GZipJS?[_n(gn(t),{mtime:0}),r(r({},n),{},{blob:!0,urlQueryArgs:{compression:Xt.GZipJS}})]:[{data:Z(t)},n]}(this.compression[Xt.GZipJS]?Xt.GZipJS:Xt.Base64,t,n),s=p(o,2),a=s[0],u=s[1];this._send_request(e,a,u,i)}},{key:"_send_request",value:function(e,t,n,i){if(this.__loaded&&this._retryQueue&&!this.rateLimiter.isRateLimited(n._batchKey))if(Gn)this.__request_queue.push([e,t,n,i]);else{var o={method:this.config.api_method,transport:this.config.api_transport,verbose:this.config.verbose};n=G(o,n||{}),Wn||(n.method="GET");var s="sendBeacon"in D.navigator&&"sendBeacon"===n.transport;if(e=vn(e,n.urlQueryArgs||{},{ip:this.config.ip}),s)try{D.navigator.sendBeacon(e,mn(t,r(r({},n),{},{sendBeacon:!0})))}catch(e){}else if(Wn)try{yn({url:e,data:t,headers:this.config.xhr_headers,options:n,callback:i,retriesPerformedSoFar:0,retryQueue:this._retryQueue,onXHRError:this.config.on_xhr_error,onResponse:this.rateLimiter.checkForLimiting})}catch(e){j.error(e)}else{var a,u=N.createElement("script");u.type="text/javascript",u.async=!0,u.defer=!0,u.src=e;var c=N.getElementsByTagName("script")[0];null===(a=c.parentNode)||void 0===a||a.insertBefore(u,c)}}}},{key:"_execute_array",value:function(e){var t,n=this,i=[],r=[],o=[];U(e,(function(e){e&&(t=e[0],F(t)?o.push(e):I(e)?e.call(n):F(e)&&"alias"===t?i.push(e):F(e)&&-1!==t.indexOf("capture")&&I(n[t])?o.push(e):r.push(e))}));var s=function(e,t){U(e,(function(e){if(F(e[0])){var n=t;W(e,(function(e){n=n[e[0]].apply(n,e.slice(1))}))}else this[e[0]].apply(this,e.slice(1))}),t)};s(i,this),s(r,this),s(o,this)}},{key:"_hasBootstrappedFeatureFlags",value:function(){var e,t;return(null===(e=this.config.bootstrap)||void 0===e?void 0:e.featureFlags)&&Object.keys(null===(t=this.config.bootstrap)||void 0===t?void 0:t.featureFlags).length>0||!1}},{key:"push",value:function(e){this._execute_array([e])}},{key:"capture",value:function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:Vn;if(!this.__loaded||!this.sessionPersistence||!this._requestQueue)return j.uninitializedWarning("posthog.capture");if(!function(e){var t=!1;try{var 
n=e.config.token,i=e.config.respect_dnt,r=e.config.opt_out_capturing_persistence_type,o=e.config.opt_out_capturing_cookie_prefix||void 0,s=e.config.window;n&&(t=It(n,{respectDnt:i,persistenceType:r,persistencePrefix:o,window:s}))}catch(e){j.error("Unexpected error when checking capturing opt-out status: "+e)}return t}(this)){var i=(n=n||Vn).transport;if(i&&(n.transport=i),!x(e)&&O(e)){if(!function(e,t){if(!e)return!1;var n=e.toLowerCase();return te.concat(t||[]).some((function(e){var t=e.toLowerCase();return n.includes?n.includes(t):-1!==n.indexOf(t)}))}(B,this.config.custom_blocked_useragents)){this.sessionPersistence.update_search_keyword(),this.config.store_google&&this.sessionPersistence.update_campaign_params(),this.config.save_referrer&&this.sessionPersistence.update_referrer_info();var o={uuid:En(),event:e,properties:this._calculate_event_properties(e,t||{})};"$identify"===e&&(o.$set=n.$set,o.$set_once=n.$set_once),(o=function(e,t){return n=e,i=function(e,n){return n&&K.indexOf(n)>-1?e:O(e)&&!R(t)?e.slice(0,t):e},r=new Set,function e(t,n){return t!==Object(t)?i?i(t,n):t:r.has(t)?void 0:(r.add(t),F(t)?(o=[],U(t,(function(t){o.push(e(t))}))):(o={},W(t,(function(t,n){r.has(t)||(o[n]=e(t,n))}))),o);var o}(n);var n,i,r}(o,n._noTruncate?null:this.config.properties_string_max_length)).timestamp=n.timestamp||new Date;var s=r(r({},o.properties.$set),o.$set);(function(e){if(E(e)){for(var t in e)if(k.call(e,t))return!1;return!0}return!1})(s)||this.setPersonPropertiesForFlags(s),j.info("send",o);var a=JSON.stringify(o),u=this.config.api_host+(n.endpoint||this.analyticsDefaultEndpoint),c=n!==Vn;return!this.config.request_batching||c&&!n._batchKey||n.send_instantly?this.__compress_and_send_json_request(u,a,n):this._requestQueue.enqueue(u,o,n),this._invokeCaptureHooks(e,o),o}}else j.error("No event name provided to posthog.capture")}}},{key:"_addCaptureHook",value:function(e){this.__captureHooks.push(e)}},{key:"_invokeCaptureHooks",value:function(e,t){this.config._onCapture(e,t),W(this.__captureHooks,(function(t){return t(e)}))}},{key:"_calculate_event_properties",value:function(e,t){if(!this.persistence||!this.sessionPersistence)return t;var n=this.persistence.remove_event_timer(e),i=r({},t);if(i.token=this.config.token,"$snapshot"===e){var o=r(r({},this.persistence.properties()),this.sessionPersistence.properties());return i.distinct_id=o.distinct_id,i}var s=it.properties();if(this.sessionManager){var a=this.sessionManager.checkAndGetSessionAndWindowId(),u=a.sessionId,c=a.windowId;i.$session_id=u,i.$window_id=c}if(this.sessionPropsManager&&this.config.__preview_send_client_session_params&&("$pageview"===e||"$pageleave"===e||"$autocapture"===e)){var l=this.sessionPropsManager.getSessionProps();i=G(i,l)}if(this.config.__preview_measure_pageview_stats){var d={};"$pageview"===e?d=this.pageViewManager.doPageView():"$pageleave"===e&&(d=this.pageViewManager.doPageLeave()),i=G(i,d)}if("$pageview"===e&&(i.title=N.title),"$performance_event"===e){var f=this.persistence.properties();return i.distinct_id=f.distinct_id,i.$current_url=s.$current_url,i}if(!x(n)){var h=(new Date).getTime()-n;i.$duration=parseFloat((h/1e3).toFixed(3))}i=G({},it.properties(),this.persistence.properties(),this.sessionPersistence.properties(),i);var p=this.config.property_blacklist;F(p)?W(p,(function(e){delete i[e]})):j.error("Invalid value for property_blacklist config: "+p);var _=this.config.sanitize_properties;return _&&(i=_(i,e)),i}},{key:"register",value:function(e,t){var n;null===(n=this.persistence)||void 
0===n||n.register(e,t)}},{key:"register_once",value:function(e,t,n){var i;null===(i=this.persistence)||void 0===i||i.register_once(e,t,n)}},{key:"register_for_session",value:function(e){var t;null===(t=this.sessionPersistence)||void 0===t||t.register(e)}},{key:"unregister",value:function(e){var t;null===(t=this.persistence)||void 0===t||t.unregister(e)}},{key:"unregister_for_session",value:function(e){var t;null===(t=this.sessionPersistence)||void 0===t||t.unregister(e)}},{key:"_register_single",value:function(e,t){this.register(c({},e,t))}},{key:"getFeatureFlag",value:function(e,t){return this.featureFlags.getFeatureFlag(e,t)}},{key:"getFeatureFlagPayload",value:function(e){var t=this.featureFlags.getFeatureFlagPayload(e);try{return JSON.parse(t)}catch(e){return t}}},{key:"isFeatureEnabled",value:function(e,t){return this.featureFlags.isFeatureEnabled(e,t)}},{key:"reloadFeatureFlags",value:function(){this.featureFlags.reloadFeatureFlags()}},{key:"updateEarlyAccessFeatureEnrollment",value:function(e,t){this.featureFlags.updateEarlyAccessFeatureEnrollment(e,t)}},{key:"getEarlyAccessFeatures",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];return this.featureFlags.getEarlyAccessFeatures(e,t)}},{key:"onFeatureFlags",value:function(e){return this.featureFlags.onFeatureFlags(e)}},{key:"onSessionId",value:function(e){var t,n;return null!==(t=null===(n=this.sessionManager)||void 0===n?void 0:n.onSessionId(e))&&void 0!==t?t:function(){}}},{key:"getSurveys",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];this.surveys.getSurveys(e,t)}},{key:"getActiveMatchingSurveys",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];this.surveys.getActiveMatchingSurveys(e,t)}},{key:"identify",value:function(e,t,n){if(!this.__loaded||!this.persistence)return j.uninitializedWarning("posthog.identify");if(e){var i=this.get_distinct_id();if(this.register({$user_id:e}),!this.get_property("$device_id")){var r=i;this.register_once({$had_persisted_distinct_id:!0,$device_id:r},"")}e!==i&&e!==this.get_property(me)&&(this.unregister(me),this.register({distinct_id:e}));var o="anonymous"===this.persistence.get_user_state();e!==i&&o?(this.persistence.set_user_state("identified"),this.setPersonPropertiesForFlags(t||{},!1),this.capture("$identify",{distinct_id:e,$anon_distinct_id:i},{$set:t||{},$set_once:n||{}}),this.featureFlags.setAnonymousDistinctId(i)):(t||n)&&this.setPersonProperties(t,n),e!==i&&(this.reloadFeatureFlags(),this.unregister(Te))}else j.error("Unique user id has not been set in posthog.identify")}},{key:"setPersonProperties",value:function(e,t){(e||t)&&(this.setPersonPropertiesForFlags(e||{}),this.capture("$set",{$set:e||{},$set_once:t||{}}))}},{key:"group",value:function(e,t,n){if(e&&t){var i=this.getGroups();i[e]!==t&&this.resetGroupPropertiesForFlags(e),this.register({$groups:r(r({},i),{},c({},e,t))}),n&&(this.capture("$groupidentify",{$group_type:e,$group_key:t,$group_set:n}),this.setGroupPropertiesForFlags(c({},e,n))),i[e]===t||n||this.reloadFeatureFlags()}else j.error("posthog.group requires a group type and group key")}},{key:"resetGroups",value:function(){this.register({$groups:{}}),this.resetGroupPropertiesForFlags(),this.reloadFeatureFlags()}},{key:"setPersonPropertiesForFlags",value:function(e){var t=!(arguments.length>1&&void 
0!==arguments[1])||arguments[1];this.featureFlags.setPersonPropertiesForFlags(e,t)}},{key:"resetPersonPropertiesForFlags",value:function(){this.featureFlags.resetPersonPropertiesForFlags()}},{key:"setGroupPropertiesForFlags",value:function(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];this.featureFlags.setGroupPropertiesForFlags(e,t)}},{key:"resetGroupPropertiesForFlags",value:function(e){this.featureFlags.resetGroupPropertiesForFlags(e)}},{key:"reset",value:function(e){var t,n,i,r;if(!this.__loaded)return j.uninitializedWarning("posthog.reset");var o=this.get_property("$device_id");null===(t=this.persistence)||void 0===t||t.clear(),null===(n=this.sessionPersistence)||void 0===n||n.clear(),null===(i=this.persistence)||void 0===i||i.set_user_state("anonymous"),null===(r=this.sessionManager)||void 0===r||r.resetSessionId();var s=this.config.get_device_id(En());this.register_once({distinct_id:s,$device_id:e?s:o},"")}},{key:"get_distinct_id",value:function(){return this.get_property("distinct_id")}},{key:"getGroups",value:function(){return this.get_property("$groups")||{}}},{key:"get_session_id",value:function(){var e,t;return null!==(e=null===(t=this.sessionManager)||void 0===t?void 0:t.checkAndGetSessionAndWindowId(!0).sessionId)&&void 0!==e?e:""}},{key:"get_session_replay_url",value:function(e){if(!this.sessionManager)return"";var t=this.config.ui_host||this.config.api_host,n=this.sessionManager.checkAndGetSessionAndWindowId(!0),i=n.sessionId,r=n.sessionStartTimestamp,o=t+"/replay/"+i;if(null!=e&&e.withTimestamp&&r){var s,a=null!==(s=e.timestampLookBack)&&void 0!==s?s:10;if(!r)return o;var u=Math.max(Math.floor(((new Date).getTime()-r)/1e3)-a,0);o+="?t=".concat(u)}return o}},{key:"alias",value:function(e,t){return e===this.get_property(ve)?(j.critical("Attempting to create alias for existing People user - aborting."),-2):(x(t)&&(t=this.get_distinct_id()),e!==t?(this._register_single(me,e),this.capture("$create_alias",{alias:e,distinct_id:t})):(j.warn("alias matches current distinct_id - skipping api call."),this.identify(e),-1))}},{key:"set_config",value:function(e){var t,n,i=r({},this.config);E(e)&&(G(this.config,e),this.config.persistence_name||(this.config.persistence_name=this.config.cookie_name),this.config.disable_persistence||(this.config.disable_persistence=this.config.disable_cookie),null===(t=this.persistence)||void 0===t||t.update_config(this.config),null===(n=this.sessionPersistence)||void 0===n||n.update_config(this.config),ze.is_supported()&&"true"===ze.get("ph_debug")&&(this.config.debug=!0),this.config.debug&&(y.DEBUG=!0),this.sessionRecording&&!x(e.disable_session_recording)&&i.disable_session_recording!==e.disable_session_recording&&(e.disable_session_recording?this.sessionRecording.stopRecording():this.sessionRecording.startRecordingIfEnabled()))}},{key:"startSessionRecording",value:function(){this.set_config({disable_session_recording:!1})}},{key:"stopSessionRecording",value:function(){this.set_config({disable_session_recording:!0})}},{key:"sessionRecordingStarted",value:function(){var e;return!(null===(e=this.sessionRecording)||void 0===e||!e.started)}},{key:"loadToolbar",value:function(e){return this.toolbar.loadToolbar(e)}},{key:"get_property",value:function(e){var t;return null===(t=this.persistence)||void 0===t?void 0:t.props[e]}},{key:"getSessionProperty",value:function(e){var t;return null===(t=this.sessionPersistence)||void 0===t?void 0:t.props[e]}},{key:"toString",value:function(){var e,t=null!==(e=this.config.name)&&void 0!==e?e:Un;return 
t!==Un&&(t=Un+"."+t),t}},{key:"_gdpr_init",value:function(){"localStorage"===this.config.opt_out_capturing_persistence_type&&ze.is_supported()&&(!this.has_opted_in_capturing()&&this.has_opted_in_capturing({persistence_type:"cookie"})&&this.opt_in_capturing({enable_persistence:!1}),!this.has_opted_out_capturing()&&this.has_opted_out_capturing({persistence_type:"cookie"})&&this.opt_out_capturing({clear_persistence:!1}),this.clear_opt_in_out_capturing({persistence_type:"cookie",enable_persistence:!1})),this.has_opted_out_capturing()?this._gdpr_update_persistence({clear_persistence:!0}):this.has_opted_in_capturing()||!this.config.opt_out_capturing_by_default&&!We.get("ph_optout")||(We.remove("ph_optout"),this.opt_out_capturing({clear_persistence:this.config.opt_out_persistence_by_default}))}},{key:"_gdpr_update_persistence",value:function(e){var t,n,i,r,o;if(e&&e.clear_persistence)i=!0;else{if(!e||!e.enable_persistence)return;i=!1}this.config.disable_persistence||(null===(t=this.persistence)||void 0===t?void 0:t.disabled)===i||null===(r=this.persistence)||void 0===r||r.set_disabled(i),this.config.disable_persistence||(null===(n=this.sessionPersistence)||void 0===n?void 0:n.disabled)===i||null===(o=this.sessionPersistence)||void 0===o||o.set_disabled(i)}},{key:"_gdpr_call_func",value:function(e,t){return t=G({capture:this.capture.bind(this),persistence_type:this.config.opt_out_capturing_persistence_type,cookie_prefix:this.config.opt_out_capturing_cookie_prefix,cookie_expiration:this.config.cookie_expiration,cross_subdomain_cookie:this.config.cross_subdomain_cookie,secure_cookie:this.config.secure_cookie},t||{}),ze.is_supported()||"localStorage"!==t.persistence_type||(t.persistence_type="cookie"),e(this.config.token,{capture:t.capture,captureEventName:t.capture_event_name,captureProperties:t.capture_properties,persistenceType:t.persistence_type,persistencePrefix:t.cookie_prefix,cookieExpiration:t.cookie_expiration,crossSubdomainCookie:t.cross_subdomain_cookie,secureCookie:t.secure_cookie})}},{key:"opt_in_capturing",value:function(e){e=G({enable_persistence:!0},e||{}),this._gdpr_call_func(St,e),this._gdpr_update_persistence(e)}},{key:"opt_out_capturing",value:function(e){var t=G({clear_persistence:!0},e||{});this._gdpr_call_func(Ft,t),this._gdpr_update_persistence(t)}},{key:"has_opted_in_capturing",value:function(e){return this._gdpr_call_func(Pt,e)}},{key:"has_opted_out_capturing",value:function(e){return this._gdpr_call_func(It,e)}},{key:"clear_opt_in_out_capturing",value:function(e){var t=G({enable_persistence:!0},null!=e?e:{});this._gdpr_call_func(Et,t),this._gdpr_update_persistence(t)}},{key:"debug",value:function(e){!1===e?(D.console.log("You've disabled debug mode."),localStorage&&localStorage.removeItem("ph_debug"),this.set_config({debug:!1})):(D.console.log("You're now in debug mode. 
All calls to PostHog will be logged in your console.\nYou can disable this with `posthog.debug(false)`."),localStorage&&localStorage.setItem("ph_debug","true"),this.set_config({debug:!0}))}}]),e}();!function(e,t){for(var n=0;n5&&void 0!==arguments[5]?arguments[5]:"gaCustomEvent";if(window.envSettings.tracking){if(window.dataLayer){var c={event:i,eventCategory:e,eventAction:t,eventLabel:n,eventValue:r,eventResults:o};window.dataLayer.push(c)}window.gtag&&window.gtag("event","gaCustomEvent",{eventCategory:e,eventAction:t,eventLabel:n,eventValue:r,eventResults:o}),null!==(a=window.posthog)&&void 0!==a&&a.__loaded&&window.posthog.capture(i,{eventCategory:e,eventAction:t,eventLabel:n,eventValue:r})}}},39979:function(e,t,n){n.d(t,{Z:function(){return h}});n(47941),n(82526),n(57327),n(41539),n(88449),n(2490),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070);var r=n(87462),o=n(4942),a=n(45987),i=n(67294),c=n(54005),l=["callback","feature","isStart","isSuccess","isFailure","eventReason","payload"];function u(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function d(e){for(var t=1;t'}),S=(E().add(C),C),O=function(e){return a.createElement(c.Flex,(0,r.Z)({as:"svg",height:S.height,width:S.width,viewBox:S.viewBox},e),a.createElement("use",{xlinkHref:"#".concat(S.id)}))},Z=n(92432),_=(0,n(39979).Z)(c.Button),F=f.Z.demoSlug,j=function(){var e=(0,m.Z)((function(){return{fetch:h}}),[]),t=(0,g.Z)(e,1)[0],n=(0,a.useMemo)((function(){var e,n,r;return Array.isArray(t)?{nodesOnline:null===(e=t.find((function(e){return"Nodes Online"===e.key})))||void 0===e?void 0:e.value,githubStars:null===(n=t.find((function(e){return"GitHub Stars"===e.key})))||void 0===n?void 0:n.value,dockerHubPulls:null===(r=t.find((function(e){return"DockerHub Pulls"===e.key})))||void 0===r?void 0:r.value}:{}}),[t]),r=n.nodesOnline,o=n.githubStars,i=n.dockerHubPulls;return a.createElement(c.Flex,{column:!0,gap:6},a.createElement(O,{height:"160px",padding:[0,0,4,0]}),a.createElement(c.Flex,{column:!0,gap:3},a.createElement(y,{downScale:[c.TextBigger,c.TextBigger],color:"bright",textAlign:"center"},a.createElement(y,{downScale:[c.TextBigger,c.TextBigger],color:r?"primaryHighlight":"bright",textAlign:"center",strong:!0},r||"..."),a.createElement(c.Box,{as:"span",margin:[0,0,0,3]},"Nodes Online")),a.createElement(y,{downScale:[c.TextBigger,c.TextBigger],color:"bright",textAlign:"center"},a.createElement(y,{downScale:[c.TextBigger,c.TextBigger],color:o?"primaryHighlight":"bright",textAlign:"center",strong:!0},o||"..."),a.createElement(c.Box,{as:"span",margin:[0,0,0,3]},"GitHub Stars")),a.createElement(y,{downScale:[c.TextBigger,c.TextBigger],color:"bright",textAlign:"center"},a.createElement(y,{downScale:[c.TextBigger,c.TextBigger],color:i?"primaryHighlight":"bright",textAlign:"center",strong:!0},i||"..."),a.createElement(c.Box,{as:"span",margin:[0,0,0,3]},"DockerHub Pulls"))),!window.envSettings.onprem&&a.createElement(c.Flex,{justifyContent:"center",padding:[4,0,0,0]},a.createElement(_,{as:"a","data-ga":"signinup::click-demo::sni-view",href:"/spaces/".concat(F),target:"_blank",onClick:function(){(0,Z.L)("signinup","click-demo","sni-view")},label:"Live Demo",padding:[2,8]})))},M=function(e){var t=e.icon,n=e.title,r=e.text,o=(0,i.Z)("(min-width: 475px)");return 
a.createElement(c.Flex,{gap:4,alignItems:"start"},o&&a.createElement(c.Flex,{background:"transparent",justifyContent:"center",alignItems:"center",height:"64px",width:"64px",round:64,flex:"grow",border:{side:"all",color:"successLite"}},a.createElement(c.Icon,{name:t,height:"20px",widht:"20px",color:"successLite"})),a.createElement(c.Flex,{column:!0,justifyContent:"start",alignItems:"start",gap:2},a.createElement(c.TextBig,{color:"bright",strong:!0},n),a.createElement(c.Text,{color:"selected"},r)))},B=n(78312),I={url:"https://learn.netdata.cloud/",title:"docs"},P={url:"https://community.netdata.cloud/c/support/cloud-support/15",title:"forums"},T={url:"https://discord.gg/TjM6XCwC4e",title:"public discord channel"},D={url:"mailto:support@netdata.cloud",title:"Get support"},H=function(){return a.createElement(c.Text,{textAlign:"center",color:"bright"},"Need help?"," ",window.envSettings.onprem?a.createElement(a.Fragment,null,a.createElement(B.Fg,{href:D.url,target:"_blank",rel:"noopener noreferrer"},D.title)," ","or you may also want to check"):"Check"," ","out our"," ",a.createElement(B.Fg,{href:I.url,target:"_blank",rel:"noopener noreferrer"},I.title,",")," ",a.createElement(B.Fg,{href:P.url,target:"_blank",rel:"noopener noreferrer"},P.title)," ","or"," ",a.createElement(B.Fg,{href:T.url,target:"_blank",rel:"noopener noreferrer"},T.title))},N=n(87464),L=function(){return a.createElement(c.Flex,{column:!0,padding:[16,4,12],width:{max:"500px"},margin:[0,"auto"],gap:14},a.createElement(j,null),a.createElement(c.Flex,{column:!0,gap:8},s.map((function(e,t){return a.createElement(M,(0,r.Z)({key:t},e))}))),a.createElement(c.Flex,{column:!0,gap:6,alignItems:"center"},a.createElement(H,null),a.createElement(N.Z,null),a.createElement(c.Flex,{column:!0,gap:4,alignItems:"center"},a.createElement(u.lR,{theme:"dark"}),a.createElement(c.TextSmall,{textAlign:"center",color:"bright"},"Netdata is a member of the Cloud Native Computing Foundation (CNCF), and it is one of the most starred projects in the CNCF landscape."))))},A=["children"],R=function(e){var t=e.children,n=(0,o.Z)(e,A),u=(0,i.Z)("(min-width: 998px)");return a.createElement(c.Flex,(0,r.Z)({height:{min:"100vh"},flexWrap:!0},n),!window.envSettings.onprem&&a.createElement(l,{background:(0,c.getColor)(["neutral","grey25"])({theme:c.DarkTheme}),order:u?0:1},a.createElement(L,null)),a.createElement(l,{background:"mainBackground",order:u?1:0},a.createElement(c.Flex,{background:"transparent",column:!0,gap:8,padding:[12,4],width:{max:"500px"},margin:[0,"auto"]},a.createElement(d,null),t)))}},78312:function(e,t,n){n.d(t,{Fg:function(){return c},P2:function(){return u},Sn:function(){return d},U5:function(){return l},Yb:function(){return s},xG:function(){return i}});var r=n(71893),o=n(93416),a=n(16772),i=r.default.div.withConfig({displayName:"styled__SvgContainer",componentId:"sc-16ytcl4-0"})(["width:42px;height:42px;flex-shrink:0;display:flex;justify-content:center;align-items:center;border-radius:2px;background:white;"]),c=r.default.a.withConfig({displayName:"styled__StyledLink",componentId:"sc-16ytcl4-1"})(["display:inline-flex;align-items:center;text-decoration:none;color:",";cursor:pointer;&:hover{text-decoration:underline;color:"," !important;}&:visited{color:",";}> svg{fill:",";padding-right:",";}"],(0,o.getColor)("success"),(0,o.getColor)("success"),(0,o.getColor)("success"),(0,o.getColor)("main"),(0,o.getSizeBy)(1)),l=(0,r.default)(a.Z).withConfig({displayName:"styled__EmailInput",componentId:"sc-16ytcl4-2"})(["",""],(function(e){return 
e.isLastSignInMethod?"border: 2px solid green;":""})),u=(0,r.default)(o.Checkbox).withConfig({displayName:"styled__StyledCheckbox",componentId:"sc-16ytcl4-3"})(["margin:0 "," 0 0;& div:last-child{border-color:",";}"],(0,o.getSizeBy)(2),(function(e){return e.error&&(0,o.getColor)("error")})),d=(0,r.default)(o.Button).withConfig({displayName:"styled__StyledButton",componentId:"sc-16ytcl4-4"})(["&&{height:44px;}"]),s=(0,r.default)(o.Flex).attrs((function(e){var t=e.gap;return{column:!0,gap:void 0===t?8:t,alignSelf:"center",padding:[0,0,8,0],border:{side:"bottom",color:"disabled"},width:{max:"320px"}}})).withConfig({displayName:"styled__FormContainer",componentId:"sc-16ytcl4-5"})(["width:100%;"])},87562:function(e,t,n){n.d(t,{$:function(){return o},G:function(){return a}});n(26699),n(32023),n(92222),n(74916),n(64765);var r=n(58591),o=function(e,t){var n=(0,r.kG)(["expires_at",t]),o=t.includes("join-callback")?decodeURIComponent(n):n;return"".concat(window.location.origin).concat(e,"#").concat(o)},a=function(e,t){var n=window.location,r=n.search,a=n.hash,i=encodeURIComponent(o("/sign-in".concat(r).concat(r.length?"&":"?","oauth=").concat(e,"&"),a)),c=encodeURIComponent(o("/sign-up/verify".concat(r).concat(r.length?"&":"?","oauth=").concat(e,"&"),a));return"/api/v2/auth/account/".concat(e,"?redirect_uri=").concat(i,"®ister_uri=").concat(c).concat(t?"&is_unverified_registration=true":"")}},16772:function(e,t,n){var r=n(87462),o=n(45987),a=n(67294),i=n(93416),c=["onChange","value","onKeyDown","label"];t.Z=function(e){var t=e.onChange,n=e.value,l=e.onKeyDown,u=e.label,d=(0,o.Z)(e,c);return a.createElement(i.TextInput,(0,r.Z)({label:u,name:"userEmail",placeholder:"Enter an email address",value:n,onChange:t,onKeyDown:l},d))}}}]); \ No newline at end of file diff --git a/web/gui/v2/Makefile.am b/web/gui/v2/Makefile.am index a99fae0dae4fe2..105f6a521f891c 100644 --- a/web/gui/v2/Makefile.am +++ b/web/gui/v2/Makefile.am @@ -10,108 +10,119 @@ dist_noinst_DATA = \ webv2dir=$(webdir)/v2 dist_webv2_DATA = \ - $(srcdir)/1115.6acb1d00b53342cf4a94.chunk.js \ - $(srcdir)/1193.3f76ed755c2417f01c88.chunk.js \ - $(srcdir)/1282.f65cc3329e7e3eb8e645.js \ - $(srcdir)/161.c33d27d7097fd45f278a.chunk.js \ - $(srcdir)/161.c33d27d7097fd45f278a.chunk.js.LICENSE.txt \ - $(srcdir)/1655.f1c01cc3ba8b07dd8fae.chunk.js \ - $(srcdir)/2008.abd553afe7a6bed8cfc0.chunk.js \ - $(srcdir)/2097.d9ade1233ce20401ea8c.chunk.js \ - $(srcdir)/2701.89070793921be1288bb5.css \ - $(srcdir)/2701.98a4d24406e365a6ddf2.chunk.js \ - $(srcdir)/2833.78752757c7ac33d196dc.js \ - $(srcdir)/2833.78752757c7ac33d196dc.js.LICENSE.txt \ - $(srcdir)/2934.47ca322b2e59e64a0dae.chunk.js \ - $(srcdir)/3018.6eb82186a4656d2fce5d.chunk.js \ - $(srcdir)/3018.6eb82186a4656d2fce5d.chunk.js.LICENSE.txt \ - $(srcdir)/3032.7b4a2db28af84cd77c29.js \ - $(srcdir)/3071.91b1f856187aeafde398.chunk.js \ - $(srcdir)/3173.aedc1e477983499117c7.js \ - $(srcdir)/3173.aedc1e477983499117c7.js.LICENSE.txt \ - $(srcdir)/3241.c7a7e5d69626a9fb46d7.chunk.js \ - $(srcdir)/3495.7af81a22f9d135da8cbe.js \ - $(srcdir)/3495.7af81a22f9d135da8cbe.js.LICENSE.txt \ - $(srcdir)/3564.ba0e994ade7f97d72c01.chunk.js \ + $(srcdir)/1178.b54a742702a74832f653.chunk.js \ + $(srcdir)/1193.efd539c053944de2599b.chunk.js \ + $(srcdir)/1277.884a6e09fbb50c214d8e.chunk.js \ + $(srcdir)/1277.884a6e09fbb50c214d8e.chunk.js.LICENSE.txt \ + $(srcdir)/1282.8f29fe3a597e5ef695e5.js \ + $(srcdir)/1452.c2c843e34d17591ebafe.chunk.js \ + $(srcdir)/1729.b8957abec2922839104a.chunk.js \ + $(srcdir)/2337.40d93ac60f1ccbb653ae.chunk.js \ 
+ $(srcdir)/2357.cd84c87ed27d878a2371.chunk.js \ + $(srcdir)/2406.f2927edc2116c8e03b3b.js \ + $(srcdir)/2406.f2927edc2116c8e03b3b.js.LICENSE.txt \ + $(srcdir)/2533.2b6849df953c0d8de977.js \ + $(srcdir)/2533.2b6849df953c0d8de977.js.LICENSE.txt \ + $(srcdir)/2713.2f8abb33ec163445f81d.chunk.js \ + $(srcdir)/2773.36292453483e1b60ea2b.chunk.js \ + $(srcdir)/2785.b5a9958703db6fd26399.chunk.js \ + $(srcdir)/281.93a7bf4372306eb81264.chunk.js \ + $(srcdir)/285.40bdfb7ccf71eab2df71.chunk.js \ + $(srcdir)/2934.787f7366de588d5e4212.chunk.js \ + $(srcdir)/3017.7a600c57c76d95b7de22.chunk.js \ + $(srcdir)/3169.9aa93f115e8a19171f69.chunk.js \ + $(srcdir)/3212.ef9057f6ac28ed55d62a.chunk.js \ + $(srcdir)/3241.fe7d3a8eca88df253a60.chunk.js \ + $(srcdir)/3253.48f218e54858427e997b.chunk.js \ + $(srcdir)/3253.89070793921be1288bb5.css \ + $(srcdir)/3283.88c006d10b31f8ed8088.chunk.js \ $(srcdir)/3981.ccb665950325037c0dda.css \ $(srcdir)/3D_PARTY_LICENSES.txt \ - $(srcdir)/4193.f5c9a2d9750a5bd2762d.chunk.js \ - $(srcdir)/4324.cbc343a58b942aec5218.chunk.js \ - $(srcdir)/4480.acae0ad582eb5265622a.js \ - $(srcdir)/4523.e41d6aac9a6433f9efb2.js \ - $(srcdir)/4523.e41d6aac9a6433f9efb2.js.LICENSE.txt \ - $(srcdir)/4532.0b0105ffbdd6db6f5d9a.js \ - $(srcdir)/4532.0b0105ffbdd6db6f5d9a.js.LICENSE.txt \ - $(srcdir)/4581.a60c1ffca04af99239c9.chunk.js \ - $(srcdir)/4744.38c08ef7e8943fa44006.chunk.js \ - $(srcdir)/4814.31d804681a19b084daa5.chunk.js \ - $(srcdir)/4890.24af5fbe5015c0b06c90.js \ - $(srcdir)/4915.245eefea4f250bc84a58.chunk.js \ - $(srcdir)/4934.565896e76ef20d10f992.chunk.js \ - $(srcdir)/5091.07dfc76b1d5c1623c330.chunk.js \ - $(srcdir)/5176.9ecb50692b5be2b8a5e2.js \ - $(srcdir)/5176.9ecb50692b5be2b8a5e2.js.LICENSE.txt \ - $(srcdir)/5316.0471244afc59c0d0d688.chunk.js \ - $(srcdir)/5451.b7da2b924e4d74fa28fc.chunk.js \ - $(srcdir)/5575.f2affb99b534dc6b7f3c.chunk.js \ - $(srcdir)/5575.f2affb99b534dc6b7f3c.chunk.js.LICENSE.txt \ - $(srcdir)/5623.d08ebc475a57a44d926c.js \ - $(srcdir)/5765.a33732202b95bbb627db.chunk.js \ - $(srcdir)/5969.f77624ecac93d1a600f5.chunk.js \ - $(srcdir)/597.f721ec431cd86411331e.chunk.js \ - $(srcdir)/6129.b1dace954d671f303383.chunk.js \ - $(srcdir)/6143.43acacdf8b2b70da410f.chunk.js \ - $(srcdir)/6252.c8a3dda4559b4b1a290f.chunk.js \ - $(srcdir)/6264.900c132d66035feb8143.chunk.js \ - $(srcdir)/6502.7c1716799823661c447d.chunk.js \ - $(srcdir)/6610.af47b6cda809af7dc878.chunk.js \ - $(srcdir)/6613.384da655707f4c3b6153.css \ - $(srcdir)/6613.b8903cda67bd33100ce4.chunk.js \ - $(srcdir)/6613.b8903cda67bd33100ce4.chunk.js.LICENSE.txt \ - $(srcdir)/6654.1a629783ec67ee7b2535.chunk.js \ - $(srcdir)/6723.c82b4d5b9c7d8207b985.chunk.js \ + $(srcdir)/4586.b2ee472b3fc901d6c88b.js \ + $(srcdir)/4586.b2ee472b3fc901d6c88b.js.LICENSE.txt \ + $(srcdir)/4744.2f7f0a1a4febd58f933a.chunk.js \ + $(srcdir)/4814.4aa06ec12491f116137a.chunk.js \ + $(srcdir)/4934.3ffbe62edd432c5a801d.chunk.js \ + $(srcdir)/498.8ac2616b1f317e54ddd6.chunk.js \ + $(srcdir)/498.8ac2616b1f317e54ddd6.chunk.js.LICENSE.txt \ + $(srcdir)/5100.70bf09de4a0223d97932.chunk.js \ + $(srcdir)/5112.93d13acdc6a1c411810a.chunk.js \ + $(srcdir)/5158.76b96a61b88ac13e64fb.js \ + $(srcdir)/5158.76b96a61b88ac13e64fb.js.LICENSE.txt \ + $(srcdir)/5575.d6c19cc08835f9f741cf.chunk.js \ + $(srcdir)/5575.d6c19cc08835f9f741cf.chunk.js.LICENSE.txt \ + $(srcdir)/5756.5bcfd00c65bb1f62ded1.chunk.js \ + $(srcdir)/5969.a5bd320bfdea9db1c60f.chunk.js \ + $(srcdir)/597.787ec8d8543dac902833.chunk.js \ + $(srcdir)/6129.bd0a9b3cc7f4c6f95728.chunk.js \ + 
$(srcdir)/6152.d7e1e2bfdb9cb0ae978f.js \ + $(srcdir)/6181.007ffc3728cc5a74130a.chunk.js \ + $(srcdir)/6187.0b79ef0afe565349e1bc.js \ + $(srcdir)/6187.0b79ef0afe565349e1bc.js.LICENSE.txt \ + $(srcdir)/6288.3316ccd0d95f1d801d18.chunk.js \ + $(srcdir)/6347.886fd82050c23f2e2db2.chunk.js \ + $(srcdir)/6502.94c9d91d866d8a75769b.chunk.js \ + $(srcdir)/6610.2be4959755f28fc7f452.chunk.js \ + $(srcdir)/6723.080a1abe2d9291d1909e.chunk.js \ $(srcdir)/6723.cc9fa5f3bdc0bf3ab2fc.css \ - $(srcdir)/6817.a41c740ef4ad290ddc09.chunk.js \ - $(srcdir)/7241.dae29a2c5dba9d8b64c6.chunk.js \ - $(srcdir)/7359.47dc8a0852f6cefdf8e4.chunk.js \ - $(srcdir)/7514.685fae6aee82518a9737.chunk.js \ - $(srcdir)/7514.685fae6aee82518a9737.chunk.js.LICENSE.txt \ - $(srcdir)/7707.d32bdcf8038b7eebaa97.js \ - $(srcdir)/7707.d32bdcf8038b7eebaa97.js.LICENSE.txt \ - $(srcdir)/8086.9d0c359423067e788807.chunk.js \ - $(srcdir)/8102.0d5c0d9f32667fc42e0c.chunk.js \ - $(srcdir)/8282.85c31db36364366177ab.chunk.js \ - $(srcdir)/8447.37fff40af8864776d155.chunk.js \ - $(srcdir)/8663.defe390dbe87f8ebb98f.chunk.js \ - $(srcdir)/8837.c7fd14cf3df616fdcc8f.chunk.js \ - $(srcdir)/8977.1e728c5c7e9af0e0089b.chunk.js \ - $(srcdir)/9020.afb7f9501284f53ab885.chunk.js \ - $(srcdir)/9201.3b4bde3431aac911f02e.chunk.js \ - $(srcdir)/9360.eda00d2b12ba6fe04e3e.chunk.js \ - $(srcdir)/9510.dfc219c382691661c69a.chunk.js \ - $(srcdir)/9851.cd13a054c85cef198291.chunk.js \ + $(srcdir)/6817.d23c368d61fe2ff69315.chunk.js \ + $(srcdir)/6848.6bbd4e759ec15bf74d51.chunk.js \ + $(srcdir)/6848.89070793921be1288bb5.css \ + $(srcdir)/6876.b74cba4369d8345b7f95.chunk.js \ + $(srcdir)/6914.bd8de0fb338f16858f0f.chunk.js \ + $(srcdir)/7068.5b656c774fade4cea185.chunk.js \ + $(srcdir)/7154.b209cde4e1813acc2cd4.chunk.js \ + $(srcdir)/7514.ea3b186181ecff7f8ae5.chunk.js \ + $(srcdir)/7514.ea3b186181ecff7f8ae5.chunk.js.LICENSE.txt \ + $(srcdir)/8099.4838cb5caf85574b5c9d.js \ + $(srcdir)/8099.4838cb5caf85574b5c9d.js.LICENSE.txt \ + $(srcdir)/8102.3b2b80fe00d67e577367.chunk.js \ + $(srcdir)/8276.cb877f5ee79184a9e0fb.chunk.js \ + $(srcdir)/8459.576da4e194a7e4007f03.css \ + $(srcdir)/8459.add89d7bb0434b110cd3.chunk.js \ + $(srcdir)/8459.add89d7bb0434b110cd3.chunk.js.LICENSE.txt \ + $(srcdir)/8663.c6d53b400dd7461702e6.chunk.js \ + $(srcdir)/88.fbc9567094081f35e29c.chunk.js \ + $(srcdir)/8977.321a7a13b267546a6e7c.chunk.js \ + $(srcdir)/90.c0f1d633c6e943af5628.chunk.js \ + $(srcdir)/9020.618bce1feb9efd7ead50.chunk.js \ + $(srcdir)/9201.7f3fc933d8e1520a2f6a.chunk.js \ + $(srcdir)/923.00170bf7762cedfa762e.chunk.js \ + $(srcdir)/9360.de29630b4dcacbeb5ecd.chunk.js \ + $(srcdir)/9510.dec77b81a86e7cd2ff86.chunk.js \ + $(srcdir)/9513.68ac17c54e2a98d13112.js \ + $(srcdir)/9513.68ac17c54e2a98d13112.js.LICENSE.txt \ + $(srcdir)/9594.89070793921be1288bb5.css \ + $(srcdir)/9594.b5b73051fd8e1b9901f1.chunk.js \ + $(srcdir)/9768.3e539b24a5eb4979ea85.chunk.js \ + $(srcdir)/9886.7d6c2ffca80bd567e52d.chunk.js \ + $(srcdir)/9893.b40d1bffe447771a2e43.chunk.js \ + $(srcdir)/9895.3d7f7d9acb001bf41011.chunk.js \ + $(srcdir)/9975.e14f6e47cf3025af8628.chunk.js \ $(srcdir)/LICENSE.md \ $(srcdir)/agent.html \ - $(srcdir)/allFiles.6.29.0.json \ + $(srcdir)/allFiles.6.66.1.json \ $(srcdir)/allFiles.6.json \ - $(srcdir)/app.0917ff2bf5d3b8b0678d.css \ - $(srcdir)/app.7bf3bd12482ad161443d.js \ + $(srcdir)/app.cb2e9f9a81cf9533384e.css \ + $(srcdir)/app.efc4ebf75cd13cc4f5f4.js \ + $(srcdir)/apple-app-site-association \ $(srcdir)/bundlesManifest.6.json \ - $(srcdir)/editor.b20cc786651a0c83801c.chunk.js \ + 
$(srcdir)/editor.ae2bfa9dd703149cdd28.chunk.js \ $(srcdir)/favicon.ico \ $(srcdir)/index.html \ $(srcdir)/local-agent.html \ - $(srcdir)/npm.react.dom.6431597f0353cbef2a34.js \ - $(srcdir)/npm.react.dom.6431597f0353cbef2a34.js.LICENSE.txt \ + $(srcdir)/npm.react.dom.443419261632ac7d4e78.js \ + $(srcdir)/npm.react.dom.443419261632ac7d4e78.js.LICENSE.txt \ $(srcdir)/registry-access.html \ $(srcdir)/registry-alert-redirect.html \ $(srcdir)/registry-hello.html \ - $(srcdir)/runtime.e3716b90b888609b7a5c.js \ + $(srcdir)/runtime.e7f678759af562297436.js \ $(srcdir)/sw.js \ $(NULL) webv2staticdir=$(webv2dir)/static dist_webv2static_DATA = \ + $(srcdir)/static/apple-app-site-association \ $(srcdir)/static/splash.css \ $(NULL) @@ -138,6 +149,7 @@ dist_webv2staticemailimg_DATA = \ webv2staticimgdir=$(webv2staticdir)/img dist_webv2staticimg_DATA = \ $(srcdir)/static/img/list-style-image.svg \ + $(srcdir)/static/img/new-dashboard.svg \ $(srcdir)/static/img/no-filter-results.png \ $(srcdir)/static/img/no-nodes-room.svg \ $(srcdir)/static/img/rack.png \ @@ -168,6 +180,7 @@ dist_webv2staticimglogosos_DATA = \ $(srcdir)/static/img/logos/os/placeholder.svg \ $(srcdir)/static/img/logos/os/raspberry-pi.svg \ $(srcdir)/static/img/logos/os/redhat.svg \ + $(srcdir)/static/img/logos/os/rocky.svg \ $(srcdir)/static/img/logos/os/suse.svg \ $(srcdir)/static/img/logos/os/ubuntu.svg \ $(NULL) diff --git a/web/gui/v2/agent.html b/web/gui/v2/agent.html index d63f049638531d..5ffbdb7505336e 100644 --- a/web/gui/v2/agent.html +++ b/web/gui/v2/agent.html @@ -24,13 +24,15 @@ apiUrl: "https://app.netdata.cloud", cloudUrl: "https://app.netdata.cloud", demoSlug: "netdata-demo", - demoFavourites: {"postgresql":["postgres"],"redis":["redis"],"dns-query":["dns_query"],"http-endpoints":["httpcheck"],"nginx":["web_log","nginx"],"apache":["apache"],"host-reachability":["ping"],"cassandra":["cassandra"],"coredns":["coredns"],"logind":["logind"],"iis":["iis"],"active-directory":["ad"],"windows":["windows","ad","iis","mssql","exchange","netframework"],"docker":["cgroup","docker"]}, + demoFavourites: {"postgresql":["postgres"],"redis":["redis"],"dns-query":["dns_query"],"http-endpoints":["httpcheck"],"nginx":["web_log","nginx"],"apache":["apache"],"host-reachability":["ping"],"cassandra":["cassandra"],"coredns":["coredns"],"logind":["logind"],"iis":["iis"],"active-directory":["ad"],"windows":["windows","ad","iis","mssql","exchange","netframework"],"docker":["cgroup","docker"],"ups":["upsd"]}, webpackPublicPath: "https://app.netdata.cloud", agentApiUrl: searchParams.get("agent") || getBasename(), - version: "6.29.0", + posthogToken: "phc_hnhlqe6D2Q4IcQNrFItaqdXJAxQ8RcHkPAFAp74pubv", + version: "6.66.1", tracking: false, cookieDomain: ".netdata.cloud", - onprem: false + onprem: false, + nodeEnv: "production" } function loadStyle(url, { media, insertAfter: aref, insertBefore: bref, rel, type } = {}) { @@ -66,7 +68,7 @@ } loadStyle(window.envSettings.agentApiUrl + "/v2/static/splash.css") - loadStyle(window.envSettings.agentApiUrl + "/v2/favicon.ico", {rel: "icon", type: "image/x-icon"})

    Welcome to Netdata

    Loading latest Netdata UI...

    We couldn't load the latest Netdata UI. You can try again
    Or you can load the old single node dashboard or a local copy of Netdata UI

    \ No newline at end of file + }).catch(() => {}) \ No newline at end of file diff --git a/web/gui/v2/allFiles.6.29.0.json b/web/gui/v2/allFiles.6.66.1.json similarity index 69% rename from web/gui/v2/allFiles.6.29.0.json rename to web/gui/v2/allFiles.6.66.1.json index 4f10e921584a55..25d1b211989e19 100644 --- a/web/gui/v2/allFiles.6.29.0.json +++ b/web/gui/v2/allFiles.6.66.1.json @@ -1,5 +1,88 @@ { "bundlesManifest.json": "/bundlesManifest.6.json", +<<<<<<<< HEAD:web/gui/v2/allFiles.6.66.1.json + "app.css": "/app.cb2e9f9a81cf9533384e.css", + "app.js": "/app.efc4ebf75cd13cc4f5f4.js", + "runtime.js": "/runtime.e7f678759af562297436.js", + "9360.chunk.js": "/9360.de29630b4dcacbeb5ecd.chunk.js", + "3169.chunk.js": "/3169.9aa93f115e8a19171f69.chunk.js", + "4934.chunk.js": "/4934.3ffbe62edd432c5a801d.chunk.js", + "9895.chunk.js": "/9895.3d7f7d9acb001bf41011.chunk.js", + "6817.chunk.js": "/6817.d23c368d61fe2ff69315.chunk.js", + "1178.chunk.js": "/1178.b54a742702a74832f653.chunk.js", + "8977.chunk.js": "/8977.321a7a13b267546a6e7c.chunk.js", + "9768.chunk.js": "/9768.3e539b24a5eb4979ea85.chunk.js", + "7154.chunk.js": "/7154.b209cde4e1813acc2cd4.chunk.js", + "9510.chunk.js": "/9510.dec77b81a86e7cd2ff86.chunk.js", + "6723.css": "/6723.cc9fa5f3bdc0bf3ab2fc.css", + "6723.chunk.js": "/6723.080a1abe2d9291d1909e.chunk.js", + "4814.chunk.js": "/4814.4aa06ec12491f116137a.chunk.js", + "4744.chunk.js": "/4744.2f7f0a1a4febd58f933a.chunk.js", + "2934.chunk.js": "/2934.787f7366de588d5e4212.chunk.js", + "9594.css": "/9594.89070793921be1288bb5.css", + "9594.chunk.js": "/9594.b5b73051fd8e1b9901f1.chunk.js", + "3017.chunk.js": "/3017.7a600c57c76d95b7de22.chunk.js", + "88.chunk.js": "/88.fbc9567094081f35e29c.chunk.js", + "2713.chunk.js": "/2713.2f8abb33ec163445f81d.chunk.js", + "5969.chunk.js": "/5969.a5bd320bfdea9db1c60f.chunk.js", + "8102.chunk.js": "/8102.3b2b80fe00d67e577367.chunk.js", + "3283.chunk.js": "/3283.88c006d10b31f8ed8088.chunk.js", + "1729.chunk.js": "/1729.b8957abec2922839104a.chunk.js", + "6848.css": "/6848.89070793921be1288bb5.css", + "6848.chunk.js": "/6848.6bbd4e759ec15bf74d51.chunk.js", + "2337.chunk.js": "/2337.40d93ac60f1ccbb653ae.chunk.js", + "3253.css": "/3253.89070793921be1288bb5.css", + "3253.chunk.js": "/3253.48f218e54858427e997b.chunk.js", + "5112.chunk.js": "/5112.93d13acdc6a1c411810a.chunk.js", + "9886.chunk.js": "/9886.7d6c2ffca80bd567e52d.chunk.js", + "6914.chunk.js": "/6914.bd8de0fb338f16858f0f.chunk.js", + "8276.chunk.js": "/8276.cb877f5ee79184a9e0fb.chunk.js", + "6288.chunk.js": "/6288.3316ccd0d95f1d801d18.chunk.js", + "6610.chunk.js": "/6610.2be4959755f28fc7f452.chunk.js", + "9201.chunk.js": "/9201.7f3fc933d8e1520a2f6a.chunk.js", + "6347.chunk.js": "/6347.886fd82050c23f2e2db2.chunk.js", + "6876.chunk.js": "/6876.b74cba4369d8345b7f95.chunk.js", + "2357.chunk.js": "/2357.cd84c87ed27d878a2371.chunk.js", + "6181.chunk.js": "/6181.007ffc3728cc5a74130a.chunk.js", + "1452.chunk.js": "/1452.c2c843e34d17591ebafe.chunk.js", + "597.chunk.js": "/597.787ec8d8543dac902833.chunk.js", + "2785.chunk.js": "/2785.b5a9958703db6fd26399.chunk.js", + "editor.js": "/editor.ae2bfa9dd703149cdd28.chunk.js", + "9020.chunk.js": "/9020.618bce1feb9efd7ead50.chunk.js", + "npm.react.dom.js": "/npm.react.dom.443419261632ac7d4e78.js", + "2406.js": "/2406.f2927edc2116c8e03b3b.js", + "1277.chunk.js": "/1277.884a6e09fbb50c214d8e.chunk.js", + "7068.chunk.js": "/7068.5b656c774fade4cea185.chunk.js", + "285.chunk.js": "/285.40bdfb7ccf71eab2df71.chunk.js", + "7514.chunk.js": "/7514.ea3b186181ecff7f8ae5.chunk.js", + 
"3241.chunk.js": "/3241.fe7d3a8eca88df253a60.chunk.js", + "8459.css": "/8459.576da4e194a7e4007f03.css", + "8459.chunk.js": "/8459.add89d7bb0434b110cd3.chunk.js", + "498.chunk.js": "/498.8ac2616b1f317e54ddd6.chunk.js", + "923.chunk.js": "/923.00170bf7762cedfa762e.chunk.js", + "1193.chunk.js": "/1193.efd539c053944de2599b.chunk.js", + "5575.chunk.js": "/5575.d6c19cc08835f9f741cf.chunk.js", + "9893.chunk.js": "/9893.b40d1bffe447771a2e43.chunk.js", + "6502.chunk.js": "/6502.94c9d91d866d8a75769b.chunk.js", + "8663.chunk.js": "/8663.c6d53b400dd7461702e6.chunk.js", + "6129.chunk.js": "/6129.bd0a9b3cc7f4c6f95728.chunk.js", + "90.chunk.js": "/90.c0f1d633c6e943af5628.chunk.js", + "2773.chunk.js": "/2773.36292453483e1b60ea2b.chunk.js", + "9975.chunk.js": "/9975.e14f6e47cf3025af8628.chunk.js", + "3212.chunk.js": "/3212.ef9057f6ac28ed55d62a.chunk.js", + "5100.chunk.js": "/5100.70bf09de4a0223d97932.chunk.js", + "5756.chunk.js": "/5756.5bcfd00c65bb1f62ded1.chunk.js", + "3981.css": "/3981.ccb665950325037c0dda.css", + "281.chunk.js": "/281.93a7bf4372306eb81264.chunk.js", + "2533.js": "/2533.2b6849df953c0d8de977.js", + "6152.js": "/6152.d7e1e2bfdb9cb0ae978f.js", + "8099.js": "/8099.4838cb5caf85574b5c9d.js", + "4586.js": "/4586.b2ee472b3fc901d6c88b.js", + "1282.js": "/1282.8f29fe3a597e5ef695e5.js", + "9513.js": "/9513.68ac17c54e2a98d13112.js", + "5158.js": "/5158.76b96a61b88ac13e64fb.js", + "6187.js": "/6187.0b79ef0afe565349e1bc.js", +======== "app.css": "/app.0917ff2bf5d3b8b0678d.css", "app.js": "/app.7bf3bd12482ad161443d.js", "runtime.js": "/runtime.e3716b90b888609b7a5c.js", @@ -72,6 +155,7 @@ "4523.js": "/4523.e41d6aac9a6433f9efb2.js", "4480.js": "/4480.acae0ad582eb5265622a.js", "4890.js": "/4890.24af5fbe5015c0b06c90.js", +>>>>>>>> agora-main:web/gui/v2/allFiles.6.29.0.json "3D_PARTY_LICENSES.txt": "/3D_PARTY_LICENSES.txt", "static/site/pages/holding-page-503/holding-page-503.svg": "/static/site/pages/holding-page-503/holding-page-503.svg", "favicon.ico": "/favicon.ico", @@ -104,6 +188,10 @@ "static/img/logos/services/golang.svg": "/static/img/logos/services/golang.svg", "agent.html": "/agent.html", "local-agent.html": "/local-agent.html", +<<<<<<<< HEAD:web/gui/v2/allFiles.6.66.1.json + "static/img/new-dashboard.svg": "/static/img/new-dashboard.svg", +======== +>>>>>>>> agora-main:web/gui/v2/allFiles.6.29.0.json "static/img/logos/services/activemq.svg": "/static/img/logos/services/activemq.svg", "static/img/logos/os/kubernetes.svg": "/static/img/logos/os/kubernetes.svg", "static/img/logos/services/kubernetes.svg": "/static/img/logos/services/kubernetes.svg", @@ -113,6 +201,7 @@ "static/email/img/isotype_600.png": "/static/email/img/isotype_600.png", "static/img/logos/services/irc.svg": "/static/img/logos/services/irc.svg", "static/img/logos/services/tomcat.svg": "/static/img/logos/services/tomcat.svg", + "registry-alert-redirect.html": "/registry-alert-redirect.html", "static/img/logos/services/mariadb.svg": "/static/img/logos/services/mariadb.svg", "static/img/logos/services/openzfs.svg": "/static/img/logos/services/openzfs.svg", "static/img/logos/services/veritas.svg": "/static/img/logos/services/veritas.svg", @@ -124,7 +213,6 @@ "static/img/logos/services/btrfs.svg": "/static/img/logos/services/btrfs.svg", "static/img/logos/services/adaptec.svg": "/static/img/logos/services/adaptec.svg", "static/img/logos/services/icecast.svg": "/static/img/logos/services/icecast.svg", - "registry-alert-redirect.html": "/registry-alert-redirect.html", "static/img/logos/services/xen.svg": 
"/static/img/logos/services/xen.svg", "static/img/logos/services/haproxy.svg": "/static/img/logos/services/haproxy.svg", "static/img/logos/services/tor.svg": "/static/img/logos/services/tor.svg", @@ -259,12 +347,100 @@ "static/site/pages/holding-page-503/reset.svg": "/static/site/pages/holding-page-503/reset.svg", "static/img/logos/services/twilio.svg": "/static/img/logos/services/twilio.svg", "static/img/logos/services/lighthttpd.svg": "/static/img/logos/services/lighthttpd.svg", + "static/img/logos/os/rocky.svg": "/static/img/logos/os/rocky.svg", "static/img/list-style-image.svg": "/static/img/list-style-image.svg", "static/img/logos/services/opensips.svg": "/static/img/logos/services/opensips.svg", "static/img/logos/services/logstash.svg": "/static/img/logos/services/logstash.svg", + ".well-known/assetlinks.json": "/.well-known/assetlinks.json", + "static/.well-known/assetlinks.json": "/static/.well-known/assetlinks.json", "static/img/mail/isotype.svg": "/static/img/mail/isotype.svg", "static/site/pages/holding-page-503/netdata-logo-white.svg": "/static/site/pages/holding-page-503/netdata-logo-white.svg", "static/img/logos/services/hub.svg": "/static/img/logos/services/hub.svg", +<<<<<<<< HEAD:web/gui/v2/allFiles.6.66.1.json + "apple-app-site-association": "/apple-app-site-association", + "static/apple-app-site-association": "/static/apple-app-site-association", + "app.css.map": "/app.cb2e9f9a81cf9533384e.css.map", + "app.js.map": "/app.efc4ebf75cd13cc4f5f4.js.map", + "runtime.js.map": "/runtime.e7f678759af562297436.js.map", + "9360.chunk.js.map": "/9360.de29630b4dcacbeb5ecd.chunk.js.map", + "3169.chunk.js.map": "/3169.9aa93f115e8a19171f69.chunk.js.map", + "4934.chunk.js.map": "/4934.3ffbe62edd432c5a801d.chunk.js.map", + "9895.chunk.js.map": "/9895.3d7f7d9acb001bf41011.chunk.js.map", + "6817.chunk.js.map": "/6817.d23c368d61fe2ff69315.chunk.js.map", + "1178.chunk.js.map": "/1178.b54a742702a74832f653.chunk.js.map", + "8977.chunk.js.map": "/8977.321a7a13b267546a6e7c.chunk.js.map", + "9768.chunk.js.map": "/9768.3e539b24a5eb4979ea85.chunk.js.map", + "7154.chunk.js.map": "/7154.b209cde4e1813acc2cd4.chunk.js.map", + "9510.chunk.js.map": "/9510.dec77b81a86e7cd2ff86.chunk.js.map", + "6723.css.map": "/6723.cc9fa5f3bdc0bf3ab2fc.css.map", + "6723.chunk.js.map": "/6723.080a1abe2d9291d1909e.chunk.js.map", + "4814.chunk.js.map": "/4814.4aa06ec12491f116137a.chunk.js.map", + "4744.chunk.js.map": "/4744.2f7f0a1a4febd58f933a.chunk.js.map", + "2934.chunk.js.map": "/2934.787f7366de588d5e4212.chunk.js.map", + "9594.css.map": "/9594.89070793921be1288bb5.css.map", + "9594.chunk.js.map": "/9594.b5b73051fd8e1b9901f1.chunk.js.map", + "3017.chunk.js.map": "/3017.7a600c57c76d95b7de22.chunk.js.map", + "88.chunk.js.map": "/88.fbc9567094081f35e29c.chunk.js.map", + "2713.chunk.js.map": "/2713.2f8abb33ec163445f81d.chunk.js.map", + "5969.chunk.js.map": "/5969.a5bd320bfdea9db1c60f.chunk.js.map", + "8102.chunk.js.map": "/8102.3b2b80fe00d67e577367.chunk.js.map", + "3283.chunk.js.map": "/3283.88c006d10b31f8ed8088.chunk.js.map", + "1729.chunk.js.map": "/1729.b8957abec2922839104a.chunk.js.map", + "6848.css.map": "/6848.89070793921be1288bb5.css.map", + "6848.chunk.js.map": "/6848.6bbd4e759ec15bf74d51.chunk.js.map", + "2337.chunk.js.map": "/2337.40d93ac60f1ccbb653ae.chunk.js.map", + "3253.css.map": "/3253.89070793921be1288bb5.css.map", + "3253.chunk.js.map": "/3253.48f218e54858427e997b.chunk.js.map", + "5112.chunk.js.map": "/5112.93d13acdc6a1c411810a.chunk.js.map", + "9886.chunk.js.map": 
"/9886.7d6c2ffca80bd567e52d.chunk.js.map", + "6914.chunk.js.map": "/6914.bd8de0fb338f16858f0f.chunk.js.map", + "8276.chunk.js.map": "/8276.cb877f5ee79184a9e0fb.chunk.js.map", + "6288.chunk.js.map": "/6288.3316ccd0d95f1d801d18.chunk.js.map", + "6610.chunk.js.map": "/6610.2be4959755f28fc7f452.chunk.js.map", + "9201.chunk.js.map": "/9201.7f3fc933d8e1520a2f6a.chunk.js.map", + "6347.chunk.js.map": "/6347.886fd82050c23f2e2db2.chunk.js.map", + "6876.chunk.js.map": "/6876.b74cba4369d8345b7f95.chunk.js.map", + "2357.chunk.js.map": "/2357.cd84c87ed27d878a2371.chunk.js.map", + "6181.chunk.js.map": "/6181.007ffc3728cc5a74130a.chunk.js.map", + "1452.chunk.js.map": "/1452.c2c843e34d17591ebafe.chunk.js.map", + "597.chunk.js.map": "/597.787ec8d8543dac902833.chunk.js.map", + "2785.chunk.js.map": "/2785.b5a9958703db6fd26399.chunk.js.map", + "editor.chunk.js.map": "/editor.ae2bfa9dd703149cdd28.chunk.js.map", + "9020.chunk.js.map": "/9020.618bce1feb9efd7ead50.chunk.js.map", + "npm.react.dom.js.map": "/npm.react.dom.443419261632ac7d4e78.js.map", + "2406.js.map": "/2406.f2927edc2116c8e03b3b.js.map", + "1277.chunk.js.map": "/1277.884a6e09fbb50c214d8e.chunk.js.map", + "7068.chunk.js.map": "/7068.5b656c774fade4cea185.chunk.js.map", + "285.chunk.js.map": "/285.40bdfb7ccf71eab2df71.chunk.js.map", + "7514.chunk.js.map": "/7514.ea3b186181ecff7f8ae5.chunk.js.map", + "3241.chunk.js.map": "/3241.fe7d3a8eca88df253a60.chunk.js.map", + "8459.css.map": "/8459.576da4e194a7e4007f03.css.map", + "8459.chunk.js.map": "/8459.add89d7bb0434b110cd3.chunk.js.map", + "498.chunk.js.map": "/498.8ac2616b1f317e54ddd6.chunk.js.map", + "923.chunk.js.map": "/923.00170bf7762cedfa762e.chunk.js.map", + "1193.chunk.js.map": "/1193.efd539c053944de2599b.chunk.js.map", + "5575.chunk.js.map": "/5575.d6c19cc08835f9f741cf.chunk.js.map", + "9893.chunk.js.map": "/9893.b40d1bffe447771a2e43.chunk.js.map", + "6502.chunk.js.map": "/6502.94c9d91d866d8a75769b.chunk.js.map", + "8663.chunk.js.map": "/8663.c6d53b400dd7461702e6.chunk.js.map", + "6129.chunk.js.map": "/6129.bd0a9b3cc7f4c6f95728.chunk.js.map", + "90.chunk.js.map": "/90.c0f1d633c6e943af5628.chunk.js.map", + "2773.chunk.js.map": "/2773.36292453483e1b60ea2b.chunk.js.map", + "9975.chunk.js.map": "/9975.e14f6e47cf3025af8628.chunk.js.map", + "3212.chunk.js.map": "/3212.ef9057f6ac28ed55d62a.chunk.js.map", + "5100.chunk.js.map": "/5100.70bf09de4a0223d97932.chunk.js.map", + "5756.chunk.js.map": "/5756.5bcfd00c65bb1f62ded1.chunk.js.map", + "3981.css.map": "/3981.ccb665950325037c0dda.css.map", + "281.chunk.js.map": "/281.93a7bf4372306eb81264.chunk.js.map", + "2533.js.map": "/2533.2b6849df953c0d8de977.js.map", + "6152.js.map": "/6152.d7e1e2bfdb9cb0ae978f.js.map", + "8099.js.map": "/8099.4838cb5caf85574b5c9d.js.map", + "4586.js.map": "/4586.b2ee472b3fc901d6c88b.js.map", + "1282.js.map": "/1282.8f29fe3a597e5ef695e5.js.map", + "9513.js.map": "/9513.68ac17c54e2a98d13112.js.map", + "5158.js.map": "/5158.76b96a61b88ac13e64fb.js.map", + "6187.js.map": "/6187.0b79ef0afe565349e1bc.js.map" +======== "app.css.map": "/app.0917ff2bf5d3b8b0678d.css.map", "app.js.map": "/app.7bf3bd12482ad161443d.js.map", "runtime.js.map": "/runtime.e3716b90b888609b7a5c.js.map", @@ -337,4 +513,5 @@ "4523.js.map": "/4523.e41d6aac9a6433f9efb2.js.map", "4480.js.map": "/4480.acae0ad582eb5265622a.js.map", "4890.js.map": "/4890.24af5fbe5015c0b06c90.js.map" +>>>>>>>> agora-main:web/gui/v2/allFiles.6.29.0.json } \ No newline at end of file diff --git a/web/gui/v2/allFiles.6.json b/web/gui/v2/allFiles.6.json index 4f10e921584a55..e7b1efddac6696 100644 
--- a/web/gui/v2/allFiles.6.json +++ b/web/gui/v2/allFiles.6.json @@ -1,77 +1,86 @@ { "bundlesManifest.json": "/bundlesManifest.6.json", - "app.css": "/app.0917ff2bf5d3b8b0678d.css", - "app.js": "/app.7bf3bd12482ad161443d.js", - "runtime.js": "/runtime.e3716b90b888609b7a5c.js", - "9360.chunk.js": "/9360.eda00d2b12ba6fe04e3e.chunk.js", - "6252.chunk.js": "/6252.c8a3dda4559b4b1a290f.chunk.js", - "4934.chunk.js": "/4934.565896e76ef20d10f992.chunk.js", - "6817.chunk.js": "/6817.a41c740ef4ad290ddc09.chunk.js", - "7241.chunk.js": "/7241.dae29a2c5dba9d8b64c6.chunk.js", - "8977.chunk.js": "/8977.1e728c5c7e9af0e0089b.chunk.js", - "8086.chunk.js": "/8086.9d0c359423067e788807.chunk.js", - "8282.chunk.js": "/8282.85c31db36364366177ab.chunk.js", - "9510.chunk.js": "/9510.dfc219c382691661c69a.chunk.js", + "app.css": "/app.cb2e9f9a81cf9533384e.css", + "app.js": "/app.efc4ebf75cd13cc4f5f4.js", + "runtime.js": "/runtime.e7f678759af562297436.js", + "9360.chunk.js": "/9360.de29630b4dcacbeb5ecd.chunk.js", + "3169.chunk.js": "/3169.9aa93f115e8a19171f69.chunk.js", + "4934.chunk.js": "/4934.3ffbe62edd432c5a801d.chunk.js", + "9895.chunk.js": "/9895.3d7f7d9acb001bf41011.chunk.js", + "6817.chunk.js": "/6817.d23c368d61fe2ff69315.chunk.js", + "1178.chunk.js": "/1178.b54a742702a74832f653.chunk.js", + "8977.chunk.js": "/8977.321a7a13b267546a6e7c.chunk.js", + "9768.chunk.js": "/9768.3e539b24a5eb4979ea85.chunk.js", + "7154.chunk.js": "/7154.b209cde4e1813acc2cd4.chunk.js", + "9510.chunk.js": "/9510.dec77b81a86e7cd2ff86.chunk.js", "6723.css": "/6723.cc9fa5f3bdc0bf3ab2fc.css", - "6723.chunk.js": "/6723.c82b4d5b9c7d8207b985.chunk.js", - "4814.chunk.js": "/4814.31d804681a19b084daa5.chunk.js", - "4744.chunk.js": "/4744.38c08ef7e8943fa44006.chunk.js", - "2934.chunk.js": "/2934.47ca322b2e59e64a0dae.chunk.js", - "2701.css": "/2701.89070793921be1288bb5.css", - "2701.chunk.js": "/2701.98a4d24406e365a6ddf2.chunk.js", - "6264.chunk.js": "/6264.900c132d66035feb8143.chunk.js", - "5969.chunk.js": "/5969.f77624ecac93d1a600f5.chunk.js", - "8102.chunk.js": "/8102.0d5c0d9f32667fc42e0c.chunk.js", - "5091.chunk.js": "/5091.07dfc76b1d5c1623c330.chunk.js", - "5765.chunk.js": "/5765.a33732202b95bbb627db.chunk.js", - "8447.chunk.js": "/8447.37fff40af8864776d155.chunk.js", - "6654.chunk.js": "/6654.1a629783ec67ee7b2535.chunk.js", - "6610.chunk.js": "/6610.af47b6cda809af7dc878.chunk.js", - "9201.chunk.js": "/9201.3b4bde3431aac911f02e.chunk.js", - "7359.chunk.js": "/7359.47dc8a0852f6cefdf8e4.chunk.js", - "5451.chunk.js": "/5451.b7da2b924e4d74fa28fc.chunk.js", - "3564.chunk.js": "/3564.ba0e994ade7f97d72c01.chunk.js", - "4193.chunk.js": "/4193.f5c9a2d9750a5bd2762d.chunk.js", - "4324.chunk.js": "/4324.cbc343a58b942aec5218.chunk.js", - "597.chunk.js": "/597.f721ec431cd86411331e.chunk.js", - "editor.js": "/editor.b20cc786651a0c83801c.chunk.js", - "9020.chunk.js": "/9020.afb7f9501284f53ab885.chunk.js", - "npm.react.dom.js": "/npm.react.dom.6431597f0353cbef2a34.js", - "3173.js": "/3173.aedc1e477983499117c7.js", - "161.chunk.js": "/161.c33d27d7097fd45f278a.chunk.js", - "7514.chunk.js": "/7514.685fae6aee82518a9737.chunk.js", - "3241.chunk.js": "/3241.c7a7e5d69626a9fb46d7.chunk.js", - "1115.chunk.js": "/1115.6acb1d00b53342cf4a94.chunk.js", - "1193.chunk.js": "/1193.3f76ed755c2417f01c88.chunk.js", - "6613.css": "/6613.384da655707f4c3b6153.css", - "6613.chunk.js": "/6613.b8903cda67bd33100ce4.chunk.js", - "4581.chunk.js": "/4581.a60c1ffca04af99239c9.chunk.js", - "9851.chunk.js": "/9851.cd13a054c85cef198291.chunk.js", - "3018.chunk.js": 
"/3018.6eb82186a4656d2fce5d.chunk.js", - "5575.chunk.js": "/5575.f2affb99b534dc6b7f3c.chunk.js", - "6502.chunk.js": "/6502.7c1716799823661c447d.chunk.js", - "8663.chunk.js": "/8663.defe390dbe87f8ebb98f.chunk.js", - "6129.chunk.js": "/6129.b1dace954d671f303383.chunk.js", - "2008.chunk.js": "/2008.abd553afe7a6bed8cfc0.chunk.js", - "4915.chunk.js": "/4915.245eefea4f250bc84a58.chunk.js", - "5316.chunk.js": "/5316.0471244afc59c0d0d688.chunk.js", - "1655.chunk.js": "/1655.f1c01cc3ba8b07dd8fae.chunk.js", - "8837.chunk.js": "/8837.c7fd14cf3df616fdcc8f.chunk.js", - "6143.chunk.js": "/6143.43acacdf8b2b70da410f.chunk.js", - "3071.chunk.js": "/3071.91b1f856187aeafde398.chunk.js", + "6723.chunk.js": "/6723.080a1abe2d9291d1909e.chunk.js", + "4814.chunk.js": "/4814.4aa06ec12491f116137a.chunk.js", + "4744.chunk.js": "/4744.2f7f0a1a4febd58f933a.chunk.js", + "2934.chunk.js": "/2934.787f7366de588d5e4212.chunk.js", + "9594.css": "/9594.89070793921be1288bb5.css", + "9594.chunk.js": "/9594.b5b73051fd8e1b9901f1.chunk.js", + "3017.chunk.js": "/3017.7a600c57c76d95b7de22.chunk.js", + "88.chunk.js": "/88.fbc9567094081f35e29c.chunk.js", + "2713.chunk.js": "/2713.2f8abb33ec163445f81d.chunk.js", + "5969.chunk.js": "/5969.a5bd320bfdea9db1c60f.chunk.js", + "8102.chunk.js": "/8102.3b2b80fe00d67e577367.chunk.js", + "3283.chunk.js": "/3283.88c006d10b31f8ed8088.chunk.js", + "1729.chunk.js": "/1729.b8957abec2922839104a.chunk.js", + "6848.css": "/6848.89070793921be1288bb5.css", + "6848.chunk.js": "/6848.6bbd4e759ec15bf74d51.chunk.js", + "2337.chunk.js": "/2337.40d93ac60f1ccbb653ae.chunk.js", + "3253.css": "/3253.89070793921be1288bb5.css", + "3253.chunk.js": "/3253.48f218e54858427e997b.chunk.js", + "5112.chunk.js": "/5112.93d13acdc6a1c411810a.chunk.js", + "9886.chunk.js": "/9886.7d6c2ffca80bd567e52d.chunk.js", + "6914.chunk.js": "/6914.bd8de0fb338f16858f0f.chunk.js", + "8276.chunk.js": "/8276.cb877f5ee79184a9e0fb.chunk.js", + "6288.chunk.js": "/6288.3316ccd0d95f1d801d18.chunk.js", + "6610.chunk.js": "/6610.2be4959755f28fc7f452.chunk.js", + "9201.chunk.js": "/9201.7f3fc933d8e1520a2f6a.chunk.js", + "6347.chunk.js": "/6347.886fd82050c23f2e2db2.chunk.js", + "6876.chunk.js": "/6876.b74cba4369d8345b7f95.chunk.js", + "2357.chunk.js": "/2357.cd84c87ed27d878a2371.chunk.js", + "6181.chunk.js": "/6181.007ffc3728cc5a74130a.chunk.js", + "1452.chunk.js": "/1452.c2c843e34d17591ebafe.chunk.js", + "597.chunk.js": "/597.787ec8d8543dac902833.chunk.js", + "2785.chunk.js": "/2785.b5a9958703db6fd26399.chunk.js", + "editor.js": "/editor.ae2bfa9dd703149cdd28.chunk.js", + "9020.chunk.js": "/9020.618bce1feb9efd7ead50.chunk.js", + "npm.react.dom.js": "/npm.react.dom.443419261632ac7d4e78.js", + "2406.js": "/2406.f2927edc2116c8e03b3b.js", + "1277.chunk.js": "/1277.884a6e09fbb50c214d8e.chunk.js", + "7068.chunk.js": "/7068.5b656c774fade4cea185.chunk.js", + "285.chunk.js": "/285.40bdfb7ccf71eab2df71.chunk.js", + "7514.chunk.js": "/7514.ea3b186181ecff7f8ae5.chunk.js", + "3241.chunk.js": "/3241.fe7d3a8eca88df253a60.chunk.js", + "8459.css": "/8459.576da4e194a7e4007f03.css", + "8459.chunk.js": "/8459.add89d7bb0434b110cd3.chunk.js", + "498.chunk.js": "/498.8ac2616b1f317e54ddd6.chunk.js", + "923.chunk.js": "/923.00170bf7762cedfa762e.chunk.js", + "1193.chunk.js": "/1193.efd539c053944de2599b.chunk.js", + "5575.chunk.js": "/5575.d6c19cc08835f9f741cf.chunk.js", + "9893.chunk.js": "/9893.b40d1bffe447771a2e43.chunk.js", + "6502.chunk.js": "/6502.94c9d91d866d8a75769b.chunk.js", + "8663.chunk.js": "/8663.c6d53b400dd7461702e6.chunk.js", + "6129.chunk.js": 
"/6129.bd0a9b3cc7f4c6f95728.chunk.js", + "90.chunk.js": "/90.c0f1d633c6e943af5628.chunk.js", + "2773.chunk.js": "/2773.36292453483e1b60ea2b.chunk.js", + "9975.chunk.js": "/9975.e14f6e47cf3025af8628.chunk.js", + "3212.chunk.js": "/3212.ef9057f6ac28ed55d62a.chunk.js", + "5100.chunk.js": "/5100.70bf09de4a0223d97932.chunk.js", + "5756.chunk.js": "/5756.5bcfd00c65bb1f62ded1.chunk.js", "3981.css": "/3981.ccb665950325037c0dda.css", - "2097.chunk.js": "/2097.d9ade1233ce20401ea8c.chunk.js", - "2833.js": "/2833.78752757c7ac33d196dc.js", - "5623.js": "/5623.d08ebc475a57a44d926c.js", - "3495.js": "/3495.7af81a22f9d135da8cbe.js", - "5176.js": "/5176.9ecb50692b5be2b8a5e2.js", - "1282.js": "/1282.f65cc3329e7e3eb8e645.js", - "3032.js": "/3032.7b4a2db28af84cd77c29.js", - "4532.js": "/4532.0b0105ffbdd6db6f5d9a.js", - "7707.js": "/7707.d32bdcf8038b7eebaa97.js", - "4523.js": "/4523.e41d6aac9a6433f9efb2.js", - "4480.js": "/4480.acae0ad582eb5265622a.js", - "4890.js": "/4890.24af5fbe5015c0b06c90.js", + "281.chunk.js": "/281.93a7bf4372306eb81264.chunk.js", + "2533.js": "/2533.2b6849df953c0d8de977.js", + "6152.js": "/6152.d7e1e2bfdb9cb0ae978f.js", + "8099.js": "/8099.4838cb5caf85574b5c9d.js", + "4586.js": "/4586.b2ee472b3fc901d6c88b.js", + "1282.js": "/1282.8f29fe3a597e5ef695e5.js", + "9513.js": "/9513.68ac17c54e2a98d13112.js", + "5158.js": "/5158.76b96a61b88ac13e64fb.js", + "6187.js": "/6187.0b79ef0afe565349e1bc.js", "3D_PARTY_LICENSES.txt": "/3D_PARTY_LICENSES.txt", "static/site/pages/holding-page-503/holding-page-503.svg": "/static/site/pages/holding-page-503/holding-page-503.svg", "favicon.ico": "/favicon.ico", @@ -104,6 +113,7 @@ "static/img/logos/services/golang.svg": "/static/img/logos/services/golang.svg", "agent.html": "/agent.html", "local-agent.html": "/local-agent.html", + "static/img/new-dashboard.svg": "/static/img/new-dashboard.svg", "static/img/logos/services/activemq.svg": "/static/img/logos/services/activemq.svg", "static/img/logos/os/kubernetes.svg": "/static/img/logos/os/kubernetes.svg", "static/img/logos/services/kubernetes.svg": "/static/img/logos/services/kubernetes.svg", @@ -113,6 +123,7 @@ "static/email/img/isotype_600.png": "/static/email/img/isotype_600.png", "static/img/logos/services/irc.svg": "/static/img/logos/services/irc.svg", "static/img/logos/services/tomcat.svg": "/static/img/logos/services/tomcat.svg", + "registry-alert-redirect.html": "/registry-alert-redirect.html", "static/img/logos/services/mariadb.svg": "/static/img/logos/services/mariadb.svg", "static/img/logos/services/openzfs.svg": "/static/img/logos/services/openzfs.svg", "static/img/logos/services/veritas.svg": "/static/img/logos/services/veritas.svg", @@ -124,7 +135,6 @@ "static/img/logos/services/btrfs.svg": "/static/img/logos/services/btrfs.svg", "static/img/logos/services/adaptec.svg": "/static/img/logos/services/adaptec.svg", "static/img/logos/services/icecast.svg": "/static/img/logos/services/icecast.svg", - "registry-alert-redirect.html": "/registry-alert-redirect.html", "static/img/logos/services/xen.svg": "/static/img/logos/services/xen.svg", "static/img/logos/services/haproxy.svg": "/static/img/logos/services/haproxy.svg", "static/img/logos/services/tor.svg": "/static/img/logos/services/tor.svg", @@ -259,82 +269,96 @@ "static/site/pages/holding-page-503/reset.svg": "/static/site/pages/holding-page-503/reset.svg", "static/img/logos/services/twilio.svg": "/static/img/logos/services/twilio.svg", "static/img/logos/services/lighthttpd.svg": "/static/img/logos/services/lighthttpd.svg", + 
"static/img/logos/os/rocky.svg": "/static/img/logos/os/rocky.svg", "static/img/list-style-image.svg": "/static/img/list-style-image.svg", "static/img/logos/services/opensips.svg": "/static/img/logos/services/opensips.svg", "static/img/logos/services/logstash.svg": "/static/img/logos/services/logstash.svg", + ".well-known/assetlinks.json": "/.well-known/assetlinks.json", + "static/.well-known/assetlinks.json": "/static/.well-known/assetlinks.json", "static/img/mail/isotype.svg": "/static/img/mail/isotype.svg", "static/site/pages/holding-page-503/netdata-logo-white.svg": "/static/site/pages/holding-page-503/netdata-logo-white.svg", "static/img/logos/services/hub.svg": "/static/img/logos/services/hub.svg", - "app.css.map": "/app.0917ff2bf5d3b8b0678d.css.map", - "app.js.map": "/app.7bf3bd12482ad161443d.js.map", - "runtime.js.map": "/runtime.e3716b90b888609b7a5c.js.map", - "9360.chunk.js.map": "/9360.eda00d2b12ba6fe04e3e.chunk.js.map", - "6252.chunk.js.map": "/6252.c8a3dda4559b4b1a290f.chunk.js.map", - "4934.chunk.js.map": "/4934.565896e76ef20d10f992.chunk.js.map", - "6817.chunk.js.map": "/6817.a41c740ef4ad290ddc09.chunk.js.map", - "7241.chunk.js.map": "/7241.dae29a2c5dba9d8b64c6.chunk.js.map", - "8977.chunk.js.map": "/8977.1e728c5c7e9af0e0089b.chunk.js.map", - "8086.chunk.js.map": "/8086.9d0c359423067e788807.chunk.js.map", - "8282.chunk.js.map": "/8282.85c31db36364366177ab.chunk.js.map", - "9510.chunk.js.map": "/9510.dfc219c382691661c69a.chunk.js.map", + "apple-app-site-association": "/apple-app-site-association", + "static/apple-app-site-association": "/static/apple-app-site-association", + "app.css.map": "/app.cb2e9f9a81cf9533384e.css.map", + "app.js.map": "/app.efc4ebf75cd13cc4f5f4.js.map", + "runtime.js.map": "/runtime.e7f678759af562297436.js.map", + "9360.chunk.js.map": "/9360.de29630b4dcacbeb5ecd.chunk.js.map", + "3169.chunk.js.map": "/3169.9aa93f115e8a19171f69.chunk.js.map", + "4934.chunk.js.map": "/4934.3ffbe62edd432c5a801d.chunk.js.map", + "9895.chunk.js.map": "/9895.3d7f7d9acb001bf41011.chunk.js.map", + "6817.chunk.js.map": "/6817.d23c368d61fe2ff69315.chunk.js.map", + "1178.chunk.js.map": "/1178.b54a742702a74832f653.chunk.js.map", + "8977.chunk.js.map": "/8977.321a7a13b267546a6e7c.chunk.js.map", + "9768.chunk.js.map": "/9768.3e539b24a5eb4979ea85.chunk.js.map", + "7154.chunk.js.map": "/7154.b209cde4e1813acc2cd4.chunk.js.map", + "9510.chunk.js.map": "/9510.dec77b81a86e7cd2ff86.chunk.js.map", "6723.css.map": "/6723.cc9fa5f3bdc0bf3ab2fc.css.map", - "6723.chunk.js.map": "/6723.c82b4d5b9c7d8207b985.chunk.js.map", - "4814.chunk.js.map": "/4814.31d804681a19b084daa5.chunk.js.map", - "4744.chunk.js.map": "/4744.38c08ef7e8943fa44006.chunk.js.map", - "2934.chunk.js.map": "/2934.47ca322b2e59e64a0dae.chunk.js.map", - "2701.css.map": "/2701.89070793921be1288bb5.css.map", - "2701.chunk.js.map": "/2701.98a4d24406e365a6ddf2.chunk.js.map", - "6264.chunk.js.map": "/6264.900c132d66035feb8143.chunk.js.map", - "5969.chunk.js.map": "/5969.f77624ecac93d1a600f5.chunk.js.map", - "8102.chunk.js.map": "/8102.0d5c0d9f32667fc42e0c.chunk.js.map", - "5091.chunk.js.map": "/5091.07dfc76b1d5c1623c330.chunk.js.map", - "5765.chunk.js.map": "/5765.a33732202b95bbb627db.chunk.js.map", - "8447.chunk.js.map": "/8447.37fff40af8864776d155.chunk.js.map", - "6654.chunk.js.map": "/6654.1a629783ec67ee7b2535.chunk.js.map", - "6610.chunk.js.map": "/6610.af47b6cda809af7dc878.chunk.js.map", - "9201.chunk.js.map": "/9201.3b4bde3431aac911f02e.chunk.js.map", - "7359.chunk.js.map": "/7359.47dc8a0852f6cefdf8e4.chunk.js.map", - 
"5451.chunk.js.map": "/5451.b7da2b924e4d74fa28fc.chunk.js.map", - "3564.chunk.js.map": "/3564.ba0e994ade7f97d72c01.chunk.js.map", - "4193.chunk.js.map": "/4193.f5c9a2d9750a5bd2762d.chunk.js.map", - "4324.chunk.js.map": "/4324.cbc343a58b942aec5218.chunk.js.map", - "597.chunk.js.map": "/597.f721ec431cd86411331e.chunk.js.map", - "editor.chunk.js.map": "/editor.b20cc786651a0c83801c.chunk.js.map", - "9020.chunk.js.map": "/9020.afb7f9501284f53ab885.chunk.js.map", - "npm.react.dom.js.map": "/npm.react.dom.6431597f0353cbef2a34.js.map", - "3173.js.map": "/3173.aedc1e477983499117c7.js.map", - "161.chunk.js.map": "/161.c33d27d7097fd45f278a.chunk.js.map", - "7514.chunk.js.map": "/7514.685fae6aee82518a9737.chunk.js.map", - "3241.chunk.js.map": "/3241.c7a7e5d69626a9fb46d7.chunk.js.map", - "1115.chunk.js.map": "/1115.6acb1d00b53342cf4a94.chunk.js.map", - "1193.chunk.js.map": "/1193.3f76ed755c2417f01c88.chunk.js.map", - "6613.css.map": "/6613.384da655707f4c3b6153.css.map", - "6613.chunk.js.map": "/6613.b8903cda67bd33100ce4.chunk.js.map", - "4581.chunk.js.map": "/4581.a60c1ffca04af99239c9.chunk.js.map", - "9851.chunk.js.map": "/9851.cd13a054c85cef198291.chunk.js.map", - "3018.chunk.js.map": "/3018.6eb82186a4656d2fce5d.chunk.js.map", - "5575.chunk.js.map": "/5575.f2affb99b534dc6b7f3c.chunk.js.map", - "6502.chunk.js.map": "/6502.7c1716799823661c447d.chunk.js.map", - "8663.chunk.js.map": "/8663.defe390dbe87f8ebb98f.chunk.js.map", - "6129.chunk.js.map": "/6129.b1dace954d671f303383.chunk.js.map", - "2008.chunk.js.map": "/2008.abd553afe7a6bed8cfc0.chunk.js.map", - "4915.chunk.js.map": "/4915.245eefea4f250bc84a58.chunk.js.map", - "5316.chunk.js.map": "/5316.0471244afc59c0d0d688.chunk.js.map", - "1655.chunk.js.map": "/1655.f1c01cc3ba8b07dd8fae.chunk.js.map", - "8837.chunk.js.map": "/8837.c7fd14cf3df616fdcc8f.chunk.js.map", - "6143.chunk.js.map": "/6143.43acacdf8b2b70da410f.chunk.js.map", - "3071.chunk.js.map": "/3071.91b1f856187aeafde398.chunk.js.map", + "6723.chunk.js.map": "/6723.080a1abe2d9291d1909e.chunk.js.map", + "4814.chunk.js.map": "/4814.4aa06ec12491f116137a.chunk.js.map", + "4744.chunk.js.map": "/4744.2f7f0a1a4febd58f933a.chunk.js.map", + "2934.chunk.js.map": "/2934.787f7366de588d5e4212.chunk.js.map", + "9594.css.map": "/9594.89070793921be1288bb5.css.map", + "9594.chunk.js.map": "/9594.b5b73051fd8e1b9901f1.chunk.js.map", + "3017.chunk.js.map": "/3017.7a600c57c76d95b7de22.chunk.js.map", + "88.chunk.js.map": "/88.fbc9567094081f35e29c.chunk.js.map", + "2713.chunk.js.map": "/2713.2f8abb33ec163445f81d.chunk.js.map", + "5969.chunk.js.map": "/5969.a5bd320bfdea9db1c60f.chunk.js.map", + "8102.chunk.js.map": "/8102.3b2b80fe00d67e577367.chunk.js.map", + "3283.chunk.js.map": "/3283.88c006d10b31f8ed8088.chunk.js.map", + "1729.chunk.js.map": "/1729.b8957abec2922839104a.chunk.js.map", + "6848.css.map": "/6848.89070793921be1288bb5.css.map", + "6848.chunk.js.map": "/6848.6bbd4e759ec15bf74d51.chunk.js.map", + "2337.chunk.js.map": "/2337.40d93ac60f1ccbb653ae.chunk.js.map", + "3253.css.map": "/3253.89070793921be1288bb5.css.map", + "3253.chunk.js.map": "/3253.48f218e54858427e997b.chunk.js.map", + "5112.chunk.js.map": "/5112.93d13acdc6a1c411810a.chunk.js.map", + "9886.chunk.js.map": "/9886.7d6c2ffca80bd567e52d.chunk.js.map", + "6914.chunk.js.map": "/6914.bd8de0fb338f16858f0f.chunk.js.map", + "8276.chunk.js.map": "/8276.cb877f5ee79184a9e0fb.chunk.js.map", + "6288.chunk.js.map": "/6288.3316ccd0d95f1d801d18.chunk.js.map", + "6610.chunk.js.map": "/6610.2be4959755f28fc7f452.chunk.js.map", + "9201.chunk.js.map": 
"/9201.7f3fc933d8e1520a2f6a.chunk.js.map", + "6347.chunk.js.map": "/6347.886fd82050c23f2e2db2.chunk.js.map", + "6876.chunk.js.map": "/6876.b74cba4369d8345b7f95.chunk.js.map", + "2357.chunk.js.map": "/2357.cd84c87ed27d878a2371.chunk.js.map", + "6181.chunk.js.map": "/6181.007ffc3728cc5a74130a.chunk.js.map", + "1452.chunk.js.map": "/1452.c2c843e34d17591ebafe.chunk.js.map", + "597.chunk.js.map": "/597.787ec8d8543dac902833.chunk.js.map", + "2785.chunk.js.map": "/2785.b5a9958703db6fd26399.chunk.js.map", + "editor.chunk.js.map": "/editor.ae2bfa9dd703149cdd28.chunk.js.map", + "9020.chunk.js.map": "/9020.618bce1feb9efd7ead50.chunk.js.map", + "npm.react.dom.js.map": "/npm.react.dom.443419261632ac7d4e78.js.map", + "2406.js.map": "/2406.f2927edc2116c8e03b3b.js.map", + "1277.chunk.js.map": "/1277.884a6e09fbb50c214d8e.chunk.js.map", + "7068.chunk.js.map": "/7068.5b656c774fade4cea185.chunk.js.map", + "285.chunk.js.map": "/285.40bdfb7ccf71eab2df71.chunk.js.map", + "7514.chunk.js.map": "/7514.ea3b186181ecff7f8ae5.chunk.js.map", + "3241.chunk.js.map": "/3241.fe7d3a8eca88df253a60.chunk.js.map", + "8459.css.map": "/8459.576da4e194a7e4007f03.css.map", + "8459.chunk.js.map": "/8459.add89d7bb0434b110cd3.chunk.js.map", + "498.chunk.js.map": "/498.8ac2616b1f317e54ddd6.chunk.js.map", + "923.chunk.js.map": "/923.00170bf7762cedfa762e.chunk.js.map", + "1193.chunk.js.map": "/1193.efd539c053944de2599b.chunk.js.map", + "5575.chunk.js.map": "/5575.d6c19cc08835f9f741cf.chunk.js.map", + "9893.chunk.js.map": "/9893.b40d1bffe447771a2e43.chunk.js.map", + "6502.chunk.js.map": "/6502.94c9d91d866d8a75769b.chunk.js.map", + "8663.chunk.js.map": "/8663.c6d53b400dd7461702e6.chunk.js.map", + "6129.chunk.js.map": "/6129.bd0a9b3cc7f4c6f95728.chunk.js.map", + "90.chunk.js.map": "/90.c0f1d633c6e943af5628.chunk.js.map", + "2773.chunk.js.map": "/2773.36292453483e1b60ea2b.chunk.js.map", + "9975.chunk.js.map": "/9975.e14f6e47cf3025af8628.chunk.js.map", + "3212.chunk.js.map": "/3212.ef9057f6ac28ed55d62a.chunk.js.map", + "5100.chunk.js.map": "/5100.70bf09de4a0223d97932.chunk.js.map", + "5756.chunk.js.map": "/5756.5bcfd00c65bb1f62ded1.chunk.js.map", "3981.css.map": "/3981.ccb665950325037c0dda.css.map", - "2097.chunk.js.map": "/2097.d9ade1233ce20401ea8c.chunk.js.map", - "2833.js.map": "/2833.78752757c7ac33d196dc.js.map", - "5623.js.map": "/5623.d08ebc475a57a44d926c.js.map", - "3495.js.map": "/3495.7af81a22f9d135da8cbe.js.map", - "5176.js.map": "/5176.9ecb50692b5be2b8a5e2.js.map", - "1282.js.map": "/1282.f65cc3329e7e3eb8e645.js.map", - "3032.js.map": "/3032.7b4a2db28af84cd77c29.js.map", - "4532.js.map": "/4532.0b0105ffbdd6db6f5d9a.js.map", - "7707.js.map": "/7707.d32bdcf8038b7eebaa97.js.map", - "4523.js.map": "/4523.e41d6aac9a6433f9efb2.js.map", - "4480.js.map": "/4480.acae0ad582eb5265622a.js.map", - "4890.js.map": "/4890.24af5fbe5015c0b06c90.js.map" + "281.chunk.js.map": "/281.93a7bf4372306eb81264.chunk.js.map", + "2533.js.map": "/2533.2b6849df953c0d8de977.js.map", + "6152.js.map": "/6152.d7e1e2bfdb9cb0ae978f.js.map", + "8099.js.map": "/8099.4838cb5caf85574b5c9d.js.map", + "4586.js.map": "/4586.b2ee472b3fc901d6c88b.js.map", + "1282.js.map": "/1282.8f29fe3a597e5ef695e5.js.map", + "9513.js.map": "/9513.68ac17c54e2a98d13112.js.map", + "5158.js.map": "/5158.76b96a61b88ac13e64fb.js.map", + "6187.js.map": "/6187.0b79ef0afe565349e1bc.js.map" } \ No newline at end of file diff --git a/web/gui/v2/app.0917ff2bf5d3b8b0678d.css b/web/gui/v2/app.cb2e9f9a81cf9533384e.css similarity index 64% rename from web/gui/v2/app.0917ff2bf5d3b8b0678d.css rename to 
web/gui/v2/app.cb2e9f9a81cf9533384e.css index c4fd6e2c6a19fa..1dd1701783d8a3 100644 --- a/web/gui/v2/app.0917ff2bf5d3b8b0678d.css +++ b/web/gui/v2/app.cb2e9f9a81cf9533384e.css @@ -1,2 +1,2 @@ -html{font-size:16px !important}body{margin:0}.react-grid-item{display:block;overflow-y:hidden;text-overflow:ellipsis;width:fit-content}.react-grid-item:hover{overflow-y:visible}.react-grid-item .netdata-container-with-legend{overflow:visible}.react-grid-item:hover{z-index:10}.react-grid-item>.react-resizable-handle::after{display:table-row;content:"";position:absolute;right:3px;bottom:3px;width:5px;height:5px;cursor:nwse-resize;border-right:1px solid rgba(0,0,0,0.4);border-bottom:1px solid rgba(0,0,0,0.4)}.ReactVirtualized__Grid{padding-right:8px}#hs-eu-cookie-confirmation{display:none} +html{font-size:16px !important}body{margin:0}.react-grid-item{display:block;overflow-y:hidden;text-overflow:ellipsis;width:fit-content}.react-grid-item:hover{overflow-y:visible}.react-grid-item .netdata-container-with-legend{overflow:visible}.react-grid-item:hover{z-index:10}.react-grid-item>.react-resizable-handle::after{display:table-row;content:"";position:absolute;right:3px;bottom:3px;width:5px;height:5px;cursor:se-resize;border-right:1px solid rgba(0,0,0,0.4);border-bottom:1px solid rgba(0,0,0,0.4)}#hs-eu-cookie-confirmation{display:none} diff --git a/web/gui/v2/app.efc4ebf75cd13cc4f5f4.js b/web/gui/v2/app.efc4ebf75cd13cc4f5f4.js new file mode 100644 index 00000000000000..667560e5ea1475 --- /dev/null +++ b/web/gui/v2/app.efc4ebf75cd13cc4f5f4.js @@ -0,0 +1 @@ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="731bd492-719a-4a98-bcee-a04eeff9b5fe",e._sentryDebugIdIdentifier="sentry-dbid-731bd492-719a-4a98-bcee-a04eeff9b5fe")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[2143],{64637:function(e,t,n){"use strict";n.d(t,{a:function(){return o},db:function(){return i},fm:function(){return a},h2:function(){return u},lV:function(){return c}});n(47042),n(74916),n(15306),n(73210);var r=n(53087),o=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"",t=arguments.length>1?arguments[1]:void 0;return(0,r.default)(e,t)},a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"",t=arguments.length>1&&void 0!==arguments[1]&&arguments[1]?e.toLowerCase():e;return t.charAt(0).toUpperCase()+t.slice(1)},i=function(e){return e.replace(/([a-z])([A-Z])/g,"$1 $2")},u=function(e){return e.replace(/([^:]\/)\/+/g,"$1")},c=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";return e=(e=(e=(e=e.trim()).replace(/\s+/g,"-")).replace(/[^a-zA-Z0-9-]/g,"")).toLowerCase()}},63346:function(e,t,n){"use strict";n.d(t,{O$:function(){return v},Ht:function(){return h},Xe:function(){return b},ZP:function(){return y}});var r=n(29439),o=n(4942),a=n(45987),i=(n(69826),n(41539),n(31672),n(2490),n(59461),n(91038),n(78783),n(38862),n(47941),n(82526),n(57327),n(88449),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(67294)),u=n(54005),c=n(87462),s=n(16294),l=["children","logImpression","options","delay"];function f(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var 
r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function d(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:0;return new Date(new Date(e).getTime()+60*parseInt(t)*60*1e3)},h=function(e,t){var n=t.locale,r=(0,o.Z)(t,l);return new Intl.DateTimeFormat(null!==n&&void 0!==n?n:navigator.language,function(e){var t=e.long,n=e.isTime,r=e.secs,a=e.timezone,i=(0,o.Z)(e,s);return d(d(d({hourCycle:"h23"},n?{}:t?{weekday:"short",year:"numeric",month:"short",day:"2-digit"}:{dateStyle:"short"}),n&&{timeStyle:r?"medium":"short"}),{},{timeZone:a},i)}(r)).format(e)},y=function(){var e=function(){var e=(0,u.m$)().utc;return(0,c.E)("default",e)}(),t=e.utc,n=e.offset;return{localeDateString:(0,a.useMemo)((function(){return p?function(e,n){return h(e,d({long:!0,timezone:t},n))}:m}),[t]),localeTimeString:(0,a.useMemo)((function(){return p?function(e,n){return h(e,d({secs:!0,isTime:!0,timezone:t},n))}:g}),[t]),utcOffset:n}}},41331:function(e,t,n){"use strict";n.d(t,{hX:function(){return o}});var r=n(71002),o=(n(41539),n(68216),n(2490),n(79433),n(91038),n(78783),n(57327),n(88449),n(59849),n(26699),n(32023),n(47941),function(e,t){var n=t.omit,r=void 0===n?[]:n,o=t.keep,a=void 0===o?[]:o;return r.length||a.length?e.filter((function(e){return a.length?a.includes(e):!r.includes(e)})):e});t.ZP=function e(t,n){var a=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};if(t===n)return!0;if("object"!==(0,r.Z)(t)||null===t||"object"!==(0,r.Z)(n)||null===n)return!1;var i=o(Object.keys(t),a),u=o(Object.keys(n),a);if(i.length!==u.length)return!1;var c=Object.prototype.hasOwnProperty.bind(n);return i.every((function(r){return!!c(r)&&e(t[r],n[r])}))}},38609:function(e,t,n){"use strict";var r=n(71002),o=n(93433),a=(n(85827),n(41539),n(25387),n(2490),n(72608),n(66992),n(70189),n(78783),n(88921),n(96248),n(13599),n(11477),n(64362),n(15389),n(90401),n(45164),n(91238),n(54837),n(87485),n(56767),n(76651),n(61437),n(35285),n(39865),n(33948),n(92222),n(47941),n(41331)),i=function(e,t,n){var r=(0,a.hX)(e,n);return r.reduce((function(e,t,o){return e.push(c(t,r[o],n)),e}),[])},u=function(e,t,n){var r=(0,a.hX)((0,o.Z)(new Set([].concat((0,o.Z)(Object.keys(e)),(0,o.Z)(Object.keys(t))))),n),i=Object.prototype.hasOwnProperty.bind(t);return r.reduce((function(r,o){return i(o)?r[o]=c(e[o],t[o],n):r[o]=e[o],r}),{})},c=function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};return e===t||"object"!==(0,r.Z)(e)||null===e||"object"!==(0,r.Z)(t)||null===t?t:Array.isArray(t)?i(e,0,n):u(e,t,n)};t.Z=c},58591:function(e,t,n){"use strict";n.d(t,{bp:function(){return g},kG:function(){return p},m$:function(){return f},np:function(){return m},xs:function(){return v}});var r=n(29439),o=(n(74916),n(23123),n(85827),n(41539),n(25387),n(2490),n(72608),n(82772),n(69720),n(21249),n(57640),n(9924),n(92222),n(59242)),a=n.n(o),i=n(82492),u=n.n(i),c=n(57557),s=n.n(c),l=/[&;]/,f=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:decodeURIComponent(window.location.hash.substr(1));return 0===e.length?{}:e.split(l).reduce((function(e,t){var n=t.indexOf("=");-1!==n&&(e[t.substring(0,n)]=t.substring(n+1));return e}),{})},d=function(e){var t=Object.entries(e);return 0===t.length?"":t.map((function(e){var t=(0,r.Z)(e,2),n=t[0],o=t[1];return"".concat(n,"=").concat(encodeURIComponent(o))})).join("&")},p=function(e){var t=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:decodeURIComponent(window.location.hash.substr(1)),n=s()(f(t),e);return d(n)},v=(a()([f,d]),function(e){var t=f();u()(t,e);var n="#".concat(d(t)),r=history.state;location.hash=n,history.replaceState(r,"",n)}),m=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:decodeURIComponent(window.location.hash.substr(1));return f(t)[e]},g=function(e){history.replaceState(history.state,"","#".concat(p(e)))}},78710:function(e,t,n){"use strict";n.d(t,{xZ:function(){return y},Fb:function(){return h},aD:function(){return b},HM:function(){return g},Xh:function(){return m},ZP:function(){return w},vT:function(){return S},iM:function(){return d},Ly:function(){return O},tj:function(){return p},se:function(){return v}});var r,o,a,i=n(4942),u=(n(74916),n(15306),n(47941),n(82526),n(57327),n(41539),n(88449),n(2490),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(75674)),c=n(64637);function s(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function l(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:"",n="refreshed-after-lazy-import",o=t?"".concat(t,"-").concat(n):n;return(0,i.lazy)((0,r.Z)(a().mark((function t(){var n,r;return a().wrap((function(t){for(;;)switch(t.prev=t.next){case 0:return n=JSON.parse(window.sessionStorage.getItem(o)||"false"),t.prev=1,t.next=4,e();case 4:return r=t.sent,window.sessionStorage.setItem(o,"false"),t.abrupt("return",r);case 9:if(t.prev=9,t.t0=t.catch(1),n){t.next=14;break}return window.sessionStorage.setItem(o,"true"),t.abrupt("return",window.location.reload());case 14:throw t.t0;case 15:case"end":return t.stop()}}),t,null,[[1,9]])}))))}},66152:function(e,t,n){"use strict";n.d(t,{Tg:function(){return v},k5:function(){return p},v:function(){return d}});n(82526),n(57327),n(88449),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070);var r=n(71002),o=n(4942);n(74916),n(15306),n(21249),n(57640),n(9924),n(85827),n(41539),n(25387),n(2490),n(72608),n(47941),n(26699),n(32023);function a(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{},r=n.omit,o=n.depth;return f(t,{func:e,action:c,omit:r,depth:o})},p=function e(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=n.omit,o=n.depth;return f(t,{func:e,action:s,omit:r,depth:o})},v=function e(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=n.omit,o=n.depth;return f(t,{func:e,action:l,omit:r,depth:o})}},26398:function(e,t,n){"use strict";n.d(t,{Z:function(){return m}});var r=n(4942),o=n(29439),a=(n(74916),n(77601),n(47941),n(4723),n(82526),n(57327),n(41539),n(88449),n(2490),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(52861)),i=n(46189),u=n(78710),c=(n(15306),n(92222),n(64765),n(64637)),s=(n(26699),n(32023),function(e,t,n){var r=n.includes("join-callback")?n:encodeURIComponent(n);return"".concat(t).concat(t?"&":"?","cloudRoute=").concat(e).concat(r)});function l(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function f(e){for(var 
t=1;t1&&void 0!==arguments[1]&&arguments[1];return function(e){return function(){for(var t=arguments.length,n=new Array(t),r=0;r1?l-1:0),m=1;m1&&void 0!==arguments[1]?arguments[1]:new Date)-e;if(t<0)return"0 seconds ago";var r=Math.floor(t/864e5);if(r>1)return"".concat(r," days ago");var o=Math.floor(t/n);if(o>0)return"".concat(o," hours ago");var a=Math.floor(t/6e4);if(a>1)return"".concat(a," mins ago");var i=Math.floor(t/1e3);return"".concat(i," seconds ago")}},91008:function(e,t,n){"use strict";var r,o=n(87462),a=n(45987),i=(n(92222),n(67294)),u=n(79655),c=n(71893),s=n(93416),l=n(82351),f=["alignItems","disabledColor","color","flavour","hoverColor","visitedColor","gap","strong"],d=["Component","as","disabled","onClick","to","href","isBasic"],p=["boxProps","showToolTip","content","align","isBasic"],v={default:{initial:"primary",visited:"accent",hover:"primary"},tableLink:{initial:"text",visited:"text",hover:"primary"}},m=function(e){return(0,c.default)(e).withConfig({displayName:"anchor",componentId:"sc-oaxxs6-0"})(["",""],(function(e){var t=e.disabled,n=e.color,r=e.hoverColor,o=e.disabledColor,a=e.visitedColor,i=e.theme,u=e.flavour,c=void 0===u?"default":u;return t?function(e){var t,n,r,o=e.disabledColor,a=e.color,i=e.theme,u=e.flavour,c=e.visitedColor;return"\n color: ".concat((0,s.getColor)(o||a||(null===(t=v[u])||void 0===t?void 0:t.initial))({theme:i}),";\n ").concat(o?"":"opacity: 0.4;","\n pointer-events: none;\n\n & > svg > use {\n fill: ").concat((0,s.getColor)(o||a||(null===(n=v[u])||void 0===n?void 0:n.initial))({theme:i}),";\n }\n \n &:visited {\n color: ").concat((0,s.getColor)(c||o||a||(null===(r=v[u])||void 0===r?void 0:r.visited))({theme:i}),";\n }\n")}({disabledColor:o,color:n,theme:i,flavour:c}):function(e){var t,n,r,o,a,i,u=e.color,c=e.theme,l=e.hoverColor,f=e.visitedColor,d=e.flavour;return"\n color: ".concat((0,s.getColor)(u||(null===(t=v[d])||void 0===t?void 0:t.initial))({theme:c}),";\n & > svg > use {\n fill: ").concat((0,s.getColor)(u||(null===(n=v[d])||void 0===n?void 0:n.initial))({theme:c}),";\n }\n &:hover {\n color: ").concat((0,s.getColor)(l||(null===(r=v[d])||void 0===r?void 0:r.hover))({theme:c})," !important;\n & > svg > use {\n fill: ").concat((0,s.getColor)(l||(null===(o=v[d])||void 0===o?void 0:o.hover))({theme:c})," !important;\n }\n }\n &:visited {\n color: ").concat((0,s.getColor)(f||u||(null===(a=v[d])||void 0===a?void 0:a.visited))({theme:c}),";\n & > svg > use {\n fill: ").concat((0,s.getColor)(f||u||(null===(i=v[d])||void 0===i?void 0:i.visited))({theme:c}),";\n }\n }\n cursor:pointer\n")}({color:n,theme:i,hoverColor:r,visitedColor:a,flavour:c})}))},g=m("a"),b=m((0,i.forwardRef)((function(e,t){e.alignItems,e.disabledColor,e.color,e.flavour,e.hoverColor,e.visitedColor,e.gap,e.strong;var n=(0,a.Z)(e,f);return i.createElement(u.rU,(0,o.Z)({ref:t},n))}))),h=function(e){return e.preventDefault()};t.Z=(r=function(e){var t=e.Component,n=void 0===t?s.Text:t,r=e.as,u=void 0===r?"a":r,c=e.disabled,l=e.onClick,f=e.to,p=e.href,v=(e.isBasic,(0,a.Z)(e,d));return i.createElement(n,(0,o.Z)({as:"a"===u?g:b,disabled:c,onClick:c?h:l},f&&!c&&{to:f},p&&!c&&{href:p},v))},function(e){var t=e.boxProps,n=e.showToolTip,o=e.content,u=e.align,c=e.isBasic,f=(0,a.Z)(e,p);return n?i.createElement(l.Z,{plain:!0,content:o,align:u,isBasic:c},i.createElement(s.Box,t,i.createElement(r,f))):i.createElement(r,f)})},92501:function(e,t,n){"use strict";n.d(t,{NY:function(){return p},Pf:function(){return g}});n(41539),n(81299),n(12419);var 
r=n(15671),o=n(43144),a=n(60136),i=n(82963),u=n(61120),c=n(24975),s=n(67294),l=n(93416),f=n(91008);function d(e){var t=function(){if("undefined"===typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"===typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){}))),!0}catch(e){return!1}}();return function(){var n,r=(0,u.Z)(e);if(t){var o=(0,u.Z)(this).constructor;n=Reflect.construct(r,arguments,o)}else n=r.apply(this,arguments);return(0,i.Z)(this,n)}}var p=function(e){var t=e.title,n=void 0===t?"Oops, something unexpected happened!":t,r=e.message,o=void 0===r?"":r;return s.createElement(l.Flex,{alignItems:"center",justifyContent:"center","data-testid":"error-boundary",flex:!0},s.createElement(l.Flex,{flex:!0,column:!0,alignItems:"center",gap:4,width:{max:115}},s.createElement(l.H3,null,n),!!o&&s.createElement(l.Text,null,o),s.createElement(l.Text,null,"We track these errors automatically, but if the problem persists feel free to contact us with a"," ",s.createElement(f.Z,{href:"https://github.com/netdata/netdata-cloud/issues/new/choose",target:"_blank",rel:"noopener noreferrer"},"ticket"),", a"," ",s.createElement(f.Z,{href:"https://community.netdata.cloud/",target:"_blank",rel:"noopener noreferrer"},"post in the forum")," ","or through"," ",s.createElement(f.Z,{href:"https://discord.com/invite/mPZ6WZKKG2",target:"_blank",rel:"noopener noreferrer"},"Discord"),"."),s.createElement(l.Text,null,"In the meantime you can refresh this page"),s.createElement(l.Button,{label:"Refresh",icon:"refresh",onClick:function(){return window.location.reload()},"data-ga":"error-boundary::click-reload::reload-on-error","data-testid":"error-boundary-reload"})))},v=function(e){(0,a.Z)(n,e);var t=d(n);function n(e){var o;return(0,r.Z)(this,n),(o=t.call(this,e)).state={hasError:!1},o}return(0,o.Z)(n,[{key:"componentDidCatch",value:function(e,t){console.log(e,t)}},{key:"render",value:function(){return this.state.hasError?s.createElement(p,null):this.props.children}}],[{key:"getDerivedStateFromError",value:function(e){return{hasError:!0}}}]),n}(s.Component),m=function(e){(0,a.Z)(n,e);var t=d(n);function n(){return(0,r.Z)(this,n),t.apply(this,arguments)}return(0,o.Z)(n)}(c.SV);m.defaultProps={fallback:p};var g=function(e){var t=window.envSettings.tracking?m:v;return function(n){return s.createElement(t,{fallback:p},s.createElement(e,n))}};t.ZP=m},14600:function(e,t,n){"use strict";n.d(t,{E:function(){return v},K:function(){return d}});var r=n(45987),o=n(4942),a=n(29439),i=(n(92222),n(41539),n(39714),n(74916),n(15306),n(85827),n(25387),n(2490),n(72608),n(4723),n(15581),n(34514),n(54747),n(2707),n(47941),n(82526),n(57327),n(88449),n(59849),n(38880),n(49337),n(33321),n(69070),[{value:"Dateline Standard Time",abbr:"DST",text:"International Date Line West",utc:["Etc/GMT+12"]},{value:"UTC-11",abbr:"U",text:"Coordinated Universal Time-11",utc:["Etc/GMT+11","Pacific/Midway","Pacific/Niue","Pacific/Pago_Pago"]},{value:"Hawaiian Standard Time",abbr:"HST",text:"Hawaii",utc:["Etc/GMT+10","Pacific/Honolulu","Pacific/Johnston","Pacific/Rarotonga","Pacific/Tahiti"]},{value:"Alaskan Standard Time",abbr:"AKDT",text:"Alaska",utc:["America/Anchorage","America/Juneau","America/Nome","America/Sitka","America/Yakutat"]},{value:"Pacific Standard Time (Mexico)",abbr:"PDT",text:"Baja California",utc:["America/Santa_Isabel"]},{value:"Pacific Standard Time",abbr:"PST",text:"Pacific Time (US & 
Canada)",utc:["America/Los_Angeles","America/Dawson","America/Tijuana","America/Vancouver","America/Whitehorse","PST8PDT"]},{value:"US Mountain Standard Time",abbr:"UMST",text:"Arizona",utc:["America/Creston","America/Dawson_Creek","America/Hermosillo","America/Phoenix","Etc/GMT+7"]},{value:"Mountain Standard Time (Mexico)",abbr:"MDT",text:"Chihuahua, La Paz, Mazatlan",utc:["America/Chihuahua","America/Mazatlan"]},{value:"Mountain Standard Time",abbr:"MDT",text:"Mountain Time (US & Canada)",utc:["America/Boise","America/Cambridge_Bay","America/Denver","America/Edmonton","America/Inuvik","America/Ojinaga","America/Yellowknife","MST7MDT"]},{value:"Central America Standard Time",abbr:"CAST",text:"Central America",utc:["America/Belize","America/Costa_Rica","America/El_Salvador","America/Guatemala","America/Managua","America/Tegucigalpa","Etc/GMT+6","Pacific/Galapagos"]},{value:"Central Standard Time",abbr:"CDT",text:"Central Time (US & Canada)",utc:["America/Chicago","America/Indiana/Knox","America/Indiana/Tell_City","America/Matamoros","America/Menominee","America/North_Dakota/Beulah","America/North_Dakota/Center","America/North_Dakota/New_Salem","America/Rainy_River","America/Rankin_Inlet","America/Resolute","America/Winnipeg","CST6CDT"]},{value:"Central Standard Time (Mexico)",abbr:"CDT",text:"Guadalajara, Mexico City, Monterrey",utc:["America/Bahia_Banderas","America/Cancun","America/Merida","America/Mexico_City","America/Monterrey"]},{value:"Canada Central Standard Time",abbr:"CCST",text:"Saskatchewan",utc:["America/Regina","America/Swift_Current"]},{value:"SA Pacific Standard Time",abbr:"SPST",text:"Bogota, Lima, Quito",utc:["America/Bogota","America/Cayman","America/Coral_Harbour","America/Eirunepe","America/Guayaquil","America/Jamaica","America/Lima","America/Panama","America/Rio_Branco","Etc/GMT+5"]},{value:"Eastern Standard Time",abbr:"EDT",text:"Eastern Time (US & Canada)",utc:["America/Detroit","America/Havana","America/Indiana/Petersburg","America/Indiana/Vincennes","America/Indiana/Winamac","America/Iqaluit","America/Kentucky/Monticello","America/Louisville","America/Montreal","America/Nassau","America/New_York","America/Nipigon","America/Pangnirtung","America/Port-au-Prince","America/Thunder_Bay","America/Toronto","EST5EDT"]},{value:"US Eastern Standard Time",abbr:"UEDT",text:"Indiana (East)",utc:["America/Indiana/Marengo","America/Indiana/Vevay","America/Indianapolis"]},{value:"Venezuela Standard Time",abbr:"VST",text:"Caracas",utc:["America/Caracas"]},{value:"Paraguay Standard Time",abbr:"PYT",text:"Asuncion",utc:["America/Asuncion"]},{value:"Atlantic Standard Time",abbr:"ADT",text:"Atlantic Time (Canada)",utc:["America/Glace_Bay","America/Goose_Bay","America/Halifax","America/Moncton","America/Thule","Atlantic/Bermuda"]},{value:"Central Brazilian Standard Time",abbr:"CBST",text:"Cuiaba",utc:["America/Campo_Grande","America/Cuiaba"]},{value:"SA Western Standard Time",abbr:"SWST",text:"Georgetown, La Paz, Manaus, San 
Juan",utc:["America/Anguilla","America/Antigua","America/Aruba","America/Barbados","America/Blanc-Sablon","America/Boa_Vista","America/Curacao","America/Dominica","America/Grand_Turk","America/Grenada","America/Guadeloupe","America/Guyana","America/Kralendijk","America/La_Paz","America/Lower_Princes","America/Manaus","America/Marigot","America/Martinique","America/Montserrat","America/Port_of_Spain","America/Porto_Velho","America/Puerto_Rico","America/Santo_Domingo","America/St_Barthelemy","America/St_Kitts","America/St_Lucia","America/St_Thomas","America/St_Vincent","America/Tortola","Etc/GMT+4"]},{value:"Pacific SA Standard Time",abbr:"PSST",text:"Santiago",utc:["America/Santiago","Antarctica/Palmer"]},{value:"Newfoundland Standard Time",abbr:"NDT",text:"Newfoundland",utc:["America/St_Johns"]},{value:"E. South America Standard Time",abbr:"ESAST",text:"Brasilia",utc:["America/Sao_Paulo"]},{value:"Argentina Standard Time",abbr:"AST",text:"Buenos Aires",utc:["America/Argentina/La_Rioja","America/Argentina/Rio_Gallegos","America/Argentina/Salta","America/Argentina/San_Juan","America/Argentina/San_Luis","America/Argentina/Tucuman","America/Argentina/Ushuaia","America/Buenos_Aires","America/Catamarca","America/Cordoba","America/Jujuy","America/Mendoza"]},{value:"SA Eastern Standard Time",abbr:"SEST",text:"Cayenne, Fortaleza",utc:["America/Araguaina","America/Belem","America/Cayenne","America/Fortaleza","America/Maceio","America/Paramaribo","America/Recife","America/Santarem","Antarctica/Rothera","Atlantic/Stanley","Etc/GMT+3"]},{value:"Greenland Standard Time",abbr:"GDT",text:"Greenland",utc:["America/Godthab"]},{value:"Montevideo Standard Time",abbr:"MST",text:"Montevideo",utc:["America/Montevideo"]},{value:"Bahia Standard Time",abbr:"BST",text:"Salvador",utc:["America/Bahia"]},{value:"UTC-02",abbr:"U",text:"Coordinated Universal Time-02",utc:["America/Noronha","Atlantic/South_Georgia","Etc/GMT+2"]},{value:"Mid-Atlantic Standard Time",abbr:"MDT",text:"Mid-Atlantic - Old",utc:[]},{value:"Azores Standard Time",abbr:"ADT",text:"Azores",utc:["America/Scoresbysund","Atlantic/Azores"]},{value:"Cape Verde Standard Time",abbr:"CVST",text:"Cape Verde Is.",utc:["Atlantic/Cape_Verde","Etc/GMT+1"]},{value:"Morocco Standard Time",abbr:"MDT",text:"Casablanca",utc:["Africa/Casablanca","Africa/El_Aaiun"]},{value:"UTC",abbr:"UTC",text:"Coordinated Universal Time",utc:["America/Danmarkshavn","Etc/GMT"]},{value:"GMT Standard Time",abbr:"GMT",text:"Edinburgh, London",utc:["Europe/Isle_of_Man","Europe/Guernsey","Europe/Jersey","Europe/London"]},{value:"GMT Standard Time",abbr:"GDT",text:"Dublin, Lisbon",utc:["Atlantic/Canary","Atlantic/Faeroe","Atlantic/Madeira","Europe/Dublin","Europe/Lisbon"]},{value:"Greenwich Standard Time",abbr:"GST",text:"Monrovia, Reykjavik",utc:["Africa/Abidjan","Africa/Accra","Africa/Bamako","Africa/Banjul","Africa/Bissau","Africa/Conakry","Africa/Dakar","Africa/Freetown","Africa/Lome","Africa/Monrovia","Africa/Nouakchott","Africa/Ouagadougou","Africa/Sao_Tome","Atlantic/Reykjavik","Atlantic/St_Helena"]},{value:"W. 
Europe Standard Time",abbr:"WEDT",text:"Amsterdam, Berlin, Bern, Rome, Stockholm, Vienna",utc:["Arctic/Longyearbyen","Europe/Amsterdam","Europe/Andorra","Europe/Berlin","Europe/Busingen","Europe/Gibraltar","Europe/Luxembourg","Europe/Malta","Europe/Monaco","Europe/Oslo","Europe/Rome","Europe/San_Marino","Europe/Stockholm","Europe/Vaduz","Europe/Vatican","Europe/Vienna","Europe/Zurich"]},{value:"Central Europe Standard Time",abbr:"CEDT",text:"Belgrade, Bratislava, Budapest, Ljubljana, Prague",utc:["Europe/Belgrade","Europe/Bratislava","Europe/Budapest","Europe/Ljubljana","Europe/Podgorica","Europe/Prague","Europe/Tirane"]},{value:"Romance Standard Time",abbr:"RDT",text:"Brussels, Copenhagen, Madrid, Paris",utc:["Africa/Ceuta","Europe/Brussels","Europe/Copenhagen","Europe/Madrid","Europe/Paris"]},{value:"Central European Standard Time",abbr:"CEDT",text:"Sarajevo, Skopje, Warsaw, Zagreb",utc:["Europe/Sarajevo","Europe/Skopje","Europe/Warsaw","Europe/Zagreb"]},{value:"W. Central Africa Standard Time",abbr:"WCAST",text:"West Central Africa",utc:["Africa/Algiers","Africa/Bangui","Africa/Brazzaville","Africa/Douala","Africa/Kinshasa","Africa/Lagos","Africa/Libreville","Africa/Luanda","Africa/Malabo","Africa/Ndjamena","Africa/Niamey","Africa/Porto-Novo","Africa/Tunis","Etc/GMT-1"]},{value:"Namibia Standard Time",abbr:"NST",text:"Windhoek",utc:["Africa/Windhoek"]},{value:"GTB Standard Time",abbr:"GDT",text:"Athens, Bucharest",utc:["Europe/Athens","Asia/Nicosia","Europe/Bucharest","Europe/Chisinau"]},{value:"Middle East Standard Time",abbr:"MEDT",text:"Beirut",utc:["Asia/Beirut"]},{value:"Egypt Standard Time",abbr:"EST",text:"Cairo",utc:["Africa/Cairo"]},{value:"Syria Standard Time",abbr:"SDT",text:"Damascus",utc:["Asia/Damascus"]},{value:"E. Europe Standard Time",abbr:"EEDT",text:"E. Europe",utc:["Asia/Nicosia","Europe/Athens","Europe/Bucharest","Europe/Chisinau","Europe/Helsinki","Europe/Kiev","Europe/Mariehamn","Europe/Nicosia","Europe/Riga","Europe/Sofia","Europe/Tallinn","Europe/Uzhgorod","Europe/Vilnius","Europe/Zaporozhye"]},{value:"South Africa Standard Time",abbr:"SAST",text:"Harare, Pretoria",utc:["Africa/Blantyre","Africa/Bujumbura","Africa/Gaborone","Africa/Harare","Africa/Johannesburg","Africa/Kigali","Africa/Lubumbashi","Africa/Lusaka","Africa/Maputo","Africa/Maseru","Africa/Mbabane","Etc/GMT-2"]},{value:"FLE Standard Time",abbr:"FDT",text:"Helsinki, Kyiv, Riga, Sofia, Tallinn, Vilnius",utc:["Europe/Helsinki","Europe/Kiev","Europe/Mariehamn","Europe/Riga","Europe/Sofia","Europe/Tallinn","Europe/Uzhgorod","Europe/Vilnius","Europe/Zaporozhye"]},{value:"Turkey Standard Time",abbr:"TDT",text:"Istanbul",utc:["Europe/Istanbul"]},{value:"Israel Standard Time",abbr:"JDT",text:"Jerusalem",utc:["Asia/Jerusalem"]},{value:"Libya Standard Time",abbr:"LST",text:"Tripoli",utc:["Africa/Tripoli"]},{value:"Jordan Standard Time",abbr:"JST",text:"Amman",utc:["Asia/Amman"]},{value:"Arabic Standard Time",abbr:"AST",text:"Baghdad",utc:["Asia/Baghdad"]},{value:"Kaliningrad Standard Time",abbr:"KST",text:"Kaliningrad",utc:["Europe/Kaliningrad"]},{value:"Arab Standard Time",abbr:"AST",text:"Kuwait, Riyadh",utc:["Asia/Aden","Asia/Bahrain","Asia/Kuwait","Asia/Qatar","Asia/Riyadh"]},{value:"E. 
Africa Standard Time",abbr:"EAST",text:"Nairobi",utc:["Africa/Addis_Ababa","Africa/Asmera","Africa/Dar_es_Salaam","Africa/Djibouti","Africa/Juba","Africa/Kampala","Africa/Khartoum","Africa/Mogadishu","Africa/Nairobi","Antarctica/Syowa","Etc/GMT-3","Indian/Antananarivo","Indian/Comoro","Indian/Mayotte"]},{value:"Moscow Standard Time",abbr:"MSK",text:"Moscow, St. Petersburg, Volgograd, Minsk",utc:["Europe/Kirov","Europe/Moscow","Europe/Simferopol","Europe/Volgograd","Europe/Minsk"]},{value:"Samara Time",abbr:"SAMT",text:"Samara, Ulyanovsk, Saratov",utc:["Europe/Astrakhan","Europe/Samara","Europe/Ulyanovsk"]},{value:"Iran Standard Time",abbr:"IDT",text:"Tehran",utc:["Asia/Tehran"]},{value:"Arabian Standard Time",abbr:"AST",text:"Abu Dhabi, Muscat",utc:["Asia/Dubai","Asia/Muscat","Etc/GMT-4"]},{value:"Azerbaijan Standard Time",abbr:"ADT",text:"Baku",utc:["Asia/Baku"]},{value:"Mauritius Standard Time",abbr:"MST",text:"Port Louis",utc:["Indian/Mahe","Indian/Mauritius","Indian/Reunion"]},{value:"Georgian Standard Time",abbr:"GET",text:"Tbilisi",utc:["Asia/Tbilisi"]},{value:"Caucasus Standard Time",abbr:"CST",text:"Yerevan",utc:["Asia/Yerevan"]},{value:"Afghanistan Standard Time",abbr:"AST",text:"Kabul",utc:["Asia/Kabul"]},{value:"West Asia Standard Time",abbr:"WAST",text:"Ashgabat, Tashkent",utc:["Antarctica/Mawson","Asia/Aqtau","Asia/Aqtobe","Asia/Ashgabat","Asia/Dushanbe","Asia/Oral","Asia/Samarkand","Asia/Tashkent","Etc/GMT-5","Indian/Kerguelen","Indian/Maldives"]},{value:"Yekaterinburg Time",abbr:"YEKT",text:"Yekaterinburg",utc:["Asia/Yekaterinburg"]},{value:"Pakistan Standard Time",abbr:"PKT",text:"Islamabad, Karachi",utc:["Asia/Karachi"]},{value:"India Standard Time",abbr:"IST",text:"Chennai, Kolkata, Mumbai, New Delhi",utc:["Asia/Kolkata"]},{value:"India Standard Time",abbr:"IST",text:"Chennai, Kolkata, Mumbai, New Delhi",utc:["Asia/Calcutta"]},{value:"Sri Lanka Standard Time",abbr:"SLST",text:"Sri Jayawardenepura",utc:["Asia/Colombo"]},{value:"Nepal Standard Time",abbr:"NST",text:"Kathmandu",utc:["Asia/Kathmandu"]},{value:"Central Asia Standard Time",abbr:"CAST",text:"Nur-Sultan (Astana)",utc:["Antarctica/Vostok","Asia/Almaty","Asia/Bishkek","Asia/Qyzylorda","Asia/Urumqi","Etc/GMT-6","Indian/Chagos"]},{value:"Bangladesh Standard Time",abbr:"BST",text:"Dhaka",utc:["Asia/Dhaka","Asia/Thimphu"]},{value:"Myanmar Standard Time",abbr:"MST",text:"Yangon (Rangoon)",utc:["Asia/Rangoon","Indian/Cocos"]},{value:"SE Asia Standard Time",abbr:"SAST",text:"Bangkok, Hanoi, Jakarta",utc:["Antarctica/Davis","Asia/Bangkok","Asia/Hovd","Asia/Jakarta","Asia/Phnom_Penh","Asia/Pontianak","Asia/Saigon","Asia/Vientiane","Etc/GMT-7","Indian/Christmas"]},{value:"N. Central Asia Standard Time",abbr:"NCAST",text:"Novosibirsk",utc:["Asia/Novokuznetsk","Asia/Novosibirsk","Asia/Omsk"]},{value:"China Standard Time",abbr:"CST",text:"Beijing, Chongqing, Hong Kong, Urumqi",utc:["Asia/Hong_Kong","Asia/Macau","Asia/Shanghai"]},{value:"North Asia Standard Time",abbr:"NAST",text:"Krasnoyarsk",utc:["Asia/Krasnoyarsk"]},{value:"Singapore Standard Time",abbr:"MPST",text:"Kuala Lumpur, Singapore",utc:["Asia/Brunei","Asia/Kuala_Lumpur","Asia/Kuching","Asia/Makassar","Asia/Manila","Asia/Singapore","Etc/GMT-8"]},{value:"W. 
Australia Standard Time",abbr:"WAST",text:"Perth",utc:["Australia/Perth","Antarctica/Casey"]},{value:"Taipei Standard Time",abbr:"TST",text:"Taipei",utc:["Asia/Taipei"]},{value:"Ulaanbaatar Standard Time",abbr:"UST",text:"Ulaanbaatar",utc:["Asia/Choibalsan","Asia/Ulaanbaatar"]},{value:"North Asia East Standard Time",abbr:"NAEST",text:"Irkutsk",utc:["Asia/Irkutsk"]},{value:"Japan Standard Time",abbr:"JST",text:"Osaka, Sapporo, Tokyo",utc:["Asia/Dili","Asia/Jayapura","Asia/Tokyo","Etc/GMT-9","Pacific/Palau"]},{value:"Korea Standard Time",abbr:"KST",text:"Seoul",utc:["Asia/Pyongyang","Asia/Seoul"]},{value:"Cen. Australia Standard Time",abbr:"CAST",text:"Adelaide",utc:["Australia/Adelaide","Australia/Broken_Hill"]},{value:"AUS Central Standard Time",abbr:"ACST",text:"Darwin",utc:["Australia/Darwin"]},{value:"E. Australia Standard Time",abbr:"EAST",text:"Brisbane",utc:["Australia/Brisbane","Australia/Lindeman"]},{value:"AUS Eastern Standard Time",abbr:"AEST",text:"Canberra, Melbourne, Sydney",utc:["Australia/Melbourne","Australia/Sydney"]},{value:"West Pacific Standard Time",abbr:"WPST",text:"Guam, Port Moresby",utc:["Antarctica/DumontDUrville","Etc/GMT-10","Pacific/Guam","Pacific/Port_Moresby","Pacific/Saipan","Pacific/Truk"]},{value:"Tasmania Standard Time",abbr:"TST",text:"Hobart",utc:["Australia/Currie","Australia/Hobart"]},{value:"Yakutsk Standard Time",abbr:"YST",text:"Yakutsk",utc:["Asia/Chita","Asia/Khandyga","Asia/Yakutsk"]},{value:"Central Pacific Standard Time",abbr:"CPST",text:"Solomon Is., New Caledonia",utc:["Etc/GMT-11"]},{value:"Vladivostok Standard Time",abbr:"VST",text:"Vladivostok",utc:["Asia/Sakhalin","Asia/Ust-Nera","Asia/Vladivostok"]},{value:"New Zealand Standard Time",abbr:"NZST",text:"Auckland, Wellington",utc:["Antarctica/McMurdo","Pacific/Auckland"]},{value:"UTC+12",abbr:"U",text:"Coordinated Universal Time+12",utc:["Etc/GMT-12","Pacific/Funafuti","Pacific/Kwajalein","Pacific/Majuro","Pacific/Nauru","Pacific/Tarawa","Pacific/Wake","Pacific/Wallis"]},{value:"Fiji Standard Time",abbr:"FST",text:"Fiji",utc:["Pacific/Fiji"]},{value:"Magadan Standard Time",abbr:"MST",text:"Magadan",utc:["Asia/Anadyr","Asia/Kamchatka","Asia/Magadan","Asia/Srednekolymsk"]},{value:"Kamchatka Standard Time",abbr:"KDT",text:"Petropavlovsk-Kamchatsky - Old",utc:["Asia/Kamchatka"]},{value:"Tonga Standard Time",abbr:"TST",text:"Nuku'alofa",utc:["Etc/GMT-13","Pacific/Enderbury","Pacific/Fakaofo","Pacific/Tongatapu"]},{value:"Samoa Standard Time",abbr:"SST",text:"Samoa",utc:["Pacific/Apia"]}]),u=["utc"];function c(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function s(e){for(var t=1;t1?"".concat(t[0]).concat((t[1]/60).toString().substr(1)):t[0]}(i);return e[i]=u,t.concat(s(s({},n),{},{offset:u}))}catch(c){return t}}),[])}().sort((function(e,t){return e.offset-t.offset})),p=function(e){return e.reduce((function(e,t){var n=t.utc,o=(0,r.Z)(t,u);return n.forEach((function(t){return e[t]=s(s({},o),{},{utc:t})})),e}),{})}(d),v=function(e,t){var n=t||("default"===e?f():e);return p[n in p?n:f()]||{}}},98469:function(e,t,n){"use strict";n.r(t),n.d(t,{isTryingToJoinWorkspace:function(){return s}});n(26699),n(32023),n(66992),n(41539),n(78783),n(33948),n(41637);var r=n(67294),o=n(89250),a=n(62200),i=n(78266),u=n(13477),c=n(33427),s=function(){return(arguments.length>0&&void 0!==arguments[0]?arguments[0]:"").includes("/join-space")},l="Successfully 
joined space!";t.default=function(){var e=(0,u.Iy)("isAnonymous"),t=(0,o.s0)(),n=new URLSearchParams(window.location.hash.substr(1));return(0,r.useEffect)((function(){if(!e){var r=n.has("error_msg_key")?decodeURIComponent(n.get("error_msg_key")||""):null;if(r){var o=n.has("error_message")?decodeURIComponent(n.get("error_message")||""):null;r===a.Sq?(0,c.P4)(l):(0,c.P4)(o||"Error joining space",!0),t("/spaces",{replace:!0})}else(0,c.P4)(l)}}),[e]),r.createElement(i.Z,{title:"Adding you to the space..."})}},78266:function(e,t,n){"use strict";n.d(t,{H:function(){return d}});var r=n(87462),o=n(45987),a=n(67294),i=n(71893),u=n(93416),c=["title","width","height"],s=["title","body","iconProps","animate"],l=(0,i.keyframes)(["from{opacity:0.4;}to{opacity:1;}"]),f=(0,i.default)(u.Icon).withConfig({displayName:"loader__StyledIcon",componentId:"sc-a76ek6-0"})(["width:",";height:",";animation:",";"],(function(e){return e.width}),(function(e){return e.height}),(function(e){return e.animate?(0,i.css)([""," 1.6s ease-in infinite"],l):""})),d=function(e){var t=e.title,n=void 0===t?"Loading":t,i=e.width,u=void 0===i?"208px":i,s=e.height,l=void 0===s?"177px":s,d=(0,o.Z)(e,c);return a.createElement(f,(0,r.Z)({name:"netdata",color:"primary",title:n,"data-testid":"loading-logo",width:u,height:l},d))};t.Z=function(e){var t=e.title,n=e.body,i=e.iconProps,c=e.animate,l=void 0===c||c,f=(0,o.Z)(e,s);return a.createElement(u.Flex,(0,r.Z)({column:!0,height:"100vh",background:"mainBackground",width:"100%",justifyContent:"center",alignItems:"center"},f),a.createElement(d,(0,r.Z)({},i,{animate:l})),t&&a.createElement(u.H3,{color:"text",margin:[1,0,0]},t),n&&a.createElement(u.Text,{color:"text",margin:[4.5,0,0]},n))}},74855:function(e,t,n){"use strict";n.d(t,{P_:function(){return l},sc:function(){return d},t_:function(){return f}});var r=n(87462),o=n(45987),a=n(67294),i=n(93416),u=n(28234),c=["errorCode","errorMessage","errorMsgKey"],s={success:"success",error:"error",warning:"warning",default:"border"},l=function(e){var t=e.header,n=e.text,r=e.icon,o=e.renderContent,u=e.success,c=e.error,l=e.warning,f=(u?"success":c&&"error")||l&&"warning"||"default";return a.createElement(i.Flex,{padding:[2],alignItems:"center",justifyContent:"between"},a.createElement(i.Flex,{alignItems:"center",gap:3},r&&a.createElement(i.Flex,{flex:!1},a.createElement(i.Icon,{color:s[f],name:r,size:"large"})),a.createElement(i.Flex,{column:!0,gap:1},t&&a.createElement(i.H5,{color:s[f]},t),n&&a.createElement(i.TextSmall,{color:s[f]},n),o&&o(e))),a.createElement(i.Flex,null,a.createElement(i.Icon,{name:"x",size:"large",color:s[f]})))},f=function(e){e.errorCode;var t=e.errorMessage,n=e.errorMsgKey,i=(0,o.Z)(e,c);return a.createElement(l,(0,r.Z)({error:!0,icon:"error",text:t,header:(0,u.r)(n)},i))},d=function(e){return a.createElement(l,e)}},33427:function(e,t,n){"use strict";n.d(t,{$T:function(){return s},FR:function(){return g},OU:function(){return f},P4:function(){return p},Q5:function(){return c},R6:function(){return m},aN:function(){return d},cH:function(){return b},iL:function(){return v},oG:function(){return l}});n(47941),n(82526),n(57327),n(41539),n(88449),n(2490),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070);var r=n(4942),o=n(52631),a=n(74855);function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function u(e){for(var t=1;t1&&void 
0!==arguments[1]&&arguments[1]?"Users left the room!":"Users were added to room!")},n=(0,a.sc)(u(u({},t),{},{success:!0,icon:"space"}));o.Z.success(n,{context:"showUsersInRoomNotification"})},f=function(e){var t={header:"Space users",text:e||(arguments.length>1&&void 0!==arguments[1]&&arguments[1]?"Users left the space!":"Users were added to space!")},n=(0,a.sc)(u(u({},t),{},{success:!0,icon:"space"}));o.Z.success(n,{context:"showUsersInSpaceNotification"})},d=function(e){var t={header:"Rooms",text:e||(arguments.length>1&&void 0!==arguments[1]&&arguments[1]?"Rooms removed from Space!":"Rooms were added to Space!")},n=(0,a.sc)(u(u({},t),{},{success:!0,icon:"space"}));o.Z.success(n,{context:"showRoomsInSpaceNotification"})},p=function(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n={header:t?"Error joining space":"Welcome!",text:e},r=(0,a.sc)(u(u({},n),{},{success:!t,icon:"gear"}));t?o.Z.error(r,{context:"showJoiningSpaceNotification"}):o.Z.success(r,{context:"showJoiningSpaceNotification"})},v=function(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n={header:t?"Space removed":"Space added",text:e},r=(0,a.sc)(u(u({},n),{},{success:!t,icon:"gear"}));o.Z.success(r,{context:"showSpaceNotification"})},m=function(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n={header:t?"Error creating room":"Rooms",text:e},r=(0,a.sc)(u(u({},n),{},{success:!t,icon:"gear"}));t?o.Z.error(r,{context:"showRoomCreationNotification"}):o.Z.success(r,{context:"showRoomCreationNotification"})},g=function(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n={text:e},r=(0,a.sc)(u(u({},n),{},{text:e,success:!t,icon:"chart_added"}));t?o.Z.error(r,{context:"showDashboardCreatedNotification"}):o.Z.success(r,{context:"showDashboardCreatedNotification"})},b=function(){var e=(0,a.sc)({header:"Dashboard",text:"Invalid dashboard link. 
Please double-check the dashboard you are trying to access.",success:!1});o.Z.error(e,{context:"showInvalidDashboardSlugNotification"})}},52631:function(e,t,n){"use strict";n(47941),n(82526),n(57327),n(41539),n(88449),n(2490),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070);var r=n(4942),o=n(45987),a=n(55678),i=["context"],u=["context"];function c(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function s(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{},n=t.context,r=(0,o.Z)(t,i);n&&f[n]&&(a.Am.dismiss(f[n]),delete f[n]),f[n]=a.Am.success(e,s(s({},l),r))},error:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.context,r=(0,o.Z)(t,u);n&&f[n]&&(a.Am.dismiss(f[n]),delete f[n]),f[n]=a.Am.error(e,s(s({},l),r))},dismiss:function(e){return a.Am.dismiss(e)}}},9058:function(e,t,n){"use strict";n.d(t,{Z:function(){return S},e:function(){return y}});var r=n(4942),o=n(29439),a=(n(41539),n(15581),n(2490),n(34514),n(54747),n(92222),n(47941),n(82526),n(57327),n(88449),n(59849),n(38880),n(49337),n(33321),n(69070),n(67294)),i=n(33937),u=n(65565),c=n(25382),s=n(24298),l=n(4822),f=n(74059),d=n(12008),p=n(39840),v=n(58591),m=n(14600);function g(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function b(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:{},t=e.after,n=e.before;return(0,a.useMemo)((function(){if(t=+t,n=+n,!isNaN(t)&&!isNaN(n)&&t&&n)return{type:"highlight",range:[Math.floor(t/1e3),Math.ceil(n/1e3)]}}),[t,n])}(k),j=(0,d.O0)(),x=(0,o.Z)(j,1)[0],C=(0,a.useMemo)((function(){return{after:O<0?O:Math.floor(O/1e3),before:O<0?0:Math.ceil(w/1e3)}}),[O,w]),I=(0,f.Q6)(),T=(0,a.useMemo)((function(){var e=function(){var e=(0,v.m$)().utc;return(0,m.E)("default",e).utc}(),t=(0,c.Z)({attributes:b(b({theme:r,timezone:e,overlays:b({},E&&{highlight:E}),autofetchOnWindowBlur:P},C),{},{agent:I,containerWidth:.8*window.innerWidth,expandable:!1})});return window.netdataSDK=t,t}),[]);(0,u.Z)((function(){T.getNodes().forEach((function(e){return e.updateAttribute("theme",r)}))}),[r]),(0,u.Z)((function(){var e=T.getRoot().getAttribute("timezone");A!==e&&T.getRoot().getApplicableNodes({syncTimezone:!0}).forEach((function(e){return e.updateAttributes({timezone:A})}))}),[A]),(0,u.Z)((function(){var e=T.getRoot().getAttribute("overlays");if(E)T.getRoot().updateAttribute("overlays",b(b({},e),{},{highlight:E})),T.getRoot().getApplicableNodes({syncHighlight:!0}).forEach((function(e){return e.updateAttribute("overlays",b(b({},e.getAttribute("overlays")),{},{highlight:E}))}));else{var t=b({},e);delete t.highlight,T.getRoot().updateAttribute("overlays",t),T.getRoot().getApplicableNodes({syncHighlight:!0}).forEach((function(e){var t=b({},e.getAttribute("overlays"));delete t.highlight,e.updateAttribute("overlays",t)}))}}),[E]),(0,a.useEffect)((function(){if(x){T.trigger("goToLink",null,x.linkToGo);var e=T.getRoot().getNode({id:x.context});if(e){var t=e.getAttribute("overlays");e.updateAttribute("overlays",b(b({},t),{},{alarm:{type:"alarm",status:x.status,value:x.formattedLastStatusChangeValue,when:x.lastStatusChange}})),e.updateAttributes({selectedInstances:["".concat(x.instance,"@").concat(x.nodeId)],selectedNodes:[x.nodeId]})}var 
n=!!x&&T.getRoot().on("chartLoaded",(function(e){var t=e.getAttribute("id");if(x.context===t){var r=e.getAttribute("overlays");e.updateAttribute("overlays",b(b({},r),{},{alarm:{type:"alarm",status:x.status,value:x.formattedLastStatusChangeValue,when:x.lastStatusChange}})),e.updateAttributes({selectedInstances:["".concat(x.instance,"@").concat(x.nodeId)],selectedNodes:[x.nodeId]}),n()}}));return(0,s.unregister)(n)}}),[x]),(0,a.useEffect)((function(){var e=C.after,t=C.before;T.getRoot().moveX(e,t)}),[C]),(0,p.Wi)(x);var D=localStorage.getItem("netdataJWT"),N=null===(t=window.localNetdataRegistry)||void 0===t?void 0:t.mg,M=localStorage.getItem("agentJWT:".concat(N)),R=I?M:D;return(0,a.useEffect)((function(){T.getNodes().forEach((function(e){var t;return e.updateAttributes(b(b({},I&&null!==(t=window.localNetdataRegistry)&&void 0!==t&&t.xNetdataAuthHeader?{xNetdataBearer:R,bearer:null}:{xNetdataBearer:null,bearer:R}),{},{agent:I}))}))}),[I,R]),(0,a.useEffect)((function(){return(0,s.unregister)(T.getRoot().onAttributeChange("after",(function(){var e=T.getRoot().getAttributes(),t=e.after,n=e.before;Z({after:t<0?t:1e3*t,before:t<0?0:1e3*n})})),T.getRoot().onAttributeChange("overlays",(function(e,t){var n=e.highlight;if(n!==t.highlight){var r=null===n||void 0===n?void 0:n.range;if(r){var a=(0,o.Z)(r,2),i=a[0],u=a[1];Z({highlight:{after:1e3*i,before:1e3*u}})}else Z({highlight:{after:null,before:null}})}})))}),[T]),T?a.createElement(h.Provider,{value:T},n):n}},15794:function(e,t,n){"use strict";n.d(t,{k:function(){return p}});n(47941),n(82526),n(57327),n(41539),n(88449),n(2490),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070);var r=n(36459),o=n(87462),a=n(4942),i=(n(19601),n(67294)),u=n(71893),c=n(93416),s=n(33937);function l(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function f(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:{};return function(t){var n=t.class,r=t.component,o=t.type,a=t.info,i=t.value,u=i.calc,s=void 0===u?"":u,l=i.update_every,f=i.units,d=i.db,p=t.status,v=t.notification||{},m=v.type,g=v.exec,b=v.to,h=v.delay,y=v.repeat||{},S=y.warn,O=y.crit,w=p||{},A=w.warn,k=w.crit,P=w.green,Z=w.red,E=d||{},j=E.dimensions,x=E.method,C=E.after,I=E.before,T=E.options;return c(c({info:a},e),{},{configInfo:a,class:n,component:r,type:o,calculation:s,updateEvery:l,units:f,warning:A,critical:k,lookupDimensions:j,lookupMethod:x,lookupAfter:C,lookupBefore:I,lookupOptions:T,notificationType:m,exec:g,recipient:b,delay:h,warnRepeatEvery:S,critRepeatEvery:O,green:P,red:Z})}},w=function(e,t,n){var r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{};return(0,i.Ly)(e)?o.Z.get("/api/v2/alert_config?options=minify&config=".concat(n),{baseURL:window.envSettings.agentApiUrl,transform:O(r)}):o.Z.post("/api/v2/spaces/".concat(e,"/rooms/").concat(t,"/alert_config"),{config:n},{transform:O(r)})}},84074:function(e,t,n){"use strict";n.d(t,{E3:function(){return i},Gs:function(){return f},HI:function(){return l},IG:function(){return s},bc:function(){return u},p$:function(){return c}});var 
r=n(4480),o=n(80239),a=n(48450),i={id:null,loaded:!1,nodeId:null,instance:null,instanceName:null,name:"",calculation:"",lookupAfter:0,lookupBefore:0,status:"unknown",date:"",fullyLoaded:!1,fullyLoading:!1,info:"",configInfo:"",warning:"",critical:"",updateEvery:0,source:"",recipient:"Unknown",units:"",delayUpDuration:null,delayDownDuration:null,delayMaxDuration:null,multiplier:null,delayUpToTimestamp:null,lookupDimensions:"",lookupMethod:"",lookupOptions:"",class:"Unknown",type:"Unknown",component:"Unknown"};t.ZP=(0,r.cn)({key:"alert",default:{}});var u=(0,r.cn)({key:"selectedAlert",default:null}),c=(0,r.cn)({key:"alertsTabsAtom",default:o.U}),s=(0,r.xu)({key:"alertConfigurations",default:function(e){var t=e.spaceId,n=e.roomId;return(0,a.jc)(t,n)}}),l=(0,r.xu)({key:"alertInstances",default:function(e){var t=e.spaceId,n=e.roomId,r=e.name;return(0,a.MN)(t,n,r)}}),f=(0,r.xu)({key:"selectedAlertConfiguration",default:{alertName:null,nodeId:null}})},80239:function(e,t,n){"use strict";n.d(t,{U:function(){return o},UV:function(){return a},nb:function(){return r}});var r={alertStatus:"alertStatus",os:"os"},o=0,a="https://learn.netdata.cloud/docs/alerting/health-configuration-reference#edit-health-configuration-files"},39840:function(e,t,n){"use strict";n.d(t,{Wi:function(){return b},e8:function(){return m},m9:function(){return p},pK:function(){return v},yi:function(){return d}});var r=n(29439),o=(n(92222),n(67294)),a=n(89250),i=n(37518),u=n(4822),c=n(96104),s=n(45771),l=n(20709),f=n(89405),d=function(e){var t=(0,a.s0)(),n=(0,i.A3)(),r=(0,c.XT)({extraKey:"alerts"}),s=(0,u.Sf)("alertStatuses",{extraKey:"alerts"});return(0,o.useCallback)((function(o){e&&r([e]),o&&"string"===typeof o&&s([o]),t("".concat(n,"/alerts"))}),[e,n,r])},p=function(e,t){if(void 0===e)return"-";var n=(0,s.J)(e);return t?"".concat(n," ").concat(t):n},v=function(e,t){return(0,o.useMemo)((function(){return p(e,t)}),[e,t])},m=function(e){var t=e.rawTime,n=e.secs,r=void 0!==n&&n,a=e.long,i=void 0!==a&&a,u=(0,f.rA)(),c=u.localeTimeString,s=u.localeDateString;return(0,o.useMemo)((function(){var e=new Date(1e3*t);return isNaN(e.valueOf())?{}:{timeAgo:(0,l.Z)(e,new Date),formattedDate:"".concat(s(e,{long:i})," ").concat(c(e,{secs:r}))}}),[t,c,s])},g=function(){return Math.floor((new Date).getTime()/1e3)},b=function(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1],n=e||{},a=n.lastUpdated,i=n.lastStatusChange,c=(0,u.TQ)(),s=function(e){var t=e||{},n=t.lastUpdated,r=t.lastStatusChange;return(0,o.useMemo)((function(){if(!n||!r)return[];var e=n||g(),t=e-r,o=Math.round(r-t),a=Math.round(e+t);return[o,a>g()?g():a]}),[n,r])}({lastUpdated:a,lastStatusChange:i}),l=(0,r.Z)(s,2),f=l[0],d=l[1];(0,o.useEffect)((function(){t&&(f||d)&&c({after:1e3*f,before:1e3*d})}),[t,f,d])}},12008:function(e,t,n){"use strict";n.d(t,{Bk:function(){return P},E5:function(){return h},ER:function(){return O},E_:function(){return k},JU:function(){return x},O0:function(){return y},__:function(){return C},fp:function(){return b},n_:function(){return E},sb:function(){return A},v3:function(){return I},w4:function(){return j},yx:function(){return w}});var r=n(4942),o=(n(21249),n(57640),n(9924),n(57327),n(41539),n(88449),n(2490),n(59849),n(85827),n(25387),n(72608),n(47941),n(82526),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(67294)),a=n(4480),i=n(37518),u=n(96929),c=n(99322),s=n(97945),l=n(80239),f=n(74059),d=n(84074),p=n(48450),v=n(45771);function m(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var 
r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function g(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{},n=t.spaceId,r=t.roomId,u=(0,f.th)();n=n||u;var c=(0,i.UL)();r=r||c;var s=(0,a._8)((function(t){var o=t.set;return function(){o(b({id:e,key:"fullyLoading"}),!0);var t=(0,p.hN)(n,r,e);return t.then((function(t){var n=t.data;return o(b({id:e}),g(g({},n),{},{fullyLoaded:!0,fullyLoading:!1,loaded:!0}))})).catch((function(){return o(b({id:e,key:"fullyLoading"}),!1)})),function(){return t.cancel()}}}),[r,n,e]);(0,o.useEffect)((function(){e&&s()}),[e,s])},A=function(){var e=function(){var e=(0,i.UL)();return(0,a.sJ)(k(e))}(),t=(0,u.zN)(),n=(0,s.Ne)(t);return(0,o.useMemo)((function(){return n.map((function(t){var n=e.filter((function(e){return e.nodeId===t.id}));if(!n.length)return t;var o=n.map((function(e){return e.status}));return g(g({},t),{},(0,r.Z)({},l.nb.alertStatus,(0,v.j)(o)))}))}),[e,n])},k=(0,a.CG)({key:"roomAlerts",get:function(e){return function(t){var n=t.get,r=n((0,c.e)({id:e,key:"ids"}));return n(S(r))}}}),P=function(){var e=(0,i.UL)();return(0,a.sJ)((0,c.e)({id:e,key:"loaded"}))},Z={warning:0,critical:0,clear:0},E=function(e){var t=O(e);return(0,o.useMemo)((function(){return t.reduce((function(e,t){var n=t.status;return e[n]=e[n]+1,e}),g({},Z))}),[e])},j=function(){var e=(0,f.th)(),t=(0,i.UL)();return(0,a.sJ)((0,d.IG)({spaceId:e,roomId:t}))},x=function(e,t){var n=(0,f.th)(),r=(0,i.UL)(),u=(0,a.sJ)((0,d.HI)({spaceId:n,roomId:r,name:e}));return(0,o.useMemo)((function(){return t?u.filter((function(e){return e.nodeId===t})):u}),[u,t])},C=function(){var e=(0,i.UL)();return(0,a.FV)((0,d.Gs)(e))},I=function(){var e=(0,i.UL)();return(0,a.rb)((0,d.Gs)(e))}},45771:function(e,t,n){"use strict";n.d(t,{J:function(){return r},j:function(){return o}});n(9653),n(61874),n(26699),n(32023);var r=function(e){var t=e<1?Number(e).toPrecision(3):Number(e).toFixed(2),n=Number.parseFloat(t);return Math.abs(n)>=1e9||Math.abs(n)<=1e-5&&0!==n?n.toExponential(3):n},o=function(e){return e.includes("critical")?"critical":e.includes("warning")?"warning":e.includes("clear")?"clear":e[0]}},71992:function(e,t,n){"use strict";n.d(t,{h:function(){return v},Z:function(){return g}});var r=n(29439),o=n(87462),a=n(45987),i=(n(92222),n(67294)),u=n(93416),c=n(46667),s=n(71893),l=(0,s.default)(u.Drop).attrs({align:{top:"bottom",left:"left"},animation:!0,background:"dropdown",column:!0,margin:[.5,0,0],overflow:{vertical:"auto"},padding:[2,0],round:1}).withConfig({displayName:"styled__Drop",componentId:"sc-1x9syns-0"})(["box-sizing:content-box;"]),f=(0,s.default)(u.TextSmall).withConfig({displayName:"styled__DropdownItemClickable",componentId:"sc-1x9syns-1"})(["cursor:pointer;pointer-events:",";"," &:hover{background-color:",";}"],(function(e){return e.isDisabled?"none":"auto"}),(function(e){return e.isDisabled&&"color: ".concat((0,u.getColor)("textLite")(e),";")}),(0,u.getColor)("selected")),d=["children","color","disabled","icon"],p=["Component","category","context"],v=function(e){var t=e.children,n=e.color,r=void 0===n?"text":n,c=e.disabled,s=e.icon,l=(0,a.Z)(e,d);return 
i.createElement(f,(0,o.Z)({as:u.Flex,"data-testid":"dropdownItem",gap:2,isDisabled:c,padding:[2,4]},l),i.createElement(u.Icon,{color:c?"textLite":r,"data-testid":"dropdownItem-icon",height:"16px",name:s,width:"16px"}),i.createElement(u.Text,{color:r,"data-testid":"dropdownItem-text",whiteSpace:"nowrap"},t))},m=(0,i.forwardRef)((function(e,t){var n=e.Component,r=e.category,u=e.context,c=(0,a.Z)(e,p);return i.createElement(n,(0,o.Z)({"data-ga":"".concat(r,"::").concat(u,"::options"),"data-testid":"chartOptions",icon:"nav_dots",flavour:"borderless",neutral:!0,small:!0,padding:0,ref:t,title:"Options"},c))})),g=function(e){var t=e.Component,n=void 0===t?u.Button:t,o=e.category,a=e.context,s=e.children,f=e.testId,d=(0,c.Z)(),p=(0,r.Z)(d,4),v=p[0],g=p[1],b=p[3],h=(0,i.useRef)(),y=f||"chartsDropdown";return i.createElement(i.Fragment,null,i.createElement(m,{"data-testid":"".concat(y,"-button"),category:o,context:a,onClick:g,ref:h,Component:n}),h.current&&v&&i.createElement(l,{close:b,"data-testid":y,onClickOutside:b,onEsc:b,target:h.current},s({close:b})))}},1043:function(e,t,n){"use strict";n.d(t,{aJ:function(){return M},R2:function(){return U},O9:function(){return T},iM:function(){return D},UQ:function(){return L},T4:function(){return _},So:function(){return R}});var r=n(15861),o=n(4942),a=n(45987),i=n(29439),u=n(64687),c=n.n(u),s=(n(92222),n(74916),n(64765),n(21249),n(57640),n(9924),n(15306),n(47941),n(82526),n(57327),n(41539),n(88449),n(2490),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(67294)),l=n(89250),f=n(28234),d=n(74059),p=n(17563),v=n(26398),m=n(5429),g=n(93017),b=n(68008),h=n(76201),y=n(78710),S=n(13477),O=n(94510),w=n(98469);function A(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function k(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{}).path,n=void 0===t?"/sign-up":t;if(window.envSettings.tracking)try{var r,o="".concat(n,"/thank-you");window.dataLayer.push({event:"pageview",virtualPage:"".concat(o).concat(e)}),window.posthog.setPersonPropertiesForFlags({netdata_cloud_account_created_days_ago:0}),(0,h.o)("".concat(null===(r=window)||void 0===r||null===(r=r.location)||void 0===r?void 0:r.origin).concat(o).concat(e))}catch(a){}},T=function(e){return(0,b.Z)((function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return v.Z.patch("/api/v1/accounts/".concat(e),t)}),[e])},D=function(){var e=(0,l.TH)(),t=e.search,n=e.hash,r=(0,l.bS)("/sign-up"),o=(0,l.bS)("/sign-in"),a=r?r.pathname:o?o.pathname:"",u=(0,l.s0)(),c=(0,g.Z)(),d=(0,i.Z)(c,2),p=d[0],m=d[1],b=(0,j.Z)(),h=b.sendLog,y=b.isReady;return(0,s.useCallback)((function(e){var r=e.email,o=e.redirectURI,i=e.registerURI,c=e.isUnverifiedRegistration,s=e.resend;return v.Z.post("/api/v2/auth/account/magic-link",{email:r,redirectURI:o,registerURI:i,isUnverifiedRegistration:c}).then((function(){p({header:"Sign in email sent"}),s||(I(t,{path:a}),"/sign-up"==a&&h({feature:"SignUpThankYou"}),u("/sign-in/magic-link-sent".concat(t).concat(n),{state:{email:r}}))})).catch((function(e){var t,n,r=(0,f.r)((null===e||void 0===e||null===(t=e.response)||void 0===t||null===(t=t.data)||void 0===t?void 0:t.errorMsgKey)||(null===e||void 0===e||null===(n=e.response)||void 0===n||null===(n=n.data)||void 0===n?void 0:n.errorMessage)||"Something went wrong");throw m({header:"Sign in",text:r}),r}))}),[a,t,n,h,y])},N=function(e){var 
t=e.authorized_origins;return(void 0===t?[]:t).map((function(e){return{id:e.id,url:e.url,lastAccessedAt:e.last_accessed_at}}),[])},M=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.onSuccess,n=e.onFail,r=(0,a.Z)(e,x),i=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];return(0,m.Z)((function(){return function(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{};n(s(e),(function(e){return(0,i.ZP)(e,r)?e:r}))}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),h=function(e){var t=(0,u.th)(),n=(0,a.sJ)(b(e||t)),i=(0,a.$P)(d);return(0,o.useMemo)((function(){var e,t=v({},n);return"hasValue"===i.state&&(t=v(v({},t),i.contents)),v(v({},e=t),Object.keys(g).reduce((function(t,n){return g[n](e)?v(v({},t),{},(0,r.Z)({},n,n)):t}),{}))}),[n,i.state])},y=function(e,t){return!!h(t)[e]},S=function(){var e=h();return(0,o.useMemo)((function(){return(0,l.Jo)(e)}),[e])},O=function(){var e=h();return(0,o.useMemo)((function(){return(0,l.QR)(e)}),[e])}},33937:function(e,t,n){"use strict";n.d(t,{pI:function(){return w},kq:function(){return S},Ro:function(){return y}});n(47941),n(82526),n(57327),n(41539),n(88449),n(2490),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070);var r=n(15861),o=n(29439),a=n(4942),i=n(64687),u=n.n(i),c=n(4480),s=n(23109),l=n(94510),f=(0,c.cn)({key:"currentUserSettings",default:(0,l.ZP)("settings")}),d=n(1043),p=n(13477),v=n(74059);function m(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function g(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{},n=t.shouldPersist,a=t.onSuccess,c=t.onFail,l=(0,u.Zl)(d(e)),p=v(),m=(0,s.O9)(p);return(0,u._8)((function(t){var u=t.snapshot;return function(){var t=(0,r.Z)(i().mark((function t(r){var c,s;return i().wrap((function(t){for(;;)switch(t.prev=t.next){case 0:if(l(r),n){t.next=3;break}return t.abrupt("return");case 3:return c=u.retain(),t.prev=4,t.next=7,m(f({},e?(0,o.Z)({},e,r):r));case 7:a&&a(),t.next=16;break;case 10:return t.prev=10,t.t0=t.catch(4),t.next=14,u.getPromise(d());case 14:s=t.sent,l(e?s[e]:s);case 16:return t.prev=16,c(),t.finish(16);case 19:case"end":return t.stop()}}),t,null,[[4,10,16,19]])})));return function(e){return t.apply(this,arguments)}}()}),[e,n,a,c])},g=function(e,t){return[p(e),m(e,t)]}},67336:function(e,t,n){"use strict";n.d(t,{B1:function(){return A},O9:function(){return P},R2:function(){return k},RO:function(){return w}});n(82526),n(57327),n(88449),n(59849),n(38880),n(49337),n(33321),n(69070);var r=n(45987),o=n(4942),a=n(29439),i=(n(41539),n(39714),n(21249),n(57640),n(9924),n(47941),n(85827),n(25387),n(2490),n(72608),n(15581),n(34514),n(54747),n(4480)),u=n(68008),c=n(93017),s=n(37518),l=n(74059),f=n(19665),d=n(47133),p=n(41943),v=n(22841),m=n(9058),g=n(45215),b=n(79619),h=n(39878),y=["pristine"];function S(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function O(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:{},r=(0,f.DB)(e,t,n);return r.then((function(){return o({header:"Dashboards",text:"Dashboard successfully created!"})})).catch((function(e){return!e.isCancel&&i(e)})),r}),[e,t])},A=function(){var e=(0,c.Z)(),t=(0,a.Z)(e,2),n=t[0],r=t[1];return(0,u.Z)((function(e){var t=e.dashboards,o=t.map((function(e){return 
e.id})).toString(),a=t[0].roomId,i=t[0].spaceId;return(0,f.iM)(i,a,o).then((function(){return n({header:"Dashboards",text:"Dashboards successfully deleted!"})})).catch((function(e){return!e.isCancel&&r}))}),[])},k=function(e){var t=(0,d.LJ)(e,"spaceId"),n=(0,d.LJ)(e,"roomId"),r=(0,c.Z)(),o=(0,a.Z)(r,2),i=o[0],s=o[1];return(0,u.Z)((function(){var r=(0,f.iM)(t,n,e);return r.then((function(){return i({header:"Dashboards",text:"Dashboard successfully deleted!"})})).catch((function(e){return!e.isCancel&&s(e)})),r}),[e,t,n])},P=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.onSuccess,o=t.onFail,a=(0,m.e)(),u=(0,l.th)(),c=(0,s.UL)();return(0,i._8)((function(t){var i=t.snapshot,s=t.set;return function(){s((0,d.Y3)({id:e,key:"processing"}),!0);var t=i.getLoadable((0,d.Y3)({id:e})).contents,l=i.getLoadable((0,p.sX)({id:e,key:"layout"})).contents,m=i.getLoadable((0,v.oz)({id:e})).contents,S=(0,g.ZO)(a,e),w={version:t.version+1,name:t.name,snapshot:{uiState:l,items:Object.keys(m).map((function(e){if(!S||"text"===m[e].type)return O({},m[e]);var t=S.getNode({cardId:e},{inherit:!1},S.getChildren()).getAttributes(),n=t.pristine,o=(0,r.Z)(t,y),a=Object.keys(n).reduce((function(e,t){return e[t]=o[t],e}),O({},m[e]));return O(O({},m[e]),a)}))}},A=(0,f.Y)(u,c,e,w);return A.then((function(e){var t=e.data,r=t.createdAt,o=t.id,a=t.name,i=t.roomID,u=t.slug,c=t.spaceID,l=t.updatedAt,f=t.version,m=(0,b.m)(t).cards;s((0,v.oz)({id:o}),m),s((0,d.Y3)({id:o}),(function(e){return O(O({},e),{},{createdAt:r,id:o,name:a,roomId:i,slug:u,spaceId:c,updatedAt:l,version:f,processing:!1})})),s(h.UU,u),s((0,h.c7)([c,i,u]),o),s((0,p.Kl)(o)),S&&Object.keys(m).forEach((function(e){var t,n;"text"!==m[e].type&&(null===(t=S.getNode({cardId:e}))||void 0===t||null===(n=t.removePristine)||void 0===n||n.call(t))})),n&&n(t)})).catch((function(t){throw s((0,d.Y3)({id:e,key:"processing"},!1)),o&&o(t),t})),A}}))}},97723:function(e,t,n){"use strict";n.d(t,{CO:function(){return T},Di:function(){return C},EW:function(){return x},Kq:function(){return Z},Zj:function(){return j},jN:function(){return E},qN:function(){return I}});var r=n(15861),o=n(93433),a=n(4942),i=n(64687),u=n.n(i),c=(n(85827),n(41539),n(25387),n(2490),n(72608),n(92222),n(57327),n(88449),n(59849),n(15581),n(34514),n(54747),n(47941),n(82526),n(38880),n(49337),n(33321),n(69070),n(67294)),s=n(4480),l=n(28721),f=n(54227),d=n(5934),p=n(44850),v=n(98475),m=n(47133),g=n(41943),b=n(22841),h=n(19665),y=n(67336);function S(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function O(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{};return O(O(O({},p.js),p.v),{},{id:(0,l.Z)(),type:"chart",contextScope:e},t)},A=function(e){return function(t,n){var r=t.reduce((function(e,t){var n=t.y+t.h;return n>e?n:e}),0);return[].concat((0,o.Z)(t),[O(O({},e),{},{i:n,y:r})])}},k=A(v.Mk),P=A(v.b1),Z=function(e){return(0,s._8)((function(t){var n=t.set;return function(){var t=(0,l.Z)(),r=O(O(O({},p.js),p.Xw),{},{id:t,type:"text",editing:!0});n((0,m.Y3)({id:e,key:"cardIds"}),(function(e){return[].concat((0,o.Z)(e),[t])})),n((0,g.Fu)(e),(function(e){return P(e,t)})),n((0,b.oz)({id:e,resourceId:t}),r)}}))},E=function(e){return(0,s._8)((function(t){var n=t.set;return function(t){var r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:function(){},a=w(t,arguments.length>1&&void 
0!==arguments[1]?arguments[1]:{}),i=a.id;n((0,m.Y3)({id:e,key:"cardIds"}),(function(e){return[].concat((0,o.Z)(e),[i])})),n((0,g.Fu)(e),(function(e){return k(e,i)})),n((0,b.oz)({id:e,resourceId:i}),a),r()}}))},j=function(e,t){return(0,s._8)((function(n){var r=n.set;return function(){r((0,m.Y3)({id:e,key:"cardIds"}),(function(e){return e.filter((function(e){return e!==t}))})),r((0,g.Fu)(e),(function(e){return e.filter((function(e){return e.i!==t}))})),r((0,b.oz)({id:e}),(function(e){var n=O({},e);return delete n[t],n}))}}))},x=function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},o=n.onSuccess,a=n.onFail,i=(0,y.RO)(e,t);return(0,s._8)((function(n){var c=n.set;return function(){var n=(0,r.Z)(u().mark((function n(r){var s,l,f,p,v=arguments;return u().wrap((function(n){for(;;)switch(n.prev=n.next){case 0:return s=v.length>1&&void 0!==v[1]?v[1]:o,n.prev=1,n.next=4,i(r);case 4:l=n.sent,f=l.data,p=O(O(O({},r),{},{spaceId:e,roomId:t},f),{},{loaded:!0}),c((0,m.lz)(f.id),p),c((0,d.$3)(t),p),s&&s(p),n.next=15;break;case 12:n.prev=12,n.t0=n.catch(1),a?a(r):console.warn("failed",n.t0);case 15:case"end":return n.stop()}}),n,null,[[1,12]])})));return function(e){return n.apply(this,arguments)}}()}),[i,e,o,a])},C=function(e,t,n){var r=x(e,t,n);return(0,c.useCallback)((function(o,a,i){var u=w(a,i);return r({name:o},(function(r){var o;(0,h.Y)(e,t,r.id,{version:r.version+1||2,snapshot:{uiState:k([],u.id),items:[O(O({},u),{},{cardAttributes:O(O({},i),{},{id:u.id})})]}}),null===n||void 0===n||null===(o=n.onSuccess)||void 0===o||o.call(n,r)}))}),[r])},I=function(){return function(e){var t=(0,f.Le)();return(0,s._8)((function(n){var o=n.snapshot,a=n.set;return function(){var n=(0,r.Z)(u().mark((function n(r){var i,c,s,l,f,p=arguments;return u().wrap((function(n){for(;;)switch(n.prev=n.next){case 0:return i=r.ids,s=(c=p.length>1&&void 0!==p[1]?p[1]:{}).onSuccess,l=c.onFail,n.next=4,o.getPromise((0,m.NM)(i));case 4:return f=n.sent,n.prev=5,n.next=8,e({dashboards:f});case 8:f.forEach((function(e){var n=e.id,r=e.roomId;a((0,m._d)(n)),a((0,d.Mg)(r),[n]),t(n)})),s&&s(f),n.next=15;break;case 12:n.prev=12,n.t0=n.catch(5),l?l(f):console.warn("failed",n.t0);case 15:case"end":return n.stop()}}),n,null,[[5,12]])})));return function(e){return n.apply(this,arguments)}}()}),[e])}((0,y.B1)())},T=function(e,t){return function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},o=n.onSuccess,a=n.onFail,i=(0,m.LJ)(e,"spaceId"),c=(0,m.LJ)(e,"roomId"),l=(0,f.Le)();return(0,s._8)((function(n){var i=n.snapshot,s=n.set;return(0,r.Z)(u().mark((function n(){var r;return u().wrap((function(n){for(;;)switch(n.prev=n.next){case 0:return n.next=2,i.getPromise((0,m.Y3)({id:e}));case 2:return r=n.sent,n.prev=3,n.next=6,t();case 6:s((0,m._d)(e)),s((0,d.Mg)(c),[e]),l(e),o&&o(r),n.next=15;break;case 12:n.prev=12,n.t0=n.catch(3),a?a(r):console.warn("failed",n.t0);case 15:case"end":return n.stop()}}),n,null,[[3,12]])})))}),[t,i,c,e,o,a])}(e,(0,y.R2)(e),t)}},19665:function(e,t,n){"use strict";n.d(t,{AK:function(){return u},DB:function(){return a},Y:function(){return o},iM:function(){return i}});n(92222);var r=n(26398),o=function(e,t,n,o){return r.Z.patch("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/dashboards/").concat(n),o)},a=function(e,t,n){return r.Z.post("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/dashboards"),n)},i=function(e,t,n){return r.Z.delete("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/dashboards?dashboard_ids=").concat(n))},u=function(e,t,n){return 
r.Z.get("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/dashboards/").concat(n))}},44850:function(e,t,n){"use strict";n.d(t,{Xw:function(){return a},js:function(){return i},v:function(){return o}});var r=n(4480),o={pristine:{},id:null},a={pristine:{},id:null,text:"


    ",editing:!1},i={id:"",type:""};t.ZP=(0,r.xu)({key:"dashboardCards",default:{}})},22841:function(e,t,n){"use strict";n.d(t,{IT:function(){return d},I_:function(){return f},_q:function(){return l},oz:function(){return c}});n(47941),n(82526),n(57327),n(41539),n(88449),n(2490),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070);var r=n(4942),o=n(4480),a=n(44850);function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function u(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:"pristine";return{updatePristine:function(t,n,o){if(!(n in(t[e]||{}))&&!(0,a.ZP)(t[n],o))return u(u({},t),{},(0,r.Z)({},e,u(u({},t[e]),{},(0,r.Z)({},n,t[n]))));if((0,a.ZP)(t[e][n],o)){var i=u({},t[e]);return delete i[n],u(u({},t),{},(0,r.Z)({},e,i))}return t},resetPristine:function(t){return u(u(u({},t),t[e]),{},(0,r.Z)({},e,{}))}}}(),d=f.updatePristine,p=(0,o.CG)({key:"dashboardLayoutState",get:function(e){var t=e.id,n=e.key;return function(e){var r=(0,e.get)((0,c.ZP)(t));return n?r[n]:r}},set:function(e){return function(t,n){(0,t.set)((0,c.ZP)(e),l(l({},c.E3),{},{layout:n}))}}}),v=(0,o.CG)({key:"updateDashboardLayoutState",get:function(e){return function(t){return(0,t.get)(p({id:e,key:"layout"}))}},set:function(e){return function(t,n){(0,t.set)((0,c.ZP)(e),(function(e){return l(l({},d(e,"layout",n)),{},{layout:n})}))}}}),m=(0,o.CG)({key:"removePristineDashboardLayoutState",set:function(e){return function(t){(0,t.set)((0,c.ZP)(e),(function(e){return l(l({},e),{},{pristine:{}})}))}}}),g=function(e){var t=function(e,t){return(0,o.sJ)(p({id:e,key:t}))}(e,"layout"),n=function(e){return(0,o.Zl)(v(e))}(e);return[t,n]}},47133:function(e,t,n){"use strict";n.d(t,{Z7:function(){return k},lz:function(){return A},g4:function(){return Z},_d:function(){return P},Y3:function(){return h},NM:function(){return O},r0:function(){return I},FV:function(){return T},DS:function(){return C},HI:function(){return y},LJ:function(){return S},QF:function(){return w}});n(82526),n(57327),n(88449),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070);var r=n(29439),o=n(4942),a=(n(21249),n(57640),n(9924),n(47941),n(41539),n(64211),n(2490),n(41874),n(67294)),i=n(4480),u=n(74059),c=n(37518),s=n(9058),l=n(41943),f=n(22841),d=n(39878),p={id:null,loaded:!1,fullyLoaded:!1,loading:!0,processing:!1,isOwner:!1,spaceSlug:"",roomSlug:"",spaceId:"",roomId:"",name:"",slug:"",version:0,createdAt:"",updatedAt:"",cardIds:[]},v=(0,i.xu)({key:"dashboard",default:p}),m=n(45215);function g(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function b(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:"";return e.trim().startsWith("

    ")?e:"

    ".concat(e,"

    ")},Z=function(e){var t=e.id,n=e.name,r=e.slug,o=e.version,i=e.createdAt,c=e.updatedAt,s=e.snapshot,l=s.uiState,f=s.items||[],d=[],v=f.reduce((function(e,t){var n=t.type,r=t.cardID,o=t.id,i=void 0===o?r:o,c=t.chartID,s=t.chartId,l=void 0===s?c:s,f=t.nodeID,v=t.nodeId,m=void 0===v?f:v,g=t.chartMetadata,y=t.context,A=t.attributes,k=t.cardAttributes,Z=void 0===k?A:k,E=t.contextScope,j=t.text,x=(0,u.Z)(t,h);if(E=E||[y||(null===g||void 0===g?void 0:g.context)||l],n=O[n]||n,e[i]={id:i,type:n,nodeId:m,contextScope:E},w.has(n)){if(d.push(i),"text"===n)return e[i]=S(S(S({},p.Xw),Z),{},{text:P(j||Z.text)},e[i]),e;e[i]=function(e){var t=e.aggregationMethod,n=e.chartType,r=e.dimensions,o=void 0===r?[]:r,i=e.selectedDimensions,c=void 0===i?o||[]:i,s=e.filteredLabels,l=void 0===s?[]:s,f=e.selectedLabels,d=void 0===f?l||[]:f,v=e.selectedNodeIds,m=void 0===v?[]:v,g=e.selectedNodes,h=void 0===g?m||[]:g,y=e.selectedInstances,O=void 0===y?[]:y,w=e.chartId,A=e.groupBy,k=e.groupingMethod,P=e.id,Z=(e.host,(0,u.Z)(e,b));return S(S({},p.v),{},{aggregationMethod:t||"avg",chartType:n||"line",selectedDimensions:Array.isArray(c)?c:[],selectedLabels:Array.isArray(d)?d:d&&"object"===(0,a.Z)(d)?Object.keys(d):[],selectedNodes:Array.isArray(h)?h:[],selectedInstances:Array.isArray(O)?O:w?[w]:[],groupBy:Array.isArray(A)?A:A?[A]:["dimension"],groupingMethod:k||"average",id:P},Z)}(S(S(S({},x),Z),{},{chartId:l},e[i]))}return e}),{});return{layout:l?l.map((function(e){return k(v,e)})):f.map((function(e){var t=e.layout;return k(v,t)})),dashboard:{id:t,name:n,slug:r,version:o,createdAt:i,updatedAt:c,cardIds:d},cards:v}};t.Z=function(e,t){var n=t.spaceId,a=t.spaceSlug,i=t.roomId,u=t.roomSlug,c=(0,l.useState)(!0),d=(0,o.Z)(c,2),p=d[0],g=d[1],b=(0,f._8)((function(t){var o=t.set;return(0,r.Z)(s().mark((function t(){var r,c,l,f,d,p;return s().wrap((function(t){for(;;)switch(t.prev=t.next){case 0:return g(!0),o((0,m.g4)(e),!0),t.next=4,(0,v.AK)(n,i,e);case 4:r=t.sent,c=r.data,l=Z(c),f=l.layout,d=l.dashboard,p=l.cards,o((0,m.Z7)(e),{layout:f,dashboard:S(S({},d),{},{spaceSlug:a,roomSlug:u,spaceId:n,roomId:i}),cards:p}),g(!1),o((0,m.g4)(e),!1);case 10:case"end":return t.stop()}}),t)})))}));return(0,l.useEffect)((function(){e&&b(e)}),[e]),p}},44197:function(e,t,n){"use strict";n.d(t,{E:function(){return o}});var r=n(4480),o={id:null,avatarURL:null,deactivated:!1,email:"",joinedAt:null,name:"",role:""};t.Z=(0,r.cn)({key:"member",default:{}})},34912:function(e,t,n){"use strict";n.d(t,{Bb:function(){return p},Vr:function(){return d},W3:function(){return f},mX:function(){return l}});var r=n(4942),o=(n(21249),n(57640),n(9924),n(41539),n(15581),n(2490),n(34514),n(54747),n(66992),n(33948),n(47941),n(82526),n(57327),n(88449),n(59849),n(38880),n(49337),n(33321),n(69070),n(4480)),a=n(38609),i=n(41331),u=n(44197);function c(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function s(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:[];try{var o=l({spaceSlug:t,roomSlug:n}),a=localStorage.getItem(o);return a?JSON.parse(a):r}catch(i){return r}}});function p(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function v(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:{};return{fullyLoaded:!1,loaded:!1,id:(null===e||void 
0===e?void 0:e.nd)||(null===e||void 0===e?void 0:e.mg)||null,mg:null,nd:null,isDeleted:!1,name:"",version:"",ni:null,labels:{},labelKeys:[],hw:{architecture:"",cpuFrequency:"",cpus:"",memory:"",diskSpace:"",virtualization:"",container:""},os:{id:"",nm:"",v:"",kernel:{nm:"",v:""}},capabilities:{},state:"",isProtobufCapable:!0,urls:[],accessCount:0,lastAccessTime:"",updateSeverity:"",hasAccessibleData:!1,isLive:!1,nodeStatus:null}};t.Z=(0,r.cn)({key:"nodes",default:{}})},96104:function(e,t,n){"use strict";n.d(t,{Lr:function(){return P},WA:function(){return h},XT:function(){return A},aQ:function(){return k},fX:function(){return w},jo:function(){return O},tN:function(){return S},wT:function(){return Z}});var r=n(4942),o=n(29439),a=(n(26699),n(32023),n(41539),n(64211),n(2490),n(41874),n(47941),n(92222),n(85827),n(25387),n(72608),n(15581),n(34514),n(54747),n(57327),n(88449),n(59849),n(21249),n(57640),n(9924),n(82526),n(38880),n(49337),n(33321),n(69070),n(4480)),i=n(4822),u=n(96929),c=n(37518),s=n(97945);function l(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function f(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:{},t=e.key,n=e.extraKey,r=e.omit,o=e.keepAll,i=void 0!==o&&o,u=e.merge,s=void 0===u||u,l=(0,c.UL)();return(0,a.sJ)(b({extraKey:n,key:t||l,omit:r,keepAll:i,merge:s}))},w=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.key,n=e.extraKey,r=e.omit,o=e.keepAll,i=void 0!==o&&o,u=e.emptyIfAll,s=void 0===u||u,l=e.merge,f=void 0!==l&&l,d=e.scoped,p=void 0!==d&&d,v=(0,c.UL)();return(0,a.sJ)(h({extraKey:n,key:t||v,omit:r,keepAll:i,emptyIfAll:s,merge:f,scoped:p}))},A=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.key,n=e.extraKey,r=(0,c.UL)();return(0,i.TQ)("selectedNodeIds",{key:t||r,extraKey:n,flavour:"arr"})},k=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.key,n=e.extraKey,r=e.merge,o=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.key,n=e.extraKey,r=e.merge,o=void 0===r||r,a=(0,c.UL)();return(0,i.by)("selectedNodeIds",{key:t||a,extraKey:n,flavour:"arr",merge:o,defaultValue:m})}({key:t,extraKey:n,merge:void 0!==r&&r});return[o,A({key:t,extraKey:n})]},P=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.key,n=e.extraKey,r=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.key,n=e.extraKey,r=(0,c.UL)();return(0,i.by)("excludedNodeIds",{key:t||r,extraKey:n,flavour:"arr",defaultValue:m})}({key:t,extraKey:n}),o=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.key,n=e.extraKey,r=(0,c.UL)();return(0,i.TQ)("excludedNodeIds",{key:t||r,extraKey:n,flavour:"arr"})}({key:t,extraKey:n});return[r,o]},Z=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.key,n=e.extraKey,r=e.omit,o=e.merge,i=void 0!==o&&o,u=(0,c.UL)();return(0,a.FV)(g({key:t||u,extraKey:n,omit:r,merge:i}))}},97945:function(e,t,n){"use strict";n.d(t,{$E:function(){return C},HZ:function(){return L},Hg:function(){return Q},Jz:function(){return Y},M2:function(){return H},Mm:function(){return R},Ne:function(){return T},Ng:function(){return W},S6:function(){return I},TQ:function(){return Z},WR:function(){return ee},Xw:function(){return N},Y0:function(){return J},d$:function(){return $},dJ:function(){return P},e5:function(){return 
B},iy:function(){return j},m3:function(){return q},mf:function(){return F},pG:function(){return te},ul:function(){return k},wU:function(){return x},zP:function(){return E}});var r=n(93433),o=n(4942),a=n(15861),i=(n(41539),n(88674),n(21249),n(57640),n(9924),n(85827),n(25387),n(2490),n(72608),n(92222),n(57327),n(88449),n(59849),n(26699),n(32023),n(66992),n(33948),n(47941),n(82526),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(64687)),u=n.n(i),c=n(67294),s=n(4480),l=n(89250),f=n(38609),d=n(41331),p=n(74059),v=n(58502),m=n(91850),g=n(96929),b=n(37518),h=n(48854),y=n(18761);function S(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function O(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:{},t=e.ids,n=e.severity,r=L();return(0,s.sJ)(z(O({ids:t||r},n&&{severities:[n]})))},V=(0,s.CG)({key:"fnNodeIdsState",get:function(e){return function(t){var n=(0,t.get)(h.Z);return e.filter((function(e){var t;return null===(t=n[e])||void 0===t||null===(t=t.capabilities.funcs)||void 0===t?void 0:t.enabled}))}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),H=function(e){var t=(0,g.zN)();return(0,s.sJ)(V(e||t))},Y=(0,s.nZ)({key:"nodesState/initialize",get:function(e){return(0,e.get)(h.Z)},set:function(e,t){var n=e.set,r=t.values,o=t.merge;return n(h.Z,(function(e){var t=o?(0,f.Z)(e,r):r;return(0,d.ZP)(t,e)?e:t}))}}),$=function(){return(0,s.sJ)(Y)},X=function(){var e=(0,b.A3)();return e&&"".concat(e,"/nodes")},q=function(e){var t=X();return"".concat(t,"/").concat(e)},Q=function(){var e=X();return function(t){return"".concat(e,"/").concat(t)}},ee=function(e){var t=(arguments.length>1&&void 0!==arguments[1]?arguments[1]:{}).alertId,n=function(){var e=(0,l.s0)(),t=X();return(0,c.useCallback)((function(n,r){e("".concat(t,"/").concat(n),r?{state:r}:"")}),[t,e])}();return(0,c.useCallback)((function(){return n(e,{alertId:t})}),[n])},te=function(){var e;return null===(e=(0,l.bS)("/spaces/:spaceSlug/rooms/:roomSlug/nodes/:nodeId"))||void 0===e||null===(e=e.params)||void 0===e?void 0:e.nodeId}},82902:function(e,t,n){"use strict";n.d(t,{Ly:function(){return s},Ud:function(){return u},V6:function(){return a},XY:function(){return c}});var r=n(29439),o=(n(74916),n(15306),n(21249),n(57640),n(9924),n(9653),n(62200)),a=function(e){return 1===e?"Node":"Nodes"},i=[1,26],u=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:i;if(!e||"unknown"===e)return!1;var n=function(e){return e.replace(/^v/,"").split(".")}(e).map((function(e){return Number(e)})),o=(0,r.Z)(n,3),a=o[0],u=o[1],c=o[2];return!(a&&!isNaN(a)&&!isNaN(u))||(at[0])&&(ut[1])&&c0&&void 0!==arguments[0]?arguments[0]:{},t=e.all,n=e.internal,i=(0,a.J7)().map((function(e){var t=e.id;return{label:e.name,value:t}})),u=[];return n&&u.push(o.mN),t&&u.push(o.k_),u.push.apply(u,(0,r.Z)(i)),u}},4822:function(e,t,n){"use strict";n.d(t,{dz:function(){return R},D0:function(){return F},tk:function(){return B},I0:function(){return U},by:function(){return _},Sf:function(){return G},TQ:function(){return L}});var r={};n.r(r),n.d(r,{global:function(){return O},specific:function(){return j}});var o={};n.r(o),n.d(o,{global:function(){return I},specific:function(){return k}});var 
a=n(4942),i=n(84506),u=n(29439),c=(n(92222),n(47042),n(85827),n(41539),n(25387),n(2490),n(72608),n(47941),n(82526),n(57327),n(88449),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(4480)),s=n(37518),l={arr:"arr",bool:"bool",int:"int",val:"val",obj:"obj",dec:"dec"},f=n(45987),d=(n(21249),n(57640),n(9924),n(58591)),p=n(14600),v=["after","before","correlation","utc","forcePlay"],m=["correlation","forcePlay"],g=["highlight"];function b(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function h(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{},n=[],r=Object.keys(e).reduce((function(r,a){var i=a.split("-").reverse()[0],u=i;if(l[u]||(l[u]||(u=E(e[a])),l[u]||(u=E(t[a]))),"undefined"===typeof e[a])return n.push(o+a),delete r[a],r;var c=Z[u]||Z[a]||Z.default,s="".concat(o).concat(a).concat(l[i]?"":"-".concat(u));return r[s]=c(e[a]),r[s]||n.push(s),r}),{});(0,d.xs)(r),(0,d.bp)(n)}))}]};n(9653);function x(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function C(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{},n=t.key,r=t.extraKey,o=t.flavour,a=t.defaultValue,i=t.merge,u=(0,c.sJ)(R({key:n,param:e,extraKey:r,flavour:o,merge:i}));return"undefined"===typeof u?a:u},L=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.key,r=t.extraKey,o=t.flavour;return(0,c.Zl)(R({key:n,param:e,extraKey:r,flavour:o}))},U=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.key,r=t.extraKey,o=t.flavour,a=t.defaultValue,i=t.merge;return[_(e,{key:n,extraKey:r,flavour:o,defaultValue:a,merge:i}),L(e,{key:n,extraKey:r,flavour:o})]},B=function(e,t){var n=t.extraKey,r=t.defaultValue,o=t.flavour,a=t.merge,i=(0,s.UL)();return _(e,{key:i,extraKey:n,defaultValue:r,flavour:o,merge:a})},G=function(e,t){var n=t.extraKey,r=t.flavour,o=(0,s.UL)();return L(e,{key:o,extraKey:n,flavour:r})},F=function(e,t){var n=t.extraKey,r=t.defaultValue,o=t.flavour,a=t.merge;return[B(e,{extraKey:n,defaultValue:r,flavour:o,merge:a}),G(e,{extraKey:n,flavour:o})]}},8018:function(e,t,n){"use strict";n.d(t,{Bl:function(){return S},GT:function(){return O},O9:function(){return d},R2:function(){return p},RO:function(){return m},ar:function(){return g},og:function(){return y},qn:function(){return b},tn:function(){return v},xj:function(){return h}});var r=n(84506),o=n(29439),a=(n(41539),n(88674),n(47042),n(93017)),i=n(68008),u=n(13477),c=n(74059),s=n(37518),l=n(25819),f=n(1229),d=function(e){var t=(arguments.length>1&&void 0!==arguments[1]?arguments[1]:{}).silent,n=void 0!==t&&t,r=(0,s.tE)(e,"spaceId"),f=(0,a.Z)(),d=(0,o.Z)(f,2),p=d[0],v=d[1],m=(0,u.Iy)("isAnonymous"),g=(0,c.Q6)();return(0,i.Z)((function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};if(m||g)return Promise.resolve().then((function(){return!n&&p({header:"Rooms",text:"Connect to cloud to be able to save your settings!"})}));var o=(0,l.R$)(r,e,t);return o.then((function(){return!n&&p({header:"Rooms",text:"Room successfully updated!"})})).catch((function(e){return!e.isCancel&&v(e)})),o}),[e,m,g,r])},p=function(e,t){var n=(0,a.Z)(),r=(0,o.Z)(n,2),u=r[0],c=r[1],f=(0,s.tE)(t,"name");return(0,i.Z)((function(){var n=(0,l.GX)(e,t);return n.then((function(){return 
u({header:"Rooms",text:"Room ".concat(f," was successfully deleted!")})})).catch((function(e){return!e.isCancel&&c(e)})),n}),[t,e])},v=function(e,t){var n=(0,a.Z)(),c=(0,o.Z)(n,2)[1],d=(0,u.jr)(),v=(0,i.Z)((function(){var n=(0,l.EP)(e,t,d);return n.catch((function(e){return!e.isCancel&&c(e)})),n}),[e,t,d]),m=(0,f.IT)(t,"ids"),g=(0,r.Z)(m),b=g[0],h=g.slice(1),y=p(e,t),S=(0,s.tE)(t,"untouchable");return h.length||b!==d||S?v:y},m=function(e){var t=(0,a.Z)(),n=(0,o.Z)(t,2),r=n[0],u=n[1];return(0,i.Z)((function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},n=(0,l.dB)(e,t);return n.then((function(){return r({header:"Rooms",text:"Room ".concat(t.name," was successfully created!")})})).catch((function(e){return!e.isCancel&&u(e)})),n}),[e])},g=function(e,t){var n=(0,a.Z)(),r=(0,o.Z)(n,2),u=r[0],c=r[1];return(0,i.Z)((function(n){var r=(0,l.r7)(e,t,n);return r.then((function(){return u({header:"Rooms",text:"Member".concat(n.length>1?"s":""," successfully added!")})})).catch((function(e){return!e.isCancel&&c(e)})),r}),[e])},b=function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=n.onSuccess,c=n.onFail,f=(0,u.jr)(),d=(0,a.Z)(),p=(0,o.Z)(d,2),v=p[0],m=p[1],g=(0,s.s)(t);return(0,i.Z)((function(){var n=(0,l.r7)(e,t,[f]);return n.then((function(){g({isMember:!0}),v({header:"Rooms",text:"Successfully joined the room"}),null===r||void 0===r||r()})).catch((function(e){!e.isCancel&&m(e),null===c||void 0===c||c()})),n}),[t,g,e])},h=function(e,t){var n=(0,a.Z)(),r=(0,o.Z)(n,2),u=r[0],c=r[1];return(0,i.Z)((function(n){var r=(0,l.EP)(e,t,n);return r.then((function(){return u({header:"Rooms",text:"Member".concat(n.length>1?"s":""," successfully removed!")})})).catch((function(e){return!e.isCancel&&c(e)})),r}),[t,e])},y=function(e,t){var n=(0,a.Z)(),r=(0,o.Z)(n,2),u=r[0],c=r[1];return(0,i.Z)((function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},r=(0,l.Uf)(e,t,n);return r.then((function(){return u({header:"Rooms",text:"Nodes successfully added!"})})).catch((function(e){return!e.isCancel&&c(e)})),r}),[t,e])},S=function(e){var t=(0,a.Z)(),n=(0,o.Z)(t,2),r=n[0],u=n[1];return(0,i.Z)((function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],n=(0,l.OE)(e,t);return n.then((function(){return r({header:"Rooms",text:"Node successfully deleted!"})})).catch((function(e){return!e.isCancel&&u(e)})),n}),[e])},O=function(e,t){var n=(0,a.Z)(),r=(0,o.Z)(n,2),u=r[0],c=r[1];return(0,i.Z)((function(n){var r=(0,l.r4)(e,t,n);return r.then((function(){return u({header:"Rooms",text:"Nodes successfully removed!"})})).catch((function(e){return!e.isCancel&&c(e)})),r}),[t,e])}},99322:function(e,t,n){"use strict";n.d(t,{e:function(){return i},u:function(){return u}});var r=n(4480),o=n(37518),a={ids:(0,r.xu)({key:"roomAlertIds",default:[]}),error:(0,r.xu)({key:"roomAlertError",default:null}),updatedAt:(0,r.xu)({key:"roomAlertsUpdatedAt",default:""}),loaded:(0,r.xu)({key:"roomAlertsLoaded",default:!1})},i=(0,r.CG)({key:"roomAlertState",get:function(e){var t=e.id,n=e.key;return function(e){return(0,e.get)(a[n](t))}},set:function(e){var t=e.id,n=e.key;return function(e,r){(0,e.set)(a[n](t),r)}}}),u=function(e){return function(e,t){return(0,r.sJ)(i({id:e,key:t}))}((0,o.UL)(),e)}},25819:function(e,t,n){"use strict";n.d(t,{EP:function(){return E},Fz:function(){return b},GX:function(){return P},Mn:function(){return y},OE:function(){return x},R$:function(){return k},Uf:function(){return j},dB:function(){return w},fv:function(){return A},j2:function(){return 
O},mP:function(){return I},r4:function(){return C},r7:function(){return Z}});var r=n(4942),o=n(45987),a=(n(21249),n(57640),n(9924),n(85827),n(41539),n(25387),n(2490),n(72608),n(47941),n(92222),n(69826),n(31672),n(59461),n(82526),n(57327),n(88449),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(26398)),i=n(66152),u=n(53338),c=n(62200),s=n(82902),l=n(78710),f=["nm","capabilities","v"],d=["context","chartType"],p=["name"];function v(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function m(e){for(var t=1;t2&&void 0!==arguments[2]?arguments[2]:[],r=arguments.length>3?arguments[3]:void 0,o=r.after,u=r.before;return(0,l.Ly)(e)?a.Z.get("/api/v2/contexts?scope_nodes=".concat(n.join("|")||"*"),{baseURL:window.envSettings.agentApiUrl,transform:function(e){var t=e.contexts,n=void 0===t?{}:t,r=e.versions,o=void 0===r?{}:r;return{results:Object.keys(n).map((function(e){return h(m(m({},(0,i.k5)(n[e])),{},{id:e,context:e}))})),versions:(0,i.k5)(o)}}}):a.Z.post("/api/v3/spaces/".concat(e,"/rooms/").concat(t,"/contexts"),{scope:{contexts:["*"],nodes:n},selectors:{contexts:[],nodes:[]},window:{after:o,before:u}},{transform:function(e){var t=e.contexts,n=void 0===t?{}:t,r=e.versions,o=void 0===r?{}:r;return{results:Object.keys(n).map((function(e){return h(m(m({},(0,i.k5)(n[e])),{},{id:e,context:e}))})),versions:(0,i.k5)(o)}}})},S=function(e,t){return function(n){return{results:n.results.map((function(n){return m(m({},n),{},{spaceId:e,roomId:t})}))}}},O=function(e,t){return a.Z.get("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/dashboards"),{transform:S(e,t)})},w=function(e,t){return a.Z.post("/api/v1/spaces/".concat(e,"/rooms"),t)},A=function(e,t){return a.Z.get("/api/v1/spaces/".concat(e,"/rooms/").concat(t))},k=function(e,t,n){return a.Z.patch("/api/v1/spaces/".concat(e,"/rooms/").concat(t),n)},P=function(e,t){return a.Z.delete("/api/v1/spaces/".concat(e,"/rooms/").concat(t))},Z=function(e,t,n){return a.Z.post("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/members"),n)},E=function(e,t,n){return a.Z.delete("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/members?account_ids=").concat(n))},j=function(e,t,n){return a.Z.post("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/claimed-nodes"),n)},x=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];return a.Z.delete("/api/v1/spaces/".concat(e,"/nodes"),{data:{node_ids:t}})},C=function(e,t,n){return a.Z.delete("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/claimed-nodes?node_ids=").concat(n))},I=function(e,t,n,u){var c,s=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],t=function(e){return e.length?{node_ids:e}:{}}(arguments.length>1&&void 0!==arguments[1]?arguments[1]:[]);return e.length?e.reduce((function(e,t){var n=t.name,r=(0,o.Z)(t,p);return e.provider[n]=r,e}),{filter:m({},t),provider:{}}):{filter:m({},t),provider:{nodes:{},charts:{},spaceroom:{},alerts:{},dashboards:{}}}}(n,u);return a.Z.post("/api/v2/spaces/".concat(e,"/rooms/").concat(t,"/overview/stats"),s,{transform:(c=n,function(e){var t=(0,i.k5)(e,{depth:3}),n=t.results,o=t.providers,a=new Date;return c.reduce((function(e,t){var i=t.name,u=t.projections;return m(m({},e),{},(0,r.Z)({},i,m(m(m({},o.find((function(e){return 
e.provider===i}))),n["".concat(i,"Stats")]),{},{updatedAt:a},u?(0,r.Z)({},"".concat(u,"_updatedAt"),a):{})))}),{})})})}},91850:function(e,t,n){"use strict";n.d(t,{IV:function(){return u},xh:function(){return c}});var r=n(29439),o=n(4480),a=n(78710),i=n(75674),u=(0,o.cn)({key:"currentRoomSlug",default:null}),c=(0,o.xu)({key:"roomIdBySlug",default:"",effects:function(e){var t=(0,r.Z)(e,2),n=t[0],o=t[1];return(0,a.Ly)(n)&&o===a.Fb?[function(e){var t=e.setSelf;"get"===e.trigger&&t(a.Fb)}]:[]}});t.ZP=(0,o.xu)({key:"room",default:i.Z,effects:function(e){return a.ZP&&e===a.Fb?[function(e){var t=e.setSelf;"get"===e.trigger&&t(a.xZ)}]:[]}})},53338:function(e,t,n){"use strict";n.d(t,{AS:function(){return u},TY:function(){return r},ce:function(){return a},ez:function(){return i},i_:function(){return o}});var r="All nodes",o="all-nodes",a={created:!1,reachable:!0,stale:!0,unreachable:!1},i={created:!0,reachable:!0,stale:!1,unreachable:!1},u={offline:"unreachable",unseen:"created",stale:"stale",live:"reachable"}},5934:function(e,t,n){"use strict";n.d(t,{oq:function(){return l},$3:function(){return v},Mg:function(){return m},ie:function(){return d},L:function(){return p}});var r=n(93433),o=(n(2707),n(66992),n(41539),n(70189),n(78783),n(88921),n(96248),n(13599),n(11477),n(64362),n(15389),n(90401),n(45164),n(91238),n(54837),n(87485),n(56767),n(76651),n(61437),n(35285),n(39865),n(33948),n(92222),n(4480)),a=n(91966),i=n.n(a),u=n(47133),c=n(37518),s={ids:(0,o.xu)({key:"roomDashboardIds",default:[]}),loaded:(0,o.xu)({key:"roomDashboardsLoaded",default:!1})},l=(0,o.CG)({key:"roomDashboardState",get:function(e){var t=e.id,n=e.key;return function(e){return(0,e.get)(s[n](t))}},set:function(e){var t=e.id,n=e.key;return function(e,r){return(0,e.set)(s[n](t),r)}}}),f=function(e,t){return(0,o.sJ)(l({id:e,key:t}))},d=function(){var e=(0,c.UL)();return f(e,"ids")},p=function(){var e=(0,c.UL)();return f(e,"loaded")},v=(0,o.CG)({key:"roomDashboardsState/add",set:function(e){return function(t,n){var o=t.set,a=t.get,i=function(e){return a((0,u.Y3)({id:e,key:"name"}))};o(s.ids(e),(function(e){return(0,r.Z)(new Set([].concat((0,r.Z)(e),[n.id]))).sort((function(e,t){return i(e).localeCompare(i(t),void 0,{sensitivity:"accent",ignorePunctuation:!0})}))}))}}}),m=(0,o.CG)({key:"roomDashboardsState/remove",set:function(e){return function(t,n){(0,t.set)(s.ids(e),(function(e){return i()(e,n)}))}}})},75674:function(e,t){"use strict";t.Z={loaded:!1,fullyLoaded:!1,id:null,isMember:!1,name:"",memberCount:null,nodeCount:null,description:"",private:!1,slug:"",spaceId:null,createdAt:"",untouchable:!0}},1229:function(e,t,n){"use strict";n.d(t,{Ci:function(){return f},r3:function(){return g},e1:function(){return p},lA:function(){return v},GA:function(){return h},IT:function(){return d}});var r=n(93433),o=(n(2707),n(66992),n(41539),n(70189),n(78783),n(88921),n(96248),n(13599),n(11477),n(64362),n(15389),n(90401),n(45164),n(91238),n(54837),n(87485),n(56767),n(76651),n(61437),n(35285),n(39865),n(33948),n(92222),n(4480)),a=n(91966),i=n.n(a),u=n(34912),c=n(37518),s={ids:(0,o.xu)({key:"roomMemberIds",default:[]}),error:(0,o.xu)({key:"roomMembersError",default:null}),updatedAt:(0,o.xu)({key:"roomMembersUpdatedAt",default:""}),loaded:(0,o.xu)({key:"roomMembersLoaded",default:!1})},l=n(8018),f=(0,o.CG)({key:"roomMemberState",get:function(e){var t=e.id,n=e.key;return function(e){return(0,e.get)(s[n](t))}},set:function(e){var t=e.id,n=e.key;return function(e,r){(0,e.set)(s[n](t),r)}}}),d=function(e,t){return(0,o.sJ)(f({id:e,key:t}))},p=function(){var 
e=(0,c.UL)();return d(e,"ids")},v=function(){var e=p();return(0,u.Vr)(e)},m=(0,o.CG)({key:"roomMembersState/add",set:function(e){return function(t,n){var o=t.set,a=t.get,i=function(e){return a((0,u.mX)({id:e,key:"name"}))};o(s.ids(e),(function(e){return(0,r.Z)(new Set([].concat((0,r.Z)(e),(0,r.Z)(n)))).sort((function(e,t){return i(e).localeCompare(i(t),void 0,{sensitivity:"accent",ignorePunctuation:!0})}))}))}}}),g=function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=n.onSuccess,a=n.onFail,i=(0,l.ar)(e,t);return(0,o._8)((function(e){var n=e.set;return function(e){try{i(e),n(m(t),e),r&&r(e)}catch(o){a&&a(e)}}}),[i,e,r,a])},b=(0,o.CG)({key:"roomMembersState/remove",set:function(e){return function(t,n){return(0,t.set)(s.ids(e),(function(e){return i()(e,n)}))}}}),h=function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=n.onSuccess,a=n.onFail,i=(0,l.xj)(e,t);return(0,o._8)((function(e){var n=e.set;return function(e){try{i(e),n(b(t),e),r&&r(e)}catch(o){a&&a(e)}}}),[i,e,t,r,a])}},29495:function(e,t,n){"use strict";n.d(t,{gl:function(){return A},c0:function(){return x},jN:function(){return w},xQ:function(){return O},dh:function(){return E},uA:function(){return P},VP:function(){return j},Y6:function(){return Z}});var r=n(93433),o=n(29439),a=n(15861),i=n(4942),u=n(64687),c=n.n(u),s=(n(21249),n(57640),n(9924),n(41539),n(15581),n(2490),n(34514),n(54747),n(57327),n(88449),n(59849),n(92222),n(47941),n(82526),n(38880),n(49337),n(33321),n(69070),n(4480)),l=n(28721),f=n(8018);n(85827);function d(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function p(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{}).shouldPersist,n=void 0===t||t,r=(0,f.O9)(e,{silent:!0});return(0,s._8)((function(t){var i=t.set,u=t.snapshot;return function(){var t=(0,a.Z)(c().mark((function t(a){var s,l,f,d,p,v,m,g,b=arguments;return c().wrap((function(t){for(;;)switch(t.prev=t.next){case 0:for(s=b.length,l=new Array(s>1?s-1:0),f=1;f2&&void 0!==arguments[2]?arguments[2]:{},a=n.onSuccess,c=n.onFail,s=(0,d.og)(e,t);return(0,u._8)((function(e){var n=e.set;return function(){var e=(0,r.Z)(i().mark((function e(r){var u,l,f,d=arguments;return i().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return u=(d.length>1&&void 0!==d[1]?d[1]:{}).makeCallback,l=void 0===u||u,e.prev=1,e.next=4,s(r);case 4:(f=e.sent)&&r.reduce((function(e,t,n){var r;return null!==(r=f[n])&&void 0!==r&&r.errorCode?e:[].concat((0,o.Z)(e),[t])}),[]),n(w(t),r),l&&a&&a(r),e.next=13;break;case 10:e.prev=10,e.t0=e.catch(1),l&&c&&c(r);case 13:case"end":return e.stop()}}),e,null,[[1,10]])})));return function(t){return e.apply(this,arguments)}}()}),[s,e,a,c])},k=(0,u.CG)({key:"roomNodesState/obsolete",get:function(){return function(){return null}},set:function(e){var t=e.roomId,n=e.nodeIds;return function(e){var r=e.set;r(p.ids(t),(function(e){return s()(e,n)})),r(p.updatedAt(t),"")}}}),P=(0,u.CG)({key:"roomNodesState/remove",get:function(){return function(){return null}},set:function(e){return function(t,n){var r=t.set;r(p.ids(e),(function(e){return s()(e,n)})),r(p.updatedAt(e),"")}}}),Z=function(e,t){var n=(0,d.Bl)(e);return(0,u._8)((function(e){var o=e.set;return function(){var e=(0,r.Z)(i().mark((function e(r){var a,u,c,s=arguments;return i().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return u=(a=s.length>1&&void 
0!==s[1]?s[1]:{}).onSuccess,c=a.onFail,e.prev=1,e.next=4,n(r);case 4:r.map((function(e){o((0,l.zP)({id:e,key:"state"}),"deleted")})),t.map((function(e){o(k({roomId:e,nodeIds:r}))})),u&&u(r),e.next=12;break;case 9:e.prev=9,e.t0=e.catch(1),c&&c();case 12:case"end":return e.stop()}}),e,null,[[1,9]])})));return function(t){return e.apply(this,arguments)}}()}),[n,e])},E=function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},o=n.onSuccess,a=n.onFail,c=(0,d.GT)(e,t);return(0,u._8)((function(e){var n=e.set;return function(){var e=(0,r.Z)(i().mark((function e(r){var u,s,l,f=arguments;return i().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return s=(u=f.length>1&&void 0!==f[1]?f[1]:{}).onSuccess,l=u.onFail,e.prev=1,e.next=4,c(r);case 4:n(P(t),r),o&&o(r),s&&s(),e.next=13;break;case 9:e.prev=9,e.t0=e.catch(1),a&&a(r),l&&l();case 13:case"end":return e.stop()}}),e,null,[[1,9]])})));return function(t){return e.apply(this,arguments)}}()}),[c,e,t,o,a])}},37518:function(e,t,n){"use strict";n.d(t,{ZB:function(){return Z},n2:function(){return j},Dq:function(){return I},jW:function(){return F},UL:function(){return L},A3:function(){return B},vf:function(){return M},Hm:function(){return U},Jb:function(){return G},IX:function(){return N},iC:function(){return D},HM:function(){return _},tE:function(){return x},TF:function(){return R},s:function(){return C}});var r=n(15861),o=n(4942),a=n(45987),i=n(64687),u=n.n(i),c=(n(41539),n(15581),n(2490),n(34514),n(54747),n(57327),n(88449),n(59849),n(92222),n(47941),n(82526),n(38880),n(49337),n(33321),n(69070),n(4480)),s=n(95383),l=n(74059),f=n(53338),d=n(4474),p=n(91850),v=n(75674),m=n(8018),g=n(83338),b=n(29495),h=n(67294),y=n(19368),S=n(23383),O=n(71172),w=function(e){var t="".concat((0,y.P)()).concat((0,S.e)(e));return(0,h.useCallback)((function(e){return(0,O.c)({key:t,handleResults:function(t){return t.results.concat(e)}})}),[t])},A=["settings","metrics"];function k(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function P(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{},n=t.key,a=t.shouldPersist,i=t.onSuccess,s=t.onFail,l=(0,c.Zl)(j({id:e,key:n})),f=(0,m.O9)(e);return(0,c._8)((function(t){var c=t.snapshot;return function(){var t=(0,r.Z)(u().mark((function t(r){var d,p;return u().wrap((function(t){for(;;)switch(t.prev=t.next){case 0:if(d=n?(0,o.Z)({},n,r):r,l((function(e){return P(P({},e),d)})),a){t.next=4;break}return t.abrupt("return");case 4:return t.next=6,c.getPromise(j({id:e}));case 6:return p=t.sent,t.prev=7,t.next=10,f(d);case 10:i&&i(r),t.next=17;break;case 13:t.prev=13,t.t0=t.catch(7),l(n?p[n]:p),s&&s(r);case 17:case"end":return t.stop()}}),t,null,[[7,13]])})));return function(e){return t.apply(this,arguments)}}()}),[e,n,a,i,s])},I=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.onSuccess,o=t.onFail,a=(0,m.RO)(e),i=w(e);return(0,c._8)((function(t){var c=t.set;return function(){var t=(0,r.Z)(u().mark((function t(r){var l,f,d;return u().wrap((function(t){for(;;)switch(t.prev=t.next){case 0:return t.prev=0,t.next=3,a(r);case 3:return l=t.sent,f=l.data,d=P(P(P({},r),{},{spaceId:e},f),{},{loaded:!0}),t.next=8,i(d);case 8:c(E(),[d]),c((0,s.oC)(e),d),n&&n(d),t.next=16;break;case 13:t.prev=13,t.t0=t.catch(0),o&&o(r);case 16:case"end":return t.stop()}}),t,null,[[0,13]])})));return function(e){return 
t.apply(this,arguments)}}()}),[a,e,n,o,i])},T=function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},o=n.onSuccess,a=n.onFail,i=(0,c.rb)(j({id:e})),l=x(e,"spaceId");return(0,c._8)((function(n){var c=n.snapshot,f=n.set,v=n.reset;return(0,r.Z)(u().mark((function n(){var r,m,g;return u().wrap((function(n){for(;;)switch(n.prev=n.next){case 0:return n.next=2,c.getPromise(j({id:e}));case 2:return r=n.sent,n.next=5,c.getPromise((0,s.q3)({id:r.spaceId}));case 5:return m=n.sent,g=(0,d.rZ)(m.filter((function(t){return t.id!==e}))),n.prev=7,n.next=10,t();case 10:v((0,p.xh)([l,r.slug])),f(p.IV,g),f((0,s.v0)(l),r.id),i(),o&&o(r),n.next=20;break;case 17:n.prev=17,n.t0=n.catch(7),a&&a(r);case 20:case"end":return n.stop()}}),n,null,[[7,17]])})))}),[t,l,e,o,a])},D=function(e,t){var n=x(e,"spaceId"),r=(0,m.tn)(n,e);return T(e,r,t)},N=function(e,t){var n=x(e,"spaceId"),r=(0,m.R2)(n,e);return T(e,r,t)},M=function(){return(0,c.sJ)(p.IV)},R=function(e){return(0,c.Zl)(p.IV,e)},_=function(e,t){return(0,c.sJ)((0,p.xh)([e,t]))},L=function(e){var t=(0,l.th)(),n=M();return _(t,e||n)},U=function(e){var t=L();return x(t,e)},B=function(){var e=(0,l.uk)(),t=M();return e&&t&&"/spaces/".concat(e,"/rooms/").concat(t)},G=function(){return(0,s.J7)().filter((function(e){return e.slug===f.i_}))[0]||{id:""}},F=function(){var e=G().id,t=void 0===e?"":e;return"member"===(0,l.OS)("roleInSpace")&&!!t}},83338:function(e,t,n){"use strict";var r=n(4480);t.Z=(0,r.xu)({key:"roomSettings",default:{groupMode:""}})},310:function(e,t,n){"use strict";n.d(t,{y:function(){return r}});var r=function(e,t){return t.untouchable?1:e.untouchable?-1:e.name.localeCompare(t.name,void 0,{sensitivity:"accent",ignorePunctuation:!0})}},30266:function(e,t,n){"use strict";n.d(t,{$F:function(){return b},A6:function(){return g},NX:function(){return p},Ns:function(){return v},Q0:function(){return y},oq:function(){return d},wh:function(){return m}});var r=n(4942),o=n(45987),a=(n(21249),n(57640),n(9924),n(85827),n(41539),n(25387),n(2490),n(72608),n(47941),n(82526),n(57327),n(88449),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(26398)),i=n(66152),u=n(62200),c=n(64358),s=["permissions"];function l(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function f(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{}).defaultParam,n=void 0!==t&&t;return a.Z.get((0,c.W)({spaceId:e}),{params:{default:n},transform:h(e)})}},58502:function(e,t,n){"use strict";n.d(t,{$V:function(){return i},E3:function(){return a},bo:function(){return u}});var r=n(4480),o=n(78710),a={createdAt:"",description:"",iconURL:"",id:"",error:null,loaded:!1,name:"",slug:""},i=(0,r.cn)({key:"currentSpaceSlug",default:null}),u=(0,r.xu)({key:"spaceIdBySlug",default:"",effects:function(e){return o.ZP&&e===o.Xh?[function(e){var t=e.setSelf;"get"===e.trigger&&t(o.HM)}]:[]}});t.ZP=(0,r.xu)({key:"space",default:a,effects:function(e){return(0,o.Ly)(e)?[function(e){var t=e.setSelf;"get"===e.trigger&&t(o.aD)}]:[]}})},49489:function(e,t,n){"use strict";n.d(t,{C6:function(){return u},FC:function(){return o},Fi:function(){return i},Hh:function(){return c},Vc:function(){return r},go:function(){return a}});var r={free:{level:0,title:"Community",features:{2023.11:["Max 5 Active Connected Nodes","Max 1 Active Custom Dashboards (per Room)"]}},earlyBird:{level:0,title:"Early Bird",features:{2023.02:['"Member" 
role available with existing permissions.','"Member" role with access to _All Nodes_ room.']}},pro:{level:10,title:"Pro",features:{2023.02:["7 days of alert history and auditing events.",'Unlock the "Troubleshooter" role and add members to the space without providing management permission.',"Enable webhook alert notification integration."]}},business:{level:20,title:"Business",features:{2023.02:["Up-to 90 days of alert history and topology events. Never miss an important event while troubleshooting.",'Unlock all user roles including "Manager", "Observer" and "Billing". Empower your teams to excel.',"Enable alert notification integrations (Slack, PagerDuty and more)."]}},enterprise:{features:{2023.11:["Host Netdata Cloud and all its components completely on your premises","Ideal for monitoring air gapped facilities and critical infrastructure","Enable the customization that your organization requires"]}}},o=["year","month"],a={year:"yearly",month:"monthly"},i={ErrInvalidPromotionCode:"Promotion code is invalid",ErrInactivePromotionCode:"Promotion code is inactive",ErrInvalidPromotionCodePlan:"Promotion code cannot be applied to this plan"},u="planEnrolmentError",c={free:{2023.11:{maxNodes:5,maxDashboards:1}}}},80699:function(e,t,n){"use strict";var r=n(29439),o=n(67294),a=n(49489);t.Z=function(){var e=(0,o.useState)(localStorage.getItem(a.C6)),t=(0,r.Z)(e,2),n=t[0],i=t[1];return{isFailure:n,setFailure:(0,o.useCallback)((function(){i(!0),localStorage.setItem(a.C6,!0)}),[]),reset:(0,o.useCallback)((function(){i(!1),localStorage.removeItem(a.C6)}),[])}}},46585:function(e,t,n){"use strict";n.d(t,{Eu:function(){return o},Pf:function(){return a},Qy:function(){return i},T$:function(){return u}});var r=n(67294),o="SPACE_NOT_FOUND",a=5,i=20,u={delete:r.createElement(r.Fragment,null,"Cannot delete last space of account.",r.createElement("br",null),"You can delete your account instead."),leave:r.createElement(r.Fragment,null,"Cannot leave last space of account.",r.createElement("br",null),"You can delete your account instead.")}},57079:function(e,t,n){"use strict";var r=(0,n(4480).cn)({key:"publicSpacesAtom",default:[]});t.Z=r},95383:function(e,t,n){"use strict";n.d(t,{r1:function(){return f},q3:function(){return b},Ad:function(){return l},oC:function(){return m},v0:function(){return g},J7:function(){return y},yE:function(){return v},Gn:function(){return p},WY:function(){return h},y3:function(){return d}});var r=n(93433),o=(n(41539),n(15581),n(2490),n(34514),n(54747),n(26833),n(2707),n(66992),n(70189),n(78783),n(88921),n(96248),n(13599),n(11477),n(64362),n(15389),n(90401),n(45164),n(91238),n(54837),n(87485),n(56767),n(76651),n(61437),n(35285),n(39865),n(33948),n(92222),n(57327),n(88449),n(59849),n(21249),n(57640),n(9924),n(4480)),a=n(37518),i=n(74059),u=n(78710),c={ids:(0,o.xu)({key:"spaceRoomIds",default:[],effects:function(e){return(0,u.Ly)(e)?[function(e){var t=e.onSet,n=e.trigger,o=e.setSelf;"get"===n&&o([u.Fb]),t((function(e){o([u.Fb].concat((0,r.Z)(e)))}))}]:[]}}),updatedAt:(0,o.xu)({key:"spaceRoomsUpdatedAt",default:""}),loaded:(0,o.xu)({key:"spaceRoomsLoaded",default:!1,effects:function(e){return(0,u.Ly)(e)?[function(e){var t=e.trigger,n=e.setSelf;"get"===t&&n(!0)}]:[]}})},s=n(310),l=(0,o.CG)({key:"spaceRoomState",get:function(e){var t=e.id,n=e.key;return function(e){return(0,e.get)(c[n](t))}},set:function(e){var t=e.id,n=e.key;return function(e,r){(0,e.set)(c[n](t),r)}}}),f=function(e,t){Object.values(c).forEach((function(n){return 
e(n(t))}))},d=function(e,t){return(0,o.sJ)(l({id:e,key:t}))},p=function(e){var t=(0,i.th)();return d(t,e)},v=function(){var e=(0,i.th)();return d(e,"ids")},m=(0,o.CG)({key:"spaceRoomsState/add",get:function(){return function(){return null}},set:function(e){return function(t,n){var o=t.set,i=t.get,u=function(e){return i((0,a.n2)({id:e}))};o(l({id:e,key:"ids"}),(function(e){return(0,r.Z)(new Set([].concat((0,r.Z)(e),[n.id]))).sort((function(e,t){return(0,s.y)(u(e),u(t))}))})),o(l({id:e,key:"loaded"}),!0),o(l({id:e,key:"updatedAt"}),"")}}}),g=(0,o.CG)({key:"spaceRoomsState/remove",get:function(){return function(){return null}},set:function(e){return function(t,n){var r=t.set;r(l({id:e,key:"ids"}),(function(e){return e.filter((function(e){return e!==n}))})),r(l({id:e,key:"loaded"}),!0),r(l({id:e,key:"updatedAt"}),"")}}}),b=(0,o.CG)({key:"spaceFullRooms",get:function(e){var t=e.id,n=e.key;return function(e){var r=e.get;return r(l({id:t,key:"ids"})).map((function(e){return r((0,a.n2)({id:e,key:n}))}))}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),h=function(e,t){return(0,o.sJ)(b({id:e,key:t}))},y=function(e){var t=(0,i.th)();return h(t,e)}},74059:function(e,t,n){"use strict";n.d(t,{BN:function(){return b},YH:function(){return g},U7:function(){return m},st:function(){return p},Iw:function(){return O},th:function(){return k},uk:function(){return w},OS:function(){return Z},Zu:function(){return E},$v:function(){return A},VZ:function(){return S},Q6:function(){return P},vu:function(){return h},GM:function(){return y}});var r=n(4942),o=n(93433),a=(n(66992),n(41539),n(70189),n(78783),n(88921),n(96248),n(13599),n(11477),n(64362),n(15389),n(90401),n(45164),n(91238),n(54837),n(87485),n(56767),n(76651),n(61437),n(35285),n(39865),n(33948),n(92222),n(15581),n(2490),n(34514),n(54747),n(21249),n(57640),n(9924),n(47941),n(82526),n(57327),n(88449),n(59849),n(38880),n(49337),n(33321),n(69070),n(4480)),i=n(78710),u=n(49674),c={ids:(0,a.cn)({key:"spaceIds",default:[],effects:i.ZP?[function(e){var t=e.onSet,n=e.trigger,r=e.setSelf;"get"===n&&r([i.HM]),t((function(e){r([i.HM].concat((0,o.Z)(e)))}))}]:[]}),error:(0,a.cn)({key:"spacesError",default:null}),updatedAt:(0,a.cn)({key:"spacesUpdatedAt",default:""}),loaded:(0,a.cn)({key:"spacesLoaded",default:!1,effects:[function(e){var t,n=e.trigger,r=e.setSelf;null!==(t=(0,e.getLoadable)(u.Z).contents)&&void 0!==t&&t.isAnonymous&&"get"===n&&r(!0)}]})},s=n(58502),l=n(57079);function f(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function d(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:[];return(0,a._8)((function(t){var n=t.snapshot;return function(){return e.map((function(e){return n.getLoadable(m({id:e})).contents}))}}))()}},32950:function(e,t,n){"use strict";var r=n(74059),o=n(94979),a=n(46585),i=n(78710);t.Z=function(){var e=(0,o.V)(),t=(0,r.VZ)(e),n=(0,r.vu)(t||a.Eu),u=(0,r.GM)("error");return(0,i.Ly)(t)?{isFetching:!1,hasAccess:!0}:{isFetching:!!e&&!n.loaded&&!u&&!n.error,hasAccess:n.loaded&&!u&&!n.error}}},94979:function(e,t,n){"use strict";n.d(t,{Z:function(){return Z},V:function(){return P}});var 
r=n(93433),o=n(4942),a=n(45987),i=n(15861),u=n(29439),c=n(64687),s=n.n(c),l=(n(85827),n(41539),n(25387),n(2490),n(72608),n(66992),n(70189),n(78783),n(88921),n(96248),n(13599),n(11477),n(64362),n(15389),n(90401),n(45164),n(91238),n(54837),n(87485),n(56767),n(76651),n(61437),n(35285),n(39865),n(33948),n(92222),n(47941),n(82526),n(57327),n(88449),n(59849),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(4480)),f=n(89250),d=n(30266),p=n(67294),v=n(74059),m="/spaces/:spaceSlug/*",g=n(13477),b=n(33335),h=n(57079),y=n(46585),S=n(16634),O=n(78710),w=["id","permissions"];function A(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function k(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{},n=t.autorun,l=void 0===n||n,f=t.polling,d=void 0===f||f;return(0,r.Z)((function(){return{key:s(e),autorun:l&&!!e,polling:d,fetch:function(){return(0,a.Q0)(e)},association:{getIds:function(){return(0,i.Ad)({id:e,key:"ids"})},getLoaded:function(){return(0,i.Ad)({id:e,key:"loaded"})},getUpdatedAt:function(){return(0,i.Ad)({id:e,key:"updatedAt"})}},sort:u.y,getResource:o.ZB,pollingOptions:{pollingInterval:69e3},force:!c.ZP}}),[e])}},22648:function(e,t,n){"use strict";n.d(t,{m:function(){return o}});var r=n(14348),o=function(){return(0,r.Z)("(max-width: 767px)")}},5429:function(e,t,n){"use strict";var r=n(29439),o=(n(41539),n(88674),n(17727),n(67294)),a=function(){};t.Z=function(e,t){var n=(0,o.useMemo)(e,t),i=n.fetch,u=n.enabled,c=void 0===u||u,s=n.initialValue,l=n.onFail,f=void 0===l?a:l,d=n.onSettle,p=void 0===d?a:d,v=n.onSuccess,m=void 0===v?a:v,g=n.isDefaultLoading,b=void 0!==g&&g,h=(0,o.useState)(s),y=(0,r.Z)(h,2),S=y[0],O=y[1],w=(0,o.useState)(b),A=(0,r.Z)(w,2),k=A[0],P=A[1],Z=(0,o.useState)(null),E=(0,r.Z)(Z,2),j=E[0],x=E[1],C=(0,o.useRef)(!0);return(0,o.useEffect)((function(){if(c){O(s),P(!0),x(null);var e=i();return e.then((function(e){var t=e.data;C.current&&(O(t),m(t))})).catch((function(e){C.current&&(e.isCancel||(x(e),f(e)))})).finally((function(){C.current&&(P(!1),p())})),function(){var t;return null===e||void 0===e||null===(t=e.cancel)||void 0===t?void 0:t.call(e)}}}),t),(0,o.useEffect)((function(){return function(){return C.current=!1}}),[]),[S,k,j]}},6308:function(e,t,n){"use strict";n.d(t,{c:function(){return u}});n(74916),n(77601),n(85827),n(41539),n(25387),n(2490),n(72608);var r=n(89250),o=n(74059),a=n(95383),i=n(78710),u=function(){var e=(0,r.TH)().pathname;return{isIntegrationsPath:/integrate-anything$/.test(e)}};t.Z=function(){var e=(0,o.th)(),t=(0,a.WY)(e,"nodeCount").reduce((function(e,t){return t>e?t:e}),0),n=(0,a.Gn)("loaded"),r=(0,i.Ly)(e);return[!r&&n&&0===t,!r&&e&&!n,t]}},68008:function(e,t,n){"use strict";var r=n(67294);t.Z=function(e,t){var n=(0,r.useRef)();return(0,r.useCallback)((function(){var t,r;return n.current&&(null===(t=(r=n.current).cancel)||void 0===t||t.call(r)),n.current=e.apply(void 0,arguments),n.current}),t)}},23109:function(e,t,n){"use strict";var r=n(29439),o=(n(38862),n(47941),n(67294)),a=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",n=localStorage.getItem(e);return n?JSON.parse(n):t};t.Z=function(e,t){var n=(0,o.useState)((function(){return a(e,t)})),i=(0,r.Z)(n,2),u=i[0],c=i[1];return(0,o.useEffect)((function(){return localStorage.setItem(e,JSON.stringify(u))}),[u]),[u,c]}},14348:function(e,t,n){"use strict";var r=n(29439),o=n(67294),a=function(e){var 
t;return"function"===typeof(null===(t=window)||void 0===t?void 0:t.matchMedia)&&window.matchMedia(e).matches};t.Z=function(e){var t=(0,o.useState)((function(){return a(e)})),n=(0,r.Z)(t,2),i=n[0],u=n[1];function c(){u(a(e))}return(0,o.useEffect)((function(){if("function"===typeof window.matchMedia){var t=window.matchMedia(e);return c(),t.addListener?t.addListener(c):t.addEventListener("change",c),function(){t.removeListener?t.removeListener(c):t.removeEventListener("change",c)}}}),[e]),i}},9872:function(e,t,n){"use strict";var r=n(67294);t.Z=function(){var e=(0,r.useRef)(!1);return(0,r.useEffect)((function(){return e.current=!0,function(){return e.current=!1}}),[]),e}},93017:function(e,t,n){"use strict";var r=n(4942),o=(n(41539),n(15581),n(2490),n(34514),n(54747),n(47941),n(82526),n(57327),n(88449),n(59849),n(38880),n(49337),n(33321),n(69070),n(67294)),a=n(52631),i=n(74855);function u(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function c(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:"default",t=(0,o.useRef)({}),n=(0,o.useCallback)((function(n){var r=(0,i.sc)(c(c({},n),{},{success:!0}));t.current[e]&&a.Z.dismiss(t.current[e]),t.current[e]=a.Z.success(r)}),[]),r=(0,o.useCallback)((function(n){var r,o=null===n||void 0===n||null===(r=n.response)||void 0===r?void 0:r.data,u=(0,i.t_)(null!==o&&void 0!==o&&o.errorMessage?o:c({errorMessage:n.message,errorMsgKey:n.message},n));t.current[e]&&a.Z.dismiss(t.current[e]),t.current[e]=a.Z.error(u)}),[]),u=(0,o.useCallback)((function(e){return e.forEach(r)}),[]);return[n,r,u]}},76201:function(e,t,n){"use strict";n.d(t,{o:function(){return a}});var r=n(67294),o=n(89250),a=function(){var e,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:location.href;null!==(e=window.posthog)&&void 0!==e&&e.__loaded&&window.posthog.capture("$pageview",{$current_url:t})};t.Z=function(){var e=(0,o.TH)().pathname;(0,r.useEffect)((function(){a()}),[e])}},95192:function(e,t,n){"use strict";n.d(t,{Z:function(){return c},$:function(){return u}});var r=n(24678),o=n(50308),a=n.n(o),i=function(e){if(!e)throw new Error("Please pass a name for the idb store");var t=(0,r.MT)(e,"".concat(e,"-cache")),n=function(e,n){return(0,r.t8)(e,function(e){return{value:e,timestamp:Date.now()}}(n),t).catch(a())};return{store:t,set:n,get:function(e){var o,i=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},u=i.fetch,c=i.maxAge,s=void 0===c?86400:c,l=(0,r.U2)(e,t).then((function(t){var r=1e3*s;return t&&t.timestamp+r>Date.now()?t.value:u?(o=u()).then((function(t){return n(e,t),t})):null})).catch(a());return l.abort=function(){return o&&o.abort()},l},clear:function(){return(0,r.ZH)(t).catch(a())},del:function(e){return(0,r.IV)(e,t).catch(a())}}}("netdata"),u=86400,c=function(e,t){var n,r=(arguments.length>2&&void 0!==arguments[2]?arguments[2]:{}).maxAge,o=function(){if(n){var e=new Error("rejected");throw e.isCancel=!0,e}},a=i.get(e,{maxAge:r}).then((function(e){if(o(),e)return t(e)})).then((function(){return o(),function(t){return i.set(e,t)}}));return a.abort=function(){n=!0},a}},71172:function(e,t,n){"use strict";n.d(t,{c:function(){return u}});var r=n(15861),o=n(64687),a=n.n(o),i=n(95192),u=function(){var e=(0,r.Z)(a().mark((function e(t){var n,r,o,u,c;return a().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return n=t.key,r=t.handleResults,u=function(e){return 
o={updatedAt:"",results:r(e)}},e.next=4,(0,i.Z)(n,u);case 4:return c=e.sent,e.abrupt("return",c(o));case 6:case"end":return e.stop()}}),e)})));return function(t){return e.apply(this,arguments)}}()},19368:function(e,t,n){"use strict";n.d(t,{P:function(){return o}});var r=n(13477),o=function(){var e=(0,r.jr)();return"user.".concat(e,".")}},68035:function(e,t,n){"use strict";var r=n(45987),o=n(15861),a=n(29439),i=n(93433),u=n(4942),c=n(64687),s=n.n(c),l=(n(85827),n(41539),n(25387),n(2490),n(72608),n(2707),n(57327),n(88449),n(59849),n(26699),n(32023),n(66992),n(70189),n(78783),n(88921),n(96248),n(13599),n(11477),n(64362),n(15389),n(90401),n(45164),n(91238),n(54837),n(87485),n(56767),n(76651),n(61437),n(35285),n(39865),n(33948),n(92222),n(15581),n(34514),n(54747),n(21249),n(57640),n(9924),n(88674),n(47941),n(82526),n(38880),n(49337),n(33321),n(69070),n(67294)),f=n(4480),d=n(41331),p=n(6557),v=n.n(p),m=n(29283),g=n(9058),b=n(16634),h=n(46189),y=n(13477),S=n(95192),O=n(19368),w=n(93017),A=["fetchResources","onReceive","getUpdatedAt","getError","clearRef","onFail"],k=["polling","fetch","association","pollingOptions","keepPolling"],P=["key"];function Z(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function E(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:v();return e.reduce((function(e,n){return n.loaded=!0,"deleted"===n.state||n.isDeleted?e.deleted.push(n.id):(e.byId[n.id]=E(E({},t(n)),n),e.effected.push(n.id)),e}),{deleted:[],effected:[],byId:{}})}(o,l),P=k.effected,Z=k.deleted,j=k.byId,x=function(e){return e in j?j[e]:n(c(e))},C=[];r(m(),(function(e){return!a&&w||!w?(C=P.length?P:C,b&&C.sort((function(e,t){return b(x(e),x(t))})),C):(Z.length&&(C=e.filter((function(e){return!Z.includes(e)}))),a&&P.length&&(C=(0,i.Z)(new Set([].concat((0,i.Z)(e),(0,i.Z)(P)))),b&&C.sort((function(e,t){return b(x(e),x(t))}))),(0,d.ZP)(C,e)?e:C)})),s?r(s(),{values:j,merge:!0}):P.forEach((function(e){return r(c(e),(function(t){return E(E({},t),{},{loaded:!0},j[e])}))})),A(),h&&h({results:C.map(x),updatedAt:u})}}}});t.Z=function(e,t){var n=(0,O.P)(),u=(0,y.Iy)("isAnonymous"),c=(0,l.useMemo)((function(){var t=e(),o=t.key,a=(0,r.Z)(t,P);return E({key:"".concat(n).concat(o)},a)}),[e].concat((0,i.Z)(t),[u])),d=function(e){var t=(0,g.e)(),n=(0,l.useMemo)((function(){return t?t.getRoot():null}),[t]),i=(0,m.Z)(),u=(0,a.Z)(i,2)[1],c=(0,w.Z)(e.key),d=(0,a.Z)(c,2)[1],p=(0,f._8)((function(e){var t=e.snapshot,n=e.set;return function(){var e=(0,o.Z)(s().mark((function e(r,o){var a,i,u,c;return s().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return a=o.getError,i=o.onFail,u=Date.now(),e.next=4,t.getPromise(j);case 4:if(c=e.sent,!(r.isCancel||c+1e4>u)){e.next=7;break}throw r;case 7:throw a&&n(a(),(0,b.Z)(r)),n(j,u),i?i(r):d(r),r;case 11:case"end":return e.stop()}}),e)})));return function(t,n){return e.apply(this,arguments)}}()}),[e]),v=(0,f._8)((function(e){var t=e.snapshot,n=e.set;return function(){var e=(0,o.Z)(s().mark((function e(a,i){var u,c,l,f,d,m,g,b,h,y;return s().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return u=i.fetchResources,c=i.onReceive,l=i.getUpdatedAt,f=i.getError,d=i.clearRef,m=i.onFail,g=(0,r.Z)(i,A),e.next=3,t.getPromise(l());case 3:return b=e.sent,h=function(){return d.promise=u(b),d.promise.catch(function(){var e=(0,o.Z)(s().mark((function e(t){return s().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return 
e.abrupt("return",p(t,{onFail:m,getError:f}));case 1:case"end":return e.stop()}}),e)})));return function(t){return e.apply(this,arguments)}}())},y=function(e){var t=Array.isArray(e),r=t?e:e.results,o=t?"":e.updatedAt;return null===c||void 0===c||c(E(E({},t?{results:r}:E({results:[]},e)),{},{lastUpdated:b})),n(x(),E({fetchResources:u,results:r,wasDiff:!!b,updatedAt:o,getUpdatedAt:l,getError:f,clearRef:d,onFail:m},g)),new Promise((function(e){return setTimeout(e,0)}))},e.abrupt("return",h().then((function(){var e=(arguments.length>0&&void 0!==arguments[0]?arguments[0]:{}).data;return y(void 0===e?{}:e)})).catch((function(e){if(e.isCancel||1===a)throw e;return v(--a,E({fetchResources:u,onReceive:c,getUpdatedAt:l,getError:f,clearRef:d,onFail:m},g))})));case 7:case"end":return e.stop()}}),e)})));return function(t,n){return e.apply(this,arguments)}}()}),[e]);return(0,l.useCallback)((function(t){var o=e.polling,a=void 0===o||o,i=e.fetch,c=e.association,s=e.pollingOptions,l=e.keepPolling,f=(0,r.Z)(e,k),d={timeoutId:null,animationFrameId:null,promise:null,killed:!1},p=function(){return v(2,E(E({fetchResources:i,saveToCache:t,polling:a,clearRef:d},c),f))},m=function e(){d.killed||(d.timeoutId=setTimeout((function(){d.animationFrameId=requestAnimationFrame((function(){return!l&&n&&(n.getAttribute("hovering")||n.getAttribute("paused"))?e():l||u.current||n&&n.getAttribute("autofetchOnWindowBlur")?void p().then(e).catch((function(t){return!t.isCancel&&e()})):e()}))}),(null===s||void 0===s?void 0:s.pollingInterval)||h.Z.pollingInterval))};return p().then(a?m:null).catch((function(e){return a&&!e.isCancel&&m()})),function(){var e,t;d.killed=!0,null===(e=d.promise)||void 0===e||null===(t=e.cancel)||void 0===t||t.call(e),cancelAnimationFrame(d.animationFrameId),clearTimeout(d.timeoutId)}}),[e])}(c),p=(0,f._8)((function(e){var t=e.set;return function(e){var n,r=e.results,o=void 0===r?[]:r,a=e.updatedAt,i=void 0===a?"":a,u=(Array.isArray(o)&&o.length)>0;return null===(n=c.onReceive)||void 0===n||n.call(c,{results:o,lastUpdated:""}),t(x(),E(E(E({results:o,updatedAt:u?i:""},c),c.association),{},{restored:u})),new Promise((function(e){return setTimeout(e,0)}))}}),t),v=(0,f._8)((function(e){var t=e.snapshot;return function(){var e=c.association.getLoaded,n=c.key,r=c.cache,o=void 0===r||r,a=c.maxCacheAge,i=c.skip,u=t.getLoadable(e()).contents;if(!i&&(u||!o))return d();var s,l=(0,S.Z)(n,p,{maxAge:a||S.$});return l.then((function(e){s=d(e)})).catch((function(){})),function(){var e;i||(l.abort(),null===(e=s)||void 0===e||e())}}}),t);return(0,l.useEffect)((function(){var e=c.autorun,t=void 0===e||e,n=c.force,r=void 0!==n&&n,o=c.skip;if((!u||r)&&!(void 0!==o&&o)&&t)return v()}),[u,v]),d}},46667:function(e,t,n){"use strict";var r=n(29439),o=n(67294);t.Z=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.on,a=t.off,i=t.toggle,u=(0,o.useState)(!!e),c=(0,r.Z)(u,2),s=c[0],l=c[1];return[s,(0,o.useCallback)((function(e){return l((function(t){var r="boolean"===typeof e?e:!t;return i&&i(r),n&&r&&n(),a&&!r&&a(),r}))}),[i,n,a]),(0,o.useCallback)((function(){l(!0),n&&n()}),[n]),(0,o.useCallback)((function(){l(!1),a&&a()}),[a])]}},65565:function(e,t,n){"use strict";var r=n(67294),o=n(9872);t.Z=function(e,t){var n=(0,o.Z)();(0,r.useEffect)((function(){if(n.current)return e()}),t)}},4474:function(e,t,n){"use strict";n.d(t,{Pf:function(){return b},rZ:function(){return O}});var 
r=n(29439),o=(n(41539),n(64211),n(2490),n(41874),n(67294)),a=n(89250),i=n(12599),u=n(74059),c=n(95383),s=n(32950),l=n(53338),f=n(5934),d=n(37518),p=n(47133),v=n(46189),m=n(33427),g=n(78710),b=function(){var e=(0,a.s0)(),t=(0,a.UO)(),n=(0,d.A3)(),r=(0,p.r0)(t.dashboardSlug),i=(0,f.L)(),u=(0,o.useRef)(!1);(0,o.useEffect)((function(){!0===i&&(r?u.current=!0:(e("".concat(n,"/dashboards")),u.current||(0,m.cH)()))}),[i,r])},h=v.Z.demoSlug,y=v.Z.demoDefaultRoomViews,S=v.Z.defaultRoomView,O=function(e){var t;return e.length?e.some((function(e){return e.slug===l.i_}))?l.i_:null===(t=e[0])||void 0===t?void 0:t.slug:null};t.ZP=function(){(0,a.TH)();var e=(0,a.s0)(),t=(0,a.bS)({path:"/spaces/:spaceSlug/*"}),n=(0,a.bS)({path:"/spaces/:spaceSlug/rooms/:roomSlug/*"}),l=(0,a.bS)({path:"/*"}),f=(null===n||void 0===n?void 0:n.params)||(null===t||void 0===t?void 0:t.params)||(null===l||void 0===l?void 0:l.params),p=!!(0,a.bS)({path:"/spaces/:spaceSlug/settings/*"}),v=(0,u.VZ)(f.spaceSlug),m=(0,u.GM)("loaded"),b=(0,d.HM)(v,f.roomSlug),w=(0,c.Gn)("loaded"),A=(0,u.Iw)(),k=(0,r.Z)(A,1)[0],P=(0,u.vu)(k,"slug"),Z=(0,s.Z)().isFetching,E=(0,c.J7)(),j=(0,u.$v)(),x=(0,u.uk)();(0,o.useEffect)((function(){if(!Z&&(g.ZP||m&&"restored"!==m)&&(!p||!v))if(v&&x!==f.spaceSlug)j(f.spaceSlug);else if((!v||w)&&(!v||!b)){var t=v?f.spaceSlug:P,n=b?null===f||void 0===f?void 0:f.roomSlug:O(E);e(v&&w&&!n?(0,i.Gn)("/spaces/:spaceSlug/no-rooms",{spaceSlug:t}):function(e,t){if(!t)return(0,i.Gn)("/spaces/:spaceSlug/",{spaceSlug:e});var n=h===e?y[t]||y.default:g.ZP&&e===g.Xh&&t===g.Fb?g.vT:S;return(0,i.Gn)("/spaces/:spaceSlug/rooms/:roomSlug/".concat(n),{spaceSlug:e,roomSlug:t})}(t,n),{replace:!0})}}),[p,Z,f.roomSlug,f.spaceSlug,b,w,!!E.length,m,v,P,x])}},29283:function(e,t,n){"use strict";var r=n(29439),o=n(67294);t.Z=function(){var e=!(arguments.length>0&&void 0!==arguments[0])||arguments[0],t=(0,o.useState)(e),n=(0,r.Z)(t,2),a=n[0],i=n[1],u=(0,o.useRef)(a);return(0,o.useEffect)((function(){var e=function(){u.current=!1,i(!1)};window.addEventListener("blur",e);var t=function(){u.current=!0,i(!0)};return window.addEventListener("focus",t),function(){window.removeEventListener("blur",e),window.removeEventListener("focus",t)}}),[]),[a,u]}},85732:function(e,t,n){"use strict";n(66992),n(41539),n(88674),n(78783),n(33948),n(57327),n(88449),n(2490),n(59849),n(92222),n(74916),n(77601);var r=n(67294),o=n(2145),a=n(20745),i=n(4480),u=n(79655),c=n(89250),s=n(27923),l=n(95659),f=n(9058),d=n(15794),p=n(92501),v=n(22648),m=n(78266),g=n(78710),b=(n(4723),n(82772),Boolean("localhost"===window.location.hostname||"[::1]"===window.location.hostname||window.location.hostname.match(/^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/)));function h(e,t){navigator.serviceWorker.register(e).then((function(e){n.g.registration=e,e.onupdatefound=function(){var n=e.installing;null!=n&&(n.onstatechange=function(){"installed"===n.state&&(navigator.serviceWorker.controller?(console.log("New content is available and will be used when all tabs for this page are closed. 
See http://bit.ly/CRA-PWA."),t&&t.onUpdate&&t.onUpdate(e)):(console.log("Content is cached for offline use."),t&&t.onSuccess&&t.onSuccess(e)))})}})).catch((function(e){console.error("Error during service worker registration:",e)}))}var y=n(4942),S=(n(64765),n(23157),n(15306),n(47042),n(24603),n(88386),n(39714),n(47941),n(82526),n(38880),n(15581),n(34514),n(54747),n(49337),n(33321),n(69070),n(47323));function O(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function w(e){for(var t=1;t=400&&c<=599?null:e},ignoreErrors:["Non-Error exception captured","Non-Error promise rejection captured","Request aborted",/ResizeObserver/,"timeout exceeded","this.get_config is not a function",/IndexSizeError/,"Invalid time value","not_found",/A mutation operation was attempted on a database that did not allow mutations/,/No data for this period/,/Network Error/]}),window.addEventListener("beforeunload",(function(){try{l.Gd().getClient().getOptions().enabled=!1}catch(e){}}))}),500):console.log("Running in development mode version:",C.u2,C.i8);var J,z=(0,d.k)(p.NY,"light"),W=function(){return null},V=function(){var e=(0,v.m)();return r.createElement(i.Wh,null,r.createElement(r.Suspense,{fallback:""},r.createElement(d.Z,null,r.createElement(f.Z,null,r.createElement(p.ZP,{fallback:z},r.createElement(r.Suspense,{fallback:""},r.createElement(B,{isScreenSmall:e}),r.createElement(G,null),r.createElement(F,null),r.createElement(u.M,{className:"router",history:x,basename:g.ZP?g.se:"/"},r.createElement(c.Z5,null,r.createElement(c.AW,{path:"/webviews/*",element:r.createElement(W,null)}),r.createElement(c.AW,{path:"*",element:r.createElement(L,null)})),r.createElement(r.Suspense,{fallback:r.createElement(m.Z,null)},r.createElement(c.Z5,null,!g.ZP&&r.createElement(r.Fragment,null,r.createElement(c.AW,{path:"/sign-in",element:r.createElement(T,null)}),r.createElement(c.AW,{path:"/trust",element:r.createElement(D,null)}),r.createElement(c.AW,{path:"/sign-up",element:r.createElement(c.Fg,{to:"/sign-in",replace:!0})}),r.createElement(c.AW,{path:"/sign-up/verify",element:r.createElement(_,null)}),r.createElement(c.AW,{path:"/sign-in/magic-link-sent",element:r.createElement(R,null)}),r.createElement(c.AW,{path:"/sign-in/mobile-app",element:r.createElement(N,null)}),r.createElement(c.AW,{path:"/webviews/*",element:r.createElement(K,null)})),r.createElement(c.AW,{path:"/cloud/origin/callback",element:r.createElement(M,null)}),r.createElement(c.AW,{path:"/redirects",element:r.createElement(I,null)}),r.createElement(c.AW,{path:"*",element:r.createElement(U,null)}))))))))))};(0,a.s)(document.getElementById("app")).render(r.createElement(V,null)),"serviceWorker"in navigator&&window.addEventListener("load",(function(){var e="".concat(window.envSettings.webpackPublicPath,"/sw.js");b?(function(e,t){fetch(e).then((function(n){var r=n.headers.get("content-type");404===n.status||null!=r&&-1===r.indexOf("javascript")?navigator.serviceWorker.ready.then((function(e){e.unregister().then((function(){window.location.reload()}))})):h(e,t)})).catch((function(){console.log("No internet connection found. App is running in offline mode.")}))}(e,J),navigator.serviceWorker.ready.then((function(){console.log("This web app is being served cache-first by a service worker. 
To learn more, visit http://bit.ly/CRA-PWA")}))):h(e,J)}))},95482:function(e,t,n){"use strict";n(74916),n(15306);n.p=window.envSettings.isAgent&&!window.envSettings.webpackPublicPath?window.envSettings.agentApiUrl+"/v2/":"".concat(window.envSettings.webpackPublicPath||"","/").replace(/([^:]\/)\/+/g,"$1")},32808:function(e,t,n){"use strict";n(66992),n(41539),n(88674),n(78783),n(33948);window.envSettings.tracking&&(window.posthog={},n.e(9893).then(n.bind(n,99893)).then((function(e){window.posthog=e.default,window.posthog.init(window.envSettings.posthogToken,{api_host:"https://app.posthog.com"})})),function(e,t,n,r,o){e[r]=e[r]||[],e[r].push({"gtm.start":(new Date).getTime(),event:"gtm.js"});var a=t.getElementsByTagName(n)[0],i=t.createElement(n);i.async=!0,i.src="https://www.googletagmanager.com/gtm.js?id=GTM-N6CBMJD",a.parentNode.insertBefore(i,a)}(window,document,"script","dataLayer"))},64358:function(e,t,n){"use strict";n.d(t,{W:function(){return o}});n(92222);var r=n(62200),o=function(e){var t=e.spaceId;return"".concat(r.li,"/spaces/").concat(t,"/rooms?show_all=true")}},24654:function(){}},function(e){e.O(0,[2143],(function(){e.E(9768),e.E(7154)}),5);var t=function(t){return e(e.s=t)};e.O(0,[1967,2533,6152,8099,4586,1282,9513,5158,6187,2406],(function(){return t(95482),t(32808),t(85732)}));e.O()}]); \ No newline at end of file diff --git a/web/gui/v2/apple-app-site-association b/web/gui/v2/apple-app-site-association new file mode 100644 index 00000000000000..c4593e0c78a293 --- /dev/null +++ b/web/gui/v2/apple-app-site-association @@ -0,0 +1,11 @@ +{ + "applinks": { + "apps": [], + "details": [ + { + "appID": "2T8GZ986CU.cloud.netdata.ios", + "paths": [ "*" ] + } + ] + } +} diff --git a/web/gui/v2/bundlesManifest.6.json b/web/gui/v2/bundlesManifest.6.json index 4de280af2702d2..9e5e5832429203 100644 --- a/web/gui/v2/bundlesManifest.6.json +++ b/web/gui/v2/bundlesManifest.6.json @@ -1,18 +1,15 @@ { - "app.css": "/app.0917ff2bf5d3b8b0678d.css", - "app.js": "/app.7bf3bd12482ad161443d.js", - "runtime.js": "/runtime.e3716b90b888609b7a5c.js", - "npm.react.dom.js": "/npm.react.dom.6431597f0353cbef2a34.js", - "3173.js": "/3173.aedc1e477983499117c7.js", - "2833.js": "/2833.78752757c7ac33d196dc.js", - "5623.js": "/5623.d08ebc475a57a44d926c.js", - "3495.js": "/3495.7af81a22f9d135da8cbe.js", - "5176.js": "/5176.9ecb50692b5be2b8a5e2.js", - "1282.js": "/1282.f65cc3329e7e3eb8e645.js", - "3032.js": "/3032.7b4a2db28af84cd77c29.js", - "4532.js": "/4532.0b0105ffbdd6db6f5d9a.js", - "7707.js": "/7707.d32bdcf8038b7eebaa97.js", - "4523.js": "/4523.e41d6aac9a6433f9efb2.js", - "4480.js": "/4480.acae0ad582eb5265622a.js", - "4890.js": "/4890.24af5fbe5015c0b06c90.js" + "app.css": "/app.cb2e9f9a81cf9533384e.css", + "app.js": "/app.efc4ebf75cd13cc4f5f4.js", + "runtime.js": "/runtime.e7f678759af562297436.js", + "npm.react.dom.js": "/npm.react.dom.443419261632ac7d4e78.js", + "2406.js": "/2406.f2927edc2116c8e03b3b.js", + "2533.js": "/2533.2b6849df953c0d8de977.js", + "6152.js": "/6152.d7e1e2bfdb9cb0ae978f.js", + "8099.js": "/8099.4838cb5caf85574b5c9d.js", + "4586.js": "/4586.b2ee472b3fc901d6c88b.js", + "1282.js": "/1282.8f29fe3a597e5ef695e5.js", + "9513.js": "/9513.68ac17c54e2a98d13112.js", + "5158.js": "/5158.76b96a61b88ac13e64fb.js", + "6187.js": "/6187.0b79ef0afe565349e1bc.js" } \ No newline at end of file diff --git a/web/gui/v2/editor.ae2bfa9dd703149cdd28.chunk.js b/web/gui/v2/editor.ae2bfa9dd703149cdd28.chunk.js new file mode 100644 index 00000000000000..ad178beefef994 --- /dev/null +++ 
b/web/gui/v2/editor.ae2bfa9dd703149cdd28.chunk.js @@ -0,0 +1 @@ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="da769308-9519-426d-8881-f7ca001ea710",e._sentryDebugIdIdentifier="sentry-dbid-da769308-9519-426d-8881-f7ca001ea710")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[1189],{8937:function(e,t,n){n.r(t);var o=n(87462),d=n(45987),a=n(67294),i=n(5575),r=n(93416),l=n(71893),f=["autoFocus"],u=(0,l.default)(i.default).attrs({"data-testid":"editor"}).withConfig({displayName:"editor__StyledEditor",componentId:"sc-c7iwfz-0"})(["position:relative;outline:none;word-wrap:break-word;&.medium-editor-placeholder:after{color:",";content:attr(data-placeholder) !important;font-style:italic;left:0;position:absolute;top:0;white-space:pre;padding:inherit;margin:inherit;}"],(0,r.getColor)("border"));t.default=function(e){var t=e.autoFocus,n=(0,d.Z)(e,f),i=(0,a.useRef)();return(0,a.useLayoutEffect)((function(){if(t){var e=i.current.medium.elements[0].lastChild;e.scrollIntoView(),i.current.medium.selectElement(e),document.getSelection().collapseToEnd()}}),[]),a.createElement(u,(0,o.Z)({ref:i},n))}}}]); \ No newline at end of file diff --git a/web/gui/v2/index.html b/web/gui/v2/index.html index 7118c1ba7f85bf..f607322c1f5bc4 100644 --- a/web/gui/v2/index.html +++ b/web/gui/v2/index.html @@ -24,13 +24,15 @@ apiUrl: "https://app.netdata.cloud", cloudUrl: "https://app.netdata.cloud", demoSlug: "netdata-demo", - demoFavourites: {"postgresql":["postgres"],"redis":["redis"],"dns-query":["dns_query"],"http-endpoints":["httpcheck"],"nginx":["web_log","nginx"],"apache":["apache"],"host-reachability":["ping"],"cassandra":["cassandra"],"coredns":["coredns"],"logind":["logind"],"iis":["iis"],"active-directory":["ad"],"windows":["windows","ad","iis","mssql","exchange","netframework"],"docker":["cgroup","docker"]}, + demoFavourites: {"postgresql":["postgres"],"redis":["redis"],"dns-query":["dns_query"],"http-endpoints":["httpcheck"],"nginx":["web_log","nginx"],"apache":["apache"],"host-reachability":["ping"],"cassandra":["cassandra"],"coredns":["coredns"],"logind":["logind"],"iis":["iis"],"active-directory":["ad"],"windows":["windows","ad","iis","mssql","exchange","netframework"],"docker":["cgroup","docker"],"ups":["upsd"]}, webpackPublicPath: "", agentApiUrl: searchParams.get("agent") || getBasename(), - version: "6.29.0", + posthogToken: "phc_hnhlqe6D2Q4IcQNrFItaqdXJAxQ8RcHkPAFAp74pubv", + version: "6.66.1", tracking: false, cookieDomain: ".netdata.cloud", - onprem: false + onprem: false, + nodeEnv: "production" } function loadStyle(url, { media, insertAfter: aref, insertBefore: bref, rel, type } = {}) { @@ -66,7 +68,7 @@ } loadStyle(window.envSettings.agentApiUrl + "/v2/static/splash.css") - loadStyle(window.envSettings.agentApiUrl + "/v2/favicon.ico", {rel: "icon", type: "image/x-icon"})

    [splash-screen markup from this index.html hunk, visible text only (HTML tags and +/- diff markers not recoverable): "Welcome to Netdata", "Loading latest Netdata UI...", "We couldn't load the latest Netdata UI. You can try again", "Or you can load the old single node dashboard or a local copy of Netdata UI" — the same text appears once for the removed side and once for the added side of the hunk]
    \ No newline at end of file + }).catch(() => {}) \ No newline at end of file diff --git a/web/gui/v2/local-agent.html b/web/gui/v2/local-agent.html index 7118c1ba7f85bf..f607322c1f5bc4 100644 --- a/web/gui/v2/local-agent.html +++ b/web/gui/v2/local-agent.html @@ -24,13 +24,15 @@ apiUrl: "https://app.netdata.cloud", cloudUrl: "https://app.netdata.cloud", demoSlug: "netdata-demo", - demoFavourites: {"postgresql":["postgres"],"redis":["redis"],"dns-query":["dns_query"],"http-endpoints":["httpcheck"],"nginx":["web_log","nginx"],"apache":["apache"],"host-reachability":["ping"],"cassandra":["cassandra"],"coredns":["coredns"],"logind":["logind"],"iis":["iis"],"active-directory":["ad"],"windows":["windows","ad","iis","mssql","exchange","netframework"],"docker":["cgroup","docker"]}, + demoFavourites: {"postgresql":["postgres"],"redis":["redis"],"dns-query":["dns_query"],"http-endpoints":["httpcheck"],"nginx":["web_log","nginx"],"apache":["apache"],"host-reachability":["ping"],"cassandra":["cassandra"],"coredns":["coredns"],"logind":["logind"],"iis":["iis"],"active-directory":["ad"],"windows":["windows","ad","iis","mssql","exchange","netframework"],"docker":["cgroup","docker"],"ups":["upsd"]}, webpackPublicPath: "", agentApiUrl: searchParams.get("agent") || getBasename(), - version: "6.29.0", + posthogToken: "phc_hnhlqe6D2Q4IcQNrFItaqdXJAxQ8RcHkPAFAp74pubv", + version: "6.66.1", tracking: false, cookieDomain: ".netdata.cloud", - onprem: false + onprem: false, + nodeEnv: "production" } function loadStyle(url, { media, insertAfter: aref, insertBefore: bref, rel, type } = {}) { @@ -66,7 +68,7 @@ } loadStyle(window.envSettings.agentApiUrl + "/v2/static/splash.css") - loadStyle(window.envSettings.agentApiUrl + "/v2/favicon.ico", {rel: "icon", type: "image/x-icon"})

    [splash-screen markup from this local-agent.html hunk, visible text only and identical to the index.html hunk above: "Welcome to Netdata", "Loading latest Netdata UI...", and the fallback message offering the old single node dashboard or a local copy of the Netdata UI]
    \ No newline at end of file + }).catch(() => {}) \ No newline at end of file diff --git a/web/gui/v2/npm.react.dom.443419261632ac7d4e78.js b/web/gui/v2/npm.react.dom.443419261632ac7d4e78.js new file mode 100644 index 00000000000000..cd2dbc0774571a --- /dev/null +++ b/web/gui/v2/npm.react.dom.443419261632ac7d4e78.js @@ -0,0 +1,2 @@ +/*! For license information please see npm.react.dom.443419261632ac7d4e78.js.LICENSE.txt */ +!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="f6a435a9-dd1f-4fb3-8007-5bc1aa5eda29",e._sentryDebugIdIdentifier="sentry-dbid-f6a435a9-dd1f-4fb3-8007-5bc1aa5eda29")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"8b7b01b6c705cf50f8050ce0fb8fc6b58fe68567"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[1967],{64448:function(e,n,t){var r=t(67294),l=t(63840);function a(e){for(var n="https://reactjs.org/docs/error-decoder.html?invariant="+e,t=1;t